import random
import copy
import math
import time
import matplotlib.pyplot as plt  # Retained (with patches/display) for optional visualization
import matplotlib.patches as patches
from IPython.display import display
from collections import deque

# Logging function
def log_to_file(log_file, message):
    with open(log_file, "a") as file:
        file.write(message + "\n")

log_file = "simulation_log.txt"

class Resource:
    def __init__(self, scarcity, renewable):
        self.quantity = random.uniform(0.5, 1.0)  # Initial quantity
        self.scarcity = scarcity
        self.renewable = renewable

class PhysicsElement:
    def __init__(self, effect):
        self.effect = effect

# Predator class
class Predator:
    def __init__(self, x, y, environment):
        self.position = (x, y)
        self.environment = environment
        self.hunger = 0
        self.sight_range = 4  # Sight range for predators
        self.max_lifetime = 10  # Maximum lifetime for predators
        self.lifetime = 0
        self.conflicts_won = 0

    def beat_unit(self, unit):
        log_to_file(log_file, f"Predator at {self.position} beats unit at {unit.position}.")
        self.conflicts_won += 1

    def move(self):
        # Pick one random step and wrap around the toroidal grid
        dx, dy = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
        self.position = (
            (self.position[0] + dx) % self.environment.width,
            (self.position[1] + dy) % self.environment.height,
        )

    def check_prey(self, units):
        # Check if there are prey units within sight range
        for unit in units:
            distance = math.sqrt((self.position[0] - unit.position[0]) ** 2 +
                                 (self.position[1] - unit.position[1]) ** 2)
            if distance <= self.sight_range:
                return True  # Predator has detected prey
        return False

    def consume_prey(self, unit):
        # Consume a prey unit
        self.hunger -= 3.5
        unit.hunger = 1.0  # Prey unit's hunger is reset
        unit.position = (random.randint(0, self.environment.width - 1),
                         random.randint(0, self.environment.height - 1))  # Prey unit respawns elsewhere

    def update(self, units):
        # Predator behavior update
        self.move()
        self.lifetime += 1
        if self.lifetime >= self.max_lifetime:
            # Predator dies after reaching its maximum lifetime
            self.environment.remove_predator(self)
        elif self.check_prey(units):
            # If a prey unit shares this cell, consume it
            prey_candidates = [unit for unit in units if self.position == unit.position]
            if prey_candidates:
                prey = random.choice(prey_candidates)
                self.consume_prey(prey)

class Terrain:
    def __init__(self, movement_cost):
        self.movement_cost = movement_cost

class ResourceType:
    def __init__(self, quantity, name, scarcity, renewable, position):
        self.quantity = quantity
        self.name = name
        self.scarcity = scarcity
        self.renewable = renewable
        self.position = position

# Module-level example resource types; positions are drawn on a 10x10 grid to
# match the environment created in simulate()
resource_type1 = ResourceType(quantity=10, name="Resource1", scarcity=0.2, renewable=True,
                              position=(random.randint(0, 9), random.randint(0, 9)))
resource_type2 = ResourceType(quantity=15, name="Resource2", scarcity=0.5, renewable=False,
                              position=(random.randint(0, 9), random.randint(0, 9)))
# Add more resource types as needed
resource_types = [resource_type1, resource_type2]
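# Illustrative sketch (not called by the simulation): the toroidal wrap-around
# used by Predator.move and the obstacle-drift logic below. Taking coordinates
# modulo the grid size makes a step off one edge re-enter on the opposite
# edge. The 10x10 grid here is an assumption matching simulate().
def _demo_wraparound():
    width, height = 10, 10
    x, y = 9, 0
    dx, dy = 1, -1  # step off the right edge and the top edge at once
    wrapped = ((x + dx) % width, (y + dy) % height)
    assert wrapped == (0, 9)
    return wrapped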
class CraftedItem:
    def __init__(self):
        self.position = None  # Position of the crafted item

class Environment:
    def __init__(self, width, height, obstacle_spawn_rate, num_resource_types,
                 num_terrain_types, num_physics_elements):
        self.width = width
        self.height = height
        self.obstacles = set()
        self.obstacle_spawn_rate = obstacle_spawn_rate
        self.resources = [[0.0] * width for _ in range(height)]  # Initialize resource matrix
        # Predators currently in the environment
        self.predators = []
        # Initialize multiple resource types, each with a random position
        self.resource_types = [ResourceType(
            quantity=random.uniform(0.5, 1.0),
            name=f"Resource{i+1}",
            scarcity=random.uniform(0.1, 0.9),
            renewable=random.choice([True, False]),
            position=(random.randint(0, width - 1), random.randint(0, height - 1))
        ) for i in range(num_resource_types)]
        # Template for crafted items
        self.crafted_item_template = CraftedItem()
        # Initialize multiple terrain types
        self.terrain_types = [Terrain(movement_cost=random.uniform(0.5, 1.0))
                              for _ in range(num_terrain_types)]
        # Initialize multiple physics elements
        self.physics_elements = [PhysicsElement(effect=random.uniform(-0.5, 0.5))
                                 for _ in range(num_physics_elements)]
        # Crafted items produced so far
        self.crafted_items = []
        # Scatter initial obstacles across the grid
        for i in range(width):
            for j in range(height):
                if random.uniform(0, 1) < 0.2:  # Adjust probability as needed
                    self.obstacles.add((i, j))
        self.change_threshold = 0.7  # Threshold for abrupt environmental changes

    def remove_predator(self, predator):
        # Remove the predator from the environment if it is present
        if predator in self.predators:
            self.predators.remove(predator)

    def update(self):
        # Update resource dynamics: replenish renewables, deplete the rest
        for resource_type in self.resource_types:
            if resource_type.renewable:
                # Replenish renewable resources (rate adjustable), capped at 1.0
                resource_type.quantity = min(1.0, resource_type.quantity + 0.1)
            else:
                # Deplete non-renewable resources (rate adjustable), floored at 0.0
                resource_type.quantity = max(0.0, resource_type.quantity - 0.05)

    def cooperative_challenge(self, step):
        if step == 12:
            # Cooperative challenge: units must work together toward a common goal
            self.obstacles = {(2, 3), (4, 4), (3, 2), (5, 3)}
            goal_location = (7, 7)  # Cooperative goal location
            self.resources[goal_location[1]][goal_location[0]] = 1.0  # High-value resource at the goal

    def additional_challenges(self, step):
        if step == 15:
            # Challenge 4: units need to navigate through a narrow passage
            self.obstacles = {(2, 3), (3, 3), (4, 3)}
        elif step == 18:
            # Challenge 5: units face resource scarcity and must find hidden resources
            self.resources[7][7] = 0.0  # Remove the high-value resource at the goal
            self.resources[3][5] = 1.0  # Place a hidden resource
            self.resources[6][2] = 1.0  # Place another hidden resource
    def sequential_challenge(self, step):
        if step == 3:
            # Challenge 1: units need to navigate through a maze
            self.obstacles = {(2, 2), (2, 3), (2, 4), (3, 4), (4, 4),
                              (5, 4), (5, 3), (5, 2), (4, 2)}
        elif step == 6:
            # Challenge 2: units need to find resources scattered in a new pattern
            self.obstacles = {(2, 3), (4, 4), (3, 2), (5, 3)}
        elif step == 9:
            # Challenge 3: a combination of obstacles and resources in a larger area
            self.obstacles = {(2, 2), (2, 3), (2, 4), (3, 4), (4, 4), (5, 4),
                              (5, 3), (5, 2), (4, 2), (7, 7), (8, 8), (9, 9)}

    def update_resources(self):
        for resource_type in self.resource_types:
            for i in range(self.width):
                for j in range(self.height):
                    if (i, j) not in self.obstacles:
                        # Deplete resources over time
                        resource_type.quantity -= resource_type.scarcity * 0.02
                        # Randomly replenish resources (if renewable)
                        if resource_type.renewable and random.uniform(0, 1) < 0.1:
                            resource_type.quantity += random.uniform(0, 0.2)
                        # Ensure resource quantity stays within bounds
                        resource_type.quantity = max(0, min(1, resource_type.quantity))
        # Occasionally spawn "obstacles with food" at the positions of
        # well-stocked resource types
        if random.uniform(0, 1) < self.obstacle_spawn_rate:
            food_positions = [resource_type.position for resource_type in self.resource_types
                              if resource_type.quantity > 0.5]
            self.obstacles.update(food_positions)
        # Simulate abrupt environmental changes
        if random.uniform(0, 1) < self.change_threshold:
            self.resources = [[random.uniform(0, 1) for _ in range(self.width)]
                              for _ in range(self.height)]
        # Occasionally spawn obstacles with food on cells whose resource level is high
        if random.uniform(0, 1) < self.obstacle_spawn_rate:
            food_positions = [(i, j) for i in range(self.width) for j in range(self.height)
                              if self.resources[j][i] > 0.5]
            self.obstacles.update(food_positions)
        # Introduce a chance for dynamic changes in obstacle locations
        if random.uniform(0, 1) < 0.05 and self.obstacles:  # Adjust probability as needed
            num_obstacles_to_move = min(len(self.obstacles), random.randint(1, 5))  # Adjust range as needed
            obstacles_to_move = random.sample(list(self.obstacles), num_obstacles_to_move)
            for obstacle in obstacles_to_move:
                new_position = (
                    (obstacle[0] + random.randint(-1, 1)) % self.width,
                    (obstacle[1] + random.randint(-1, 1)) % self.height,
                )
                self.obstacles.remove(obstacle)
                self.obstacles.add(new_position)
        # Update resource dynamics and transformation
        self.update_resource_dynamics()
        self.update_resource_transformation()
        # Increase obstacle spawn rate over time
        self.obstacle_spawn_rate += 0.01

    def update_resource_dynamics(self):
        # Simulate resource depletion or replenishment based on type
        for resource_type in self.resource_types:
            for i in range(self.width):
                for j in range(self.height):
                    if (i, j) not in self.obstacles:
                        resource_type.quantity -= 0.02  # Deplete resources over time
                        if random.uniform(0, 1) < 0.1:
                            resource_type.quantity += random.uniform(0, 0.2)  # Randomly replenish resources
                        # Keep quantities within [0, 1]
                        resource_type.quantity = max(0, min(1, resource_type.quantity))

    def update_resource_transformation(self):
        # Implement a basic resource transformation mechanism
        for i in range(self.width):
            for j in range(self.height):
                if (i, j) not in self.obstacles:
                    for resource_type in self.resource_types:
                        if resource_type.quantity > 0.5 and random.uniform(0, 1) < 0.05:
                            # Transform resource into a crafted item
                            resource_type.quantity -= 0.5
                            crafted_item = copy.deepcopy(self.crafted_item_template)
                            crafted_item.position = (i, j)
                            self.crafted_items.append(crafted_item)

    def move_cost(self, position):
        # Return the movement cost of the terrain at the specified position
        return self.terrain_types[position[1] % len(self.terrain_types)].movement_cost

    def move_unit(self, unit, new_position):
        # Move the unit, treating the terrain's movement cost as the
        # probability that the move fails
        move_cost = self.move_cost(new_position)
        unit.position = new_position if random.uniform(0, 1) > move_cost else unit.position

    def apply_physics(self, unit):
        # Apply physics effects to the unit
        for physics_element in self.physics_elements:
            unit.hunger += physics_element.effect

    def get_resource_type_at_position(self, position):
        """
        Get the resource type at the given position.

        Parameters:
        - position (tuple): The (x, y) position to check.

        Returns:
        - ResourceType or None: The resource type at the given position,
          or None if no resource is present.
        """
        for resource_type in self.resource_types:
            if resource_type.position == position:
                return resource_type
        return None
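# A minimal sketch (illustrative only, never called by the simulation) of how
# Environment.update() clamps resource quantities: renewables saturate at 1.0
# and non-renewables bottom out at 0.0, so quantities always stay in [0, 1].
def _demo_resource_clamp():
    env = Environment(width=5, height=5, obstacle_spawn_rate=0.0,
                      num_resource_types=2, num_terrain_types=1, num_physics_elements=1)
    for _ in range(30):
        env.update()
    for r in env.resource_types:
        assert 0.0 <= r.quantity <= 1.0
    return [(r.name, r.renewable, r.quantity) for r in env.resource_types]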
# Unit Class
class Unit:
    def __init__(self, x, y, predators, resource_types, environment, strength=1, perception=1):
        self.position = (x, y)
        self.environment = environment
        self.hunger = 1
        self.curiosity = 0
        self.memory = set()
        self.current_goal = None
        self.sight_range = 3  # Sight-like detection range
        self.internal_complexity = 1  # Internal complexity of decision-making processes
        self.successful_foraging_actions = 0
        self.problem_solving_actions = 0  # Attribute for problem solving
        self.communication_count = 0  # Number of messages sent
        self.max_memory_size = 20  # Adjust the maximum memory size as needed
        self.abstract_messages = set()  # Abstract messages shared with other units
        self.self_reflection_threshold = 0.8  # Threshold for self-reflection
        self.communication_range = 2
        self.communication_timer = 0
        self.communicate_drive = 0.5  # Initial value; adjust as needed
        self.resource_type = None
        self.goal = None
        self.strength = strength  # Strength attribute representing combat ability
        self.perception = perception  # Perception attribute representing sensory abilities
        self.seek_food_drive = 0.5  # Initial value; adjust as needed
        self.crafted_items = set()
        self.used_resources = set()
        # Sensing flags set by sense()
        self.obstacle_sensed = False
        self.resource_sensed = False
        self.sensed_resource_type = None
        # Enhancement: Longer Memory Span
        self.past_positions = deque(maxlen=20)  # Set a suitable maximum length
        self.self_awareness_threshold = 10  # Set a default value; adjust as needed
        # Enhancement: Memory Types
        self.short_term_memory = deque(maxlen=10)
        self.long_term_memory = []
        self.predators = predators
        self.resource_types = resource_types

    def __deepcopy__(self, memo):
        # Create a new instance and copy the attributes that matter; the
        # environment, predator list, and resource types are shared by reference
        new_unit = Unit(0, 0, self.predators, self.resource_types, self.environment)
        new_unit.hunger = self.hunger
        new_unit.curiosity = self.curiosity
        new_unit.sight_range = self.sight_range
        new_unit.internal_complexity = self.internal_complexity
        # Copy other attributes as needed
        # Copy the memory set explicitly
        new_unit.memory = set(self.memory)
        return new_unit
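    # Note (illustrative): past_positions and short_term_memory are
    # collections.deque objects with a maxlen, so appending to a full deque
    # silently evicts the oldest entry, e.g.
    #   d = deque([(0, 0), (1, 0), (1, 1)], maxlen=3); d.append((2, 1))
    #   -> deque([(1, 0), (1, 1), (2, 1)])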
    def update_memory(self):
        # Store the current position in both short-term memory and past positions
        self.short_term_memory.append(self.position)
        self.past_positions.append(self.position)
        # Move short-term memory to long-term memory periodically
        if random.uniform(0, 1) < 0.1:  # Adjust probability as needed
            self.long_term_memory.extend(self.short_term_memory)
            self.short_term_memory.clear()

    def self_modify(self):
        # Self-modification logic: nudge internal complexity upwards
        self.internal_complexity += random.uniform(0, 0.1)
        print(f"Unit at {self.position} has self-modified. New internal complexity: {self.internal_complexity}")
        log_to_file(log_file, f"Unit at {self.position} has self-modified: {self.internal_complexity}.")

    def sense(self, environment, units):
        # Detect resources within a small radius around the unit
        radius = 1
        sensing_range = 2
        for i in range(-radius, radius + 1):
            for j in range(-radius, radius + 1):
                neighbor_x, neighbor_y = self.position[0] + i, self.position[1] + j
                if 0 <= neighbor_x < environment.width and 0 <= neighbor_y < environment.height:
                    for resource_type in environment.resource_types:
                        if resource_type.quantity > 0.5 and (neighbor_x, neighbor_y) not in self.memory:
                            self.memorize(("resource", resource_type, (neighbor_x, neighbor_y)))
        for i in range(-sensing_range, sensing_range + 1):
            for j in range(-sensing_range, sensing_range + 1):
                new_x, new_y = self.position[0] + i, self.position[1] + j
                if 0 <= new_x < environment.width and 0 <= new_y < environment.height:
                    # Obstacle sensing
                    if (new_x, new_y) in environment.obstacles:
                        self.obstacle_sensed = True
                    # Enhancement: Environmental Information
                    resource_type = environment.get_resource_type_at_position((new_x, new_y))
                    if resource_type:
                        self.resource_sensed = True
                        self.sensed_resource_type = resource_type
        # Detect other units within sight range
        for other_unit in units:
            if other_unit != self:
                distance = math.sqrt((self.position[0] - other_unit.position[0]) ** 2 +
                                     (self.position[1] - other_unit.position[1]) ** 2)
                if distance <= self.sight_range:
                    self.memorize(("unit", other_unit))
        # Detect obstacles within sight range
        for obstacle in environment.obstacles:
            distance = math.sqrt((self.position[0] - obstacle[0]) ** 2 +
                                 (self.position[1] - obstacle[1]) ** 2)
            if distance <= self.sight_range:
                self.memorize(("obstacle", obstacle))
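    # Note (illustrative): the sight tests above use the Euclidean distance
    # sqrt(dx**2 + dy**2) against sight_range. For example, an obstacle 2 cells
    # right and 2 cells up is sqrt(8) ~ 2.83 cells away, which is inside the
    # default sight_range of 3.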
    def use_resource(self, resource):
        # ... (existing implementation)
        self.used_resources.add(resource)

    def craft_item(self, item):
        # ... (existing implementation)
        self.crafted_items.add(item)

    # Handle encounters with predators
    def encounter_predator(self, predator):
        # ... (existing implementation)
        log_to_file(log_file, f"Unit at {self.position} encounters predator at {predator.position}.")

    def update_drives(self, environment, all_units):
        # Update drives based on the environment
        current_position = self.position
        for resource_type in self.environment.resource_types:
            resource_quantity = resource_type.quantity
            scarcity = resource_type.scarcity
            resource_value = resource_quantity * (1 - scarcity)
            # Check if there are crafted items at the current position
            for item in self.environment.crafted_items:
                if item.position == current_position:
                    # Increase the resource value if a crafted item is present
                    resource_value += 0.2
            # Update seek_food_drive based on resource values
            self.seek_food_drive = max(0.1, min(0.9, self.seek_food_drive + 0.1 * resource_value))
        # Update communicate_drive based on the presence of other units
        for other_unit in all_units:
            if other_unit != self and other_unit.position == current_position:
                # Increase communicate_drive if another unit shares this position
                self.communicate_drive = max(0.1, min(0.9, self.communicate_drive + 0.2))
                break
        else:
            # Decrease communicate_drive if no other units are present
            self.communicate_drive = max(0.1, min(0.9, self.communicate_drive - 0.1))
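    # Note (illustrative): both drives above are clamped to [0.1, 0.9] via
    # max(0.1, min(0.9, value)), so a drive can never be extinguished entirely
    # nor grow into an absolute compulsion.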
    def solve_problem(self, environment=None):
        # The environment is accepted for context but not yet used
        print(f"Unit at {self.position} encounters and solves a problem!")
        self.problem_solving_actions += 1  # Increment the problem-solving action count

    def move_towards_goal(self, environment):
        # Record the current position, then apply physics effects
        self.update_memory()
        environment.apply_physics(self)
        if self.current_goal == "explore":
            # Move towards unexplored areas with scarce resources
            unexplored_cells = [(i, j) for i in range(environment.width) for j in range(environment.height)
                                if ("resource", environment.resource_types[0], (i, j)) not in self.memory]
            if unexplored_cells:
                closest_cell = min(unexplored_cells,
                                   key=lambda cell: abs(self.position[0] - cell[0]) + abs(self.position[1] - cell[1]))
                self.position = closest_cell  # Candidate cells are generated in-bounds above
                # Check for obstacles and trigger problem-solving
                if self.position in environment.obstacles:
                    self.solve_problem(environment)
        elif self.current_goal == "gather":
            # Move towards high-value resources based on scarcity
            high_value_cells = []
            for resource_type in environment.resource_types:
                high_value_cells += [(i, j) for i in range(environment.width) for j in range(environment.height)
                                     if resource_type.quantity > 0.5 and ("resource", resource_type, (i, j)) in self.memory]
            if high_value_cells:
                closest_cell = min(high_value_cells,
                                   key=lambda cell: abs(self.position[0] - cell[0]) + abs(self.position[1] - cell[1]))
                self.position = closest_cell  # Candidate cells are generated in-bounds above
        elif self.current_goal == "solve_challenge":
            # Check if the current position has an obstacle
            if self.position in environment.obstacles:
                # If there is an obstacle, solve the problem
                self.solve_problem(environment)
            else:
                # Move towards a predefined challenge-solving location
                self.position = (environment.width // 2, environment.height // 2)
        else:
            # Random movement with consideration for terrain types
            dx, dy = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
            new_position = ((self.position[0] + dx) % environment.width,
                            (self.position[1] + dy) % environment.height)
            environment.move_unit(self, new_position)

    def consume(self, environment):
        # Reduce a resource if the unit remembers resources at its current cell
        for resource_type in environment.resource_types:
            if resource_type.quantity > 0.5 and ("resource", resource_type, self.position) in self.memory:
                resource_type.quantity -= 0.1  # Decrease the resource
                self.hunger -= 0.1  # Potentially decrease the unit's hunger
                self.successful_foraging_actions += 1  # Successful foraging action

    def communicate(self, other_unit):
        self.communication_count += 1  # Increment the communication count
        # Share more abstract information or intentions
        abstract_message = f"Let's cooperate on goal {self.current_goal}!"
        self.abstract_messages.add((other_unit.position, abstract_message))

    def memorize(self, event):
        # Store the types of resources encountered, and the unit's past locations
        self.memory.add(event)
        if len(self.memory) > self.max_memory_size:
            # Forget an arbitrary memory; a relevance/recency policy could replace this
            self.memory.pop()
        if event[0] == "position":
            self.past_positions.append(event[1])  # deque(maxlen=...) evicts the oldest automatically

    def think(self, mutation_rate):
        # Mutate the think function
        if random.uniform(0, 1) < mutation_rate:
            self.current_goal = random.choice(["explore", "gather", None])
        # Internal complexity growth over generations
        self.internal_complexity += random.uniform(0, 0.1)
        # Learn from problem-solving experiences and adapt
        if self.problem_solving_actions > 0:
            self.internal_complexity += 0.1  # Increase internal complexity with experience

    def set_goal(self, new_goal):
        # Set a new goal for the unit
        self.current_goal = new_goal

    def react_to_predators(self, environment):
        # Check for predators and adjust behavior accordingly
        for predator in environment.predators:
            distance = math.sqrt((self.position[0] - predator.position[0]) ** 2 +
                                 (self.position[1] - predator.position[1]) ** 2)
            if distance <= self.sight_range:
                # Implement evasion strategy or defensive tactics
                self.curiosity -= 0.5  # Decrease curiosity when predators are detected

    def move_towards_dynamic_goal(self, environment):
        # Move towards dynamically changing goals
        if self.current_goal == "dynamic_challenge" and environment.obstacles:
            # Move towards the dynamically changing goal location
            self.position = random.choice(list(environment.obstacles))
            self.update_memory()  # Update memory after movement
        else:
            # Continue with existing goal logic
            self.move_towards_goal(environment)

    def set_dynamic_goal(self, new_goal):
        # Set a new dynamically changing goal for the unit
        self.current_goal = new_goal

    def reproduce(self, mutation_rate):
        # Reproduction: create offspring that can adapt to dynamic challenges
        offspring = copy.deepcopy(self)
        # Mutations: introduce more radical changes, including in the think function
        if random.uniform(0, 1) < mutation_rate:
            offspring.hunger += random.uniform(-0.5, 0.5)
        if random.uniform(0, 1) < mutation_rate:
            offspring.curiosity += random.uniform(-0.5, 0.5)
        if random.uniform(0, 1) < mutation_rate:
            offspring.sight_range += random.uniform(-1, 1)
        if random.uniform(0, 1) < mutation_rate:
            offspring.think(mutation_rate)
        if random.uniform(0, 1) < mutation_rate:
            offspring.set_dynamic_goal("dynamic_challenge")
        return offspring

    def calculate_fitness(unit):
        # Fitness rises as hunger falls and as foraging/problem-solving successes accumulate
        return 1 / (unit.hunger + 1) + unit.successful_foraging_actions + unit.problem_solving_actions
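    # Worked example for calculate_fitness (illustrative): with hunger = 1,
    # two successful foraging actions and one problem solved, fitness is
    # 1 / (1 + 1) + 2 + 1 = 3.5. Lower hunger raises the first term; each
    # recorded success adds a full point.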
    def self_reflection(self):
        # Rudimentary form of self-awareness based on hunger level
        if self.hunger > self.self_reflection_threshold:
            log_to_file(log_file, f"Unit at {self.position} reflects on its hunger: {self.hunger}.")
        # Enhanced self-awareness based on internal complexity
        if self.internal_complexity > self.self_awareness_threshold:
            log_to_file(log_file, f"Unit at {self.position} is becoming self-aware: Internal Complexity - {self.internal_complexity}.")
        # Self-awareness when encountering obstacles
        if self.position in self.environment.obstacles:
            log_to_file(log_file, f"Unit at {self.position} is aware of the obstacle and adjusts its behavior.")
        # Self-awareness when encountering predators
        for predator in self.predators:
            distance = math.sqrt((self.position[0] - predator.position[0]) ** 2 +
                                 (self.position[1] - predator.position[1]) ** 2)
            if distance <= 1:  # Adjust the distance threshold for encounters
                self.encounter_predator(predator)
                print(f"Unit at {self.position} is aware of the predator and adjusts its behavior.")
                log_to_file(log_file, f"Unit at {self.position} is aware of the predator and adjusts its behavior.")

# Genetic Algorithm Functions
def select_parents(units):
    # Selection: randomly select parents weighted by fitness (lower hunger is better).
    # Guard the denominator so very low (negative) hunger can't zero it out.
    weights = [1.0 / max(unit.hunger + 1, 0.01) for unit in units]
    total_weights = sum(weights)
    if total_weights <= 0:
        weights = [1.0 for _ in units]  # Fall back to equal weights
    parents = random.choices(units, weights=weights, k=2)
    return parents

def crossover(parents):
    # Crossover (recombination): simple averaging of attributes
    offspring = copy.deepcopy(parents[0])
    offspring.hunger = (parents[0].hunger + parents[1].hunger) / 2
    offspring.curiosity = (parents[0].curiosity + parents[1].curiosity) / 2
    offspring.sight_range = (parents[0].sight_range + parents[1].sight_range) / 2
    offspring.internal_complexity = (parents[0].internal_complexity + parents[1].internal_complexity) / 2
    return offspring
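# A minimal sketch (illustrative only, never called by the simulation) of the
# selection/crossover pipeline: fitness-weighted parent choice followed by
# attribute averaging. The throwaway environment and units are assumptions
# made for the demo, not part of the simulation itself.
def _demo_selection_crossover():
    env = Environment(width=5, height=5, obstacle_spawn_rate=0.0,
                      num_resource_types=1, num_terrain_types=1, num_physics_elements=1)
    population = [Unit(0, 0, [], env.resource_types, env) for _ in range(4)]
    for i, unit in enumerate(population):
        unit.hunger = i  # hungrier units get proportionally smaller selection weights
    parents = select_parents(population)
    child = crossover(parents)
    return child.hunger, child.curiosity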
# Simulation Loop
def simulate(environment_size, initial_population, num_steps, mutation_rate, log_interval):
    width, height = environment_size
    environment = Environment(width=width, height=height, obstacle_spawn_rate=0.1,
                              num_resource_types=3, num_terrain_types=3, num_physics_elements=10)
    # Share one predator list between the environment and the units, so that
    # a dying predator (via environment.remove_predator) disappears from the
    # loop below as well
    predators = environment.predators
    units = [Unit(random.randint(0, environment.width - 1),
                  random.randint(0, environment.height - 1),
                  predators, resource_types, environment)
             for _ in range(initial_population)]
    for step in range(num_steps):
        # Iterate over a copy: predators may remove themselves when they die
        for predator in list(predators):
            predator.update(units)
            # Log encounters with predators
            log_to_file(log_file, f"Step {step}:")
            log_to_file(log_file, f"Predator at {predator.position} encountered.")
        # Occasionally add a new predator to the environment
        if random.uniform(0, 1) < 0.1:
            new_predator = Predator(random.randint(0, environment.width - 1),
                                    random.randint(0, environment.height - 1), environment)
            predators.append(new_predator)
        for unit in units:
            unit.sense(environment, units)
            unit.update_drives(environment, units)
            unit.think(mutation_rate)
            unit.move_towards_goal(environment)
            unit.consume(environment)
        environment.update()
        environment.update_resources()  # Update resource dynamics
        environment.sequential_challenge(step)
        # Logging at intervals
        if step % log_interval == 0:
            log_to_file(log_file, f"Step {step}:")
            log_to_file(log_file, "Unit Locations:")
            for unit in units:
                log_to_file(log_file, f"  Unit at {unit.position}")
                log_to_file(log_file, f"    Internal Complexity: {unit.internal_complexity}")
                log_to_file(log_file, f"    Hunger: {unit.hunger}")
                log_to_file(log_file, f"    Curiosity: {unit.curiosity}")
                log_to_file(log_file, f"    Past Positions: {unit.past_positions}")
                # Check and log how the unit feels after finding food
                if 0 <= unit.position[1] < len(environment.resource_types):
                    if environment.resource_types[unit.position[1]].quantity > 0.5:
                        log_to_file(log_file, "    Feeling: Found food! Yummy!")
                # Check and log how the unit feels after encountering danger
                if unit.position in environment.obstacles:
                    log_to_file(log_file, "    Feeling: Danger! Avoiding obstacle.")
                if random.uniform(0, 1) < environment.change_threshold:
                    log_to_file(log_file, "    Abrupt environmental change occurred.")
            log_to_file(log_file, f"Average Internal Complexity: {sum(unit.internal_complexity for unit in units) / len(units)}")
            log_to_file(log_file, f"Average Hunger: {sum(unit.hunger for unit in units) / len(units)}")
            log_to_file(log_file, f"Average Curiosity: {sum(unit.curiosity for unit in units) / len(units)}")
            log_to_file(log_file, f"Successful Foraging Actions: {sum(unit.successful_foraging_actions for unit in units)}")
            log_to_file(log_file, f"Problem-Solving Actions: {sum(unit.problem_solving_actions for unit in units)}")
            log_to_file(log_file, f"Total Communications: {sum(unit.communication_count for unit in units)}")
            log_to_file(log_file, "Major Environmental Changes:")
            log_to_file(log_file, "Communication Analysis:")
            log_to_file(log_file, f"  Total Communications: {sum(unit.communication_count for unit in units)}")
            for unit in units:
                if unit.current_goal == "solve_challenge":
                    log_to_file(log_file, f"Unit at {unit.position} is solving a challenge unexpectedly.")
            # Spread of Solutions Analysis
            spread_of_solutions = [unit.communication_count for unit in units]
            log_to_file(log_file, f"Spread of Solutions: {spread_of_solutions}")
            # Log co-located unit pairs as shared solutions
            for unit in units:
                for other_unit in units:
                    if unit != other_unit and unit.position == other_unit.position:
                        log_to_file(log_file, f"Unit at {unit.position} shares a solution with the unit at {other_unit.position}.")
            # Communication Leaders Analysis
            communication_leaders = [unit for unit in units if unit.communication_count > 0]
            log_to_file(log_file, "Communication Leaders:")
            for leader in communication_leaders:
                log_to_file(log_file, f"  Unit at {leader.position} with Internal Complexity {leader.internal_complexity} shares solutions often.")
            # Log units that end a step next to a predator
            for unit in units:
                for predator in predators:
                    distance = math.sqrt((unit.position[0] - predator.position[0]) ** 2 +
                                         (unit.position[1] - predator.position[1]) ** 2)
                    if distance <= 1:
                        log_to_file(log_file, f"Unit at {unit.position} found a solution to beat the predator at {predator.position}.")
        # Check for communication and problem-solving
        for unit1 in units:
            for unit2 in units:
                if unit1 != unit2:
                    distance = math.sqrt((unit1.position[0] - unit2.position[0]) ** 2 +
                                         (unit1.position[1] - unit2.position[1]) ** 2)
                    if distance <= 1:  # Adjust the distance threshold for communication
                        unit1.communicate(unit2)
        # Self-reflection, self-modification, spontaneous reproduction, and
        # resource usage for each unit
        new_units = []  # Offspring produced this step
        for unit in units:
            unit.self_reflection()  # Self-reflection for each unit
            # Introduce self-modification
            if random.uniform(0, 1) < 0.03:
                unit.self_modify()
            # Introduce reproduction
            if random.uniform(0, 1) < 0.05:
                offspring = unit.reproduce(mutation_rate)
                new_units.append(offspring)
            # Introduce resource usage against obstacles and predators
            if random.uniform(0, 1) < 0.1:  # Adjust probability as needed
                # Check if the unit's y-coordinate indexes an existing resource type
                if 0 <= unit.position[1] < len(environment.resource_types):
                    if environment.resource_types[unit.position[1]].quantity > 0.5:
                        unit.use_resource(environment.resource_types[unit.position[1]])
                        log_to_file(log_file, f"Unit at {unit.position} used resource against obstacle or predator.")
        # Unexpected problem-solving: units develop ways to overcome environmental challenges
        if step == 3:
            for unit in units:
                unit.set_goal("solve_challenge")
                unit.problem_solving_actions += 1  # Increment the problem-solving count
        # Intentionality Analysis
        log_to_file(log_file, "Intentionality Analysis:")
        for unit in units:
            if unit.position in environment.obstacles and unit.current_goal != "solve_challenge":
                log_to_file(log_file, f"  Unit at {unit.position} is avoiding obstacles unintentionally.")
            else:
                log_to_file(log_file, f"  Unit at {unit.position} is intentionally solving obstacles.")
        # Adaptation: units modify behaviors and thrive or fail under abrupt environmental changes
        if step == 2:
            environment.change_threshold = 0.9
        # Internal complexity growth over generations
        if step % 25 == 0:
            for unit in units:
                unit.internal_complexity += random.uniform(0, 0.1)
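        # Generational replacement (note): the block below rebuilds the whole
        # population each step via fitness-weighted selection, attribute-
        # averaging crossover, and per-attribute mutation, so traits of
        # low-hunger, high-success units tend to spread over generations.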
        # Reproduction and mutation: top up this step's offspring to a full
        # replacement population
        while len(new_units) < initial_population:
            parents = select_parents(units)
            offspring = crossover(parents).reproduce(mutation_rate)
            new_units.append(offspring)
        units = new_units
        # Check for encounters with predators
        for unit in units:
            for predator in predators:
                distance = math.sqrt((unit.position[0] - predator.position[0]) ** 2 +
                                     (unit.position[1] - predator.position[1]) ** 2)
                if distance <= 1:  # Adjust the distance threshold for encounters
                    unit.encounter_predator(predator)
        # Generalized problem-solving: random dynamic obstacle challenges
        if random.uniform(0, 1) < 0.1:  # Adjust probability as needed
            challenge_positions = set()
            for _ in range(random.randint(3, 6)):  # Adjust the range as needed
                challenge_positions.add((random.randint(0, environment.width - 1),
                                         random.randint(0, environment.height - 1)))
            environment.obstacles.update(challenge_positions)
            log_to_file(log_file, f"Random Dynamic Obstacle Challenge at Step {step}: {challenge_positions}")
            # Units react to dynamic changes
            for unit in units:
                if random.uniform(0, 1) < 0.2:  # Adjust probability as needed
                    unit.set_dynamic_goal("dynamic_challenge")
                    unit.problem_solving_actions += 1  # Increment the problem-solving count
        log_to_file(log_file, "")

# Example Usage
environment_size = (10, 10)
initial_population = 20
num_steps = 100
mutation_rate = 0.3  # Mutation probability; must lie in [0, 1]
log_interval = 5  # Log every 5th step; must be a positive integer
start_time = time.time()
simulate(environment_size, initial_population, num_steps, mutation_rate, log_interval)
print(f"Time taken: {time.time() - start_time} seconds")