
Commit 32207b2

fixed leisure walks, improved household logic and placement for Humans.
1 parent 7de1a16 commit 32207b2

File tree

2 files changed: +261 −174 lines


examples/city_walking_behaviour/city_walking_behaviour/agents.py

Lines changed: 143 additions & 106 deletions
@@ -67,19 +67,20 @@ def __init__(self, model: Model, cell=None):
 class WalkingBehaviorModel:
     """Optimized walking behavior model with spatial caching and early termination."""

+    MILES_TO_METERS = 1609.34
     DAILY_PROBABILITIES = {
-        ActivityType.GROCERY: 0.25,
-        ActivityType.NON_FOOD_SHOPPING: 0.25,
-        ActivityType.SOCIAL: 0.15,
-        ActivityType.LEISURE: 0.20,
+        ActivityType.GROCERY: 0.4,
+        ActivityType.NON_FOOD_SHOPPING: 0.25,  # once every 4 days
+        ActivityType.SOCIAL: 0.20,
+        ActivityType.LEISURE: 0.33,
     }

     BASE_MAX_DISTANCES = {
-        ActivityType.WORK: 2000,
-        ActivityType.GROCERY: 1000,
-        ActivityType.NON_FOOD_SHOPPING: 1500,
-        ActivityType.SOCIAL: 2000,
-        ActivityType.LEISURE: 3000,
+        ActivityType.WORK: 1.125 * MILES_TO_METERS,  # meters
+        ActivityType.GROCERY: 2.000 * MILES_TO_METERS,
+        ActivityType.NON_FOOD_SHOPPING: 1.500 * MILES_TO_METERS,
+        ActivityType.SOCIAL: 2.500 * MILES_TO_METERS,
+        ActivityType.LEISURE: 5.500 * MILES_TO_METERS,
     }

     def __init__(self, model: Model):
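
For reference, the new caps are expressed in miles and converted to metres through MILES_TO_METERS. A quick, self-contained sanity check of that arithmetic (illustrative only, values rounded):

    # Rough metre values implied by the constants above (rounded).
    MILES_TO_METERS = 1609.34
    print(round(1.125 * MILES_TO_METERS, 1))  # WORK    -> 1810.5
    print(round(2.000 * MILES_TO_METERS, 1))  # GROCERY -> 3218.7
    print(round(5.500 * MILES_TO_METERS, 1))  # LEISURE -> 8851.4
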
@@ -195,48 +196,67 @@ def check_near_point(ref_point):

         return walkable

-    def get_leisure_cells(self, human) -> List:
-        """Get valid leisure walk destinations."""
+    def get_leisure_cells(self, human) -> List[Cell]:
+        """
+        Get valid leisure walk destinations.
+        """
+        if not human or not human.household:
+            return []
+
+        # Calculate distances based on walking ability
         max_distance = self.get_max_walking_distance(
             human.walking_ability, ActivityType.LEISURE
         )
+        # Set minimum distance to 75% of max distance
         min_distance = max_distance * 0.75

-        valid_cells = []
         household_x, household_y = human.household.coordinate
+        valid_cells = []
+
+        for cell in self.model.grid.all_cells.cells:
+            x, y = cell.coordinate

-        # Set a minimum sector size to avoid division by zero
-        sector_size = max(int(max_distance), 1)  # Ensure minimum size of 1
-        sector_x = household_x // sector_size
-        sector_y = household_y // sector_size
-
-        # Check nearby sectors only
-        for dx in (-1, 0, 1):
-            for dy in (-1, 0, 1):
-                x_min = (sector_x + dx) * sector_size
-                y_min = (sector_y + dy) * sector_size
-                x_max = x_min + sector_size
-                y_max = y_min + sector_size
-
-                for cell in self.model.grid.all_cells.cells:
-                    x, y = cell.coordinate
-                    if x_min <= x <= x_max and y_min <= y <= y_max:
-                        dist = self.calculate_distance(household_x, household_y, x, y)
-                        if min_distance <= dist <= max_distance:
-                            valid_cells.append(cell)
+            # Quick boundary check
+            if (
+                abs(x - household_x) > max_distance
+                or abs(y - household_y) > max_distance
+            ):
+                continue
+
+            # Calculate exact distance
+            dist = self.calculate_distance(household_x, household_y, x, y)
+            if min_distance <= dist <= max_distance:
+                valid_cells.append(cell)
+
+            if len(valid_cells) >= 200:
+                return valid_cells

         return valid_cells

     def decide_leisure_walk(self, human) -> Optional[Cell]:
-        """Optimized leisure walk decision."""
-        if (
-            self.model.random.random()
-            > self.DAILY_PROBABILITIES[ActivityType.LEISURE] * human.walking_attitude
-        ):
+        """
+        Leisure walk decision making.
+        """
+        base_probability = self.DAILY_PROBABILITIES[ActivityType.LEISURE]
+
+        # Consider additional factors that might encourage walking
+        motivation_factors = 1.0
+        if human.has_dog:  # Dog owners are more likely to take leisure walks
+            motivation_factors += 0.3
+        if not human.is_working:  # Non-working individuals have more time
+            motivation_factors += 0.2
+
+        # Final probability calculation
+        probability = base_probability * human.walking_attitude * motivation_factors
+
+        if self.model.random.random() > probability:
             return None

         valid_cells = self.get_leisure_cells(human)
-        return self.model.random.choice(valid_cells) if valid_cells else None
+        if not valid_cells:
+            return None
+
+        return self.model.random.choice(valid_cells)

     def simulate_daily_walks(self, human) -> List[Tuple]:
         """Optimized daily walk simulation."""
@@ -289,38 +309,63 @@ def simulate_daily_walks(self, human) -> List[Tuple]:

         return walks

+    def __repr__(self) -> str:
+        """
+        Return a detailed string representation of the WalkingBehaviorModel.
+
+        Returns:
+            str: String showing model state including caches and distances
+        """
+        cache_stats = {
+            "location_cache_size": sum(
+                len(locations) for locations in self._location_cache.values()
+            ),
+            "distance_cache_size": len(self._distance_cache),
+            "leisure_cache_size": len(getattr(self, "_leisure_cells_cache", {})),
+        }
+
+        return (
+            f"WalkingBehaviorModel("
+            f"total_distance_walked={self.total_distance_walked:.2f}, "
+            f"max_possible_distance={self._max_possible_distance}, "
+            f"cache_sizes={cache_stats}, "
+            f"daily_probabilities={len(self.DAILY_PROBABILITIES)} activities)"
+        )
+

 class Human(CellAgent):
     """Represents a person with specific attributes and daily walking behavior."""

     def __init__(
         self,
         model: Model,
+        gender: Optional[int] = None,
+        family_size: Optional[int] = None,
+        age: Optional[int] = None,
+        SES: Optional[int] = None,
         unique_id: int = 0,
         cell=None,
-        SES: int = 0,
         household: Cell = None,
     ):
         super().__init__(model)
         self.cell = cell
         self.unique_id = unique_id
-        self.SES = SES
         self.household = household

         # Human Attributes
-        self.gender = self.model.generate_gender()
-        self.age = self.model.generate_age()
-        self.family_size = self.model.generate_family_size()
+        self.gender = gender
+        self.age = age
+        self.SES = SES
+        self.family_size = family_size
         self.has_dog = self.model.generate_dog_ownership()
         self.walking_ability = self.get_walking_ability()
         self.walking_attitude = self.get_walking_attitude()
         self.is_working = self._determine_working_status()
         self.workplace = self.get_workplace()
         self.friends = self.get_friends()
-        self.family = self.get_family()
+        self.family: Human = None

         self.previous_walking_density: float = 0
-        self.current_walking_density: float

         # Datacollector attributes
         self.daily_walking_trips: int = 0
@@ -340,33 +385,28 @@ def get_friends(self) -> AgentSet:
         friend_count = self.random.randint(MIN_FRIENDS, MAX_FRIENDS)
         friend_set = AgentSet.select(
             self.model.agents_by_type[Human],
-            lambda x: (x.SES > self.SES - 1 and x.SES < self.SES + 1)
+            lambda x: (
+                x.SES > self.SES - 2 and x.SES < self.SES + 2
+            )  # get friends with similar SES, i.e. difference no more than 1
             and x.unique_id != self.unique_id,
             at_most=friend_count,
         )
         if len(friend_set) > 0:
             for friend in friend_set:
-                friend.friends.add(self)
+                friend.friends.add(self)  # add self to the friend's friend set as well
         return friend_set

-    def get_family(self) -> AgentSet:
-        if self.family_size > 1:
-            family_set = AgentSet.select(
-                self.model.agents_by_type[Human],
-                lambda x: x.gender != self.gender
-                and abs(x.age - self.age) <= 3,  # age difference no more than 3 years
-                at_most=1,
-            )
-            if len(family_set) > 0:
-                family_set[0].family = AgentSet([self], random=self.random)
-            return family_set
-        else:
-            return None
-
     def get_workplace(self) -> Optional[Workplace | FixedAgent]:
         if not self.is_working:
             return None
-        return self.random.choice(self.model.agents_by_type[GroceryStore])
+
+        # Get all workplaces like grocery stores, non-food shops, social places
+        all_workplaces = [
+            workplace
+            for workplace in self.model.agents
+            if not isinstance(workplace, Human)
+        ]
+        return self.random.choice(all_workplaces)

     def get_walking_ability(
         self,
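
The SES filter in get_friends now keeps candidates whose SES differs from the agent's by at most 1. A minimal, self-contained sketch of that predicate with hypothetical SES values:

    # Minimal sketch of the SES-similarity filter; SES values are hypothetical.
    my_ses = 3
    candidate_ses = [1, 2, 3, 4, 5]
    similar = [s for s in candidate_ses if my_ses - 2 < s < my_ses + 2]
    print(similar)  # [2, 3, 4], i.e. SES differs by at most 1
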
@@ -386,72 +426,69 @@ def get_walking_attitude(
     ) -> float:  # Method from https://pmc.ncbi.nlm.nih.gov/articles/PMC3306662/
         return self.random.random() ** 3

-    def get_feedback(self, activity: ActivityType):
-        a: float = 0.001 * 20  # attitude factor
-        # 20 because the model is scaled down 20 times.
+    def get_feedback(self):
+        a: float = 0.001  # attitude factor
+
+        # 1. Social network feedback (family and friends)
+        # Store original attitude for use in calculations
+        At = self.walking_attitude

-        # 1. Walking attitudes of family members and friends
+        # Family feedback (Equations 1 & 2 in literature)
         if self.family:
-            self.walking_attitude = ((1 - a) * self.walking_attitude) + (
-                a * self.family[0].walking_attitude
-            )
+            self.walking_attitude = (1 - a) * At + a * self.family.walking_attitude

+        # Friends feedback (Equation 3 in literature)
         if self.friends:
-            cumulative_friends_attitude: float = 0  # Initialize to 0
-            for friend in self.friends:
-                cumulative_friends_attitude += friend.walking_attitude
-            # Average the friends' attitudes if there are any
+            friends_attitude = sum(friend.walking_attitude for friend in self.friends)
             if len(self.friends) > 0:
-                cumulative_friends_attitude /= len(self.friends)
-            self.walking_attitude = ((1 - a) * self.walking_attitude) + (
-                a * cumulative_friends_attitude
-            )
+                friends_attitude /= len(self.friends)
+            self.walking_attitude = (1 - a) * At + a * friends_attitude

-        # 2. Person's walking experience
+        # 2. Walking experience feedback (Equation 4 in literature)
         x, y = self.cell.coordinate
         SE_index = (
-            (self.model.safety_cell_layer.data[x][y] + self.random.uniform(-0.5, 0.5))
+            (
+                self.model.safety_cell_layer.data[x][y]
+                + self.model.random.uniform(-0.5, 0.5)
+            )
             * (
                 self.model.aesthetic_cell_layer.data[x][y]
-                + self.random.uniform(-0.5, 0.5)
+                + self.model.random.uniform(-0.5, 0.5)
             )
         ) / np.mean(
             self.model.safety_cell_layer.data * self.model.aesthetic_cell_layer.data
         )

-        # 3. Density of other walkers
-        neighbour_cells = self.cell.get_neighborhood(radius=2)
-        num_neighbours = [i for i in neighbour_cells if i.agents]
-        self.current_walking_density = len(num_neighbours) / len(neighbour_cells)
-        density_feedback = 0
-        if self.previous_walking_density == 0:
-            # If previous density was zero, treat any current density as a positive change
-            density_feedback = 1 if self.current_walking_density > 0 else 0
+        # 3. Density feedback
+        # Compare current walking density to previous day
+        neighbour_cells = self.cell.get_neighborhood(radius=1)
+        current_density = sum(len(cell.agents) for cell in neighbour_cells) / len(
+            neighbour_cells
+        )
+
+        Id = 0
+        if self.previous_walking_density > 0:
+            Id = current_density / self.previous_walking_density
         else:
-            density_ratio = self.current_walking_density / self.previous_walking_density
-            density_feedback = density_ratio - 1  # Centers the feedback around 0
+            Id = 1 if current_density > 0 else 0

-        self.previous_walking_density = self.current_walking_density
+        self.previous_walking_density = current_density

-        # 4. Total amount walked by the person during that day
-        walking_feedback = 0
+        # 4. Walking distance feedback (Equation 5 in literature)
+        It = 0
         if self.walking_behavior.total_distance_walked > 0:
-            max_personal_distance = (
-                self.walking_behavior.get_max_walking_distance(
-                    self.walking_ability, activity
-                )
-                * self.walking_ability
-            )
-            walking_feedback = min(
-                1, max_personal_distance / self.walking_behavior.total_distance_walked
+            Ab_Da = sum(
+                [
+                    dis * self.walking_ability
+                    for dis in self.walking_behavior.BASE_MAX_DISTANCES.values()
+                ]
             )
+            d = self.walking_behavior.total_distance_walked
+            It = min(1, Ab_Da / d)

-        # Update walking attitude
+        # Final attitude update (Equation 6 in literature)
         self.walking_attitude = (
-            self.walking_attitude
-            * (1 - a + (a * SE_index))
-            * (1 - a + (a * density_feedback))
-            * (1 - a + (a * walking_feedback))
+            At * (1 - a + a * SE_index) * (1 - a + a * Id) * (1 - a + a * It)
        )

     def step(self):
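
The rewritten feedback keeps the final update multiplicative, so each daily term can only nudge the stored attitude by a fraction controlled by the factor a. A minimal numeric sketch with hypothetical feedback values:

    # Minimal sketch of the multiplicative attitude update; inputs are hypothetical.
    a = 0.001
    At = 0.5                          # attitude before feedback
    SE_index, Id, It = 1.2, 1.1, 0.9  # hypothetical environment, density, distance terms
    updated = At * (1 - a + a * SE_index) * (1 - a + a * Id) * (1 - a + a * It)
    print(round(updated, 4))          # 0.5001, a small daily nudge
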
@@ -480,8 +517,8 @@ def step(self):
         )

         if len(daily_walks) > 0:
+            self.get_feedback()
             for activity, destination in daily_walks:
-                self.get_feedback(activity)
                 # Move agent to new cell if applicable
                 if isinstance(destination, FixedAgent):
                     self.cell = destination.cell
