Skip to content

Commit

Permalink
Merge pull request #4215 from sever-sever/T6917
Browse files Browse the repository at this point in the history
T6917: fix RPS ethernet settings for CPUs with more than 32 cores
  • Loading branch information
dmbaturin authored Dec 3, 2024
2 parents a4dfb3d + b4c276f commit 03a664a
Showing 1 changed file with 16 additions and 3 deletions.
19 changes: 16 additions & 3 deletions python/vyos/ifconfig/ethernet.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,22 +310,35 @@ def set_rps(self, state):
def set_rps(self, state):
    """Enable or disable Receive Packet Steering (RPS) on all RX queues.

    When enabled, RPS is spread over every available CPU except CPU0,
    which is deliberately kept free so the system retains one core for
    housekeeping under high load. Linux clips bits for CPUs that are not
    physically present, so over-wide masks are harmless.

    The kernel sysfs file ``rps_cpus`` takes a CPU bitmask as
    comma-separated 32-bit hexadecimal groups with the MOST significant
    group first (kernel ``bitmap_parse`` format) — required for systems
    with more than 32 cores.

    :param state: truthy to enable RPS, falsy to disable (writes an
        all-zero mask)
    :return: True
    """
    # os.cpu_count() may return None on exotic platforms; treat as 1 CPU.
    cpu_count = os.cpu_count() or 1
    queues = len(glob(f'/sys/class/net/{self.ifname}/queues/rx-*'))

    rps_cpus = 0
    if state:
        # Set a bit for every CPU, then mask out the lowest bit so CPU0
        # is spared from receive packet steering.
        rps_cpus = ((1 << cpu_count) - 1) & ~1

    # Render the bitmask as comma-separated 32-bit hex groups. The
    # kernel expects the first group to be the most significant, so the
    # chunks are collected low-to-high and then reversed.
    hex_chunks = []
    for shift in range(0, max(cpu_count, 32), 32):
        hex_chunks.append(f'{(rps_cpus >> shift) & 0xFFFFFFFF:08x}')
    hex_chunks.reverse()
    mask = ','.join(hex_chunks)

    for i in range(queues):
        # NOTE(review): assumes _write_sysfs accepts (path, value) and
        # writes the value verbatim — confirm against the base class.
        self._write_sysfs(f'/sys/class/net/{self.ifname}/queues/rx-{i}/rps_cpus', mask)

    return True
Expand Down

0 comments on commit 03a664a

Please sign in to comment.