diff --git a/.github/.keep b/.github/.keep new file mode 100644 index 0000000..e69de29 diff --git a/README.md b/README.md index 6495c30..1659ad4 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,23 @@ +[![Review Assignment Due Date](https://classroom.github.com/assets/deadline-readme-button-22041afd0340ce965d47ae6ef1cefeee28c7c493a6346c4f15d667ab976d596c.svg)](https://classroom.github.com/a/AwTYhPar) # Lab work No. 1: determining whether parallelism is achievable and implementing parallel algorithms. Steps: 1) Choose one of the graph traversal algorithms (BFS or DFS). + +After a careful analysis of my abilities, breadth-first search (BFS) was chosen. + 2) Study the chosen algorithm and identify the main stages of its execution. Identify the dependencies between the stages and pick those that can be parallelized efficiently (build a dependency graph for this; doing it in your head is fine). + +At each step the vertices of the current frontier are split evenly between the threads, and each thread processes its assigned vertices (adding unvisited neighbours to the next frontier). + 3) Write a program in the programming language of your choice (Java, C++) that implements the chosen algorithm with its parallel opportunities in mind. + +Done: see parallelBFS in src/main/java/org/itmo/Graph.java. + 4) Use the appropriate tools (ThreadSanitizer && Helgrind for C++, JCStress tests for Java) to check the program for the absence of data synchronization errors. If no errors are found, introduce some and find them. + +invalidParallelBFS marks visited vertices in a plain, non-atomic boolean array; incorrectStressTest is expected to fail because of this. + 5) Experiments and analysis of the results:\ Run experiments measuring the performance of the parallel implementation on inputs of various sizes. Compare the results with the sequential version and describe your conclusions. * Plot the execution time of the parallel version of the algorithm against the allocated resources. @@ -12,3 +25,5 @@ \ **Upload the plots to a separate directory in the repository** \ **Any tool may be used to produce the plots** + +The plots are produced by plot_bfs_performance.py from the CSV files in the tmp directory. diff --git a/build.gradle.kts b/build.gradle.kts index 3341beb..b8766a3 100644 --- a/build.gradle.kts +++ b/build.gradle.kts @@ -1,5 +1,6 @@ plugins { kotlin("jvm") version "1.9.20" + java application } @@ -12,10 +13,14 @@ repositories { dependencies { testImplementation(kotlin("test")) + testImplementation("org.openjdk.jcstress:jcstress-core:0.16") + testAnnotationProcessor("org.openjdk.jcstress:jcstress-core:0.16") } tasks.test { useJUnitPlatform() + minHeapSize = "4g" + maxHeapSize = "8g" } kotlin { @@ -24,4 +29,20 @@ kotlin { application { mainClass.set("MainKt") -} \ No newline at end of file +} + +// JCStress runner task: runs JCStress tests located on the test runtime classpath +// Use: ./gradlew jcstress [-PjcstressArgs="-v -m quick"] +tasks.register<JavaExec>("jcstress") { + group = "verification" + description = "Run JCStress stress tests" + mainClass.set("org.openjdk.jcstress.Main") + classpath = sourceSets.test.get().runtimeClasspath + dependsOn("testClasses") + + val argsProp = project.findProperty("jcstressArgs") as String?
+ if (!argsProp.isNullOrBlank()) { + args = argsProp.split("\\s+".toRegex()) + } +} + diff --git a/plot_bfs_performance.py b/plot_bfs_performance.py new file mode 100644 index 0000000..860dbf6 --- /dev/null +++ b/plot_bfs_performance.py @@ -0,0 +1,127 @@ +import os +import glob +import pandas as pd +import matplotlib.pyplot as plt +import numpy as np + +def plot_by_thread(df, filename): + """Plot performance vs thread count.""" + plt.figure(figsize=(10, 6)) + plt.plot(df['threads'], df['time_ms'], marker='o', linewidth=2, markersize=8) + plt.xlabel('Number of Threads', fontsize=12) + plt.ylabel('Time (ms)', fontsize=12) + plt.title('Parallel BFS Performance by Thread Count', fontsize=14, fontweight='bold') + plt.grid(True, alpha=0.3) + plt.xticks(df['threads']) + + # Add value labels on points + for _, row in df.iterrows(): + plt.annotate(f"{row['time_ms']:.1f}", + (row['threads'], row['time_ms']), + textcoords="offset points", + xytext=(0,10), + ha='center', fontsize=9) + + output_file = filename.replace('.csv', '_plot.png') + plt.tight_layout() + plt.savefig(output_file, dpi=300, bbox_inches='tight') + print(f" Saved: {output_file}") + plt.close() + +def plot_by_data_size(df, filename): + """Plot performance vs data size (vertices).""" + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6)) + + # Plot 1: Serial vs Parallel comparison + ax1.plot(df['vertices'], df['serial_time_ms'], marker='o', linewidth=2, + markersize=8, label='Serial BFS', color='#2E86AB') + ax1.plot(df['vertices'], df['parallel_time_ms'], marker='s', linewidth=2, + markersize=8, label='Parallel BFS', color='#A23B72') + ax1.set_xlabel('Number of Vertices', fontsize=12) + ax1.set_ylabel('Time (ms)', fontsize=12) + ax1.set_title('BFS Performance: Serial vs Parallel', fontsize=13, fontweight='bold') + ax1.set_xscale('log') + ax1.set_yscale('log') + ax1.legend(fontsize=11) + ax1.grid(True, alpha=0.3, which='both') + + # Plot 2: Speedup + speedup = df['serial_time_ms'] / df['parallel_time_ms'] + ax2.plot(df['vertices'], speedup, marker='D', linewidth=2, + markersize=8, color='#F18F01') + ax2.axhline(y=1, color='r', linestyle='--', alpha=0.5, label='No speedup') + ax2.set_xlabel('Number of Vertices', fontsize=12) + ax2.set_ylabel('Speedup (Serial / Parallel)', fontsize=12) + ax2.set_title('Parallel BFS Speedup', fontsize=13, fontweight='bold') + ax2.set_xscale('log') + ax2.legend(fontsize=11) + ax2.grid(True, alpha=0.3, which='both') + + # Add value labels on speedup points + for i, (_, row) in enumerate(df.iterrows()): + if speedup.iloc[i] > 0: + ax2.annotate(f"{speedup.iloc[i]:.2f}x", + (row['vertices'], speedup.iloc[i]), + textcoords="offset points", + xytext=(0,10), + ha='center', fontsize=9) + + plt.tight_layout() + output_file = filename.replace('.csv', '_plot.png') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + print(f" Saved: {output_file}") + plt.close() + +def main(): + """Main function to find and plot all BFS performance CSV files.""" + tmp_dir = 'tmp' + + if not os.path.exists(tmp_dir): + print(f"Error: Directory '{tmp_dir}' not found!") + return + + # Find all CSV files matching the pattern (handling both typo and correct spelling) + patterns = [ + os.path.join(tmp_dir, '*bfs_perfomance*.csv'), # typo version + os.path.join(tmp_dir, '*bfs_performance*.csv') # correct spelling + ] + + csv_files = [] + for pattern in patterns: + csv_files.extend(glob.glob(pattern)) + + # Remove duplicates + csv_files = list(set(csv_files)) + + if not csv_files: + print(f"No BFS performance CSV files found in 
'{tmp_dir}' directory!") + return + + print(f"Found {len(csv_files)} CSV file(s) to plot:\n") + + for csv_file in sorted(csv_files): + print(f"Processing: {csv_file}") + try: + df = pd.read_csv(csv_file) + + # Detect file type based on columns + if 'threads' in df.columns and 'time_ms' in df.columns: + print(" Type: Performance by thread count") + plot_by_thread(df, csv_file) + elif 'vertices' in df.columns and 'serial_time_ms' in df.columns and 'parallel_time_ms' in df.columns: + print(" Type: Performance by data size") + plot_by_data_size(df, csv_file) + else: + print(f" Warning: Unknown CSV format. Columns: {list(df.columns)}") + print(" Skipping...") + + except Exception as e: + print(f" Error processing {csv_file}: {e}") + + print() + + print("All plots generated successfully!") + +if __name__ == '__main__': + main() + diff --git a/src/main/java/org/itmo/Graph.java b/src/main/java/org/itmo/Graph.java index 141a0b6..b6cc95e 100644 --- a/src/main/java/org/itmo/Graph.java +++ b/src/main/java/org/itmo/Graph.java @@ -2,8 +2,8 @@ import java.util.*; import java.util.concurrent.*; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicIntegerArray; class Graph { private final int V; @@ -23,12 +23,130 @@ void addEdge(int src, int dest) { } } - void parallelBFS(int startVertex) { + // Generated by Egor Sviridenko's brain + int parallelBFS(int startVertex) { + return parallelBFS(startVertex, Runtime.getRuntime().availableProcessors()); + } + + int parallelBFS(int startVertex, int threads) { + AtomicInteger visitedCounter = new AtomicInteger(); + AtomicIntegerArray visited = new AtomicIntegerArray(V); + visited.compareAndSet(startVertex, 0, 1); + + ExecutorService executorService = Executors.newFixedThreadPool(threads); + + // currentQueue is read-only while the concurrent tasks are in progress + List<Integer> currentQueue = new ArrayList<>(); + currentQueue.add(startVertex); + + while (!currentQueue.isEmpty()) { + List<Integer> nextQueue = Collections.synchronizedList(new ArrayList<>()); + List<Future<?>> futures = new ArrayList<>(); + + // example for threads=5, currentQueue.size()=13: 3 3 3 2 2 + int maxThreadInterval = (currentQueue.size() - 1) / threads + 1; + int maxThreadIntervalCount = currentQueue.size() % threads == 0 ? threads : currentQueue.size() % threads; + int minThreadIntervalBeginIndex = maxThreadInterval * maxThreadIntervalCount; + + int start = 0; + + while (start < currentQueue.size()) { + int currentInterval = start < minThreadIntervalBeginIndex + ?
maxThreadInterval + : maxThreadInterval - 1; + + List<Integer> currentSublist = currentQueue.subList(start, start + currentInterval); + futures.add(executorService.submit(() -> currentSublist.forEach(vertex -> + adjList[vertex].forEach(nextVertex -> { + if (visited.compareAndSet(nextVertex, 0, 1)) { + nextQueue.add(nextVertex); + visitedCounter.incrementAndGet(); + } + }) + ))); + + start += currentInterval; + } + + futures.forEach(future -> { + try { + future.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }); + + currentQueue.clear(); + currentQueue.addAll(nextQueue); + } + + executorService.shutdown(); + return visitedCounter.get(); + } + + // Generated by Egor Sviridenko's brain + int invalidParallelBFS(int startVertex) { + AtomicInteger visitedCounter = new AtomicInteger(); + boolean[] visited = new boolean[V]; + visited[startVertex] = true; + + int threads = Runtime.getRuntime().availableProcessors(); + ExecutorService executorService = Executors.newFixedThreadPool(threads); + + // currentQueue is read-only while the concurrent tasks are in progress + List<Integer> currentQueue = new ArrayList<>(); + currentQueue.add(startVertex); + + while (!currentQueue.isEmpty()) { + List<Integer> nextQueue = Collections.synchronizedList(new ArrayList<>()); + List<Future<?>> futures = new ArrayList<>(); + + // example for threads=5, currentQueue.size()=13: 3 3 3 2 2 + int maxThreadInterval = (currentQueue.size() - 1) / threads + 1; + int maxThreadIntervalCount = currentQueue.size() % threads == 0 ? threads : currentQueue.size() % threads; + int minThreadIntervalBeginIndex = maxThreadInterval * maxThreadIntervalCount; + + int start = 0; + + while (start < currentQueue.size()) { + int currentInterval = start < minThreadIntervalBeginIndex + ? maxThreadInterval + : maxThreadInterval - 1; + + List<Integer> currentSublist = currentQueue.subList(start, start + currentInterval); + futures.add(executorService.submit(() -> currentSublist.forEach(vertex -> + adjList[vertex].forEach(nextVertex -> { + if (!visited[nextVertex]) { + visited[nextVertex] = true; + nextQueue.add(nextVertex); + visitedCounter.incrementAndGet(); + } + }) + ))); + + start += currentInterval; + } + + futures.forEach(future -> { + try { + future.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); + } + }); + + currentQueue.clear(); + currentQueue.addAll(nextQueue); + } + + executorService.shutdown(); + return visitedCounter.get(); } //Generated by ChatGPT - void bfs(int startVertex) { + int bfs(int startVertex) { boolean[] visited = new boolean[V]; + int visitedCounter = 0; LinkedList<Integer> queue = new LinkedList<>(); @@ -42,9 +160,13 @@ void bfs(int startVertex) { if (!visited[n]) { visited[n] = true; queue.add(n); + visitedCounter++; } } + } + + return visitedCounter; } } diff --git a/src/main/java/org/itmo/UnsafeCounter.java b/src/main/java/org/itmo/UnsafeCounter.java new file mode 100644 index 0000000..1041a21 --- /dev/null +++ b/src/main/java/org/itmo/UnsafeCounter.java @@ -0,0 +1,13 @@ +package org.itmo; + +public class UnsafeCounter { + private int counter = 0; + + public void increment() { + counter++; // <-- data race + } + + public int get() { + return counter; + } +} diff --git a/src/test/java/org/itmo/BFSTest.java b/src/test/java/org/itmo/BFSTest.java index 7bf9098..b64bf92 100644 --- a/src/test/java/org/itmo/BFSTest.java +++ b/src/test/java/org/itmo/BFSTest.java @@ -7,9 +7,12 @@ import java.nio.Buffer; import java.util.HashSet; import java.util.Random; +import 
java.util.concurrent.atomic.AtomicIntegerArray; import java.util.function.BiFunction; import java.util.stream.IntStream; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class BFSTest { @Test @@ -22,9 +25,11 @@ public void bfsTest() throws IOException { System.out.println("--------------------------"); System.out.println("Generating graph of size " + sizes[i] + " ...wait"); Graph g = new RandomGraphGenerator().generateGraph(r, sizes[i], connections[i]); - System.out.println("Generation completed!\nStarting bfs"); + System.out.println("Generation completed!\nStarting serial bfs"); long serialTime = executeSerialBfsAndGetTime(g); + System.out.println("Serial bfs completed!\nStarting parallel bfs"); long parallelTime = executeParallelBfsAndGetTime(g); + System.out.println("Parallel bfs completed!\nWriting results to file"); fw.append("Times for " + sizes[i] + " vertices and " + connections[i] + " connections: "); fw.append("\nSerial: " + serialTime); fw.append("\nParallel: " + parallelTime); @@ -49,4 +54,46 @@ private long executeParallelBfsAndGetTime(Graph g) { return endTime - startTime; } + @Test + public void correctStressTest() { + int numIterations = 1000; + Random random = new Random(42); + + for (int i = 0; i < numIterations; i++) { + if (i % 100 == 0) { + System.out.println("Iteration " + i); + } + int numNodes = random.nextInt(100) + 100; + int numEdges = random.nextInt(numNodes * 2) + numNodes - 1; + Graph graph = new RandomGraphGenerator().generateGraph(random, numNodes, numEdges); + + + int serialVisitedNumber = graph.bfs(0); + int parallelVisitedNumber = graph.parallelBFS(0); + + assertEquals(parallelVisitedNumber, serialVisitedNumber); + } + } + + @Test + public void incorrectStressTest() { + int numIterations = 1000; + Random random = new Random(42); + + for (int i = 0; i < numIterations; i++) { + if (i % 100 == 0) { + System.out.println("Iteration " + i); + } + int numNodes = random.nextInt(100) + 100; + int numEdges = random.nextInt(numNodes * 2) + numNodes - 1; + Graph graph = new RandomGraphGenerator().generateGraph(random, numNodes, numEdges); + + + int serialVisitedNumber = graph.bfs(0); + int parallelVisitedNumber = graph.invalidParallelBFS(0); + + assertEquals(parallelVisitedNumber, serialVisitedNumber); + } + } + } diff --git a/src/test/java/org/itmo/ParallelBFSPerformanceTest.java b/src/test/java/org/itmo/ParallelBFSPerformanceTest.java new file mode 100644 index 0000000..44cb18f --- /dev/null +++ b/src/test/java/org/itmo/ParallelBFSPerformanceTest.java @@ -0,0 +1,111 @@ +package org.itmo; + +import org.junit.jupiter.api.Test; + +import java.io.*; +import java.util.Random; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class ParallelBFSPerformanceTest { + + @Test + public void measureParallelBFSPerformanceByThread() throws IOException { + System.out.println("Available processors: " + Runtime.getRuntime().availableProcessors()); + int[] threadCounts = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 20, 24, 28, 32}; + int iterationsPerThreadCount = 10; + int graphSize = 250_000; + int numEdges = 1_000_000; + + try (FileWriter fw = new FileWriter("tmp/parallel_bfs_performance_by_thread.csv")) { + // Write CSV header + fw.append("threads,time_ms\n"); + + // Test each thread count + for (int threadCount : threadCounts) { + System.out.println("Testing with " + threadCount + " threads..."); + + long totalTime = 0; + + for (int i = 0; i < iterationsPerThreadCount; i++) { + Random r = new Random(); + Graph graph = new 
RandomGraphGenerator().generateGraph(r, graphSize, numEdges); + + long startTime = System.nanoTime(); + graph.parallelBFS(0, threadCount); + long endTime = System.nanoTime(); + + long elapsedMs = (endTime - startTime) / 1_000_000; + totalTime += elapsedMs; + + System.out.println(" Iteration " + (i + 1) + "/" + iterationsPerThreadCount + ": " + elapsedMs + " ms"); + } + + double averageTime = (double) totalTime / iterationsPerThreadCount; + System.out.println("Average time for " + threadCount + " threads: " + averageTime + " ms\n"); + + fw.append(String.valueOf(threadCount)) + .append(",") + .append(String.format("%.2f", averageTime)) + .append("\n"); + } + + fw.flush(); + System.out.println("Results written to tmp/parallel_bfs_performance_by_thread.csv"); + } + } + + @Test + public void convertResultsToCSV() throws IOException { + Pattern headerPattern = Pattern.compile("Times for (\\d+) vertices and (\\d+) connections:"); + Pattern serialPattern = Pattern.compile("Serial: (\\d+)"); + Pattern parallelPattern = Pattern.compile("Parallel: (\\d+)"); + + try (BufferedReader reader = new BufferedReader(new FileReader("tmp/results.txt")); + FileWriter writer = new FileWriter("tmp/bfs_perfomance_by_data.csv")) { + + // Write CSV header + writer.append("vertices,connections,serial_time_ms,parallel_time_ms\n"); + + String line; + int vertices = 0; + int connections = 0; + int serialTime = 0; + int parallelTime = 0; + + while ((line = reader.readLine()) != null) { + Matcher headerMatcher = headerPattern.matcher(line); + if (headerMatcher.find()) { + vertices = Integer.parseInt(headerMatcher.group(1)); + connections = Integer.parseInt(headerMatcher.group(2)); + continue; + } + + Matcher serialMatcher = serialPattern.matcher(line); + if (serialMatcher.find()) { + serialTime = Integer.parseInt(serialMatcher.group(1)); + continue; + } + + Matcher parallelMatcher = parallelPattern.matcher(line); + if (parallelMatcher.find()) { + parallelTime = Integer.parseInt(parallelMatcher.group(1)); + // Write the complete row when we have all data + writer.append(String.valueOf(vertices)) + .append(",") + .append(String.valueOf(connections)) + .append(",") + .append(String.valueOf(serialTime)) + .append(",") + .append(String.valueOf(parallelTime)) + .append("\n"); + continue; + } + } + + writer.flush(); + System.out.println("Results converted to tmp/bfs_perfomance_by_data.csv"); + } + } +} + diff --git a/src/test/java/org/itmo/RandomGraphGenerator.java b/src/test/java/org/itmo/RandomGraphGenerator.java index fdb888c..1a57226 100644 --- a/src/test/java/org/itmo/RandomGraphGenerator.java +++ b/src/test/java/org/itmo/RandomGraphGenerator.java @@ -1,7 +1,9 @@ package org.itmo; import java.util.Arrays; +import java.util.HashSet; import java.util.Random; +import java.util.Set; import java.util.SplittableRandom; import java.util.concurrent.ForkJoinPool; import java.util.stream.IntStream; @@ -11,22 +13,27 @@ public class RandomGraphGenerator { private long pack(int u, int v) { return (((long) u) << 32) | (v & 0xffffffffL); } + private int unpackU(long key) { return (int) (key >>> 32); } + private int unpackV(long key) { return (int) (key & 0xffffffffL); } Graph generateGraph(Random r, int size, int numEdges) { + if (size < 1) throw new IllegalArgumentException("size must be >= 1"); if (numEdges < size - 1) throw new IllegalArgumentException("We need min size-1 edges"); long maxDirected = (long) size * (size - 1); if (numEdges > maxDirected) throw new IllegalArgumentException("Too many edges for directed graph without 
self-loops"); - int[] perm = java.util.stream.IntStream.range(0, size).toArray(); - for (int i = size - 1; i > 1; i--) { - int j = 1 + r.nextInt(i); - int tmp = perm[i]; perm[i] = perm[j]; perm[j] = tmp; + int[] perm = IntStream.range(0, size).toArray(); + for (int i = size - 1; i > 0; i--) { + int j = r.nextInt(i + 1); + int tmp = perm[i]; + perm[i] = perm[j]; + perm[j] = tmp; } final int chainCount = size - 1; @@ -74,7 +81,7 @@ Graph generateGraph(Random r, int size, int numEdges) { while (unique < numEdges) { int missing = numEdges - unique; - int extra = Math.max(missing / 2, 10_000); // небольшой запас + int extra = Math.max(missing / 2, 10_000); int add = missing + extra; long[] more = new long[unique + add]; @@ -109,6 +116,31 @@ Graph generateGraph(Random r, int size, int numEdges) { keys = more; } + Set chainSet = new HashSet<>(chainCount * 2); + for (int i = 1; i < size; i++) { + chainSet.add(pack(perm[i - 1], perm[i])); + } + + int p = 0; + for (int i = 0; i < unique && p < chainCount; i++) { + long e = keys[i]; + if (chainSet.remove(e)) { + // swap keys[p] и keys[i] + long tmp = keys[p]; + keys[p] = keys[i]; + keys[i] = tmp; + p++; + } + } + + SplittableRandom shuf = base.split(); + for (int i = p; i < numEdges; i++) { + int j = i + shuf.nextInt(unique - i); + long tmp = keys[i]; + keys[i] = keys[j]; + keys[j] = tmp; + } + Graph g = new Graph(size); for (int i = 0; i < numEdges; i++) { long key = keys[i]; @@ -118,5 +150,4 @@ Graph generateGraph(Random r, int size, int numEdges) { } return g; } - -} +} \ No newline at end of file diff --git a/src/test/java/org/itmo/UnsafeCounterTest.java b/src/test/java/org/itmo/UnsafeCounterTest.java new file mode 100644 index 0000000..a831605 --- /dev/null +++ b/src/test/java/org/itmo/UnsafeCounterTest.java @@ -0,0 +1,27 @@ +package org.itmo; + +import org.openjdk.jcstress.annotations.*; +import org.openjdk.jcstress.infra.results.I_Result; + +@JCStressTest +@Outcome(id = "5", expect = Expect.ACCEPTABLE, desc = "Все 5 инкрементов выполнены корректно") +@Outcome(id = "1", expect = Expect.ACCEPTABLE_INTERESTING, desc = "Гонка данных: часть инкрементов потерялась") +@Outcome(id = "2", expect = Expect.ACCEPTABLE_INTERESTING, desc = "Гонка данных: часть инкрементов потерялась") +@Outcome(id = "3", expect = Expect.ACCEPTABLE_INTERESTING, desc = "Гонка данных: часть инкрементов потерялась") +@Outcome(id = "4", expect = Expect.ACCEPTABLE_INTERESTING, desc = "Гонка данных: часть инкрементов потерялась") +@State +public class UnsafeCounterTest { + + private UnsafeCounter counter = new UnsafeCounter(); + + @Actor public void actor1() { counter.increment(); } + @Actor public void actor2() { counter.increment(); } + @Actor public void actor3() { counter.increment(); } + @Actor public void actor4() { counter.increment(); } + @Actor public void actor5() { counter.increment(); } + + @Arbiter + public void arbiter(I_Result r) { + r.r1 = counter.get(); + } +} diff --git a/tmp/bfs_perfomance_by_data.csv b/tmp/bfs_perfomance_by_data.csv new file mode 100644 index 0000000..4fd22d4 --- /dev/null +++ b/tmp/bfs_perfomance_by_data.csv @@ -0,0 +1,10 @@ +vertices,connections,serial_time_ms,parallel_time_ms +10,50,0,4 +100,500,0,5 +1000,5000,2,10 +10000,50000,10,49 +10000,100000,5,9 +50000,1000000,69,35 +100000,1000000,69,51 +1000000,10000000,1282,394 +2000000,10000000,2042,718 diff --git a/tmp/bfs_perfomance_by_data_plot.png b/tmp/bfs_perfomance_by_data_plot.png new file mode 100644 index 0000000..c6b4c67 Binary files /dev/null and b/tmp/bfs_perfomance_by_data_plot.png 
differ diff --git a/tmp/parallel_bfs_performance_by_thread.csv b/tmp/parallel_bfs_performance_by_thread.csv new file mode 100644 index 0000000..935cb7c --- /dev/null +++ b/tmp/parallel_bfs_performance_by_thread.csv @@ -0,0 +1,17 @@ +threads,time_ms +1,141.50 +2,90.40 +3,69.50 +4,62.90 +5,69.40 +6,72.70 +7,72.80 +8,71.20 +9,75.40 +10,75.00 +12,72.90 +16,72.80 +20,75.90 +24,74.00 +28,74.90 +32,76.90 diff --git a/tmp/parallel_bfs_performance_by_thread_plot.png b/tmp/parallel_bfs_performance_by_thread_plot.png new file mode 100644 index 0000000..74a9f07 Binary files /dev/null and b/tmp/parallel_bfs_performance_by_thread_plot.png differ diff --git a/tmp/results.txt b/tmp/results.txt index 027e7f9..88487e9 100644 --- a/tmp/results.txt +++ b/tmp/results.txt @@ -1,32 +1,36 @@ Times for 10 vertices and 50 connections: Serial: 0 -Parallel: 0 +Parallel: 4 -------- Times for 100 vertices and 500 connections: Serial: 0 -Parallel: 0 +Parallel: 5 -------- Times for 1000 vertices and 5000 connections: -Serial: 1 -Parallel: 0 +Serial: 2 +Parallel: 10 -------- Times for 10000 vertices and 50000 connections: -Serial: 3 -Parallel: 0 +Serial: 10 +Parallel: 49 -------- Times for 10000 vertices and 100000 connections: -Serial: 2 -Parallel: 0 +Serial: 5 +Parallel: 9 -------- Times for 50000 vertices and 1000000 connections: -Serial: 30 -Parallel: 0 +Serial: 69 +Parallel: 35 -------- Times for 100000 vertices and 1000000 connections: -Serial: 18 -Parallel: 0 +Serial: 69 +Parallel: 51 -------- Times for 1000000 vertices and 10000000 connections: -Serial: 307 -Parallel: 0 +Serial: 1282 +Parallel: 394 +-------- +Times for 2000000 vertices and 10000000 connections: +Serial: 2042 +Parallel: 718 --------
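Note on the committed results: the speedup implied by tmp/bfs_perfomance_by_data.csv can be recomputed directly from the data in this patch. Below is a minimal sketch, assuming pandas is installed and the snippet is run from the repository root; plot_bfs_performance.py derives the same serial/parallel ratio for its speedup panel.

import pandas as pd

# Timings committed in this patch (tmp/bfs_perfomance_by_data.csv).
df = pd.read_csv("tmp/bfs_perfomance_by_data.csv")

# Speedup = serial time / parallel time: below 1.0 the parallel BFS loses
# (small graphs), above 1.0 it wins (roughly 3x on the largest graphs here).
df["speedup"] = df["serial_time_ms"] / df["parallel_time_ms"]
print(df[["vertices", "connections", "speedup"]].to_string(index=False))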