Send ChatGPT requests in parallel
dpomianbuff committed Jan 23, 2024
1 parent 4917b9f commit 5819300
Showing 4 changed files with 35 additions and 23 deletions.
2 changes: 1 addition & 1 deletion gradle.properties
@@ -3,7 +3,7 @@
 pluginGroup=com.intellij.ml.llm.template
 pluginName=LLM-Powered Extract Method
 pluginRepositoryUrl=https://github.com/JetBrains/intellij-platform-plugin-template
-pluginVersion=0.7.3
+pluginVersion=0.7.5

 # Supported build number ranges and IntelliJ Platform versions -> https://plugins.jetbrains.com/docs/intellij/build-number-ranges.html
 pluginSinceBuild = 233
@@ -99,10 +99,10 @@ abstract class ApplyExtractFunctionTransformationIntention(
 ) {
     override fun run(indicator: ProgressIndicator) {
         val now = System.nanoTime()
-        val responseList = MultishotSender(efLLMRequestProvider, project).sendRequest(text, emptyList(), 5, 1.0)
+        val responseList = MultishotSender(efLLMRequestProvider, project).sendRequestInPool(text, emptyList(), 5, 1.0)
         if (responseList.isNotEmpty()) {
             invokeLater {
-                llmResponseTime = responseList.sumOf { it.processingTime }
+                llmResponseTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - now)
                 processLLMResponse(responseList, project, editor, file)
             }
         }
@@ -113,25 +113,6 @@
                 NotificationType.INFORMATION
             )
         }
-//        val response = sendChatRequest(
-//            project = project,
-//            messages = messageList,
-//            model = efLLMRequestProvider.chatModel
-//        )
-//        if (response != null) {
-//            invokeLater {
-//                llmResponseTime = System.nanoTime() - now
-//                if (response.getSuggestions().isEmpty()) {
-//                    showEFNotification(
-//                        project,
-//                        LLMBundle.message("notification.extract.function.with.llm.no.suggestions.message"),
-//                        NotificationType.INFORMATION
-//                    )
-//                } else {
-//                    processLLMResponse(response, project, editor, file)
-//                }
-//            }
-//        }
     }
 }
 ProgressManager.getInstance().runProcessWithProgressAsynchronously(task, BackgroundableProcessIndicator(task))
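Two things change in the hunk above: requests now go through the new sendRequestInPool, and llmResponseTime switches from summing per-shot processing times to measuring wall-clock time around the whole batch. With concurrent requests the old sum can be several times larger than the time the user actually waits, so the new metric is the meaningful one. A hypothetical Kotlin illustration (the names and numbers here are invented for this note):

    fun main() {
        // Per-shot processing times for 5 requests running concurrently (made-up values).
        val perShotMillis = listOf(900L, 950L, 1000L, 980L, 920L)

        // Old metric: summing per-shot times counts each request in full, as if serial.
        val summed = perShotMillis.sum() // 4750 ms

        // New metric: wall-clock time around the batch. Under ideal parallelism this
        // approaches the slowest single request rather than the sum.
        val wallClock = perShotMillis.maxOrNull() ?: 0L // ~1000 ms

        println("summed=${summed}ms, wall-clock=${wallClock}ms")
    }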
@@ -3,9 +3,12 @@ package com.intellij.ml.llm.template.models
 import com.google.gson.annotations.SerializedName
 import com.intellij.ml.llm.template.extractfunction.EFSettingType
 import com.intellij.ml.llm.template.extractfunction.EFSettings
+import com.intellij.ml.llm.template.models.openai.OpenAiChatMessage
 import com.intellij.ml.llm.template.prompts.fewShotExtractSuggestion
 import com.intellij.ml.llm.template.prompts.multishotExtractFunctionPrompt
 import com.intellij.openapi.project.Project
+import com.jetbrains.rd.util.Callable
+import java.util.concurrent.ForkJoinPool
 import java.util.concurrent.TimeUnit

 data class LlmMultishotResponseData(
@@ -49,6 +52,34 @@ class MultishotSender(val llmRequestProvider: LLMRequestProvider, val project: Project
         return result.sortedBy { it.shotNo }
     }

+    private fun doRequest(shotNo: Int, project: Project, messageList: MutableList<OpenAiChatMessage>, model: String, temperature: Double?): LlmMultishotResponseData {
+        val startTime = System.nanoTime()
+        val llmResponse = sendChatRequest(project = project, messages = messageList, model = model, temperature = temperature)
+        val processingTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)
+        return LlmMultishotResponseData(shotNo, processingTime, llmResponse)
+    }
+    fun sendRequestInPool(
+        data: String,
+        existingShots: List<Int>,
+        maxShots: Int,
+        temperature: Double? = null
+    ): List<LlmMultishotResponseData> {
+        val result = mutableListOf<LlmMultishotResponseData>()
+
+        // get prompt
+        val messageList = if (EFSettings.instance.hasSetting(EFSettingType.MULTISHOT_LEARNING)) multishotExtractFunctionPrompt(data) else fewShotExtractSuggestion(data)
+        val missingShots = getMissingShots(existingShots, maxShots)
+
+        val requests = mutableListOf<Int>()
+        for (shotNo in missingShots) {
+            requests.add(shotNo)
+        }
+        val ioPool = ForkJoinPool(5)
+        return ioPool.submit(Callable {
+            requests.parallelStream().map { doRequest(shotNo = it, project = project, messageList = messageList, model = llmRequestProvider.chatModel, temperature = temperature) }.toList()
+        }).get().sortedBy { it.shotNo }
+    }
+
     private fun getMissingShots(existingShots: List<Int>, maxShots: Int): List<Int> {
         val shots = (1..maxShots).toList()
         return shots.subtract(existingShots).toList()
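A few notes on the new code above: the result list in sendRequestInPool is never used, the requests list is just missingShots copied element by element, and the ForkJoinPool(5) is created per call but never shut down. A minimal sketch of the same idea, assuming the surrounding MultishotSender members shown in this diff (not the committed code):

    // Sketch only: relies on doRequest, getMissingShots, project, and llmRequestProvider
    // exactly as they appear in the hunk above.
    fun sendRequestInPool(
        data: String,
        existingShots: List<Int>,
        maxShots: Int,
        temperature: Double? = null
    ): List<LlmMultishotResponseData> {
        val messageList = if (EFSettings.instance.hasSetting(EFSettingType.MULTISHOT_LEARNING))
            multishotExtractFunctionPrompt(data)
        else
            fewShotExtractSuggestion(data)

        // Submitting the Callable to a dedicated ForkJoinPool makes the parallel stream
        // run on that pool's 5 workers instead of the JVM-wide common pool, which matters
        // because each request blocks on network I/O.
        val ioPool = ForkJoinPool(5)
        try {
            return ioPool.submit(Callable {
                getMissingShots(existingShots, maxShots)
                    .parallelStream()
                    .map { doRequest(it, project, messageList, llmRequestProvider.chatModel, temperature) }
                    .toList()
            }).get().sortedBy { it.shotNo }
        } finally {
            ioPool.shutdown() // release the workers; the committed version leaks the pool
        }
    }

Callers such as ApplyExtractFunctionTransformationIntention can then swap sendRequest for sendRequestInPool without further changes, as the first hunk in this commit does.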
2 changes: 1 addition & 1 deletion src/main/resources/META-INF/plugin.xml
@@ -1,7 +1,7 @@
 <idea-plugin package="com.intellij.ml.llm.template">
     <id>com.intellij.ml.llm.template</id>
     <name>LLM-Powered Extract Method</name>
-    <vendor>JetBrains</vendor>
+    <vendor>Dorin Pomian</vendor>

     <depends>com.intellij.modules.platform</depends>
     <depends>com.intellij.modules.java</depends>
