From df892b9da9f0bd855a4902f6b53588d40851c918 Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Fri, 12 Jan 2024 20:59:19 +0100
Subject: [PATCH 1/8] feat: remove yml and yaml from path filters

---
 action.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/action.yml b/action.yml
index 8c96b0ba..e7dcdbfe 100644
--- a/action.yml
+++ b/action.yml
@@ -101,8 +101,6 @@ inputs:
       !**/*.pb.go
       !**/*.lock
       !**/*.ttf
-      !**/*.yaml
-      !**/*.yml
       !**/*.cfg
       !**/*.toml
       !**/*.ini

From 23ce9b75418a755b1b14bb052b574dfcd461b884 Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Fri, 12 Jan 2024 22:57:03 +0100
Subject: [PATCH 2/8] feat: remove poem from default configuration

---
 action.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/action.yml b/action.yml
index e7dcdbfe..74282701 100644
--- a/action.yml
+++ b/action.yml
@@ -210,9 +210,6 @@ inputs:
         specific files within 80 words.
       - **Changes**: A markdown table of files and their summaries. Group files 
         with similar changes together into a single row to save space.
-      - **Poem**: Below the changes, include a whimsical, short poem written by 
-        a rabbit to celebrate the changes. Format the poem as a quote using 
-        the ">" symbol and feel free to use emojis where relevant.
 
       Avoid additional commentary as this summary will be added as a comment on the 
       GitHub pull request. Use the titles "Walkthrough" and "Changes" and they must be H2.

From 9bc37c31e9307d08c5266a7ca0f7f804ea9f7ff5 Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Fri, 12 Jan 2024 22:59:00 +0100
Subject: [PATCH 3/8] feat: temporarily fall back to GPT-3.5 by default

---
 action.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/action.yml b/action.yml
index 74282701..0c10007e 100644
--- a/action.yml
+++ b/action.yml
@@ -154,7 +154,7 @@ inputs:
   openai_heavy_model:
     required: false
     description: 'Model to use for complex tasks such as code reviews.'
-    default: 'gpt-4'
+    default: 'gpt-3.5-turbo'
   openai_model_temperature:
     required: false
     description: 'Temperature for GPT model'

From 08961c9cf13c81d722d5618bd68afd752713dbf0 Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Sat, 13 Jan 2024 17:20:47 +0100
Subject: [PATCH 4/8] feat: review simple changes by default

---
 action.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/action.yml b/action.yml
index 0c10007e..1313cf62 100644
--- a/action.yml
+++ b/action.yml
@@ -18,7 +18,7 @@ inputs:
   review_simple_changes:
     required: false
     description: 'Review even when the changes are simple'
-    default: 'false'
+    default: 'true'
   review_comment_lgtm:
     required: false
     description: 'Leave comments even if the patch is LGTM'

From b215f853a26f0bb822f50785f8d2118988dfa61e Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Sat, 13 Jan 2024 17:22:20 +0100
Subject: [PATCH 5/8] feat: ignore Ansible vaults by default

---
 action.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/action.yml b/action.yml
index 1313cf62..2820a6d9 100644
--- a/action.yml
+++ b/action.yml
@@ -134,6 +134,8 @@ inputs:
       !**/*.min.js.css
       !**/*.tfstate
       !**/*.tfstate.backup
+      !**/vault.yml
+      !**/vault.yaml
   disable_review:
     required: false
     description: 'Only provide the summary and skip the code review.'

From cae62ad6baada111d81089411b232ba6794638e7 Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Wed, 28 Feb 2024 20:12:20 +0100
Subject: [PATCH 6/8] feat: switch to gpt-4-turbo-preview

---
 action.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/action.yml b/action.yml
index 2820a6d9..90b8f2ee 100644
--- a/action.yml
+++ b/action.yml
@@ -18,7 +18,7 @@ inputs:
   review_simple_changes:
     required: false
     description: 'Review even when the changes are simple'
-    default: 'true'
+    default: 'false'
   review_comment_lgtm:
     required: false
     description: 'Leave comments even if the patch is LGTM'
@@ -156,7 +156,7 @@ inputs:
   openai_heavy_model:
     required: false
     description: 'Model to use for complex tasks such as code reviews.'
-    default: 'gpt-3.5-turbo'
+    default: 'gpt-4-turbo-preview'
   openai_model_temperature:
     required: false
     description: 'Temperature for GPT model'

From f1978e8870456213e7c8377a03c16d046da202db Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Wed, 28 Feb 2024 20:13:21 +0100
Subject: [PATCH 7/8] feat: add support for gpt-4-turbo-preview

---
 src/limits.ts | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)

diff --git a/src/limits.ts b/src/limits.ts
index aca807f6..a174ec55 100644
--- a/src/limits.ts
+++ b/src/limits.ts
@@ -6,19 +6,30 @@ export class TokenLimits {
 
   constructor(model = 'gpt-3.5-turbo') {
     this.knowledgeCutOff = '2021-09-01'
-    if (model === 'gpt-4-32k') {
-      this.maxTokens = 32600
-      this.responseTokens = 4000
-    } else if (model === 'gpt-3.5-turbo-16k') {
-      this.maxTokens = 16300
-      this.responseTokens = 3000
-    } else if (model === 'gpt-4') {
-      this.maxTokens = 8000
-      this.responseTokens = 2000
-    } else {
-      this.maxTokens = 4000
-      this.responseTokens = 1000
+    switch (model) {
+      case 'gpt-4-32k':
+        this.maxTokens = 32600
+        this.responseTokens = 4000
+        break
+      case 'gpt-3.5-turbo-16k':
+        this.maxTokens = 16300
+        this.responseTokens = 3000
+        break
+      case 'gpt-4':
+        this.maxTokens = 8000
+        this.responseTokens = 2000
+        break
+      case 'gpt-4-turbo-preview':
+        this.maxTokens = 128000
+        this.responseTokens = 4000
+        this.knowledgeCutOff = '2023-12-01'
+        break
+      default:
+        this.maxTokens = 4000
+        this.responseTokens = 1000
+        break
     }
+
     // provide some margin for the request tokens
     this.requestTokens = this.maxTokens - this.responseTokens - 100
   }

From bddbc9f91d335e5dfce3d922021d98d58578f5a5 Mon Sep 17 00:00:00 2001
From: Pascal Zarrad <p.zarrad@outlook.de>
Date: Wed, 28 Feb 2024 20:14:27 +0100
Subject: [PATCH 8/8] chore: build new action distribution package

---
 dist/index.js | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/dist/index.js b/dist/index.js
index a6f49664..6847d334 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -6525,21 +6525,28 @@ class TokenLimits {
     knowledgeCutOff;
     constructor(model = 'gpt-3.5-turbo') {
         this.knowledgeCutOff = '2021-09-01';
-        if (model === 'gpt-4-32k') {
-            this.maxTokens = 32600;
-            this.responseTokens = 4000;
-        }
-        else if (model === 'gpt-3.5-turbo-16k') {
-            this.maxTokens = 16300;
-            this.responseTokens = 3000;
-        }
-        else if (model === 'gpt-4') {
-            this.maxTokens = 8000;
-            this.responseTokens = 2000;
-        }
-        else {
-            this.maxTokens = 4000;
-            this.responseTokens = 1000;
+        switch (model) {
+            case 'gpt-4-32k':
+                this.maxTokens = 32600;
+                this.responseTokens = 4000;
+                break;
+            case 'gpt-3.5-turbo-16k':
+                this.maxTokens = 16300;
+                this.responseTokens = 3000;
+                break;
+            case 'gpt-4':
+                this.maxTokens = 8000;
+                this.responseTokens = 2000;
+                break;
+            case 'gpt-4-turbo-preview':
+                this.maxTokens = 128000;
+                this.responseTokens = 4000;
+                this.knowledgeCutOff = '2023-12-01';
+                break;
+            default:
+                this.maxTokens = 4000;
+                this.responseTokens = 1000;
+                break;
         }
         // provide some margin for the request tokens
         this.requestTokens = this.maxTokens - this.responseTokens - 100;