We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent f29e4d9 · commit 385229a — Copy full SHA for 385229a
ggml.h
@@ -204,7 +204,7 @@
204
// Maximum training context of the model in use
205
// For the LLaMA models this is normally 2048, but somehow "stepping out" by 128 gives better results (tested at 7B and 13B)
206
#ifndef GGML_TRAINING_CTX
207
-#define GGML_TRAINING_CTX 2176
+#define GGML_TRAINING_CTX 2048
208
#endif
209
210
#define GGML_ASSERT(x) \
0 commit comments