Commit da448a2

Merge pull request #112 from ajcwebdev/refactor

Remove Ollama from Docker Image and Add RSS Retry Logic

2 parents 333e63f + cfbc7f4

35 files changed: +1360 −1354 lines
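The RSS retry hunks themselves live in files outside this excerpt, so only the commit title describes them. As a rough sketch of the retry-with-backoff pattern that title implies (the function and parameter names below are hypothetical, not the commit's actual code):

// Hypothetical sketch of retrying an RSS feed fetch with exponential backoff;
// the commit's actual implementation is not shown in this excerpt.
async function fetchFeedWithRetry(url: string, maxRetries = 3): Promise<string> {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      const response = await fetch(url)
      if (!response.ok) throw new Error(`HTTP ${response.status}`)
      return await response.text()
    } catch (error) {
      if (attempt === maxRetries) throw error
      // Back off 1s, 2s, 4s, ... between attempts
      const delayMs = 1000 * 2 ** (attempt - 1)
      await new Promise((resolve) => setTimeout(resolve, delayMs))
    }
  }
  throw new Error('unreachable')
}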

.github/Dockerfile

Lines changed: 27 additions & 27 deletions

@@ -78,39 +78,39 @@ RUN chmod +x /usr/src/app/docker-entrypoint.sh
 # 2) Setup Ollama with models
 # ---------------------------------------------------

-# 1. Use the Ollama image as a base
-FROM ollama/ollama:latest AS ollama
-
-# 2. Set working directory to Ollama config directory
-WORKDIR /root/.ollama
-
-# 3. Start Ollama server and pull models
-RUN ollama serve & \
-  sleep 10 && \
-  ollama pull qwen2.5:0.5b && \
-  ollama ls && \
-  echo "Listing /root/.ollama after qwen2.5:0.5b pull:" && \
-  ls -lh /root/.ollama || true && \
-  echo "Listing /root/.ollama/models after qwen2.5:0.5b pull:" && \
-  ls -lh /root/.ollama/models || true && \
-  pkill ollama
+# # 1. Use the Ollama image as a base
+# FROM ollama/ollama:latest AS ollama
+
+# # 2. Set working directory to Ollama config directory
+# WORKDIR /root/.ollama
+
+# # 3. Start Ollama server and pull models
+# RUN ollama serve & \
+#   sleep 10 && \
+#   ollama pull qwen2.5:0.5b && \
+#   ollama ls && \
+#   echo "Listing /root/.ollama after qwen2.5:0.5b pull:" && \
+#   ls -lh /root/.ollama || true && \
+#   echo "Listing /root/.ollama/models after qwen2.5:0.5b pull:" && \
+#   ls -lh /root/.ollama/models || true && \
+#   pkill ollama

 # ---------------------------------------------------
 # 3) Final stage combining everything
 # ---------------------------------------------------

 FROM base

-# 1. Copy Ollama binary
-COPY --from=ollama /bin/ollama /usr/local/bin/ollama
+# # 1. Copy Ollama binary
+# COPY --from=ollama /bin/ollama /usr/local/bin/ollama

-# 2. Copy pre-downloaded models
-COPY --from=ollama /root/.ollama /root/.ollama
+# # 2. Copy pre-downloaded models
+# COPY --from=ollama /root/.ollama /root/.ollama

-RUN echo "Listing /root/.ollama in final stage:" && \
-  ls -lh /root/.ollama || true && \
-  echo "Listing /root/.ollama/models in final stage:" && \
-  ls -lh /root/.ollama/models || true
+# RUN echo "Listing /root/.ollama in final stage:" && \
+#   ls -lh /root/.ollama || true && \
+#   echo "Listing /root/.ollama/models in final stage:" && \
+#   ls -lh /root/.ollama/models || true

 # Set environment variables for Whisper
 ENV WHISPER_FORCE_CPU=1
@@ -122,9 +122,9 @@ RUN mkdir -p /usr/src/app/content
 # 4. Set proper permissions for the entire app directory including content
 RUN chown -R node:node /usr/src/app \
   && chmod -R 755 /usr/src/app \
-  && chmod 777 /usr/src/app/content \
-  && chown -R node:node /root/.ollama \
-  && chmod -R 755 /root/.ollama
+  && chmod 777 /usr/src/app/content
+# && chown -R node:node /root/.ollama \
+# && chmod -R 755 /root/.ollama

 # Switch to non-root user
 USER node
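With the Ollama build stage commented out, the image no longer bundles the Ollama binary or any pre-pulled models; presumably a separately running Ollama server is expected instead (see the checkOllamaServerAndModel sketch under src/llms/ollama.ts below).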

.github/docker-entrypoint.sh

Lines changed: 7 additions & 7 deletions

@@ -17,15 +17,15 @@ log_error() {
   exit 1
 }

-echo "Debug: (docker-entrypoint.sh) Checking /root/.ollama before starting Ollama..."
-ls -lR /root/.ollama || true
+# echo "Debug: (docker-entrypoint.sh) Checking /root/.ollama before starting Ollama..."
+# ls -lR /root/.ollama || true

-# Start Ollama server in the background
-echo "Starting Ollama server..."
-ollama serve &
+# # Start Ollama server in the background
+# echo "Starting Ollama server..."
+# ollama serve &

-# Wait for Ollama server to start
-sleep 5
+# # Wait for Ollama server to start
+# sleep 5

 # If first argument is "serve", then start the server.
 if [ "$1" = "serve" ]; then

src/commander.ts

Lines changed: 7 additions & 32 deletions

@@ -15,9 +15,10 @@ import { argv, exit } from 'node:process'
 import { fileURLToPath } from 'node:url'
 import { Command } from 'commander'
 import { selectPrompts } from './process-steps/04-select-prompt'
-import { validateProcessAction, validateLLM, validateTranscription, processAction } from './utils/validate-option'
-import { l, err, logCompletionSeparator } from './utils/logging'
+import { processAction, validateCLIOptions } from './utils/validate-option'
+import { l, err, logSeparator } from './utils/logging'
 import { envVarsMap } from './utils/globals/llms'
+
 import type { ProcessingOptions } from './utils/types/process'

 // Initialize the command-line interface using Commander.js
@@ -67,12 +68,7 @@ program
   .option('--printPrompt <sections...>', 'Print the prompt sections without processing')
   .option('--customPrompt <filePath>', 'Use a custom prompt from a markdown file')
   .option('--saveAudio', 'Do not delete intermediary files after processing')
-  // Added options to override environment variables from CLI
-  /**
-   * Additional CLI options to allow passing API keys from the command line,
-   * overriding .env values if they exist. This way, if the .env is missing
-   * a key, the user can supply it via the CLI.
-   */
+  // Options to override environment variables from CLI
   .option('--openaiApiKey <key>', 'Specify OpenAI API key (overrides .env variable)')
   .option('--anthropicApiKey <key>', 'Specify Anthropic API key (overrides .env variable)')
   .option('--deepgramApiKey <key>', 'Specify Deepgram API key (overrides .env variable)')
@@ -84,21 +80,6 @@
   .option('--togetherApiKey <key>', 'Specify Together API key (overrides .env variable)')
   .option('--fireworksApiKey <key>', 'Specify Fireworks API key (overrides .env variable)')
   .option('--groqApiKey <key>', 'Specify Groq API key (overrides .env variable)')
-  // Add examples and additional help text
-  .addHelpText(
-    'after',
-    `
-Examples:
-  $ autoshow --video "https://www.youtube.com/watch?v=..."
-  $ autoshow --playlist "https://www.youtube.com/playlist?list=..."
-  $ autoshow --channel "https://www.youtube.com/channel/..."
-  $ autoshow --file "content/audio.mp3"
-  $ autoshow --rss "https://feeds.transistor.fm/fsjam-podcast/"
-
-Documentation: https://github.com/ajcwebdev/autoshow#readme
-Report Issues: https://github.com/ajcwebdev/autoshow/issues
-`
-  )

 /**
  * Main action for the program.
@@ -125,19 +106,13 @@ program.action(async (options: ProcessingOptions) => {
     exit(0)
   }

-  // 1) Validate which action was chosen
-  const action = validateProcessAction(options, "action")
-
-  // 2) Validate LLM
-  const llmServices = validateLLM(options)
-
-  // 3) Validate transcription
-  const transcriptServices = validateTranscription(options)
+  // Validate action, LLM, and transcription inputs
+  const { action, llmServices, transcriptServices } = validateCLIOptions(options)

   try {
     // Helper to handle all action processing logic. If successful, log and exit.
     await processAction(action, options, llmServices, transcriptServices)
-    logCompletionSeparator(action)
+    logSeparator({ type: 'completion', descriptor: action })
     exit(0)
   } catch (error) {
     err(`Error processing ${action}:`, (error as Error).message)
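validateCLIOptions collapses the three separate validation calls into one. Its body is not part of this diff; a minimal sketch, assuming it simply delegates to the prior helpers inside validate-option and returns their results as a single object:

// Hypothetical sketch — assumes validateCLIOptions wraps the previous
// validateProcessAction, validateLLM, and validateTranscription helpers.
// The real body lives in src/utils/validate-option and is not shown here.
export function validateCLIOptions(options: ProcessingOptions) {
  const action = validateProcessAction(options, 'action')
  const llmServices = validateLLM(options)
  const transcriptServices = validateTranscription(options)
  return { action, llmServices, transcriptServices }
}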

src/llms/chatgpt.ts

Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
 import { env } from 'node:process'
 import { OpenAI } from 'openai'
 import { GPT_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { ChatGPTModelType } from '../utils/types/llms'

 /**
@@ -43,7 +43,7 @@ export const callChatGPT = async (

   const content = firstChoice.message.content

-  logAPIResults({
+  logLLMCost({
     modelName: actualModel,
     stopReason: firstChoice.finish_reason ?? 'unknown',
     tokenUsage: {
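Every provider module in this commit makes the same swap from logAPIResults to logLLMCost. The helper's body is not shown in the diff; a minimal sketch of a shape consistent with the visible call sites (only modelName, stopReason, and tokenUsage are confirmed; the field names inside tokenUsage are assumptions):

// Hypothetical sketch — the real logLLMCost lives in src/utils/logging.
interface LLMCostInfo {
  modelName: string
  stopReason: string
  tokenUsage: {
    input?: number
    output?: number
    total?: number
  }
}

export function logLLMCost(info: LLMCostInfo): void {
  const { modelName, stopReason, tokenUsage } = info
  console.log(`Model: ${modelName} (stop reason: ${stopReason})`)
  if (tokenUsage.input !== undefined) console.log(`  Input tokens: ${tokenUsage.input}`)
  if (tokenUsage.output !== undefined) console.log(`  Output tokens: ${tokenUsage.output}`)
  if (tokenUsage.total !== undefined) console.log(`  Total tokens: ${tokenUsage.total}`)
}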

src/llms/claude.ts

Lines changed: 15 additions & 10 deletions

@@ -3,9 +3,19 @@
 import { env } from 'node:process'
 import { Anthropic } from '@anthropic-ai/sdk'
 import { CLAUDE_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { ClaudeModelType } from '../utils/types/llms'

+/**
+ * Extracts text content from the API response
+ * @param content - The content returned by the API
+ * @returns The extracted text content, or null if no text content is found
+ */
+interface ContentBlock {
+  type: string;
+  text?: string;
+}
+
 /**
  * Main function to call Claude API.
  * @param {string} prompt - The prompt or instructions to process.
@@ -42,7 +52,7 @@ export const callClaude = async (
     throw new Error('No text content generated from the API')
   }

-  logAPIResults({
+  logLLMCost({
     modelName: actualModel,
     stopReason: response.stop_reason ?? 'unknown',
     tokenUsage: {
@@ -59,18 +69,13 @@
   }
 }

-/**
- * Extracts text content from the API response
- * @param content - The content returned by the API
- * @returns The extracted text content, or null if no text content is found
- */
-function extractTextContent(content: any[]): string | null {
+function extractTextContent(content: ContentBlock[]): string | null {
   for (const block of content) {
     if (typeof block === 'object' && block !== null && 'type' in block) {
       if (block.type === 'text' && 'text' in block) {
-        return block.text
+        return block.text ?? null;
       }
     }
   }
-  return null
+  return null;
 }

src/llms/cohere.ts

Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
 import { env } from 'node:process'
 import { CohereClient } from 'cohere-ai'
 import { COHERE_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { CohereModelType } from '../utils/types/llms'

 /**
@@ -43,7 +43,7 @@ export const callCohere = async (

   const { inputTokens, outputTokens } = meta?.tokens ?? {}

-  logAPIResults({
+  logLLMCost({
     modelName: actualModel,
     stopReason: finishReason ?? 'unknown',
     tokenUsage: {

src/llms/fireworks.ts

Lines changed: 2 additions & 2 deletions

@@ -2,7 +2,7 @@

 import { env } from 'node:process'
 import { FIREWORKS_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { FireworksModelType, FireworksResponse } from '../utils/types/llms'

 /**
@@ -59,7 +59,7 @@ export const callFireworks = async (
     throw new Error('No content generated from the Fireworks API')
   }

-  logAPIResults({
+  logLLMCost({
     modelName: modelKey,
     stopReason: data.choices[0]?.finish_reason ?? 'unknown',
     tokenUsage: {

src/llms/gemini.ts

Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
 import { env } from 'node:process'
 import { GoogleGenerativeAI } from "@google/generative-ai"
 import { GEMINI_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { GeminiModelType } from '../utils/types/llms'

 /**
@@ -46,7 +46,7 @@ export const callGemini = async (
   const { usageMetadata } = response
   const { promptTokenCount, candidatesTokenCount, totalTokenCount } = usageMetadata ?? {}

-  logAPIResults({
+  logLLMCost({
     modelName: actualModel,
     stopReason: 'complete',
     tokenUsage: {

src/llms/groq.ts

Lines changed: 2 additions & 2 deletions

@@ -2,7 +2,7 @@

 import { env } from 'node:process'
 import { GROQ_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { GroqModelType, GroqChatCompletionResponse } from '../utils/types/llms'

 /**
@@ -58,7 +58,7 @@ export const callGroq = async (
     throw new Error('No content generated from the Groq API')
   }

-  logAPIResults({
+  logLLMCost({
     modelName: modelKey,
     stopReason: data.choices[0]?.finish_reason ?? 'unknown',
     tokenUsage: {

src/llms/mistral.ts

Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
 import { env } from 'node:process'
 import { Mistral } from '@mistralai/mistralai'
 import { MISTRAL_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { MistralModelType } from '../utils/types/llms'

 /**
@@ -46,7 +46,7 @@ export const callMistral = async (
   const content = firstChoice.message.content
   const contentString = Array.isArray(content) ? content.join('') : content

-  logAPIResults({
+  logLLMCost({
     modelName: actualModel,
     stopReason: firstChoice.finishReason ?? 'unknown',
     tokenUsage: {

src/llms/ollama.ts

Lines changed: 2 additions & 2 deletions

@@ -2,7 +2,7 @@

 import { env } from 'node:process'
 import { OLLAMA_MODELS } from '../utils/globals/llms'
-import { l, err, logAPIResults } from '../utils/logging'
+import { l, err, logLLMCost } from '../utils/logging'
 import { checkOllamaServerAndModel } from '../utils/validate-option'
 import type { OllamaModelType, OllamaResponse } from '../utils/types/llms'

@@ -68,7 +68,7 @@ export const callOllama = async (
   const totalPromptTokens = data.prompt_eval_count ?? 0
   const totalCompletionTokens = data.eval_count ?? 0

-  logAPIResults({
+  logLLMCost({
     modelName: modelKey,
     stopReason: 'stop',
     tokenUsage: {
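With Ollama removed from the Docker image, callOllama depends on checkOllamaServerAndModel to reach an external server before making requests. The helper's body is not in this diff; a minimal sketch, assuming it probes the standard Ollama /api/tags endpoint and pulls the model when it is missing (the host and port parameters are assumptions):

// Hypothetical sketch — the real checkOllamaServerAndModel lives in
// src/utils/validate-option; parameter names here are assumptions.
async function checkOllamaServerAndModel(host: string, port: string, model: string): Promise<void> {
  const baseUrl = `http://${host}:${port}`

  // Confirm the server is reachable and list the installed models
  const tagsResponse = await fetch(`${baseUrl}/api/tags`)
  if (!tagsResponse.ok) {
    throw new Error(`Ollama server not reachable at ${baseUrl}`)
  }
  const { models } = (await tagsResponse.json()) as { models: { name: string }[] }

  // Pull the model if it is not already installed
  if (!models.some((m) => m.name.startsWith(model))) {
    await fetch(`${baseUrl}/api/pull`, {
      method: 'POST',
      body: JSON.stringify({ name: model }),
    })
  }
}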

src/llms/together.ts

Lines changed: 2 additions & 2 deletions

@@ -2,7 +2,7 @@

 import { env } from 'node:process'
 import { TOGETHER_MODELS } from '../utils/globals/llms'
-import { err, logAPIResults } from '../utils/logging'
+import { err, logLLMCost } from '../utils/logging'
 import type { TogetherModelType, TogetherResponse } from '../utils/types/llms'

 /**
@@ -59,7 +59,7 @@ export const callTogether = async (
     throw new Error('No content generated from the Together AI API')
   }

-  logAPIResults({
+  logLLMCost({
     modelName: modelKey,
     stopReason: data.choices[0]?.finish_reason ?? 'unknown',
     tokenUsage: {
