From 70b465dfe28bb8c3cb2f18830b89191d718b557a Mon Sep 17 00:00:00 2001 From: John Henderson Date: Tue, 4 Jul 2023 21:44:56 +0100 Subject: [PATCH] test(stampy.spec): parameterize the test --- app/mocks/question-data/question-2400.ts | 89 ---------------------- app/mocks/question-data/question-8486.json | 1 + app/server-utils/stampy.spec.ts | 55 +++++++------ app/server-utils/stampy.ts | 2 +- scripts/refresh-mock-return-data.ts | 82 ++++++++++---------- 5 files changed, 75 insertions(+), 154 deletions(-) delete mode 100644 app/mocks/question-data/question-2400.ts create mode 100644 app/mocks/question-data/question-8486.json diff --git a/app/mocks/question-data/question-2400.ts b/app/mocks/question-data/question-2400.ts deleted file mode 100644 index 684ba192..00000000 --- a/app/mocks/question-data/question-2400.ts +++ /dev/null @@ -1,89 +0,0 @@ -export const question2400 = { - id: 'i-0d89f207fea069e23edf50d47617c9c8f21bbf5c401e7f2578e188fb309a5774', - type: 'row', - href: 'https://coda.io/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows/i-0d89f207fea069e23edf50d47617c9c8f21bbf5c401e7f2578e188fb309a5774', - name: 'Why is AGI dangerous?', - index: 252, - createdAt: '2023-01-14T14:46:14.123Z', - updatedAt: '2023-04-27T22:22:21.189Z', - browserLink: - 'https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File/_rui-0d89f207fea069e23edf50d47617c9c8f21bbf5c401e7f2578e188fb309a5774', - values: { - File: 'Why is AGI dangerous?', - Synced: false, - 'Sync account': { - '@context': 'http://schema.org/', - '@type': 'StructuredValue', - additionalType: 'row', - name: 'stampysaisafetyinfo@gmail.com', - url: 'https://coda.io/d/_dfau7sl2hmG#_tuGlobal-External-Connections/_rui-1fffa4c7-80ba-4bd0-804e-e432be8d2052', - tableId: 'Global-External-Connections', - rowId: 'i-1fffa4c7-80ba-4bd0-804e-e432be8d2052', - tableUrl: 'https://coda.io/d/_dfau7sl2hmG#_tuGlobal-External-Connections', - }, - Question: '```Why is AGI dangerous?```', - Link: { - '@context': 'http://schema.org/', - '@type': 'WebPage', - url: 'https://docs.google.com/document/d/1ItfAkZNiskwSpT20Wv_OFql2YdzSOUrZ4_2eRijdUNk/edit?usp=drivesdk', - }, - Thumbnail: { - '@context': 'http://schema.org/', - '@type': 'ImageObject', - name: 'image.jpeg', - height: 220, - width: 170, - url: 'https://codahosted.io/docs/fau7sl2hmG/blobs/bl-Ax7ivng4ml/fb82d19d696f9470914aa83504c5edabca89ff2a23ac4d9f0193e2b805a639a7131e65c1720c689ea5c8315daf4db239c15f8131517c2049e70c73f2435125988d4e40934ad455efa490fb2cd029c9b8eea8d169d801509638e183d076ab92b8cdea0c98', - status: 'live', - }, - 'Doc Created': '2023-01-14T14:54:53.674+01:00', - 'Related Answers DO NOT EDIT': [], - Tags: '', - 'Doc Last Edited': '2023-02-27T19:41:29.859+01:00', - Status: { - '@context': 'http://schema.org/', - '@type': 'StructuredValue', - additionalType: 'row', - name: 'Live on site', - url: 'https://coda.io/d/_dfau7sl2hmG#_tugrid-IWDInbu5n2/_rui-7EfvxV9G0N', - tableId: 'grid-IWDInbu5n2', - rowId: 'i-7EfvxV9G0N', - tableUrl: 'https://coda.io/d/_dfau7sl2hmG#_tugrid-IWDInbu5n2', - }, - 'Edit Answer': - '**[Why is AGI dangerous?](https://docs.google.com/document/d/1ItfAkZNiskwSpT20Wv_OFql2YdzSOUrZ4_2eRijdUNk/edit?usp=drivesdk)**', - 'Alternate Phrasings': '', - 'UI ID DO NOT EDIT': '```2400```', - 'Source Link': '', - 'aisafety.info Link': '**[Why is AGI dangerous?](https://aisafety.info/?state=2400_)**', - Source: '```Wiki```', - 'All Phrasings': '```Why is AGI dangerous?\n```', - 'Initial Order': '', - 'Related IDs': [], - 'Rich Text DO NOT EDIT': - '```1. 
[The Orthogonality Thesis](https://www.youtube.com/watch?v=hEUO6pjwFOo): AI could have almost any goal while at the same time having high intelligence (aka ability to succeed at those goals). This means that we could build a very powerful agent which would not necessarily share human-friendly values. For example, the classic [paperclip maximizer](https://www.lesswrong.com/tag/paperclip-maximizer) thought experiment explores this with an AI which has a goal of creating as many paperclips as possible, something that humans are (mostly) indifferent to, and as a side effect ends up destroying humanity to make room for more paperclip factories.\n\n1. [Complexity of value](https://www.lesswrong.com/posts/GNnHHmm8EzePmKzPk/value-is-fragile): What humans care about is not simple, and the space of all goals is large, so virtually all goals we could program into an AI would lead to worlds not valuable to humans if pursued by a sufficiently powerful agent. If we, for example, did not include our value of diversity of experience, we could end up with a world of endlessly looping simple pleasures, rather than beings living rich lives.\n\n1. [Instrumental Convergence](https://www.youtube.com/watch?v=ZeecOKBus3Q): For almost any goal an AI has there are shared ‘instrumental’ steps, such as acquiring resources, preserving itself, and preserving the contents of its goals. This means that a powerful AI with goals that were not explicitly human-friendly would predictably both take actions that lead to the end of humanity (e.g. using resources humans need to live to further its goals, such as replacing our crop fields with vast numbers of solar panels to power its growth, or using the carbon in our bodies to build things) and prevent us from turning it off or altering its goals.\n\n```', - 'Tag Count': 0, - 'Related Answer Count': 0, - 'Rich Text': - '```1. [The Orthogonality Thesis](https://www.youtube.com/watch?v=hEUO6pjwFOo): AI could have almost any goal while at the same time having high intelligence (aka ability to succeed at those goals). This means that we could build a very powerful agent which would not necessarily share human-friendly values. For example, the classic [paperclip maximizer](https://www.lesswrong.com/tag/paperclip-maximizer) thought experiment explores this with an AI which has a goal of creating as many paperclips as possible, something that humans are (mostly) indifferent to, and as a side effect ends up destroying humanity to make room for more paperclip factories.\n\n1. [Complexity of value](https://www.lesswrong.com/posts/GNnHHmm8EzePmKzPk/value-is-fragile): What humans care about is not simple, and the space of all goals is large, so virtually all goals we could program into an AI would lead to worlds not valuable to humans if pursued by a sufficiently powerful agent. If we, for example, did not include our value of diversity of experience, we could end up with a world of endlessly looping simple pleasures, rather than beings living rich lives.\n\n1. [Instrumental Convergence](https://www.youtube.com/watch?v=ZeecOKBus3Q): For almost any goal an AI has there are shared ‘instrumental’ steps, such as acquiring resources, preserving itself, and preserving the contents of its goals. This means that a powerful AI with goals that were not explicitly human-friendly would predictably both take actions that lead to the end of humanity (e.g. 
using resources humans need to live to further its goals, such as replacing our crop fields with vast numbers of solar panels to power its growth, or using the carbon in our bodies to build things) and prevent us from turning it off or altering its goals.\n\n```', - 'Stamp Count': 1, - 'Multi Answer': '', - 'Stamped By': { - '@context': 'http://schema.org/', - '@type': 'Person', - name: 'plex', - email: 'plexven@gmail.com', - }, - Priority: 4, - Asker: '```Jack Harley```', - 'External Source': '', - 'Last Asked On Discord': '', - 'UI ID': '```2400```', - 'Related Answers': [], - 'Doc Last Ingested': '2023-04-28T00:20:06.478+02:00', - 'Request Count': '', - 'Number of suggestions on answer doc': 10, - 'Total character count of suggestions on answer doc': 140, - Helpful: '', - }, -} diff --git a/app/mocks/question-data/question-8486.json b/app/mocks/question-data/question-8486.json new file mode 100644 index 00000000..fe8d3074 --- /dev/null +++ b/app/mocks/question-data/question-8486.json @@ -0,0 +1 @@ +{"items":[{"id":"i-8899afafb469e7a7e3691f2b506fec68b4567eb11d991ce0f21e99ad1527f4ee","type":"row","href":"https://coda.io/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows/i-8899afafb469e7a7e3691f2b506fec68b4567eb11d991ce0f21e99ad1527f4ee","name":"What is AI safety?","index":349,"createdAt":"2023-01-14T14:46:14.123Z","updatedAt":"2023-06-17T00:07:28.126Z","browserLink":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File/_rui-8899afafb469e7a7e3691f2b506fec68b4567eb11d991ce0f21e99ad1527f4ee","values":{"File":"What is AI safety?","Synced":false,"Sync account":{"@context":"http://schema.org/","@type":"StructuredValue","additionalType":"row","name":"stampysaisafetyinfo@gmail.com","url":"https://coda.io/d/_dfau7sl2hmG#_tuGlobal-External-Connections/_rui-1fffa4c7-80ba-4bd0-804e-e432be8d2052","tableId":"Global-External-Connections","rowId":"i-1fffa4c7-80ba-4bd0-804e-e432be8d2052","tableUrl":"https://coda.io/d/_dfau7sl2hmG#_tuGlobal-External-Connections"},"Question":"```What is AI safety?```","Link":{"@context":"http://schema.org/","@type":"WebPage","url":"https://docs.google.com/document/d/1zz1c6rRN8Y-CmO0-BGVKI9G6MMg2MUqD247dPcub144/edit?usp=drivesdk"},"Thumbnail":{"@context":"http://schema.org/","@type":"ImageObject","name":"image.jpeg","height":220,"width":170,"url":"https://codahosted.io/docs/fau7sl2hmG/blobs/bl-4_OLuliiY9/28f78d3dc11cb907a9e2d579ff81a56401b61b45973e6b28db154f0ffd7bb7d90a68bf483c431c84f1093036e4d635b78c7a12dd7a7cad4756446dbd4503e4d1af9013f31859a31b8bdae07984e864d1f584394864e13105331e59981ef61d91e3fbbb85","status":"live"},"Doc Created":"2023-01-14T14:48:30.226+01:00","Related Answers DO NOT EDIT":[{"@context":"http://schema.org/","@type":"StructuredValue","additionalType":"row","name":"What is the difference between AI safety, AI alignment, AI control, friendly AI, AI ethics, AI existential safety and AGI safety?","url":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File/_rui-53897773ddbc2889ee036970bb572cffaef2ead71d29cfccecdcac6c51a181a2","tableId":"grid-sync-1059-File","rowId":"i-53897773ddbc2889ee036970bb572cffaef2ead71d29cfccecdcac6c51a181a2","tableUrl":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File"},{"@context":"http://schema.org/","@type":"StructuredValue","additionalType":"row","name":"What approaches are AI alignment organizations working 
on?","url":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File/_rui-cd5b637d614c18e592dbee9c05adce59dc98163baba9ac36604b736fa76c76ab","tableId":"grid-sync-1059-File","rowId":"i-cd5b637d614c18e592dbee9c05adce59dc98163baba9ac36604b736fa76c76ab","tableUrl":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File"}],"Tags":"","Doc Last Edited":"2023-06-16T22:55:38.462+02:00","Status":{"@context":"http://schema.org/","@type":"StructuredValue","additionalType":"row","name":"Live on site","url":"https://coda.io/d/_dfau7sl2hmG#_tugrid-IWDInbu5n2/_rui-7EfvxV9G0N","tableId":"grid-IWDInbu5n2","rowId":"i-7EfvxV9G0N","tableUrl":"https://coda.io/d/_dfau7sl2hmG#_tugrid-IWDInbu5n2"},"Edit Answer":"**[What is AI safety?](https://docs.google.com/document/d/1zz1c6rRN8Y-CmO0-BGVKI9G6MMg2MUqD247dPcub144/edit?usp=drivesdk)**","Alternate Phrasings":"","UI ID DO NOT EDIT":"```8486```","Source Link":"","aisafety.info Link":"**[What is AI safety?](https://aisafety.info/?state=8486_)**","Source":"```Wiki```","All Phrasings":"```What is AI safety?\n```","Initial Order":"","Related IDs":["```6714```","```6178```"],"Rich Text DO NOT EDIT":"```\n\nIn the [coming decades](https://www.cold-takes.com/most-important-century/), AI systems could be invented that outperform humans on most tasks, including strategy, persuasion, economic productivity, scientific research and development, and AI design. We don't know how to [align such systems](https://www.cold-takes.com/why-ai-alignment-could-be-hard-with-modern-deep-learning/) with the intentions of their users, even when those intentions are good. This could lead to catastrophic outcomes.\n\nThe research field of AI safety was founded to prevent such disasters, and enable humanity to use the enormous potential of advanced AI to solve problems and improve the world. 
There are many kinds of AI risk, but the kind that this website focuses on, because it seems both plausible and extreme in scope, is [existential risk](https://en.wikipedia.org/wiki/Existential_risk_from_artificial_general_intelligence) from misaligned AI systems [disempowering or killing humanity](https://80000hours.org/problem-profiles/artificial-intelligence/#power-seeking-ai).\n\nExamples of work on AI existential safety are:\n\n- [Agent foundations](https://www.alignmentforum.org/posts/FWvzwCDRgcjb9sigb/why-agent-foundations-an-overly-abstract-explanation): Understanding what intelligence and agency are at a fundamental level\n\n- [Prosaic alignment](/?state=89LM): Developing methods like [debate](https://openai.com/research/debate) and [iterated distillation and amplification](/?state=897J) to align more powerful versions of current AI techniques\n\n- [AI policy and governance](https://80000hours.org/articles/ai-policy-guide/): Setting up institutions and mechanisms that cause the major actors to implement good AI safety practices\n\nExamples of work from the broader AI safety field are:\n\n- Getting content recommender systems to not radicalize their users\n\n- Ensuring autonomous cars don’t kill people\n\n- Advocating strict regulations for lethal autonomous weapons\n\nSome kinds of research are useful for addressing both existential risk and smaller-scale bad outcomes:\n\n- [Robustness to distribution shift](https://www.alignmentforum.org/tag/distributional-shifts): making AI systems more able to function reliably outside of the context they were trained in\n\n- [Interpretability](/?state=8241): giving humans insight into the inner workings of AI systems such as neural networks\n\nThis website is a single point of access where people can read summaries and find links to the best information on concepts related to AI existential safety. The goal is to help readers contribute to the effort to ensure that humanity avoids these risks and reaches a wonderful future.\n\n```","Tag Count":0,"Related Answer Count":2,"Rich Text":"```\n\nIn the [coming decades](https://www.cold-takes.com/most-important-century/), AI systems could be invented that outperform humans on most tasks, including strategy, persuasion, economic productivity, scientific research and development, and AI design. We don't know how to [align such systems](https://www.cold-takes.com/why-ai-alignment-could-be-hard-with-modern-deep-learning/) with the intentions of their users, even when those intentions are good. This could lead to catastrophic outcomes.\n\nThe research field of AI safety was founded to prevent such disasters, and enable humanity to use the enormous potential of advanced AI to solve problems and improve the world. 
There are many kinds of AI risk, but the kind that this website focuses on, because it seems both plausible and extreme in scope, is [existential risk](https://en.wikipedia.org/wiki/Existential_risk_from_artificial_general_intelligence) from misaligned AI systems [disempowering or killing humanity](https://80000hours.org/problem-profiles/artificial-intelligence/#power-seeking-ai).\n\nExamples of work on AI existential safety are:\n\n- [Agent foundations](https://www.alignmentforum.org/posts/FWvzwCDRgcjb9sigb/why-agent-foundations-an-overly-abstract-explanation): Understanding what intelligence and agency are at a fundamental level\n\n- [Prosaic alignment](/?state=89LM): Developing methods like [debate](https://openai.com/research/debate) and [iterated distillation and amplification](/?state=897J) to align more powerful versions of current AI techniques\n\n- [AI policy and governance](https://80000hours.org/articles/ai-policy-guide/): Setting up institutions and mechanisms that cause the major actors to implement good AI safety practices\n\nExamples of work from the broader AI safety field are:\n\n- Getting content recommender systems to not radicalize their users\n\n- Ensuring autonomous cars don’t kill people\n\n- Advocating strict regulations for lethal autonomous weapons\n\nSome kinds of research are useful for addressing both existential risk and smaller-scale bad outcomes:\n\n- [Robustness to distribution shift](https://www.alignmentforum.org/tag/distributional-shifts): making AI systems more able to function reliably outside of the context they were trained in\n\n- [Interpretability](/?state=8241): giving humans insight into the inner workings of AI systems such as neural networks\n\nThis website is a single point of access where people can read summaries and find links to the best information on concepts related to AI existential safety. 
The goal is to help readers contribute to the effort to ensure that humanity avoids these risks and reaches a wonderful future.\n\n```","Stamp Count":0,"Multi Answer":"","Stamped By":"","Priority":2,"Asker":"```Magdalena```","External Source":"","Last Asked On Discord":"","UI ID":"```8486```","Related Answers":[{"@context":"http://schema.org/","@type":"StructuredValue","additionalType":"row","name":"What is the difference between AI safety, AI alignment, AI control, friendly AI, AI ethics, AI existential safety and AGI safety?","url":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File/_rui-53897773ddbc2889ee036970bb572cffaef2ead71d29cfccecdcac6c51a181a2","tableId":"grid-sync-1059-File","rowId":"i-53897773ddbc2889ee036970bb572cffaef2ead71d29cfccecdcac6c51a181a2","tableUrl":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File"},{"@context":"http://schema.org/","@type":"StructuredValue","additionalType":"row","name":"What approaches are AI alignment organizations working on?","url":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File/_rui-cd5b637d614c18e592dbee9c05adce59dc98163baba9ac36604b736fa76c76ab","tableId":"grid-sync-1059-File","rowId":"i-cd5b637d614c18e592dbee9c05adce59dc98163baba9ac36604b736fa76c76ab","tableUrl":"https://coda.io/d/_dfau7sl2hmG#_tugrid-sync-1059-File"}],"Doc Last Ingested":"2023-06-16T23:16:39.168+02:00","Request Count":"","Number of suggestions on answer doc":0,"Total character count of suggestions on answer doc":0,"Helpful":10,"Number of pending comments":16,"Length":2904}}],"href":"https://coda.io/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows?pageToken=eyJsaW1pdCI6MjAwLCJvZmZzZXQiOjAsIm9wVmVyc2lvbiI6OTgyNDgsInF1ZXJ5IjoiYy0yMDNLd1NDMk5fOlwiODQ4NlwiIiwic2NoZW1hVmVyc2lvbiI6MTcyLCJzb3J0QnkiOiJuYXR1cmFsIiwidXNlQ29sdW1uTmFtZXMiOnRydWUsInZhbHVlRm9ybWF0IjoicmljaCJ9"} \ No newline at end of file diff --git a/app/server-utils/stampy.spec.ts b/app/server-utils/stampy.spec.ts index 8f47a73a..20b70e48 100644 --- a/app/server-utils/stampy.spec.ts +++ b/app/server-utils/stampy.spec.ts @@ -1,28 +1,37 @@ -import {loadQuestionDetail} from '~/server-utils/stampy' -import {question2400} from '~/mocks/question-data/question-2400' +import {CodaRow, loadQuestionDetail} from '~/server-utils/stampy' +import question8486 from '~/mocks/question-data/question-8486.json' describe('loadQuestionDetail', () => { - it('can load question', async () => { - const fetchMock = getMiniflareFetchMock() - // Throw when no matching mocked request is found - // (see https://undici.nodejs.org/#/docs/api/MockAgent?id=mockagentdisablenetconnect) - fetchMock.disableNetConnect() + type ChangeFields = Omit & R + type CodaRowTrimmed = ChangeFields< + CodaRow, + { + values: Omit< + CodaRow['values'], + 'Tag ID' | 'Internal?' 
| 'Questions tagged with this' | 'Main question' | 'Tags'
+      >
+    }
+  >
+  type TestCase = [number, {items: CodaRowTrimmed[]}]
+  it.each<TestCase>([[8486, question8486]])(
+    'can load question %i',
+    async (questionId, questionData) => {
+      const fetchMock = getMiniflareFetchMock()
+      fetchMock.disableNetConnect()
-    // (see https://undici.nodejs.org/#/docs/api/MockAgent?id=mockagentgetorigin)
-    const mockedUrl = new URL(question2400.href)
-    const origin = fetchMock.get(mockedUrl.origin)
-    // (see https://undici.nodejs.org/#/docs/api/MockPool?id=mockpoolinterceptoptions)
-    origin.intercept({method: 'GET', path: mockedUrl.pathname}).reply(200, question2400)
-    origin
-      .intercept({
-        method: 'GET',
-        path: '/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows?useColumnNames=true&sortBy=natural&valueFormat=rich&query=%22Name%22:%22%60%60%602400%60%60%60%22',
-      })
-      .reply(200, {items: [question2400]})
+      const origin = fetchMock.get('https://coda.io')
+      origin
+        .intercept({
+          method: 'GET',
+          path: `/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows?useColumnNames=true&sortBy=natural&valueFormat=rich&query=%22UI%20ID%22:%22${questionId}%22`,
+        })
+        .reply(200, questionData)
-    const questionDetail = await loadQuestionDetail('NEVER_RELOAD', question2400.values['UI ID'])
-    expect(questionDetail.data.status).toBe(question2400.values.Status.name)
-    const linkUrl = new URL(question2400.values.Link.url)
-    expect(questionDetail.data.answerEditLink).toBe(linkUrl.origin + linkUrl.pathname)
-  })
+      const questionDetail = await loadQuestionDetail('NEVER_RELOAD', questionId.toString())
+      const firstItem = questionData.items[0]
+      expect(questionDetail.data.status).toBe(firstItem.values.Status.name)
+      const linkUrl = new URL(firstItem.values.Link.url)
+      expect(questionDetail.data.answerEditLink).toBe(linkUrl.origin + linkUrl.pathname)
+    }
+  )
 })
diff --git a/app/server-utils/stampy.ts b/app/server-utils/stampy.ts
index 4631b6f7..083ac818 100644
--- a/app/server-utils/stampy.ts
+++ b/app/server-utils/stampy.ts
@@ -64,7 +64,7 @@ type Entity = {
   rowId: string
   tableUrl: string
 }
-type CodaRow = {
+export type CodaRow = {
   id: string
   type: string
   href: string
diff --git a/scripts/refresh-mock-return-data.ts b/scripts/refresh-mock-return-data.ts
index 6cfe7c40..f42ea7b4 100644
--- a/scripts/refresh-mock-return-data.ts
+++ b/scripts/refresh-mock-return-data.ts
@@ -5,62 +5,62 @@ import * as fs from 'fs'
 import * as https from 'https'
+import * as path from 'path'
+import {URL} from 'url'
 async function main(): Promise<void> {
-  const foo = await fetchQuestionDetails()
-  console.log(foo)
-  makeRequest()
+  const data = await getData()
+  await writeFile(data)
 }
-const makeRequest = () => {
-  const token = '7b71af6a-3ac7-41cd-ad58-db38dabc024e'
-  const options = {
-    hostname: 'coda.io',
-    port: 443,
-    path: '/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows?useColumnNames=true&sortBy=natural&valueFormat=rich&query=%22Name%22:%22%60%60%602400%60%60%60%22',
-    method: 'GET',
-    headers: {
-      Authorization: `Bearer ${token}`,
-      'Content-Type': 'application/json',
-    },
-  }
+const httpGet = (options: string | https.RequestOptions | URL): Promise<string> =>
+  new Promise((resolve, reject) => {
+    const req = https.request(options, (res) => {
+      let data = ''
-  const req = https.request(options, (res) => {
-    let data = ''
+      console.log(`StatusCode: ${res.statusCode}`)
-    console.log(`StatusCode: ${res.statusCode}`)
+      res.on('data', (chunk) => {
+        data += chunk
+      })
-    res.on('data', (chunk) => {
-      data += chunk
+      res.on('end', () => {
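+        // the full response body has arrived; hand it back as a string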
+        resolve(data)
+      })
    })
-    res.on('end', () => {
-      console.log(JSON.parse(data))
+    req.on('error', (error) => {
+      reject(error)
    })
-  })
  req.on('error', (error) => {
-    console.error(error)
+    req.end()
  })
-  req.end()
-}
+const getData = async () => {
+  const token = 'GET FROM ENV'
+  const options = {
+    hostname: 'coda.io',
+    port: 443,
+    path: '/apis/v1/docs/fau7sl2hmG/tables/grid-sync-1059-File/rows?useColumnNames=true&sortBy=natural&valueFormat=rich&query=%22UI%20ID%22:%228486%22',
+    method: 'GET',
+    headers: {
+      Authorization: `Bearer ${token}`,
+    },
+  }
-async function fetchQuestionDetails() {
-  console.log('works')
-  // const questionDetail = await loadQuestionDetail('NEVER_RELOAD', '2400')
-  // console.log(questionDetail.timestamp)
+  return await httpGet(options)
 }
-// const data = 'Hello, world!' // The content you want to write to the file
-// const filePath = 'path/to/file.txt' // The path to the file you want to write
-
-// fs.writeFile(filePath, data, (err) => {
-//   if (err) {
-//     console.error('An error occurred:', err)
-//     return
-//   }
-//   console.log('File has been written successfully.')
-// })
+const writeFile = async (data: string) => {
+  const filename = 'myfile.json'
+  const filePath = path.join(__dirname, filename)
+  fs.writeFile(filePath, data, (err) => {
+    if (err) {
+      console.error('An error occurred:', err)
+      return
+    }
+    console.log('File has been written successfully.')
+  })
+}
 main()