repo_name string | dataset string | owner string | lang string | func_name string | code string | docstring string | url string | sha string |
|---|---|---|---|---|---|---|---|---|
AiEditor | github_2023 | aieditor-team | typescript | Todo.onClick | onClick(commands) {
commands.toggleTaskList();
} | // @ts-ignore | https://github.com/aieditor-team/AiEditor/blob/3ee22e6953a0dad22c3415c9f145f682aa354ef0/src/components/menus/Todo.ts#L16-L18 | 3ee22e6953a0dad22c3415c9f145f682aa354ef0 |
AiEditor | github_2023 | aieditor-team | typescript | Underline.onClick | onClick(commands) {
commands.toggleUnderline();
} | // @ts-ignore | https://github.com/aieditor-team/AiEditor/blob/3ee22e6953a0dad22c3415c9f145f682aa354ef0/src/components/menus/Underline.ts#L16-L18 | 3ee22e6953a0dad22c3415c9f145f682aa354ef0 |
AiEditor | github_2023 | aieditor-team | typescript | Undo.onClick | onClick(commands) {
commands.undo();
} | // @ts-ignore | https://github.com/aieditor-team/AiEditor/blob/3ee22e6953a0dad22c3415c9f145f682aa354ef0/src/components/menus/Undo.ts#L15-L17 | 3ee22e6953a0dad22c3415c9f145f682aa354ef0 |
AiEditor | github_2023 | aieditor-team | typescript | Video.onClick | onClick(commands) {
if (this.options?.video?.customMenuInvoke) {
this.options.video.customMenuInvoke((this.editor as InnerEditor).aiEditor);
} else {
this.fileInput?.click();
}
} | // @ts-ignore | https://github.com/aieditor-team/AiEditor/blob/3ee22e6953a0dad22c3415c9f145f682aa354ef0/src/components/menus/Video.ts#L40-L46 | 3ee22e6953a0dad22c3415c9f145f682aa354ef0 |
bussin | github_2023 | face-hh | typescript | token | function token(value: string = "", type: TokenType, raw: string = value): Token {
return { value, type, raw, toString: () => {return {value, type: reverseTokenType[type]}} };
} | // Returns a token of a given type and value | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/lexer.ts#L111-L113 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
bussin | github_2023 | face-hh | typescript | isalpha | function isalpha(src: string, isFirstChar: boolean = false) {
if (isFirstChar) {
return /^[A-Za-z_]+$/.test(src);
}
return /^[A-Za-z0-9_]+$/.test(src);
} | /**
* Returns whether the character passed in alphabetic -> [a-zA-Z] and _
*/ | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/lexer.ts#L118-L123 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
bussin | github_2023 | face-hh | typescript | isskippable | function isskippable(str: string) {
return str == " " || str == "\n" || str == "\t" || str == '\r';
} | /**
* Returns true if the character is whitespace like -> [\s, \t, \n]
*/ | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/lexer.ts#L128-L130 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
bussin | github_2023 | face-hh | typescript | isint | function isint(str: string) {
const c = str.charCodeAt(0);
const bounds = ["0".charCodeAt(0), "9".charCodeAt(0)];
return c >= bounds[0] && c <= bounds[1];
} | /**
* Return whether the character is a valid integer -> [0-9]
*/ | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/lexer.ts#L135-L139 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
bussin | github_2023 | face-hh | typescript | Parser.parse_call_member_expr | private parse_call_member_expr(): Expr {
const member = this.parse_member_expr();
if (this.at().type == TokenType.OpenParen) {
return this.parse_call_expr(member);
}
return member;
} | // foo.x() | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/parser.ts#L402-L410 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
bussin | github_2023 | face-hh | typescript | Parser.parse_args_list | private parse_args_list(): Expr[] {
const args = [this.parse_expr()];
while (this.at().type == TokenType.Comma && this.eat()) {
args.push(this.parse_expr());
}
return args;
} | // foo(x = 5, v = "Bar") | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/parser.ts#L438-L446 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
bussin | github_2023 | face-hh | typescript | Parser.parse_primary_expr | private parse_primary_expr(): Expr {
const tk = this.at().type;
switch (tk) {
case TokenType.Identifier:
return { kind: "Identifier", symbol: this.eat().value } as Identifier;
case TokenType.Number:
return {
kind: "NumericLiteral",
value: parseFloat(this.eat().value)
} as NumericLiteral;
case TokenType.String:
return {
kind: "StringLiteral",
value: this.eat().value,
} as StringLiteral;
case TokenType.Fn:
return this.parse_function_declaration();
case TokenType.OpenParen: {
this.eat(); // eat the opening paren
const value = this.parse_expr();
this.expect(TokenType.CloseParen, `Unexpected token (${JSON.stringify(this.at().toString())}) found while parsing arguments.`); // closing paren
return value;
}
default:
console.error("Unexpected token found during parsing!", this.at().toString());
process.exit(1);
}
} | // PrimaryExpr | https://github.com/face-hh/bussin/blob/bb15898fd3024a4899c33ac5395b69717fe002f2/src/frontend/parser.ts#L492-L522 | bb15898fd3024a4899c33ac5395b69717fe002f2 |
GPTHub | github_2023 | lencx | typescript | getScrollPosition | const getScrollPosition = (el: any = window) => ({
x: el.pageXOffset !== undefined ? el.pageXOffset : el.scrollLeft,
y: el.pageYOffset !== undefined ? el.pageYOffset : el.scrollTop,
}); | // eslint-disable-next-line @typescript-eslint/no-explicit-any | https://github.com/lencx/GPTHub/blob/47abc07a62390a947db4f12a2e454d6da7edd7da/src/components/BackTop.tsx#L5-L8 | 47abc07a62390a947db4f12a2e454d6da7edd7da |
Tran | github_2023 | Borber | typescript | easeOutCubic | const easeOutCubic = (t: number): number => 1 - Math.pow(1 - t, 3) | // 调整滚动范围 | https://github.com/Borber/Tran/blob/938795f2e35dbd6997e83b25d66000fce54d1c83/src/App.tsx#L83-L83 | 938795f2e35dbd6997e83b25d66000fce54d1c83 |
OpenOpenAI | github_2023 | transitive-bullshit | typescript | main | async function main() {
const defaultBaseUrl = 'https://api.openai.com/v1'
const baseUrl = process.env.OPENAI_API_BASE_URL ?? defaultBaseUrl
const isOfficalAPI = baseUrl === defaultBaseUrl
const testId =
process.env.TEST_ID ??
`test_${(await sha256(Date.now().toString())).slice(0, 24)}`
const metadata = { testId, isOfficalAPI }
const cleanupTest = !process.env.NO_TEST_CLEANUP
console.log('baseUrl', baseUrl)
console.log('testId', testId)
console.log()
const openai = new OpenAI({
baseURL: baseUrl
})
const getWeather = createAIFunction(
{
name: 'get_weather',
description: 'Gets the weather for a given location',
argsSchema: z.object({
location: z
.string()
.describe('The city and state e.g. San Francisco, CA'),
unit: z
.enum(['c', 'f'])
.optional()
.default('f')
.describe('The unit of temperature to use')
})
},
// Fake weather API implementation which returns a random temperature
// after a short delay
async function getWeather(args) {
await delay(500)
return {
location: args.location,
unit: args.unit,
temperature: (Math.random() * 100) | 0
}
}
)
let assistant: Awaited<
ReturnType<typeof openai.beta.assistants.create>
> | null = null
let thread: Awaited<ReturnType<typeof openai.beta.threads.create>> | null =
null
try {
assistant = await openai.beta.assistants.create({
name: `test ${testId}`,
model: 'gpt-4-1106-preview',
instructions: 'You are a helpful assistant.',
metadata,
tools: [
{
type: 'function',
function: getWeather.spec
}
]
})
assert(assistant)
console.log('created assistant', assistant)
thread = await openai.beta.threads.create({
metadata,
messages: [
{
role: 'user',
content: 'What is the weather in San Francisco today?',
metadata
}
]
})
assert(thread)
console.log('created thread', thread)
let listMessages = await openai.beta.threads.messages.list(thread.id)
assert(listMessages?.data)
console.log('messages', prettifyMessages(listMessages.data))
let run = await openai.beta.threads.runs.create(thread.id, {
assistant_id: assistant.id,
metadata,
instructions: assistant.instructions,
model: assistant.model,
tools: assistant.tools
})
assert(run?.id)
console.log('created run', run)
let listRunSteps = await openai.beta.threads.runs.steps.list(
thread.id,
run.id
)
assert(listRunSteps?.data)
console.log('runSteps', listRunSteps.data)
async function waitForRunStatus(
status: Run['status'],
{ intervalMs = 500 }: { intervalMs?: number } = {}
) {
assert(run?.id)
return oraPromise(async () => {
while (run.status !== status) {
await delay(intervalMs)
assert(thread?.id)
assert(run?.id)
run = await openai.beta.threads.runs.retrieve(thread.id, run.id)
assert(run?.id)
}
}, `waiting for run "${run.id}" to have status "${status}"...`)
}
await waitForRunStatus('requires_action')
console.log('run', run)
listRunSteps = await openai.beta.threads.runs.steps.list(thread.id, run.id)
assert(listRunSteps?.data)
console.log('runSteps', listRunSteps.data)
if (run.status !== 'requires_action') {
throw new Error(
`run "${run.id}" status expected to be "requires_action"; found "${run.status}"`
)
}
if (!run.required_action) {
throw new Error(
`run "${run.id}" expected to have "required_action"; none found`
)
}
if (run.required_action.type !== 'submit_tool_outputs') {
throw new Error(
`run "${run.id}" expected to have "required_action.type" of "submit_tool_outputs; found "${run.required_action.type}"`
)
}
if (!run.required_action.submit_tool_outputs?.tool_calls?.length) {
throw new Error(
`run "${run.id}" expected to have non-empty "required_action.submit_tool_outputs"`
)
}
// Resolve tool calls
const toolCalls = run.required_action.submit_tool_outputs.tool_calls
const toolOutputs = await oraPromise(
pMap(
toolCalls,
async (toolCall) => {
if (toolCall.type !== 'function') {
throw new Error(
`run "${run.id}" invalid submit_tool_outputs tool_call type "${toolCall.type}"`
)
}
if (!toolCall.function) {
throw new Error(
`run "${run.id}" invalid submit_tool_outputs tool_call function"`
)
}
if (toolCall.function.name !== getWeather.spec.name) {
throw new Error(
`run "${run.id}" invalid submit_tool_outputs tool_call function name "${toolCall.function.name}"`
)
}
const toolCallResult = await getWeather(toolCall.function.arguments)
return {
output: JSON.stringify(toolCallResult),
tool_call_id: toolCall.id
}
},
{ concurrency: 4 }
),
`run "${run.id}" resolving ${toolCalls.length} tool ${plur(
'call',
toolCalls.length
)}`
)
console.log(`submitting tool outputs for run "${run.id}"`, toolOutputs)
run = await openai.beta.threads.runs.submitToolOutputs(thread.id, run.id, {
tool_outputs: toolOutputs
})
assert(run)
console.log('run', run)
listRunSteps = await openai.beta.threads.runs.steps.list(thread.id, run.id)
assert(listRunSteps?.data)
console.log('runSteps', listRunSteps.data)
await waitForRunStatus('completed')
console.log('run', run)
listRunSteps = await openai.beta.threads.runs.steps.list(thread.id, run.id)
assert(listRunSteps?.data)
console.log('runSteps', listRunSteps.data)
thread = await openai.beta.threads.retrieve(thread.id)
assert(thread)
console.log('thread', thread)
listMessages = await openai.beta.threads.messages.list(thread.id)
assert(listMessages?.data)
console.log('messages', prettifyMessages(listMessages.data))
} catch (err) {
console.error(err)
process.exit(1)
} finally {
if (cleanupTest) {
// TODO: there's no way to delete messages, runs, or run steps...
// maybe deleting the thread implicitly causes a cascade of deletes?
// TODO: test this assumption
if (thread?.id) {
await openai.beta.threads.del(thread.id)
}
if (assistant?.id) {
await openai.beta.assistants.del(assistant.id)
}
}
}
} | /**
* This file contains an end-to-end Assistants example using an external
* `get_weather` function.
*
* To run it against the offical OpenAI API:
* ```bash
* npx tsx e2e
* ```
*
* To run it against your custom, local API:
* ```bash
* OPENAI_API_BASE_URL='http://127.0.0.1:3000' npx tsx e2e
* ```
*/ | https://github.com/transitive-bullshit/OpenOpenAI/blob/2e8dc24116baf561440c7bf72f8043d1d2dd1c45/e2e/index.ts#L29-L263 | 2e8dc24116baf561440c7bf72f8043d1d2dd1c45 |
OpenOpenAI | github_2023 | transitive-bullshit | typescript | prettifyMessages | function prettifyMessages(messages: any[]) {
return messages.map((message) => ({
...message,
content: message.content?.[0]?.text?.value ?? message.content
}))
} | // Make message content easier to read in the console | https://github.com/transitive-bullshit/OpenOpenAI/blob/2e8dc24116baf561440c7bf72f8043d1d2dd1c45/e2e/index.ts#L266-L271 | 2e8dc24116baf561440c7bf72f8043d1d2dd1c45 |
OpenOpenAI | github_2023 | transitive-bullshit | typescript | main | async function main() {
const defaultBaseUrl = 'https://api.openai.com/v1'
const baseUrl = process.env.OPENAI_API_BASE_URL ?? defaultBaseUrl
const isOfficalAPI = baseUrl === defaultBaseUrl
const testId =
process.env.TEST_ID ??
`test_${(await sha256(Date.now().toString())).slice(0, 24)}`
const metadata = { testId, isOfficalAPI }
const cleanupTest = !process.env.NO_TEST_CLEANUP
console.log('baseUrl', baseUrl)
console.log('testId', testId)
console.log()
const openai = new OpenAI({
baseURL: baseUrl
})
let assistant: Awaited<
ReturnType<typeof openai.beta.assistants.create>
> | null = null
let thread: Awaited<ReturnType<typeof openai.beta.threads.create>> | null =
null
const readmeFileStream = fs.createReadStream('readme.md', 'utf8')
const readmeFile = await openai.files.create({
file: readmeFileStream,
purpose: 'assistants'
})
console.log('created readme file', readmeFile)
try {
assistant = await openai.beta.assistants.create({
model: 'gpt-4-1106-preview',
instructions: 'You are a helpful assistant.',
metadata,
tools: [
{
type: 'retrieval'
}
],
file_ids: [readmeFile.id]
})
assert(assistant)
console.log('created assistant', assistant)
thread = await openai.beta.threads.create({
metadata,
messages: [
{
role: 'user',
content:
'Give me a concise summary of the attached file using markdown.',
metadata
}
]
})
assert(thread)
console.log('created thread', thread)
let listMessages = await openai.beta.threads.messages.list(thread.id)
assert(listMessages?.data)
console.log('messages', prettifyMessages(listMessages.data))
let run = await openai.beta.threads.runs.create(thread.id, {
assistant_id: assistant.id,
metadata,
instructions: assistant.instructions,
model: assistant.model,
tools: assistant.tools
})
assert(run?.id)
console.log('created run', run)
let listRunSteps = await openai.beta.threads.runs.steps.list(
thread.id,
run.id
)
assert(listRunSteps?.data)
console.log('runSteps', listRunSteps.data)
async function waitForRunStatus(
status: Run['status'],
{ intervalMs = 500 }: { intervalMs?: number } = {}
) {
assert(run?.id)
return oraPromise(async () => {
while (run.status !== status) {
if (
status !== run.status &&
(run.status === 'cancelled' ||
run.status === 'cancelling' ||
run.status === 'failed' ||
run.status === 'expired')
) {
throw new Error(
`Error run "${run.id}" status reached terminal status "${run.status}" while waiting for status "${status}"`
)
}
await delay(intervalMs)
assert(thread?.id)
assert(run?.id)
run = await openai.beta.threads.runs.retrieve(thread.id, run.id)
assert(run?.id)
}
}, `waiting for run "${run.id}" to have status "${status}"...`)
}
await waitForRunStatus('completed')
console.log('run', run)
listRunSteps = await openai.beta.threads.runs.steps.list(thread.id, run.id)
assert(listRunSteps?.data)
console.log('runSteps', listRunSteps.data)
thread = await openai.beta.threads.retrieve(thread.id)
assert(thread)
console.log('thread', thread)
listMessages = await openai.beta.threads.messages.list(thread.id)
assert(listMessages?.data)
console.log('messages', prettifyMessages(listMessages.data))
} catch (err) {
console.error(err)
process.exit(1)
} finally {
if (cleanupTest) {
// TODO: there's no way to delete messages, runs, or run steps...
// maybe deleting the thread implicitly causes a cascade of deletes?
// TODO: test this assumption
if (thread?.id) {
await openai.beta.threads.del(thread.id)
}
if (assistant?.id) {
await openai.beta.assistants.del(assistant.id)
}
}
}
} | /**
* This file contains an end-to-end Assistants example using the built-in
* `retrieval` tool which summarizes an attached markdown file.
*
* To run it against the offical OpenAI API:
* ```bash
* npx tsx e2e/retrieval.ts
* ```
*
* To run it against your custom, local API:
* ```bash
* OPENAI_API_BASE_URL='http://127.0.0.1:3000' npx tsx e2e/retrieval.ts
* ```
*/ | https://github.com/transitive-bullshit/OpenOpenAI/blob/2e8dc24116baf561440c7bf72f8043d1d2dd1c45/e2e/retrieval.ts#L26-L171 | 2e8dc24116baf561440c7bf72f8043d1d2dd1c45 |
OpenOpenAI | github_2023 | transitive-bullshit | typescript | prettifyMessages | function prettifyMessages(messages: any[]) {
return messages.map((message) => ({
...message,
content: message.content?.[0]?.text?.value ?? message.content
}))
} | // Make message content easier to read in the console | https://github.com/transitive-bullshit/OpenOpenAI/blob/2e8dc24116baf561440c7bf72f8043d1d2dd1c45/e2e/retrieval.ts#L174-L179 | 2e8dc24116baf561440c7bf72f8043d1d2dd1c45 |
eidolon | github_2023 | eidolon-ai | typescript | definitivePermalink | const definitivePermalink = (permalink: string): string => createPath(BASE_PATHNAME, permalink); | /** */ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/apps/docs/src/utils/permalinks.ts#L82-L82 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | ComponentIsNotImplemented | const ComponentIsNotImplemented = () => {
return <div>Component is under construction...</div>;
}; | /**
* Renders "Component is under construction" boilerplate
* @component NotImplemented
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/apps/eidolon-ui2/src/components/NotImplemented.tsx#L5-L7 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | useWindowSize | const useWindowSize = (): WindowSize => {
const [windowSize, setWindowSize] = useState<WindowSize>(DEFAULT_WINDOWS_SIZE);
useLayoutEffect(() => {
function handleResize() {
setWindowSize({
width: window.innerWidth,
height: window.innerHeight,
});
}
window.addEventListener('resize', handleResize);
handleResize(); // Get initial/current window size
return () => window.removeEventListener('resize', handleResize);
}, []);
return windowSize;
}; | /**
* Hook to monitor Window (actually Browser) Size using "resize" event listener
* @returns {WindowSize} current window size as {width, height} object
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/apps/eidolon-ui2/src/hooks/useWindowSize.ts#L17-L35 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | CurrentLayout | const CurrentLayout: FunctionComponent<PropsWithChildren> = (props) => {
return <PrivateLayout {...props} />
}; | /**
* Returns the current Layout component depending on different circumstances.
* @layout CurrentLayout
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/apps/eidolon-ui2/src/layout/CurrentLayout.tsx#L9-L11 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | PrivateLayout | const PrivateLayout: FunctionComponent<PropsWithChildren> = ({children}) => {
return (
<div
className={"flex flex-col overflow-hidden h-screen"}
>
<HeaderProvider>
<Header/>
<div
className={"h-[inherit] overflow-hidden"}
>
<ErrorBoundary name="Content">{children}</ErrorBoundary>
</div>
</HeaderProvider>
</div>
);
}; | /**
* Renders "Private Layout" composition
* @layout PrivateLayout
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/apps/eidolon-ui2/src/layout/PrivateLayout.tsx#L12-L28 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | AppReducer | const AppReducer: Reducer<AppStoreState, any> = (state, action) => {
// console.log('AppReducer() - action:', action);
switch (action.type || action.action) {
case 'CURRENT_USER':
return {
...state,
currentUser: action?.currentUser || action?.payload,
};
case 'SIGN_UP':
case 'LOG_IN':
return {
...state,
isAuthenticated: true,
};
case 'LOG_OUT':
return {
...state,
isAuthenticated: false,
currentUser: undefined, // Also reset previous user data
};
case 'DARK_MODE': {
const darkMode = action?.darkMode || action?.payload;
localStorageSet('darkMode', darkMode);
const prefersDarkMode = IS_SERVER ? false : window.matchMedia('(prefers-color-scheme: dark)').matches;
const newDarkMode = darkMode === "system" ? prefersDarkMode : darkMode === "dark"
return {
...state,
themeMode: darkMode,
darkMode: newDarkMode,
};
}
case 'USER_USAGE':
return {
...state,
userUsageSeed: state.userUsageSeed+1,
};
default:
return state;
}
}; | /**
* Reducer for global AppStore using "Redux styled" actions
* @function AppReducer
* @param {object} state - current/default state
* @param {string} action.type - unique name of the action
* @param {string} action.action - alternate to action.type property, unique name of the action
* @param {*} [action.payload] - optional data object or the function to get data object
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/apps/eidolon-ui2/src/store/AppReducer.ts#L13-L52 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | initializeParser | const initializeParser = async () => {
return (await import("@readme/openapi-parser")).default;
}; | // Disable SSR for @readme/openapi-parser due to Turbopack incompatibility | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/eidolon-client/src/client.ts#L6-L8 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | FilesHandler.POST | async POST(req: Request, {params}: { params: { processid: string } }) {
const machineUrl = new URL(req.url).searchParams.get('machineURL')
if (!machineUrl) {
return new Response('machineUrl is required', {status: 422})
}
const mimeType = req.headers.get('mime-type')
return processHeadersAndResponse(req, this.uploadFile(machineUrl, params.processid, await req.blob(), mimeType))
} | // upload file | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/eidolon-components/src/server/processes-server-handler.ts#L271-L278 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | FileHandler.GET | async GET(req: Request, {params}: { params: { processid: string, fileid: string } }) {
const machineUrl = new URL(req.url).searchParams.get('machineURL')
if (!machineUrl) {
return new Response('machineUrl is required', {status: 400})
}
return convertException(this.downloadFile(machineUrl, params.processid, params.fileid).then(resp => {
const {data, mimetype} = resp
return new Response(data, {
status: 200,
headers: {
'Content-Type': mimetype || 'application/octet-stream',
},
});
}))
} | // download file | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/eidolon-components/src/server/processes-server-handler.ts#L303-L317 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | UsageService.healthHealthGet | public static healthHealthGet(): CancelablePromise<$OpenApiTs['/health']['get']['res'][200]> {
return __request(OpenAPI, {
method: 'GET',
url: '/health'
});
} | /**
* Health
* @returns unknown Successful Response
* @throws ApiError
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/usage-client/src/services.gen.ts#L15-L20 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | UsageService.deleteSubjectSubjectsSubjectIdDelete | public static deleteSubjectSubjectsSubjectIdDelete(data: $OpenApiTs['/subjects/{subject_id}']['delete']['req']): CancelablePromise<$OpenApiTs['/subjects/{subject_id}']['delete']['res'][200]> {
const { subjectId } = data;
return __request(OpenAPI, {
method: 'DELETE',
url: '/subjects/{subject_id}',
path: {
subject_id: subjectId
},
errors: {
422: 'Validation Error'
}
});
} | /**
* Delete Subject
* @returns unknown Successful Response
* @throws ApiError
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/usage-client/src/services.gen.ts#L27-L39 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | UsageService.getUsageSummarySubjectsSubjectIdGet | public static getUsageSummarySubjectsSubjectIdGet(data: $OpenApiTs['/subjects/{subject_id}']['get']['req']): CancelablePromise<$OpenApiTs['/subjects/{subject_id}']['get']['res'][200]> {
const { subjectId } = data;
return __request(OpenAPI, {
method: 'GET',
url: '/subjects/{subject_id}',
path: {
subject_id: subjectId
},
errors: {
422: 'Validation Error'
}
});
} | /**
* Get Usage Summary
* @returns UsageSummary Successful Response
* @throws ApiError
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/usage-client/src/services.gen.ts#L46-L58 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
eidolon | github_2023 | eidolon-ai | typescript | UsageService.recordUsageTransactionSubjectsSubjectIdTransactionsPost | public static recordUsageTransactionSubjectsSubjectIdTransactionsPost(data: $OpenApiTs['/subjects/{subject_id}/transactions']['post']['req']): CancelablePromise<$OpenApiTs['/subjects/{subject_id}/transactions']['post']['res'][200]> {
const { subjectId, requestBody } = data;
return __request(OpenAPI, {
method: 'POST',
url: '/subjects/{subject_id}/transactions',
path: {
subject_id: subjectId
},
body: requestBody,
mediaType: 'application/json',
errors: {
422: 'Validation Error'
}
});
} | /**
* Record Usage Transaction
* @returns UsageSummary Successful Response
* @throws ApiError
*/ | https://github.com/eidolon-ai/eidolon/blob/9c32262ddddeb5e21cd3b48ec2004109fc0fac94/webui/packages/usage-client/src/services.gen.ts#L65-L79 | 9c32262ddddeb5e21cd3b48ec2004109fc0fac94 |
GPTPortal | github_2023 | Zaki-1052 | typescript | APIPromise.asResponse | asResponse(): Promise<Response> {
return this.responsePromise.then((p) => p.response);
} | /**
* Gets the raw `Response` instance instead of parsing the response
* data.
*
* If you want to parse the response body but still get the `Response`
* instance, you can use {@link withResponse()}.
*
* 👋 Getting the wrong TypeScript type for `Response`?
* Try setting `"moduleResolution": "NodeNext"` if you can,
* or add one of these imports before your first `import … from 'openai'`:
* - `import 'openai/shims/node'` (if you're running on Node)
* - `import 'openai/shims/web'` (otherwise)
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/core.ts#L118-L120 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | APIPromise.withResponse | async withResponse(): Promise<{ data: T; response: Response }> {
const [data, response] = await Promise.all([this.parse(), this.asResponse()]);
return { data, response };
} | /**
* Gets the parsed response data and the raw `Response` instance.
*
* If you just want to get the raw `Response` instance without parsing it,
* you can use {@link asResponse()}.
*
*
* 👋 Getting the wrong TypeScript type for `Response`?
* Try setting `"moduleResolution": "NodeNext"` if you can,
* or add one of these imports before your first `import … from 'openai'`:
* - `import 'openai/shims/node'` (if you're running on Node)
* - `import 'openai/shims/web'` (otherwise)
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/core.ts#L134-L137 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | getBrowserInfo | function getBrowserInfo(): BrowserInfo | null {
if (typeof navigator === 'undefined' || !navigator) {
return null;
}
// NOTE: The order matters here!
const browserPatterns = [
{ key: 'edge' as const, pattern: /Edge(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ },
{ key: 'ie' as const, pattern: /MSIE(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ },
{ key: 'ie' as const, pattern: /Trident(?:.*rv\:(\d+)\.(\d+)(?:\.(\d+))?)?/ },
{ key: 'chrome' as const, pattern: /Chrome(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ },
{ key: 'firefox' as const, pattern: /Firefox(?:\W+(\d+)\.(\d+)(?:\.(\d+))?)?/ },
{ key: 'safari' as const, pattern: /(?:Version\W+(\d+)\.(\d+)(?:\.(\d+))?)?(?:\W+Mobile\S*)?\W+Safari/ },
];
// Find the FIRST matching browser
for (const { key, pattern } of browserPatterns) {
const match = pattern.exec(navigator.userAgent);
if (match) {
const major = match[1] || 0;
const minor = match[2] || 0;
const patch = match[3] || 0;
return { browser: key, version: `${major}.${minor}.${patch}` };
}
}
return null;
} | // Note: modified from https://github.com/JS-DevTools/host-environment/blob/b1ab79ecde37db5d6e163c050e54fe7d287d7c92/src/isomorphic.browser.ts | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/core.ts#L897-L925 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | applyHeadersMut | function applyHeadersMut(targetHeaders: Headers, newHeaders: Headers): void {
for (const k in newHeaders) {
if (!hasOwn(newHeaders, k)) continue;
const lowerKey = k.toLowerCase();
if (!lowerKey) continue;
const val = newHeaders[k];
if (val === null) {
delete targetHeaders[lowerKey];
} else if (val !== undefined) {
targetHeaders[lowerKey] = val;
}
}
} | /**
* Copies headers from "newHeaders" onto "targetHeaders",
* using lower-case for all properties,
* ignoring any keys with undefined values,
* and deleting any keys with null values.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/core.ts#L1081-L1095 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | uuid4 | const uuid4 = () => {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
const r = (Math.random() * 16) | 0;
const v = c === 'x' ? r : (r & 0x3) | 0x8;
return v.toString(16);
});
}; | /**
* https://stackoverflow.com/a/2117523
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/core.ts#L1106-L1112 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | OpenAI.constructor | constructor({
baseURL = Core.readEnv('OPENAI_BASE_URL'),
apiKey = Core.readEnv('OPENAI_API_KEY'),
organization = Core.readEnv('OPENAI_ORG_ID') ?? null,
project = Core.readEnv('OPENAI_PROJECT_ID') ?? null,
...opts
}: ClientOptions = {}) {
if (apiKey === undefined) {
throw new Errors.OpenAIError(
"The OPENAI_API_KEY environment variable is missing or empty; either provide it, or instantiate the OpenAI client with an apiKey option, like new OpenAI({ apiKey: 'My API Key' }).",
);
}
const options: ClientOptions = {
apiKey,
organization,
project,
...opts,
baseURL: baseURL || `https://api.openai.com/v1`,
};
if (!options.dangerouslyAllowBrowser && Core.isRunningInBrowser()) {
throw new Errors.OpenAIError(
"It looks like you're running in a browser-like environment.\n\nThis is disabled by default, as it risks exposing your secret API credentials to attackers.\nIf you understand the risks and have appropriate mitigations in place,\nyou can set the `dangerouslyAllowBrowser` option to `true`, e.g.,\n\nnew OpenAI({ apiKey, dangerouslyAllowBrowser: true });\n\nhttps://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety\n",
);
}
super({
baseURL: options.baseURL!,
timeout: options.timeout ?? 600000 /* 10 minutes */,
httpAgent: options.httpAgent,
maxRetries: options.maxRetries,
fetch: options.fetch,
});
this._options = options;
this.apiKey = apiKey;
this.organization = organization;
this.project = project;
} | /**
* API Client for interfacing with the OpenAI API.
*
* @param {string | undefined} [opts.apiKey=process.env['OPENAI_API_KEY'] ?? undefined]
* @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null]
* @param {string | null | undefined} [opts.project=process.env['OPENAI_PROJECT_ID'] ?? null]
* @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL'] ?? https://api.openai.com/v1] - Override the default base URL for the API.
* @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
* @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
* @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
* @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
* @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
* @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
* @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/index.ts#L114-L154 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | AzureOpenAI.constructor | constructor({
baseURL = Core.readEnv('OPENAI_BASE_URL'),
apiKey = Core.readEnv('AZURE_OPENAI_API_KEY'),
apiVersion = Core.readEnv('OPENAI_API_VERSION'),
endpoint,
deployment,
azureADTokenProvider,
dangerouslyAllowBrowser,
...opts
}: AzureClientOptions = {}) {
if (!apiVersion) {
throw new Errors.OpenAIError(
"The OPENAI_API_VERSION environment variable is missing or empty; either provide it, or instantiate the AzureOpenAI client with an apiVersion option, like new AzureOpenAI({ apiVersion: 'My API Version' }).",
);
}
if (typeof azureADTokenProvider === 'function') {
dangerouslyAllowBrowser = true;
}
if (!azureADTokenProvider && !apiKey) {
throw new Errors.OpenAIError(
'Missing credentials. Please pass one of `apiKey` and `azureADTokenProvider`, or set the `AZURE_OPENAI_API_KEY` environment variable.',
);
}
if (azureADTokenProvider && apiKey) {
throw new Errors.OpenAIError(
'The `apiKey` and `azureADTokenProvider` arguments are mutually exclusive; only one can be passed at a time.',
);
}
// define a sentinel value to avoid any typing issues
apiKey ??= API_KEY_SENTINEL;
opts.defaultQuery = { ...opts.defaultQuery, 'api-version': apiVersion };
if (!baseURL) {
if (!endpoint) {
endpoint = process.env['AZURE_OPENAI_ENDPOINT'];
}
if (!endpoint) {
throw new Errors.OpenAIError(
'Must provide one of the `baseURL` or `endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable',
);
}
baseURL = `${endpoint}/openai`;
} else {
if (endpoint) {
throw new Errors.OpenAIError('baseURL and endpoint are mutually exclusive');
}
}
super({
apiKey,
baseURL,
...opts,
...(dangerouslyAllowBrowser !== undefined ? { dangerouslyAllowBrowser } : {}),
});
this._azureADTokenProvider = azureADTokenProvider;
this.apiVersion = apiVersion;
this._deployment = deployment;
} | /**
* API Client for interfacing with the Azure OpenAI API.
*
* @param {string | undefined} [opts.apiVersion=process.env['OPENAI_API_VERSION'] ?? undefined]
* @param {string | undefined} [opts.endpoint=process.env['AZURE_OPENAI_ENDPOINT'] ?? undefined] - Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/`
* @param {string | undefined} [opts.apiKey=process.env['AZURE_OPENAI_API_KEY'] ?? undefined]
* @param {string | undefined} opts.deployment - A model deployment, if given, sets the base client URL to include `/deployments/{deployment}`.
* @param {string | null | undefined} [opts.organization=process.env['OPENAI_ORG_ID'] ?? null]
* @param {string} [opts.baseURL=process.env['OPENAI_BASE_URL']] - Sets the base URL for the API.
* @param {number} [opts.timeout=10 minutes] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
* @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
* @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
* @param {number} [opts.maxRetries=2] - The maximum number of times the client will retry a request.
* @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
* @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
* @param {boolean} [opts.dangerouslyAllowBrowser=false] - By default, client-side use of this library is not allowed, as it risks exposing your secret API credentials to attackers.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/index.ts#L381-L446 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Page.nextPageParams | nextPageParams(): null {
return null;
} | /**
* This page represents a response that isn't actually paginated at the API level
* so there will never be any next page params.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/pagination.ts#L35-L37 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | CursorPage.nextPageParams | nextPageParams(): Partial<CursorPageParams> | null {
const info = this.nextPageInfo();
if (!info) return null;
if ('params' in info) return info.params;
const params = Object.fromEntries(info.url.searchParams);
if (!Object.keys(params).length) return null;
return params;
} | // @deprecated Please use `nextPageInfo()` instead | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/pagination.ts#L76-L83 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Stream.fromReadableStream | static fromReadableStream<Item>(readableStream: ReadableStream, controller: AbortController) {
let consumed = false;
async function* iterLines(): AsyncGenerator<string, void, unknown> {
const lineDecoder = new LineDecoder();
const iter = readableStreamAsyncIterable<Bytes>(readableStream);
for await (const chunk of iter) {
for (const line of lineDecoder.decode(chunk)) {
yield line;
}
}
for (const line of lineDecoder.flush()) {
yield line;
}
}
async function* iterator(): AsyncIterator<Item, any, undefined> {
if (consumed) {
throw new Error('Cannot iterate over a consumed stream, use `.tee()` to split the stream.');
}
consumed = true;
let done = false;
try {
for await (const line of iterLines()) {
if (done) continue;
if (line) yield JSON.parse(line);
}
done = true;
} catch (e) {
// If the user calls `stream.controller.abort()`, we should exit without throwing.
if (e instanceof Error && e.name === 'AbortError') return;
throw e;
} finally {
// If the user `break`s, abort the ongoing request.
if (!done) controller.abort();
}
}
return new Stream(iterator, controller);
} | /**
* Generates a Stream from a newline-separated ReadableStream
* where each item is a JSON value.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/streaming.ts#L92-L133 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Stream.tee | tee(): [Stream<Item>, Stream<Item>] {
const left: Array<Promise<IteratorResult<Item>>> = [];
const right: Array<Promise<IteratorResult<Item>>> = [];
const iterator = this.iterator();
const teeIterator = (queue: Array<Promise<IteratorResult<Item>>>): AsyncIterator<Item> => {
return {
next: () => {
if (queue.length === 0) {
const result = iterator.next();
left.push(result);
right.push(result);
}
return queue.shift()!;
},
};
};
return [
new Stream(() => teeIterator(left), this.controller),
new Stream(() => teeIterator(right), this.controller),
];
} | /**
* Splits the stream into two streams which can be
* independently read from at different speeds.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/streaming.ts#L143-L165 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Stream.toReadableStream | toReadableStream(): ReadableStream {
const self = this;
let iter: AsyncIterator<Item>;
const encoder = new TextEncoder();
return new ReadableStream({
async start() {
iter = self[Symbol.asyncIterator]();
},
async pull(ctrl: any) {
try {
const { value, done } = await iter.next();
if (done) return ctrl.close();
const bytes = encoder.encode(JSON.stringify(value) + '\n');
ctrl.enqueue(bytes);
} catch (err) {
ctrl.error(err);
}
},
async cancel() {
await iter.return?.();
},
});
} | /**
* Converts this stream to a newline-separated ReadableStream of
* JSON stringified values in the stream
* which can be turned back into a Stream with `Stream.fromReadableStream()`.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/streaming.ts#L172-L197 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | LineDecoder.constructor | constructor() {
this.buffer = [];
this.trailingCR = false;
} | // TextDecoder found in browsers; not typed to avoid pulling in either "dom" or "node" types. | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/streaming.ts#L361-L364 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | AbstractChatCompletionRunner.finalChatCompletion | async finalChatCompletion(): Promise<ChatCompletion> {
await this.done();
const completion = this._chatCompletions[this._chatCompletions.length - 1];
if (!completion) throw new OpenAIError('stream ended without producing a ChatCompletion');
return completion;
} | /**
* @returns a promise that resolves with the final ChatCompletion, or rejects
* if an error occurred or the stream ended prematurely without producing a ChatCompletion.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts#L78-L83 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | AbstractChatCompletionRunner.finalContent | async finalContent(): Promise<string | null> {
await this.done();
return this.#getFinalContent();
} | /**
* @returns a promise that resolves with the content of the final ChatCompletionMessage, or rejects
* if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts#L93-L96 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | AbstractChatCompletionRunner.finalMessage | async finalMessage(): Promise<ChatCompletionMessage> {
await this.done();
return this.#getFinalMessage();
} | /**
* @returns a promise that resolves with the the final assistant ChatCompletionMessage response,
* or rejects if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts#L118-L121 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | AbstractChatCompletionRunner.finalFunctionCall | async finalFunctionCall(): Promise<ChatCompletionMessage.FunctionCall | undefined> {
await this.done();
return this.#getFinalFunctionCall();
} | /**
* @returns a promise that resolves with the content of the final FunctionCall, or rejects
* if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts#L141-L144 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | mockFetch | function mockFetch(): { fetch: Fetch; handleRequest: (handle: Fetch) => Promise<void> } {
const fetchQueue: ((handler: typeof fetch) => void)[] = [];
const handlerQueue: Promise<typeof fetch>[] = [];
const enqueueHandler = () => {
handlerQueue.push(
new Promise<typeof fetch>((resolve) => {
fetchQueue.push((handle: typeof fetch) => {
enqueueHandler();
resolve(handle);
});
}),
);
};
enqueueHandler();
async function fetch(req: string | RequestInfo, init?: RequestInit): Promise<Response> {
const handler = await handlerQueue.shift();
if (!handler) throw new Error('expected handler to be defined');
const signal = init?.signal;
if (!signal) return await handler(req, init);
return await Promise.race([
handler(req, init),
new Promise<Response>((resolve, reject) => {
if (signal.aborted) {
// @ts-ignore does exist in Node
reject(new DOMException('The user aborted a request.', 'AbortError'));
return;
}
signal.addEventListener('abort', (e) => {
// @ts-ignore does exist in Node
reject(new DOMException('The user aborted a request.', 'AbortError'));
});
}),
]);
}
function handleRequest(handle: typeof fetch): Promise<void> {
return new Promise<void>((resolve, reject) => {
fetchQueue.shift()?.(async (req, init) => {
try {
return await handle(req, init);
} catch (err) {
reject(err);
return err as any;
} finally {
resolve();
}
});
});
}
return { fetch, handleRequest };
} | /**
* Creates a mock `fetch` function and a `handleRequest` function for intercepting `fetch` calls.
*
* You call `handleRequest` with a callback function that handles the next `fetch` call.
* It returns a Promise that:
* - waits for the next call to `fetch`
* - calls the callback with the `fetch` arguments
* - resolves `fetch` with the callback output
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/ChatCompletionRunFunctions.test.ts#L28-L81 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | mockChatCompletionFetch | function mockChatCompletionFetch() {
const { fetch, handleRequest: handleRawRequest } = mockFetch();
function handleRequest(
handler: (body: ChatCompletionFunctionRunnerParams<any[]>) => Promise<OpenAI.Chat.ChatCompletion>,
): Promise<void> {
return handleRawRequest(async (req, init) => {
const rawBody = init?.body;
if (typeof rawBody !== 'string') throw new Error(`expected init.body to be a string`);
const body: ChatCompletionFunctionRunnerParams<any[]> = JSON.parse(rawBody);
return new Response(JSON.stringify(await handler(body)), {
headers: { 'Content-Type': 'application/json' },
});
});
}
return { fetch, handleRequest };
} | // mockChatCompletionFetch is like mockFetch, but with better a more convenient handleRequest to mock | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/ChatCompletionRunFunctions.test.ts#L85-L101 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | mockStreamingChatCompletionFetch | function mockStreamingChatCompletionFetch() {
const { fetch, handleRequest: handleRawRequest } = mockFetch();
function handleRequest(
handler: (
body: ChatCompletionStreamingFunctionRunnerParams<any[]>,
) => AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
): Promise<void> {
return handleRawRequest(async (req, init) => {
const rawBody = init?.body;
if (typeof rawBody !== 'string') throw new Error(`expected init.body to be a string`);
const body: ChatCompletionStreamingFunctionRunnerParams<any[]> = JSON.parse(rawBody);
const stream = new PassThrough();
(async () => {
for await (const chunk of handler(body)) {
stream.write(`data: ${JSON.stringify(chunk)}\n\n`);
}
stream.end(`data: [DONE]\n\n`);
})();
return new Response(stream, {
headers: {
'Content-Type': 'text/event-stream',
'Transfer-Encoding': 'chunked',
},
});
});
}
return { fetch, handleRequest };
} | // mockStreamingChatCompletionFetch is like mockFetch, but with better a more convenient handleRequest to mock | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/ChatCompletionRunFunctions.test.ts#L105-L133 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | ChatCompletionRunner.runFunctions | static runFunctions(
completions: Completions,
params: ChatCompletionFunctionRunnerParams<any[]>,
options?: RunnerOptions,
): ChatCompletionRunner {
const runner = new ChatCompletionRunner();
const opts = {
...options,
headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' },
};
runner._run(() => runner._runFunctions(completions, params, opts));
return runner;
} | /** @deprecated - please use `runTools` instead. */ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/ChatCompletionRunner.ts#L34-L46 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | ChatCompletionStream.fromReadableStream | static fromReadableStream(stream: ReadableStream): ChatCompletionStream {
const runner = new ChatCompletionStream();
runner._run(() => runner._fromReadableStream(stream));
return runner;
} | /**
* Intended for use on the frontend, consuming a stream produced with
* `.toReadableStream()` on the backend.
*
* Note that messages sent to the model do not appear in `.on('message')`
* in this context.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/ChatCompletionStream.ts#L43-L47 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | ChatCompletionStreamingRunner.runFunctions | static runFunctions<T extends (string | object)[]>(
completions: Completions,
params: ChatCompletionStreamingFunctionRunnerParams<T>,
options?: RunnerOptions,
): ChatCompletionStreamingRunner {
const runner = new ChatCompletionStreamingRunner();
const opts = {
...options,
headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'runFunctions' },
};
runner._run(() => runner._runFunctions(completions, params, opts));
return runner;
} | /** @deprecated - please use `runTools` instead. */ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/ChatCompletionStreamingRunner.ts#L41-L53 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | EventStream.on | on<Event extends keyof EventTypes>(event: Event, listener: EventListener<EventTypes, Event>): this {
const listeners: EventListeners<EventTypes, Event> =
this.#listeners[event] || (this.#listeners[event] = []);
listeners.push({ listener });
return this;
} | /**
* Adds the listener function to the end of the listeners array for the event.
* No checks are made to see if the listener has already been added. Multiple calls passing
* the same combination of event and listener will result in the listener being added, and
* called, multiple times.
* @returns this ChatCompletionStream, so that calls can be chained
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/EventStream.ts#L82-L87 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | EventStream.off | off<Event extends keyof EventTypes>(event: Event, listener: EventListener<EventTypes, Event>): this {
const listeners = this.#listeners[event];
if (!listeners) return this;
const index = listeners.findIndex((l) => l.listener === listener);
if (index >= 0) listeners.splice(index, 1);
return this;
} | /**
* Removes the specified listener from the listener array for the event.
* off() will remove, at most, one instance of a listener from the listener array. If any single
* listener has been added multiple times to the listener array for the specified event, then
* off() must be called multiple times to remove each instance.
* @returns this ChatCompletionStream, so that calls can be chained
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/EventStream.ts#L96-L102 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | EventStream.once | once<Event extends keyof EventTypes>(event: Event, listener: EventListener<EventTypes, Event>): this {
const listeners: EventListeners<EventTypes, Event> =
this.#listeners[event] || (this.#listeners[event] = []);
listeners.push({ listener, once: true });
return this;
} | /**
* Adds a one-time listener function for the event. The next time the event is triggered,
* this listener is removed and then invoked.
* @returns this ChatCompletionStream, so that calls can be chained
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/EventStream.ts#L109-L114 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | EventStream.emitted | emitted<Event extends keyof EventTypes>(
event: Event,
): Promise<
EventParameters<EventTypes, Event> extends [infer Param] ? Param
: EventParameters<EventTypes, Event> extends [] ? void
: EventParameters<EventTypes, Event>
> {
return new Promise((resolve, reject) => {
this.#catchingPromiseCreated = true;
if (event !== 'error') this.once('error', reject);
this.once(event, resolve as any);
});
} | /**
* This is similar to `.once()`, but returns a Promise that resolves the next time
* the event is triggered, instead of calling a listener callback.
* @returns a Promise that resolves the next time given event is triggered,
* or rejects if an error is emitted. (If you request the 'error' event,
* returns a promise that resolves with the error).
*
* Example:
*
* const message = await stream.emitted('message') // rejects if the stream errors
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/lib/EventStream.ts#L127-L139 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Batches.create | create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise<Batch> {
return this._client.post('/batches', { body, ...options });
} | /**
* Creates and executes a batch from an uploaded file of requests
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/batches.ts#L13-L15 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Batches.retrieve | retrieve(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
return this._client.get(`/batches/${batchId}`, options);
} | /**
* Retrieves a batch.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/batches.ts#L20-L22 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Batches.cancel | cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
return this._client.post(`/batches/${batchId}/cancel`, options);
} | /**
* Cancels an in-progress batch. The batch will be in status `cancelling` for up to
* 10 minutes, before changing to `cancelled`, where it will have partial results
* (if any) available in the output file.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/batches.ts#L44-L46 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Embeddings.create | create(
body: EmbeddingCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<CreateEmbeddingResponse> {
return this._client.post('/embeddings', { body, ...options });
} | /**
* Creates an embedding vector representing the input text.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/embeddings.ts#L11-L16 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Files.create | create(body: FileCreateParams, options?: Core.RequestOptions): Core.APIPromise<FileObject> {
return this._client.post('/files', Core.multipartFormRequestOptions({ body, ...options }));
} | /**
* Upload a file that can be used across various endpoints. Individual files can be
* up to 512 MB, and the size of all files uploaded by one organization can be up
* to 100 GB.
*
* The Assistants API supports files up to 2 million tokens and of specific file
* types. See the
* [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
* details.
*
* The Fine-tuning API only supports `.jsonl` files. The input also has certain
* required formats for fine-tuning
* [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
* [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
* models.
*
* The Batch API only supports `.jsonl` files up to 100 MB in size. The input also
* has a specific required
* [format](https://platform.openai.com/docs/api-reference/batch/request-input).
*
* Please [contact us](https://help.openai.com/) if you need to increase these
* storage limits.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/files.ts#L36-L38 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Files.retrieve | retrieve(fileId: string, options?: Core.RequestOptions): Core.APIPromise<FileObject> {
return this._client.get(`/files/${fileId}`, options);
} | /**
* Returns information about a specific file.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/files.ts#L43-L45 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Files.del | del(fileId: string, options?: Core.RequestOptions): Core.APIPromise<FileDeleted> {
return this._client.delete(`/files/${fileId}`, options);
} | /**
* Delete a file.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/files.ts#L65-L67 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Files.content | content(fileId: string, options?: Core.RequestOptions): Core.APIPromise<Response> {
return this._client.get(`/files/${fileId}/content`, { ...options, __binaryResponse: true });
} | /**
* Returns the contents of the specified file.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/files.ts#L72-L74 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Files.retrieveContent | retrieveContent(fileId: string, options?: Core.RequestOptions): Core.APIPromise<string> {
return this._client.get(`/files/${fileId}/content`, {
...options,
headers: { Accept: 'application/json', ...options?.headers },
});
} | /**
* Returns the contents of the specified file.
*
* @deprecated The `.content()` method should be used instead
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/files.ts#L81-L86 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Files.waitForProcessing | async waitForProcessing(
id: string,
{ pollInterval = 5000, maxWait = 30 * 60 * 1000 }: { pollInterval?: number; maxWait?: number } = {},
): Promise<FileObject> {
const TERMINAL_STATES = new Set(['processed', 'error', 'deleted']);
const start = Date.now();
let file = await this.retrieve(id);
while (!file.status || !TERMINAL_STATES.has(file.status)) {
await sleep(pollInterval);
file = await this.retrieve(id);
if (Date.now() - start > maxWait) {
throw new APIConnectionTimeoutError({
message: `Giving up on waiting for file ${id} to finish processing after ${maxWait} milliseconds.`,
});
}
}
return file;
} | /**
* Waits for the given file to be processed, default timeout is 30 mins.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/files.ts#L91-L112 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Images.createVariation | createVariation(
body: ImageCreateVariationParams,
options?: Core.RequestOptions,
): Core.APIPromise<ImagesResponse> {
return this._client.post('/images/variations', Core.multipartFormRequestOptions({ body, ...options }));
} | /**
* Creates a variation of a given image.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/images.ts#L11-L16 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Images.edit | edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse> {
return this._client.post('/images/edits', Core.multipartFormRequestOptions({ body, ...options }));
} | /**
* Creates an edited or extended image given an original image and a prompt.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/images.ts#L21-L23 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Images.generate | generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse> {
return this._client.post('/images/generations', { body, ...options });
} | /**
* Creates an image given a prompt.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/images.ts#L28-L30 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Models.retrieve | retrieve(model: string, options?: Core.RequestOptions): Core.APIPromise<Model> {
return this._client.get(`/models/${model}`, options);
} | /**
* Retrieves a model instance, providing basic information about the model such as
* the owner and permissioning.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/models.ts#L13-L15 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Models.list | list(options?: Core.RequestOptions): Core.PagePromise<ModelsPage, Model> {
return this._client.getAPIList('/models', ModelsPage, options);
} | /**
* Lists the currently available models, and provides basic information about each
* one such as the owner and availability.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/models.ts#L21-L23 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Models.del | del(model: string, options?: Core.RequestOptions): Core.APIPromise<ModelDeleted> {
return this._client.delete(`/models/${model}`, options);
} | /**
* Delete a fine-tuned model. You must have the Owner role in your organization to
* delete a model.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/models.ts#L29-L31 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Moderations.create | create(
body: ModerationCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<ModerationCreateResponse> {
return this._client.post('/moderations', { body, ...options });
} | /**
* Classifies if text is potentially harmful.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/moderations.ts#L11-L16 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Speech.create | create(body: SpeechCreateParams, options?: Core.RequestOptions): Core.APIPromise<Response> {
return this._client.post('/audio/speech', { body, ...options, __binaryResponse: true });
} | /**
* Generates audio from the input text.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/audio/speech.ts#L12-L14 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Transcriptions.create | create(body: TranscriptionCreateParams, options?: Core.RequestOptions): Core.APIPromise<Transcription> {
return this._client.post('/audio/transcriptions', Core.multipartFormRequestOptions({ body, ...options }));
} | /**
* Transcribes audio into the input language.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/audio/transcriptions.ts#L12-L14 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Translations.create | create(body: TranslationCreateParams, options?: Core.RequestOptions): Core.APIPromise<Translation> {
return this._client.post('/audio/translations', Core.multipartFormRequestOptions({ body, ...options }));
} | /**
* Translates audio into English.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/audio/translations.ts#L12-L14 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Assistants.create | create(body: AssistantCreateParams, options?: Core.RequestOptions): Core.APIPromise<Assistant> {
return this._client.post('/assistants', {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Create an assistant with a model and instructions.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/assistants.ts#L19-L25 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Assistants.retrieve | retrieve(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<Assistant> {
return this._client.get(`/assistants/${assistantId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Retrieves an assistant.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/assistants.ts#L30-L35 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Assistants.update | update(
assistantId: string,
body: AssistantUpdateParams,
options?: Core.RequestOptions,
): Core.APIPromise<Assistant> {
return this._client.post(`/assistants/${assistantId}`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Modifies an assistant.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/assistants.ts#L40-L50 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Assistants.del | del(assistantId: string, options?: Core.RequestOptions): Core.APIPromise<AssistantDeleted> {
return this._client.delete(`/assistants/${assistantId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Delete an assistant.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/assistants.ts#L77-L82 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Completions.stream | stream(body: ChatCompletionStreamParams, options?: Core.RequestOptions): ChatCompletionStream {
return ChatCompletionStream.createChatCompletion(this._client.chat.completions, body, options);
} | /**
* Creates a chat completion stream
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/chat/completions.ts#L103-L105 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Messages.create | create(
threadId: string,
body: MessageCreateParams,
options?: Core.RequestOptions,
): Core.APIPromise<Message> {
return this._client.post(`/threads/${threadId}/messages`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Create a message.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/messages.ts#L14-L24 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Messages.retrieve | retrieve(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise<Message> {
return this._client.get(`/threads/${threadId}/messages/${messageId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Retrieve a message.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/messages.ts#L29-L34 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Messages.update | update(
threadId: string,
messageId: string,
body: MessageUpdateParams,
options?: Core.RequestOptions,
): Core.APIPromise<Message> {
return this._client.post(`/threads/${threadId}/messages/${messageId}`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Modifies a message.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/messages.ts#L39-L50 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Messages.del | del(threadId: string, messageId: string, options?: Core.RequestOptions): Core.APIPromise<MessageDeleted> {
return this._client.delete(`/threads/${threadId}/messages/${messageId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Deletes a message.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/messages.ts#L79-L84 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Threads.retrieve | retrieve(threadId: string, options?: Core.RequestOptions): Core.APIPromise<Thread> {
return this._client.get(`/threads/${threadId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Retrieves a thread.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/threads.ts#L41-L46 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Threads.update | update(threadId: string, body: ThreadUpdateParams, options?: Core.RequestOptions): Core.APIPromise<Thread> {
return this._client.post(`/threads/${threadId}`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Modifies a thread.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/threads.ts#L51-L57 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Threads.del | del(threadId: string, options?: Core.RequestOptions): Core.APIPromise<ThreadDeleted> {
return this._client.delete(`/threads/${threadId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Delete a thread.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/threads.ts#L62-L67 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Threads.createAndRunPoll | async createAndRunPoll(
body: ThreadCreateAndRunParamsNonStreaming,
options?: Core.RequestOptions & { pollIntervalMs?: number },
): Promise<Threads.Run> {
const run = await this.createAndRun(body, options);
return await this.runs.poll(run.thread_id, run.id, options);
} | /**
* A helper to create a thread, start a run and then poll for a terminal state.
* More information on Run lifecycles can be found here:
* https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/threads.ts#L101-L107 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Threads.createAndRunStream | createAndRunStream(
body: ThreadCreateAndRunParamsBaseStream,
options?: Core.RequestOptions,
): AssistantStream {
return AssistantStream.createThreadAssistantStream(body, this._client.beta.threads, options);
} | /**
* Create a thread and stream the run back
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/threads.ts#L112-L117 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.retrieve | retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {
return this._client.get(`/threads/${threadId}/runs/${runId}`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Retrieves a run.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L52-L57 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.update | update(
threadId: string,
runId: string,
body: RunUpdateParams,
options?: Core.RequestOptions,
): Core.APIPromise<Run> {
return this._client.post(`/threads/${threadId}/runs/${runId}`, {
body,
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Modifies a run.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L62-L73 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.cancel | cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {
return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, {
...options,
headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
});
} | /**
* Cancels a run that is `in_progress`.
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L102-L107 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.createAndPoll | async createAndPoll(
threadId: string,
body: RunCreateParamsNonStreaming,
options?: Core.RequestOptions & { pollIntervalMs?: number },
): Promise<Run> {
const run = await this.create(threadId, body, options);
return await this.poll(threadId, run.id, options);
} | /**
 * A helper to create a run and poll for a terminal state. More information on Run
* lifecycles can be found here:
* https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L114-L121 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.createAndStream | createAndStream(
threadId: string,
body: RunCreateParamsBaseStream,
options?: Core.RequestOptions,
): AssistantStream {
return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
} | /**
* Create a Run stream
*
* @deprecated use `stream` instead
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L128-L134 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.poll | async poll(
threadId: string,
runId: string,
options?: Core.RequestOptions & { pollIntervalMs?: number },
): Promise<Run> {
const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };
if (options?.pollIntervalMs) {
headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();
}
while (true) {
const { data: run, response } = await this.retrieve(threadId, runId, {
...options,
headers: { ...options?.headers, ...headers },
}).withResponse();
switch (run.status) {
//If we are in any sort of intermediate state we poll
case 'queued':
case 'in_progress':
case 'cancelling':
let sleepInterval = 5000;
if (options?.pollIntervalMs) {
sleepInterval = options.pollIntervalMs;
} else {
const headerInterval = response.headers.get('openai-poll-after-ms');
if (headerInterval) {
const headerIntervalMs = parseInt(headerInterval);
if (!isNaN(headerIntervalMs)) {
sleepInterval = headerIntervalMs;
}
}
}
await sleep(sleepInterval);
break;
//We return the run in any terminal state.
case 'requires_action':
case 'incomplete':
case 'cancelled':
case 'completed':
case 'failed':
case 'expired':
return run;
}
}
} | /**
* A helper to poll a run status until it reaches a terminal state. More
* information on Run lifecycles can be found here:
* https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L141-L188 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.stream | stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream {
return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
} | /**
* Create a Run stream
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L193-L195 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.submitToolOutputsAndPoll | async submitToolOutputsAndPoll(
threadId: string,
runId: string,
body: RunSubmitToolOutputsParamsNonStreaming,
options?: Core.RequestOptions & { pollIntervalMs?: number },
): Promise<Run> {
const run = await this.submitToolOutputs(threadId, runId, body, options);
return await this.poll(threadId, run.id, options);
} | /**
* A helper to submit a tool output to a run and poll for a terminal run state.
* More information on Run lifecycles can be found here:
* https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L240-L248 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
GPTPortal | github_2023 | Zaki-1052 | typescript | Runs.submitToolOutputsStream | submitToolOutputsStream(
threadId: string,
runId: string,
body: RunSubmitToolOutputsParamsStream,
options?: Core.RequestOptions,
): AssistantStream {
return AssistantStream.createToolAssistantStream(
threadId,
runId,
this._client.beta.threads.runs,
body,
options,
);
} | /**
* Submit the tool outputs from a previous run and stream the run to a terminal
* state. More information on Run lifecycles can be found here:
* https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
*/ | https://github.com/Zaki-1052/GPTPortal/blob/16ffa0c14672c637cf27249d8f0bf146e56c29b7/node_modules/openai/src/resources/beta/threads/runs/runs.ts#L255-L268 | 16ffa0c14672c637cf27249d8f0bf146e56c29b7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.