foundry_llm_extraction
v2.4.7
Published
Readme
Usage
Works with images (PNG and JPEG), which are passed directly to the model as input, and with documents, whose text is extracted programmatically or via OCR.
import { Function, MediaItem, OntologyEditFunction, Query, Uses, Edits } from "@foundry/functions-api";
import { Objects, ScmePriceList } from "@foundry/ontology-api";
// extract function, assignToObject function, default system prompts for both modes
import { extract, assignToObject, SYSTEM_PROMPT, MD_SYSTEM_PROMPT } from "foundry_llm_extraction";
import { Gpt5mini } from "@foundry/languagemodelservice/models";
import { Uuid } from "@foundry/functions-utils";

export class MyFunctions {
    @Edits(ScmePriceList)
    @OntologyEditFunction()
    public async llmExtract(input: MediaItem): Promise<void> {
        const res = await extract({
            createChatVisionCompletion: Gpt5mini.createChatVisionCompletion,
            file: input,
            config: {
                jsonSchema: {
                    "type": "object",
                    "title": "ScmePriceList",
                    "properties": {
                        "applicableGlt": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Applicable Glt",
                            "description": "true/false only; specifies if this price point is applicable to GLT"
                        },
                        "applicableKlt": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Applicable Klt",
                            "description": "true/false only; specifies if this price point is applicable to KLT"
                        },
                        "contractId": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Contract Id"
                        },
                        "descriptionDe": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Description De"
                        },
                        "descriptionEn": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Description En"
                        },
                        "isOptional": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Is Optional",
                            "description": "true/false only"
                        },
                        "masterProcess": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Master Process"
                        },
                        "nightSurcharge": {
                            "anyOf": [{ "type": "number" }, { "type": "null" }],
                            "title": "Night Surcharge"
                        },
                        "priceEur": {
                            "anyOf": [{ "type": "number" }, { "type": "null" }],
                            "title": "Price Eur"
                        },
                        "specialCase": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Special Case",
                            "description": "true/false only; specifies if the price point is temporary"
                        },
                        "subprocess": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Subprocess"
                        },
                        "unit": {
                            "anyOf": [{ "type": "string" }, { "type": "null" }],
                            "title": "Unit"
                        }
                    },
                    "required": [
                        "applicableGlt",
                        "applicableKlt",
                        "contractId",
                        "descriptionDe",
                        "descriptionEn",
                        "isOptional",
                        "masterProcess",
                        "nightSurcharge",
                        "priceEur",
                        "specialCase",
                        "subprocess",
                        "unit"
                    ]
                } as const
            }
        });
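        // Create a new ScmePriceList object keyed by a random UUID and copy the extracted fields onto it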
        const obj = Objects.create().scmePriceList(Uuid.random());
        assignToObject(obj, res);
    }
}

If the LLM is misbehaving, you can edit the JSON schema (adding titles, descriptions, etc.) to give the model more context about the fields. The same applies to potential Object Type -> Schema conversion errors (see below).
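For instance, if a field keeps coming back in an ambiguous shape, a more explicit description or format hint usually helps. A minimal sketch of such a tweak (the validFrom property is hypothetical, not part of the schema above):

```ts
// Hypothetical property, shown only to illustrate how titles/descriptions/format hints steer the model
"validFrom": {
    "anyOf": [{ "type": "string", "format": "date" }, { "type": "null" }],
    "title": "Valid From",
    "description": "Start date of the price point in ISO 8601 format (YYYY-MM-DD); null if not stated in the document"
}
```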
Parameters
type Parameters = {
    createChatVisionCompletion: ExtractFunction,
    file: MediaItem,
    config: {
        jsonSchema: JsonSchema,
        ocr?: boolean,
        systemPrompt?: string,
        modelParams?: ModelParameters,
        twoStageExtraction?: boolean,
        mdCreateChatVisionCompletion?: MdExtractFunction,
        mdSystemPrompt?: string,
        mdModelParams?: ModelParameters,
    }
}

createChatVisionCompletion: ExtractFunction: The createChatVisionCompletion function used for the extraction, such as Gpt5mini.createChatVisionCompletion.
file: MediaItem: The file input to the function.
config.jsonSchema: JsonSchema: The JSON schema used to extract data from the file. Note: add as const at the end of the object you pass here so that the function's return type is inferred properly.
config.ocr?: boolean: Whether to use OCR for documents. If true, OCR is used; otherwise (and by default) programmatic text extraction is used. Has no effect if the file input is an image.
config.systemPrompt?: string: Option to customize the system prompt. It may include the %%schema%% placeholder to mark where the schema should be inserted. The prompt is intended to stay generic; it is recommended to provide additional information via description fields in the schema instead.
config.modelParams?: ModelParameters: Option to provide model parameters (e.g. temperature, reasoning effort); the available parameters are model dependent.
config.twoStageExtraction?: boolean: Two-stage extraction mode. If true, an LLM first generates a Markdown representation of the document, and the Markdown is then processed into the JSON schema representation (see the example below).
config.mdCreateChatVisionCompletion?: MdExtractFunction: The createChatVisionCompletion function used for the Markdown stage; by default, createChatVisionCompletion is used.
config.mdSystemPrompt?: string: Option to customize the Markdown-stage system prompt. You can import MD_SYSTEM_PROMPT (the default prompt) and append information pertinent to the specific document type.
config.mdModelParams?: ModelParameters: Option to provide model parameters for the Markdown stage. If no mdCreateChatVisionCompletion is specified, modelParams is used by default.
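A sketch of how the two-stage options compose with the rest of the config, reusing the model from the example above; the trimmed-down schema, the prompt strings, and the llmExtractTwoStage function are illustrative, not part of the package:

```ts
import { Function, MediaItem } from "@foundry/functions-api";
import { extract, MD_SYSTEM_PROMPT } from "foundry_llm_extraction";
import { Gpt5mini } from "@foundry/languagemodelservice/models";

export class TwoStageExample {
    @Function()
    public async llmExtractTwoStage(input: MediaItem): Promise<string> {
        const res = await extract({
            createChatVisionCompletion: Gpt5mini.createChatVisionCompletion,
            file: input,
            config: {
                // Trimmed-down schema for illustration; use the full schema in practice
                jsonSchema: {
                    "type": "object",
                    "title": "ScmePriceList",
                    "properties": {
                        "priceEur": { "anyOf": [{ "type": "number" }, { "type": "null" }], "title": "Price Eur" }
                    },
                    "required": ["priceEur"]
                } as const,
                // Generic prompt; %%schema%% marks where the schema is inserted
                systemPrompt: "Extract the data described by the following schema from the document:\n%%schema%%",
                // First generate a Markdown representation of the document, then map it onto the schema
                twoStageExtraction: true,
                // Extend the default Markdown prompt with document-specific guidance
                mdSystemPrompt: MD_SYSTEM_PROMPT + "\nThe document is a price list; preserve all tables.",
            }
        });
        return JSON.stringify(res);
    }
}
```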
Run the following snippet in the browser console while on an Object Type's page to extract a JSON Schema for that Object Type:
let body = "{\"objectTypes\":[{\"identifier\":{\"objectTypeId\":\"" +
location.pathname.split("/workspace/ontology/object-type/")[1].split("/")[0] +
"\",\"type\":\"objectTypeId\"}";
if (localStorage["@foundry/ontology/vv/13/ontologyBranchIdentifier"] && JSON.parse(localStorage["@foundry/ontology/vv/13/ontologyBranchIdentifier"])?.value) body += ",\"versionReference\":{\"type\":\"ontologyBranch\",\"ontologyBranch\":\"" +
JSON.parse(localStorage["@foundry/ontology/vv/13/ontologyBranchIdentifier"]).value.ontologyBranchRid +
"\"}"
body += "}],\"datasourceTypes\":[\"DATASET\",\"RESTRICTED_VIEW\",\"TIME_SERIES\",\"STREAM\",\"STREAM_V2\",\"STREAM_V3\",\"DATASET_V2\",\"RESTRICTED_VIEW_V2\",\"RESTRICTED_STREAM\",\"MEDIA\",\"MEDIA_SET_VIEW\",\"GEOTIME_SERIES\",\"TABLE\",\"EDITS_ONLY\",\"DIRECT\",\"DERIVED\",\"DATASET_V3\"],\"linkTypes\":[],\"sharedPropertyTypes\":[],\"interfaceTypes\":[],\"typeGroups\":[],\"actionTypes\":[],\"loadRedacted\":true,\"includeObjectTypesWithoutSearchableDatasources\":true,\"includeEntityMetadata\":true}"
let response = await fetch("/ontology-metadata/api/ontology/ontology/bulkLoadEntities", {
    "headers": {
        "accept": "application/json",
        "accept-conjure-error-parameter-format": "JSON",
        "authorization": "Bearer " + document.cookie.split(";").map(i => i.trim().split("=")).find(i => i[0] === "PALANTIR_TOKEN")[1],
        "cache-control": "no-cache",
        "content-type": "application/json",
        "pragma": "no-cache",
    },
    body,
    "method": "POST",
    "mode": "cors",
    "credentials": "include"
}).then(it => it.json());
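// Convert the Object Type definition into a JSON schema usable as config.jsonSchema:
// primary key properties are skipped, and properties without a no-nulls constraint are made nullable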
function objectTypeToSchema(obj) {
    function typeToSchema(type) {
        if (type.type === "string") {
            return { type: "string" };
        }
        if (type.type === "integer" || type.type === "long") {
            return { type: "integer" };
        }
        if (type.type === "double" || type.type === "float") {
            return { type: "number" };
        }
        if (type.type === "decimal") {
            throw new Error("Decimal type not supported in TypeScript v1");
        }
        if (type.type === "geohash") {
            return { type: "string", description: "Format: $lat,$lon" };
        }
        if (type.type === "boolean") {
            return { type: "boolean" };
        }
        if (type.type === "date") {
            return { type: "string", format: "date" };
        }
        if (type.type === "timestamp") {
            return { type: "string", format: "date-time" };
        }
        throw new Error("Unsupported type " + type.type);
    }
    const res = {
        type: "object",
        title: obj.apiName,
        properties: {},
        required: [],
    };
    for (const [rid, property] of Object.entries(obj.propertyTypes)) {
        if (obj.primaryKeys.includes(rid)) continue;
        let newProp = typeToSchema(property.type);
        if (!property.dataConstraints?.nullabilityV2?.noNulls) {
            newProp = { anyOf: [newProp, { type: "null" }] };
        }
        if (property.displayMetadata) {
            newProp.title = property.displayMetadata.displayName;
        }
        res.required.push(property.apiName);
        res.properties[property.apiName] = newProp;
    }
    return res;
}
objectTypeToSchema(response.objectTypes[0].objectType)
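Copy the resulting schema from the console output into the jsonSchema parameter of extract (remember to append as const), then refine titles and descriptions as needed.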