seeme_client

Client

Client class to interact with the SeeMe.ai backend, allowing you to manage models, datasets, predictions and training requests.

Parameters:

  • username (optional): the username of the account you want to use.
  • apikey (optional): the API key for the username you want to use.
  • backend (prefilled): the backend the client communicates with.

Note: username and apikey are optional, but they need to be used together in order to authenticate. The credentials are then used on all subsequent requests. Alternatively, you can use the login method (see below).

Methods

__init__(username: str = None, apikey: str = None, backend: str = None, env_file: str = '.env')
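For example, a minimal sketch of constructing a client; the import path and the credentials shown are assumptions/placeholders:

    from seeme import Client  # import path may differ for your install

    # Authenticate immediately by passing username and apikey together
    # (placeholder values shown).
    client = Client(username="my_username", apikey="my_api_key")

    # Or start unauthenticated and log in later (see login below).
    client = Client()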

register(username: str, email: str, password: str, firstname: str, name: str) -> User

Register a new user with a username, email and password.

Optionally, you can add a first and last name.

login(username: str, password: str) -> LoginReply

Log in with a username and password.

The username and password will be used to get the API key from the backend. The method will fail if the user is not known, the password is incorrect, or the service cannot be reached.
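A sketch of the register-then-login flow, using placeholder credentials:

    client = Client()

    # Create an account (first and last name are optional).
    user = client.register(
        username="jane",
        email="jane@example.com",
        password="a-strong-password",
        firstname="Jane",
        name="Doe",
    )

    # login retrieves the API key and authenticates subsequent requests.
    reply = client.login(username="jane", password="a-strong-password")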

logout()

Log out the current user.

get_application_id(base_framework: str, framework: str, base_framework_version: str, framework_version: str, application: ApplicationType) -> str

Note

To get a list of all the supported applications, see the “get_applications” method.

Parameters:

  • base_framework: the base framework for the application (e.g. “pytorch”, …)
  • base_framework_version: the version of the base framework (e.g. “1.9.0”, …)
  • framework: the framework for the application (e.g. “fastai”, …)
  • framework_version: the version of the framework (e.g. “2.5.2”, …)
  • application: the type of application you want to deploy (e.g. “image_classification”, “object_detection”, “text_classification”, “structured”)
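A hedged usage sketch; whether application accepts a plain string (as in the examples above) or an ApplicationType value is not confirmed here:

    application_id = client.get_application_id(
        base_framework="pytorch",
        base_framework_version="1.9.0",
        framework="fastai",
        framework_version="2.5.2",
        application="image_classification",  # the signature types this as ApplicationType
    )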

get_models() -> List[Model]

create_model(model: Model) -> Model

get_model(model_id: str) -> Model

update_model(model: Model) -> Model

delete_model(model_id: str) -> str

upload_model(model_id: str, folder: str = 'data', filename: str = 'export.pkl') -> ModelVersion
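A sketch of a typical create-and-upload flow; the Model fields used here (name, application_id) and the import path are assumptions, not confirmed by this reference:

    from seeme import Model  # import path may differ

    # Register the model, then upload the exported file
    # (defaults look for data/export.pkl).
    model = client.create_model(Model(name="cats-vs-dogs", application_id=application_id))
    version = client.upload_model(model.id, folder="data", filename="export.pkl")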

upload_logo(model_id: str, folder: str = 'data', filename: str = 'logo.jpg') -> Model

get_logo(model: Model)

set_active_model_version(model: Union[Model, str], version_id: Union[str, ModelVersion]) -> Model

download_active_model(model: Model, asset_type: AssetType = AssetType.PKL, download_folder = '.')

upload(url: str, folder: str, filename: str, content_type: str)

get_model_versions(model_id: str) -> List[ModelVersion]

get_model_version(model_id: str, version_id: str) -> ModelVersion

create_model_version(model_id: str, version: ModelVersion) -> ModelVersion

update_model_version(version: ModelVersion) -> ModelVersion

delete_model_version(model_id: str, version_id: str) -> str

upload_model_version(version: ModelVersion, folder: str = 'data', filename: str = 'export.pkl') -> ModelVersion

upload_model_version_logo(model_id: str, version_id: str, folder: str = 'data', filename: str = 'logo.jpg') -> ModelVersion

download_model_version(model_version: ModelVersion, asset_type: AssetType = AssetType.PKL, download_folder: str = '.')

share_model(model_id: str, email: str, send_invite: bool = False) -> Share

get_model_shares(model_id: str) -> List[Share]

delete_model_share(model_id: str, share_id: str) -> str

get_jobs(application_id: str = '', states: List[JobStatus] = [JobStatus.WAITING, JobStatus.STARTED, JobStatus.FINISHED, JobStatus.ERROR], job_types: List[JobType] = [JobType.TRAINING]) -> List[Job]

get_job(job_id: str) -> Job

create_job(job: Job) -> Job

update_job(job: Job) -> Job

delete_job(job_id: str) -> str

predict(model_id: str, item: Union[str, dict], application_type: ApplicationType = ApplicationType.IMAGE_CLASSIFICATION) -> Inference

inference(model_id: str, item: Union[str, dict], application_type: ApplicationType = ApplicationType.IMAGE_CLASSIFICATION) -> Inference
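predict and inference are equivalent entry points. A sketch for an image-classification model, where item is a path to a local image (the import path and file name are placeholders):

    from seeme import ApplicationType  # import path may differ

    result = client.predict(model.id, "data/cat_1.jpg", ApplicationType.IMAGE_CLASSIFICATION)
    print(result)  # an Inference object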

version_predict(version: ModelVersion, item: Union[str, dict], application_type: ApplicationType = ApplicationType.IMAGE_CLASSIFICATION) -> Inference

version_inference(version: ModelVersion, item: Union[str, dict], application_type: ApplicationType = ApplicationType.IMAGE_CLASSIFICATION) -> Inference

add_inference(model_id: str, inference: Inference) -> Inference

add_inference_file(inference_id: str, folder: str, filename: str, content_type: str) -> Inference

download_inference_file(inference_id: str, filename: str)

update_inference(inference: Inference) -> Inference

get_inferences(model_id: str, model_version_ids: List[str] = [], page_count: int = 0, page_size: int = 25, include_already_added: bool = False, show_hidden: bool = False) -> List[Inference]
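A paging sketch; treating page_count as a zero-based page index is an assumption based on the defaults:

    page = 0
    while True:
        batch = client.get_inferences(model.id, page_count=page, page_size=25)
        if not batch:
            break
        for inference in batch:
            ...  # process each Inference
        page += 1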

add_inferences(dataset_id: str, dataset_version_id: str, dataset_split_id: str, add_inferences: AddInferences) -> AddInferences

get_inference_stats(model_id: str) -> float

get_applications() -> List[Application]

get_datasets() -> List[Dataset]

create_dataset(dataset: Dataset) -> Dataset

get_dataset(dataset_id: str) -> Dataset

update_dataset(dataset: Dataset) -> Dataset

upload_dataset_logo(dataset_id: str, folder: str = 'data', filename: str = 'logo.jpg') -> Dataset

get_dataset_logo(dataset: Dataset)

delete_dataset(id: str) -> str

share_dataset(dataset_id: str, email: str, send_invite: bool = False) -> Share
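A sketch of creating and sharing a dataset; the Dataset field used here (name) and the import path are assumptions:

    from seeme import Dataset  # import path may differ

    dataset = client.create_dataset(Dataset(name="pets"))

    # Share the dataset and optionally send an invite email.
    share = client.share_dataset(dataset.id, "colleague@example.com", send_invite=True)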

get_dataset_versions(dataset_id: str) -> List[DatasetVersion]

create_dataset_version(dataset_id: str, dataset_version: DatasetVersion) -> DatasetVersion

get_dataset_version(dataset_id: str, dataset_version_id: str) -> DatasetVersion

update_dataset_version(dataset_id: str, dataset_version: DatasetVersion) -> DatasetVersion

duplicate_dataset_version(dataset_id: str, dataset_version_id: str) -> DatasetVersion

delete_dataset_version(dataset_id: str, dataset_version_id: str) -> str

create_dataset_label(dataset_id: str, dataset_version_id: str, label: Label) -> Label

get_dataset_labels(dataset_id: str, dataset_version_id: str) -> List[Label]

get_dataset_label(dataset_id: str, dataset_version_id: str, label_id: str) -> Label

update_dataset_label(dataset_id: str, dataset_version_id: str, label: Label) -> Label

delete_dataset_label(dataset_id: str, dataset_version_id: str, label_id: str) -> str

get_label_stats(dataset_id: str, dataset_version_id: str, split_id: str) -> List[LabelStat]

get_stats_for_unlabelled(label_stats: List[LabelStat]) -> List[LabelStat]

get_stats_for_label(label_stats: List[LabelStat], label_id: str) -> LabelStat
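The two helpers above filter the list returned by get_label_stats. A sketch, assuming dataset, version, split, and label objects from earlier calls:

    stats = client.get_label_stats(dataset.id, version.id, split.id)

    # Stats for items that have no label yet.
    unlabelled = client.get_stats_for_unlabelled(stats)

    # Stats for one particular label.
    cat_stat = client.get_stats_for_label(stats, label.id)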

create_dataset_split(dataset_id: str, dataset_version_id: str, split: DatasetSplit) -> DatasetSplit

get_dataset_splits(dataset_id: str, dataset_version_id: str) -> List[DatasetSplit]

get_dataset_split(dataset_id: str, dataset_version_id: str, split_id: str) -> DatasetSplit

update_dataset_split(dataset_id: str, dataset_version_id: str, split: DatasetSplit) -> DatasetSplit

delete_dataset_split(dataset_id: str, dataset_version_id: str, split: DatasetSplit) -> str

get_dataset_items(dataset_id: str, dataset_version_id: str, params: dict = None) -> List[DatasetItem]

create_dataset_item(dataset_id: str, dataset_version_id: str, item: DatasetItem) -> DatasetItem

get_dataset_item(dataset_id: str, dataset_version_id: str, item_id: str) -> DatasetItem

update_dataset_item(dataset_id: str, dataset_version_id: str, item: DatasetItem) -> DatasetItem

delete_dataset_item(dataset_id: str, dataset_version_id: str, split_id: str, item_id: str) -> DatasetItem

upload_dataset_item_image(dataset_id: str, dataset_version_id: str, item_id: str, folder: str, filename: str) -> DatasetItem

download_dataset_item_image(dataset_id: str, dataset_version_id: str, item_id: str, download_location: str, thumbnail: bool = False)

annotate(dataset_id: str, dataset_version_id: str, annotation: Annotation) -> Annotation

update_annotation(dataset_id: str, dataset_version_id: str, annotation: Annotation) -> Annotation

delete_annotation(dataset_id: str, dataset_version_id: str, annotation_id: str) -> Annotation

download_dataset(dataset_id: str, dataset_version_id: str, split_id: str = '', extract_to_folder: str = 'data', download_file: str = 'dataset.zip', remove_download_file: bool = True, export_format: DatasetFormat = None, params: dict = {})

upload_dataset_version(dataset_id: str, dataset_version_id: str, folder: str = 'data', filename: str = 'dataset.zip', format: DatasetFormat = None) -> DatasetVersion
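A download/upload round-trip sketch, using placeholder objects for the dataset and version:

    # Download a dataset version, extract it to 'data', and clean up the zip.
    client.download_dataset(
        dataset.id,
        version.id,
        extract_to_folder="data",
        download_file="dataset.zip",
        remove_download_file=True,
    )

    # Upload a (new) zip for a dataset version.
    client.upload_dataset_version(dataset.id, version.id, folder="data", filename="dataset.zip")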

get_workflows() -> List[Workflow]

create_workflow(workflow: Workflow) -> Workflow

get_workflow(workflow_id: str) -> Workflow

update_workflow(workflow: Workflow) -> Workflow

delete_workflow(id: str) -> str

run_workflow(workflow_id: str, item: Union[str, dict]) -> WorkflowExecutionResponse
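run_workflow mirrors predict: item is a file path or a dict payload. A sketch with a placeholder file:

    response = client.run_workflow(workflow.id, "data/invoice_1.jpg")
    print(response)  # a WorkflowExecutionResponse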

get_workflow_versions(workflow_id: str) -> List[WorkflowVersion]

create_workflow_version(workflow_id: str, workflow_version: WorkflowVersion) -> WorkflowVersion

get_workflow_version(workflow_id: str, workflow_version_id: str) -> WorkflowVersion

update_workflow_version(workflow_id: str, workflow_version: WorkflowVersion) -> WorkflowVersion

delete_workflow_version(workflow_id: str, workflow_version_id: str) -> str

get_workflow_nodes(workflow_id: str, workflow_version_id: str) -> List[WorkflowNode]

create_workflow_node(workflow_id: str, workflow_version_id: str, workflow_node: WorkflowNode) -> WorkflowNode

get_workflow_node(workflow_id: str, workflow_version_id: str, node_id: str) -> WorkflowNode

update_workflow_node(workflow_id: str, workflow_version_id: str, node: WorkflowNode) -> WorkflowNode

delete_workflow_node(workflow_id: str, workflow_version_id: str, node_id: str) -> str

get_workflow_edges(workflow_id: str, workflow_version_id: str) -> List[WorkflowEdge]

create_workflow_edge(workflow_id: str, workflow_version_id: str, workflow_edge: WorkflowEdge) -> WorkflowEdge

get_workflow_edge(workflow_id: str, workflow_version_id: str, edge_id: str) -> WorkflowEdge

update_workflow_edge(workflow_id: str, workflow_version_id: str, edge: WorkflowEdge) -> WorkflowEdge

delete_workflow_edge(workflow_id: str, workflow_version_id: str, edge_id: str) -> str

get_post_processors(dataset_id: str) -> List[PostProcessorWithModel]

Get all post-processors for a dataset.

get_post_processor(dataset_id: str, processor_id: str) -> PostProcessorWithModel

Get a single post-processor by ID.

create_post_processor(dataset_id: str, processor: CreatePostProcessorRequest) -> DatasetPostProcessor

Create a new post-processor for a dataset.

update_post_processor(dataset_id: str, processor_id: str, updates: UpdatePostProcessorRequest) -> DatasetPostProcessor

Update an existing post-processor.

delete_post_processor(dataset_id: str, processor_id: str) -> str

Delete a post-processor.

get_post_processor_jobs(dataset_id: str, params: dict = None) -> List[PostProcessorJobWithDetails]

Get post-processor jobs for a dataset.

execute_workflow_async(workflow_id: str, request: WorkflowExecutionRequest) -> WorkflowExecutionResponse

Execute a workflow asynchronously. Returns immediately with an execution ID.

execute_workflow_async_with_file(workflow_id: str, folder: str, filename: str) -> WorkflowExecutionResponse

Execute a workflow asynchronously with a file upload.

get_workflow_executions(workflow_id: str) -> List[WorkflowExecution]

Get all executions for a workflow.

get_workflow_execution(workflow_id: str, execution_id: str) -> WorkflowExecution

Get a specific workflow execution.
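A submit-and-poll sketch combining execute_workflow_async and get_workflow_execution; the execution_id and status fields, and the set of terminal states, are assumptions:

    import time

    response = client.execute_workflow_async(workflow.id, request)

    while True:
        execution = client.get_workflow_execution(workflow.id, response.execution_id)
        if execution.status in ("finished", "error", "cancelled"):  # assumed states
            break
        time.sleep(2)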

download_workflow_execution_input_file(workflow_id: str, execution_id: str, download_location: str)

Download the input file for a workflow execution.

cancel_workflow_execution(workflow_id: str, execution_id: str) -> str

Cancel a running workflow execution.

execute_workflow_version_async(workflow_id: str, version_id: str, request: WorkflowExecutionRequest) -> WorkflowExecutionResponse

Execute a specific workflow version asynchronously.

execute_workflow_version_async_with_file(workflow_id: str, version_id: str, folder: str, filename: str) -> WorkflowExecutionResponse

Execute a specific workflow version asynchronously with a file upload.

get_graphs() -> List[Graph]

Get all graphs for the current user.

get_graph(graph_id: str) -> Graph

Get a single graph by ID.

create_graph(graph: Graph) -> Graph

Create a new graph.

update_graph(graph: Graph) -> Graph

Update an existing graph.

delete_graph(graph_id: str) -> str

Delete a graph.

get_graph_stats(graph_id: str) -> dict

Get statistics for a graph.

get_graph_versions(graph_id: str) -> List[GraphVersion]

Get all versions of a graph.

get_graph_version(graph_id: str, version_id: str) -> GraphVersion

Get a specific graph version.

create_graph_version(graph_id: str, version: GraphVersion) -> GraphVersion

Create a new graph version.

update_graph_version(graph_id: str, version: GraphVersion) -> GraphVersion

Update a graph version.

delete_graph_version(graph_id: str, version_id: str) -> str

Delete a graph version.

set_active_graph_version(graph_id: str, version_id: str) -> dict

Set a graph version as the active version.

duplicate_graph_version(graph_id: str, version_id: str) -> GraphVersion

Duplicate a graph version.

create_graph_node(graph_id: str, version_id: str, node: GraphNode) -> dict

Create a node in the graph.

create_graph_nodes(graph_id: str, version_id: str, nodes: List[GraphNode]) -> dict

Create multiple nodes in bulk.

get_graph_node(graph_id: str, version_id: str, node_uid: str) -> dict

Get a specific node by UID.

update_graph_node(graph_id: str, version_id: str, node_uid: str, node: GraphNode) -> dict

Update a node.

delete_graph_node(graph_id: str, version_id: str, node_uid: str) -> dict

Delete a node.

query_graph_nodes(graph_id: str, version_id: str, params: dict = None) -> List[dict]

Query nodes in the graph.

create_graph_edge(graph_id: str, version_id: str, edge: GraphEdge) -> dict

Create an edge in the graph.

create_graph_edges(graph_id: str, version_id: str, edges: List[GraphEdge]) -> dict

Create multiple edges in bulk.

delete_graph_edge(graph_id: str, version_id: str, params: dict = None) -> dict

Delete an edge.

execute_graph_query(graph_id: str, version_id: str, query: str) -> dict

Execute a DQL query on the graph.
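A sketch; the DQL string below is illustrative only:

    result = client.execute_graph_query(
        graph.id,
        version.id,
        query="{ nodes(func: has(name)) { uid name } }",
    )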

add_columns_structured_dataset_version(dataset_version: DatasetVersion, column_names: List, csv_separator: str = ',') -> DatasetVersion

create_structured_dataset_item(dataset_version: DatasetVersion, dataset_split: DatasetSplit, item: dict) -> DatasetItem

get_structured_dataset_item(dataset_id: str, dataset_version_id: str, item_id: str) -> dict

get_apikey() -> str

random_color() -> str

read_df(folder: str, filename: str, extension = '.csv', separator = ',')

download_df(dataset_id: str = None, dataset_version_id: str = None, download_folder: str = 'tmp', separator = ',')

upload_df(df: pd.DataFrame = None, dataset_id = None, name = 'my_dataset_version', keep_index = False, index_label = 'index', separator = ',', split_column = 'split', label_column = 'labels', multi_label = False, label_separator = ' ') -> DatasetVersion
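A sketch of uploading a labelled pandas dataframe as a dataset version; the column names follow the parameter defaults, and dataset.id is a placeholder:

    import pandas as pd

    df = pd.DataFrame({
        "text": ["great product", "would not buy again"],
        "labels": ["positive", "negative"],
        "split": ["train", "valid"],
    })

    version = client.upload_df(
        df=df,
        dataset_id=dataset.id,
        name="reviews-v1",
        split_column="split",
        label_column="labels",
    )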

requires_login()

update_applications()

update_auth_header(username: str = None, apikey: str = None)

is_logged_in()

delete_user()

crud_endpoint(endpoint: str) -> str

find_value_for_item_name(job: Job, item_name: str, failover = None)

find_job_item(job: Job, item_key: str, item_value: Union[dict, Base])

find_value_for_item_key(item: Union[dict, Base])

find_label_with_name(labels: List[Label], name: str) -> Union[Label, None]

find_split_with_name(splits: List[DatasetSplit], name: str) -> Union[DatasetSplit, None]

find_item_in_array(ar, prop, value, first_item = False, t: Base = None) -> Union[Base, None]

api_get(api: str, params: dict = None) -> str

api_post(api: str, data, params = None) -> str

api_upload(api: str, data, headers) -> str

api_put(api: str, data) -> str

api_delete(api: str) -> dict

api_download(api: str, filename: str, params = None)