query string (lengths 9–9.05k) | document string (lengths 10–222k) | negatives list (19–20 items) | metadata dict
|---|---|---|---|
Create a new dashboard. | def create_dashboard(self, dashboard_post: DashboardPOST, query_params: Dict[str, object] = None) -> Dashboard:
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/catalog/v2alpha2/dashboards").substitute(path_params)
url = self.base_clien... | [
"def create_dashboard(self, title, description, graphs):\n if isinstance(graphs, str):\n graphs = json.loads(graphs)\n body = {\n 'title': title,\n 'description': description,\n 'graphs': graphs\n }\n return self.http_request('POST', '/dash', b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
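Every SDK row in this dump follows the same generated-client shape: collect path parameters, substitute them into a `string.Template` URL, and dispatch through a shared base client. A minimal sketch of that pattern, assuming a `base_client` with `build_url` and HTTP verb methods (those names are inferred from the truncated rows, not confirmed):

```python
from string import Template
from typing import Dict

class CatalogClient:
    def __init__(self, base_client):
        # Assumed: base_client wraps an authenticated HTTP session.
        self.base_client = base_client

    def get_dashboard_by_id(self, dashboardid: str,
                            query_params: Dict[str, object] = None):
        if query_params is None:
            query_params = {}
        path_params = {"dashboardid": dashboardid}
        # ${...} placeholders are filled from the path_params dict.
        path = Template("/catalog/v2alpha2/dashboards/${dashboardid}").substitute(path_params)
        url = self.base_client.build_url(path)
        # Hypothetical dispatch; the real methods parse the body into model classes.
        return self.base_client.get(url, params=query_params)
```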
Create a new dataset import. | def create_dataset_import(self, datasetresourcename: str, dataset_imported_by: DatasetImportedBy = None, query_params: Dict[str, object] = None) -> Dataset:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
}
... | [
"def create(ds_name, description, tsuid_list):\n\n tdm = TemporalDataMgr()\n return tdm.import_data_set(data_set_id=ds_name, description=description, tsuid_list=tsuid_list)",
"def new_dataset(dataset_name: str):\n icedata.template.generate_dataset(dataset_name)",
"def importDataset(datasetName)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new field on a specific dataset. | def create_field_for_dataset(self, datasetresourcename: str, field_post: FieldPOST, query_params: Dict[str, object] = None) -> Field:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
}
path = Template("/catal... | [
"def createField(self, fieldName, ogrOFT):\n aField = ogr.FieldDefn(fieldName, ogrOFT)\n self.layer.CreateField(aField)",
"def add_data_field(self, data_field):\n assert isinstance(data_field, DataField), \"Only objects of type SnowLibrary.keywords.file_creator.DataField can be added. Instead... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new relationship. | def create_relationship(self, relationship_post: RelationshipPOST, query_params: Dict[str, object] = None) -> Relationship:
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/catalog/v2alpha2/relationships").substitute(path_params)
url = ... | [
"def create_relationship(\r\n self, from_item_id, to_item_id, relationship_type_id, **data):\r\n payload = {\r\n 'fromItem': from_item_id,\r\n 'toItem': to_item_id,\r\n 'relationshipType': relationship_type_id\r\n }\r\n\r\n payload = dict_update(paylo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new rule. | def create_rule(self, rule_post: RulePOST, query_params: Dict[str, object] = None) -> Rule:
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/catalog/v2alpha2/rules").substitute(path_params)
url = self.base_client.build_url(path)
... | [
"def create_rule(connection, rule_info):\n connection.command_path = 'rule'\n extra_headers = {\n connection.header_key: connection.token,\n 'Content-Type': 'text/xml'\n }\n url = connection.build_url()\n rule_data = _build_rule_payload(rule_info)\n verify_ssl = connection.verify_ssl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new workflow build. | def create_workflow_build(self, workflowid: str, workflow_build_post: WorkflowBuildPOST, query_params: Dict[str, object] = None) -> WorkflowBuild:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
}
path = Template("/catalog/v2... | [
"def workflow_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/workflow/new', input_params, always_retry=always_retry, **kwargs)",
"def create_workflow(\n ctx, key, name, description, instructions, specfile, manifest, template,\n configfile, ignore_postproc\n):\n config = u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new workflow run for the specified workflow build ID. | def create_workflow_run(self, workflowid: str, workflowbuildid: str, workflow_run_post: WorkflowRunPOST, query_params: Dict[str, object] = None) -> WorkflowRun:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
"workflowbuildid": wo... | [
"def create_workflow_build(self, workflowid: str, workflow_build_post: WorkflowBuildPOST, query_params: Dict[str, object] = None) -> WorkflowBuild:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"workflowid\": workflowid,\n }\n\n path = Templa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete an action on a rule. | def delete_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"ruleresourcename": ruleresourcename,
"actionid": actionid,
}
... | [
"def delete_action_by_id_for_rule_by_id(self, ruleid: str, actionid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleid\": ruleid,\n \"actionid\": actionid,\n }\n\n p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete an action that is part of a specific rule. | def delete_action_by_id_for_rule_by_id(self, ruleid: str, actionid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"ruleid": ruleid,
"actionid": actionid,
}
path = Template("/c... | [
"def delete_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleresourcename\": ruleresourcename,\n \"actionid\": actionid,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a specific annotation of a dashboard. | def delete_annotation_of_dashboard_by_id(self, dashboardid: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"dashboardid": dashboardid,
"annotationid": annotationid,
}... | [
"def delete_annotation_of_dashboard_by_resource_name(self, dashboardresourcename: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"dashboardresourcename\": dashboardresourcename... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a specific annotation of a dashboard. | def delete_annotation_of_dashboard_by_resource_name(self, dashboardresourcename: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"dashboardresourcename": dashboardresourcename,
... | [
"def delete_annotation_of_dashboard_by_id(self, dashboardid: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"dashboardid\": dashboardid,\n \"annotationid\": annotati... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a specific annotation of a dataset. | def delete_annotation_of_dataset_by_id(self, datasetid: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"datasetid": datasetid,
"annotationid": annotationid,
}
... | [
"def delete_annotation_of_dataset_by_resource_name(self, datasetresourcename: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a specific annotation of a dataset. | def delete_annotation_of_dataset_by_resource_name(self, datasetresourcename: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
"annot... | [
"def delete_annotation_of_dataset_by_id(self, datasetid: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n \"annotationid\": annotationid,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the dashboard with the specified ID. | def delete_dashboard_by_id(self, dashboardid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"dashboardid": dashboardid,
}
path = Template("/catalog/v2alpha2/dashboards/${dashboardid}").su... | [
"def delete_dashboard(self, dash_id):\n return self.http_request('DELETE', '/dash/' + str(dash_id))",
"def delete():\n return render_template(\"dashboard/delete.html\", tagname = 'delete', form = DeleteForm())",
"def delete(self, id: int):\n\n del self._clients[id]",
"def delete_department(id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the dashboard with the specified resource name. | def delete_dashboard_by_resource_name(self, dashboardresourcename: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"dashboardresourcename": dashboardresourcename,
}
path = Template("/catalo... | [
"def delete_dashboard(self, dash_id):\n return self.http_request('DELETE', '/dash/' + str(dash_id))",
"def delete_annotation_of_dashboard_by_resource_name(self, dashboardresourcename: str, annotationid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the dataset with the specified resource name, along with its dependencies. For the default module, the resource name format is datasetName. Otherwise, the resource name format is module.datasetName. | def delete_dataset(self, datasetresourcename: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
}
path = Template("/catalog/v2alpha2/datasets/${dat... | [
"def delete_dataset(datasetName=None):\n pass",
"def delete_dataset_content(datasetName=None, versionId=None):\n pass",
"def dataset_delete(config: Config, repository: str, dataset: str):\n repository = check_repo(repository)\n client = Client(config)\n result = client.dataset_delete(repository=r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a field that is part of a specific dataset. | def delete_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
"fieldid": fieldid,
... | [
"def delete_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n \"fieldid\": fieldid,\n }\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a field that is part of a specific dataset. | def delete_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"datasetid": datasetid,
"fieldid": fieldid,
}
path = Temp... | [
"def delete_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n \"fieldid\": ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the rule with the specified resource name and its dependencies. | def delete_rule(self, ruleresourcename: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"ruleresourcename": ruleresourcename,
}
path = Template("/catalog/v2alpha2/rules/${ruleresourcename}"... | [
"def delete_rule(connection, rule_id):\n connection.command_path = 'rule/{0}'.format(rule_id)\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n extra_headers = {connection.header_key: connection.token}\n res = requests.delete(url, headers=extra_headers, verify=verify_ssl)\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the workflow build with the specified workflow build ID. | def delete_workflow_build_by_id(self, workflowid: str, workflowbuildid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
"workflowbuildid": workflowbuildid,
}
... | [
"def delete_workflow_run_by_id(self, workflowid: str, workflowbuildid: str, workflowrunid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"workflowid\": workflowid,\n \"workflowbuildid... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the workflow with the specified workflow ID. | def delete_workflow_by_id(self, workflowid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
}
path = Template("/catalog/v2alpha2/workflows/${workflowid}").substitu... | [
"def delete_workflow_by_id(self, workflow_id) -> Status:\n try:\n workflow = MongoWorkflow.objects(uuid=workflow_id, is_deleted__ne=TRUE).first()\n if workflow is None:\n return Status.ERROR\n deleted_workflow_counts = MongoWorkflow.objects(\n pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete the workflow run with the specified workflow run ID. | def delete_workflow_run_by_id(self, workflowid: str, workflowbuildid: str, workflowrunid: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
"workflowbuildid": workflowbui... | [
"def delete_run(self, run_id):\n url = urljoin(self.api_url, f\"TestRuns/{run_id}\")\n response = requests.delete(url)\n if response.status_code == 204:\n log.info(f\"ETF test run {run_id} deleted.\")\n else:\n log.error(f\"Could not delete ETF test run {run_id}.\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return an action that is part of a specified rule. | def get_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, query_params: Dict[str, object] = None) -> Action:
if query_params is None:
query_params = {}
path_params = {
"ruleresourcename": ruleresourcename,
"actionid": actionid,
}
path... | [
"def parse(cls, expr: str) -> \"Action\":\n return _parse_and_convert(expr, rule_name=\"onlyAction\")",
"def _get_rule_actions(self, rule):\n parser = self.switch.dp.ofproto_parser\n return [parser.OFPActionOutput(rule.vnf_id)]",
"def get_action(self, action):\n if self.parsed_workfl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return information about a dashboard with the specified ID. | def get_dashboard_by_id(self, dashboardid: str, query_params: Dict[str, object] = None) -> Dashboard:
if query_params is None:
query_params = {}
path_params = {
"dashboardid": dashboardid,
}
path = Template("/catalog/v2alpha2/dashboards/${dashboardid}").substitu... | [
"def dashboard(self, dash_id):\n return self.http_request('GET', '/dash/' + str(dash_id),\n response_formatter=lambda x: x['dash'],\n )",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n dashboard_arn... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return information about a dashboard with the specified resource name. | def get_dashboard_by_resource_name(self, dashboardresourcename: str, query_params: Dict[str, object] = None) -> Dashboard:
if query_params is None:
query_params = {}
path_params = {
"dashboardresourcename": dashboardresourcename,
}
path = Template("/catalog/v2al... | [
"def get_dashboard_name(self, dashboard: Any) -> str:",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n dashboard_arn: Optional[pulumi.Input[str]] = None,\n dashboard_body: Optional[pulumi.Input[str]] = None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a field that is part of a specific dataset. | def get_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, query_params: Dict[str, object] = None) -> Field:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
"fieldid": fieldid,
}
... | [
"def get_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, query_params: Dict[str, object] = None) -> Field:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n \"fieldid\": fieldid,\n }\n\n pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a field that is part of a specific dataset. | def get_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, query_params: Dict[str, object] = None) -> Field:
if query_params is None:
query_params = {}
path_params = {
"datasetid": datasetid,
"fieldid": fieldid,
}
path = Template("/cat... | [
"def get_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, query_params: Dict[str, object] = None) -> Field:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n \"fieldid\": fieldid,\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return information about the workflow build with the specified workflow build ID. | def get_workflow_build_by_id(self, workflowid: str, workflowbuildid: str, query_params: Dict[str, object] = None) -> WorkflowBuild:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
"workflowbuildid": workflowbuildid,
}
... | [
"def get_koji_build_info(build_id, session, config):\n\n print(\"Retriewing build metadata from: \", config.koji_host)\n build = session.getBuild(build_id)\n if not build:\n raise Exception(\"Build with id '{id}' has not been found.\".format(id=build_id))\n\n print(\"Build with the ID\", build_id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return information about a workflow with the specified workflow ID. | def get_workflow_by_id(self, workflowid: str, query_params: Dict[str, object] = None) -> Workflow:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
}
path = Template("/catalog/v2alpha2/workflows/${workflowid}").substitute(path... | [
"def get(cls, id, client=None):\n if client is None:\n client = get_global_grpc_client()\n\n message = client.api[\"GetWorkflow\"](\n workflow_pb2.GetWorkflowRequest(id=id),\n timeout=client.DEFAULT_TIMEOUT,\n )\n return cls._from_proto(message, client)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return information about the workflow run with the specified workflow build ID. | def get_workflow_run_by_id(self, workflowid: str, workflowbuildid: str, workflowrunid: str, query_params: Dict[str, object] = None) -> WorkflowRun:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
"workflowbuildid": workflowbuildid... | [
"def get_workflow_run(self, workflow_run_id):\n variables = {\n 'id': workflow_run_id\n }\n\n return self.query(\"\"\"\n query workflowRunQuery($id: ID!) {\n workflowRun(id: $id) {\n id\n name\n create... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the list of actions that are part of a specified rule. | def list_actions_for_rule(self, ruleresourcename: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[Action]:
if query_params is None:
query_params = {}
if count is not None:
query_params['cou... | [
"def _get_rule_actions(self, rule):\n parser = self.switch.dp.ofproto_parser\n return [parser.OFPActionOutput(rule.vnf_id)]",
"def get_valid_actions(self, section, action):\n valid_actions = []\n for candidate_action in sorted(self.actions[section]):\n if is_string(action) a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the list of fields that are part of a specified dataset. | def list_fields_for_dataset(self, datasetresourcename: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[Field]:
if query_params is None:
query_params = {}
if count is not None:
query_params[... | [
"def get_updated_dataset_fields(dataset: bigquery.Dataset):\n fields = []\n if dataset.description is not None and len(dataset.description) > 0:\n fields.append(\"description\")\n if dataset.labels is not None and len(dataset.labels) > 0:\n fields.append(\"labels\")\n return sorted(fields)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of all modules, unless you specify a filter. Use a filter to return a specific list of modules. | def list_modules(self, filter: str = None, query_params: Dict[str, object] = None) -> List[Module]:
if query_params is None:
query_params = {}
if filter is not None:
query_params['filter'] = filter
path_params = {
}
path = Template("/catalog/v2alpha2/mod... | [
"def get_modules(self):\n test_repo = import_module(self.test_repo_name)\n prefix = \"{0}.\".format(test_repo.__name__)\n product_path = \"{0}{1}\".format(prefix, self.product)\n modnames = []\n for importer, modname, is_pkg in pkgutil.walk_packages(\n path=test_rep... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a list of all relationships, unless you specify a filter. Use a filter to return a specific list of relationships. | def list_relationships(self, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[Relationship]:
if query_params is None:
query_params = {}
if count is not None:
query_params['count'] = count
... | [
"def relationships(self):\n relationships = []\n if parent is not none:\n relationships = [parent.relationship]\n relationships.extend(parent.relationships)\n\n return relationships",
"def relationship(self) -> List[Relationship]:\n return self._relationship",
"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of Machine Learning workflow builds. | def list_workflow_builds(self, workflowid: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[WorkflowBuild]:
if query_params is None:
query_params = {}
if count is not None:
query_params['cou... | [
"def model_build_list(name, username=None, password=None, cookies_file=None):\n model = get_model(name, username, password, cookies_file)\n return model.get(\"builds\", [])",
"def _get_running_builds(self):\n running_builds = self._get_resource(self._RUNNING_BUILDS_RESOURCE)\n if self._COUNT_A... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a list of Machine Learning workflow runs for specified workflow build ID. | def list_workflow_runs(self, workflowid: str, workflowbuildid: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[WorkflowRun]:
if query_params is None:
query_params = {}
if count is not None:
... | [
"def list_workflow_builds(self, workflowid: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[WorkflowBuild]:\n if query_params is None:\n query_params = {}\n if count is not None:\n query_pa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the Action with the specified id for the specified Rule | def update_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, action_patch: ActionPATCH, query_params: Dict[str, object] = None) -> Action:
if query_params is None:
query_params = {}
path_params = {
"ruleresourcename": ruleresourcename,
"actionid": act... | [
"def update_action_by_id_for_rule_by_id(self, ruleid: str, actionid: str, action_patch: ActionPATCH, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleid\": ruleid,\n \"actionid\": actionid,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update an action for a specific rule. | def update_action_by_id_for_rule_by_id(self, ruleid: str, actionid: str, action_patch: ActionPATCH, query_params: Dict[str, object] = None) -> Action:
if query_params is None:
query_params = {}
path_params = {
"ruleid": ruleid,
"actionid": actionid,
}
... | [
"def update_action_by_id_for_rule(self, ruleresourcename: str, actionid: str, action_patch: ActionPATCH, query_params: Dict[str, object] = None) -> Action:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"ruleresourcename\": ruleresourcename,\n \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the dashboard with the specified ID. | def update_dashboard_by_id(self, dashboardid: str, dashboard_patch: DashboardPATCH, query_params: Dict[str, object] = None) -> Dashboard:
if query_params is None:
query_params = {}
path_params = {
"dashboardid": dashboardid,
}
path = Template("/catalog/v2alpha2/... | [
"def update_dashboard(self, dash_id, title, description, graphs):\n if isinstance(graphs, str):\n graphs = json.loads(graphs)\n body = {\n 'title': title,\n 'description': description,\n 'graphs': graphs\n }\n return self.http_request('PUT', '/... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the dashboard with the specified resource name. | def update_dashboard_by_resource_name(self, dashboardresourcename: str, dashboard_patch: DashboardPATCH, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"dashboardresourcename": dashboardresourcename,
}
... | [
"def update_resource(self, resource):\n raise NotImplementedError(\"update_resource is not implemented\")",
"def update(\n self,\n resource_group_name, # type: str\n dashboard_name, # type: str\n lenses=None, # type: Optional[Dict[str, \"DashboardLens\"]]\n metadata=No... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the dataset with the specified resource name. For the default module, the resource name format is datasetName. Otherwise, the resource name format is module.datasetName. | def update_dataset(self, datasetresourcename: str, dataset_patch: DatasetPATCH, query_params: Dict[str, object] = None) -> Dataset:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
}
path = Template("/catalog... | [
"def rename_dataset(self, new_name: str):\n self.dataset_id = new_name",
"def update(self, dataset_id, name=None, description=None):\n\n dataset = models.Dataset(\n name=name,\n description=description\n )\n\n repository = self.build_repository(repositories.Update... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a field with a specified ID for a specified dataset. | def update_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, field_patch: FieldPATCH, query_params: Dict[str, object] = None) -> Field:
if query_params is None:
query_params = {}
path_params = {
"datasetresourcename": datasetresourcename,
"fieldid... | [
"def update_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, field_patch: FieldPATCH, query_params: Dict[str, object] = None) -> Field:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n \"fieldid\": fieldi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a field for a specific dataset. | def update_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, field_patch: FieldPATCH, query_params: Dict[str, object] = None) -> Field:
if query_params is None:
query_params = {}
path_params = {
"datasetid": datasetid,
"fieldid": fieldid,
}
... | [
"def update_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, field_patch: FieldPATCH, query_params: Dict[str, object] = None) -> Field:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the Rule with the specified resourcename | def update_rule(self, ruleresourcename: str, rule_patch: RulePATCH, query_params: Dict[str, object] = None) -> Rule:
if query_params is None:
query_params = {}
path_params = {
"ruleresourcename": ruleresourcename,
}
path = Template("/catalog/v2alpha2/rules/${rul... | [
"def PBH_RULE_update():\n\n pass",
"def replace_rule(self, *args):\n return _wali.WPDS_replace_rule(self, *args)",
"def replace_rule(self, *args):\n return _wali.EWPDS_replace_rule(self, *args)",
"def PBH_RULE_update_field():\n\n pass",
"def assigned(self, uri, newRuleset):\n pass... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the workflow build with the specified workflow build ID. | def update_workflow_build_by_id(self, workflowid: str, workflowbuildid: str, workflow_build_patch: WorkflowBuildPATCH, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
"workf... | [
"def update_build(build_id):\n build = mongo.db.build\n build_params = {\n 'build_name': request.form.get('build_name'),\n 'motherboard': request.form.get('motherboard'),\n 'processor': request.form.get('processor'),\n 'processor_cooler': request.form.get('processor_cooler'),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the workflow with the specified workflow ID. | def update_workflow_by_id(self, workflowid: str, workflow_patch: WorkflowPATCH, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
}
path = Template("/catalog/v2alpha2/wor... | [
"def updateWorkflow(self, contribId, record):\n\n crit = {N._id: contribId}\n self.mongoCmd(N.updateWorkflow, N.workflow, N.replace_one, crit, record)",
"def put(self):\n definition = pecan.request.text\n\n LOG.info(\"Update workflow(s) [definition=%s]\" % definition)\n\n db_wfs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update the workflow run with the specified workflow run ID. | def update_workflow_run_by_id(self, workflowid: str, workflowbuildid: str, workflowrunid: str, workflow_run_patch: WorkflowRunPATCH, query_params: Dict[str, object] = None) -> SSCVoidModel:
if query_params is None:
query_params = {}
path_params = {
"workflowid": workflowid,
... | [
"def update_operation_run(\n self,\n operation_run_id,\n start_date_time=None,\n end_date_time=None,\n status=None,\n message=None,\n deleted=None\n ):\n\n \n variables = {\n 'operationRun': {\n 'id':... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merge an attribute item with the timestream item, no checks | def merge_attribute(self, attribute, overwrite=False):
assert attribute.user == self.user
for k, v in attribute.other_data.items():
if not overwrite and k in self.data:
print("WARNING! Overwriting", k)
self.data[k] = v | [
"def merge(timestream_list, attribute_list, reverse=False):\n\n # I'm sure there's a more pythonic way of doing this...\n attribute_list_ptr = 0\n attribute_list_last = len(attribute_list)\n for item in timestream_list:\n\n if attribute_list_ptr >= attribute_list_last:\n break\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assign the appropriate attribute to the timestream. If reverse is false, assign the network data to the attribute instead. Both must be sorted. | def merge(timestream_list, attribute_list, reverse=False):
# I'm sure there's a more pythonic way of doing this...
attribute_list_ptr = 0
attribute_list_last = len(attribute_list)
for item in timestream_list:
if attribute_list_ptr >= attribute_list_last:
break
whi... | [
"def link_attribute(\n self,\n attribute: shared_enum.LinkAttribute,\n time_index: Union[int, datetime, None] = None,\n ):\n\n time_index = self.verify_time(\n time_index, self.times, self.start, self.end, self.report, 0\n )\n\n values = output.get_link_attrib... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
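The truncated `merge` above walks two sorted lists in a single pass with a pointer into the attribute list. A self-contained sketch of that two-pointer pattern on plain sorted lists (the matching rule here is illustrative, not the original's):

```python
def merge_sorted(timestream, attributes, key=lambda x: x):
    """Pair each timestream entry with the latest attribute at or before it.

    Both inputs must be sorted by `key`; runs in O(len(a) + len(b)).
    """
    out, ptr = [], 0
    for item in timestream:
        # Advance the pointer while the next attribute still
        # precedes (or coincides with) the current item.
        while ptr < len(attributes) and key(attributes[ptr]) <= key(item):
            ptr += 1
        out.append((item, attributes[ptr - 1] if ptr else None))
    return out

print(merge_sorted([1, 5, 9], [0, 4, 6]))
# [(1, 0), (5, 4), (9, 6)]
```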
Build a contingency matrix describing the relationship between labels. | def contingency_matrix(
labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64
):
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, re... | [
"def create_contingency_matrix(self):\n self.cont_matrix=contingency_matrix(labels_true=self.test_labels, labels_pred=self.model_binary_forecasts)",
"def incidence_matrix(labels):\n Npts = len(labels)\n incidence_matrix = np.zeros((Npts,Npts))\n for i in range(Npts):\n for j in range(Npts):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
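The document above mirrors scikit-learn's contingency matrix: rows index true classes, columns index predicted clusters, and each cell counts co-occurrences. A quick usage check with the public sklearn function:

```python
from sklearn.metrics.cluster import contingency_matrix

labels_true = [0, 0, 1, 1, 2]
labels_pred = [0, 0, 1, 2, 2]
# Rows are true classes, columns are predicted clusters.
C = contingency_matrix(labels_true, labels_pred)
print(C)
# [[2 0 0]
#  [0 1 1]
#  [0 0 1]]
```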
Homogeneity metric of a cluster labeling given a ground truth. A clustering result satisfies homogeneity if all of its clusters contain only data points which are members of a single class. | def homogeneity_score(labels_true, labels_pred):
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0] | [
"def homogeneity(self):\n try:\n homogeneity = self._message.homogeneity\n if homogeneity == 117:\n return available_result_pb2.Homogeneity.Name(\n available_result_pb2.Homogeneity.DIMENSIONLESS\n ).lower()\n return available_r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute completeness metric of a cluster labeling given a ground truth. A clustering result satisfies completeness if all the data points that are members of a given class are elements of the same cluster. | def completeness_score(labels_true, labels_pred):
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1] | [
"def calculate_purity(cluster_assignments, true_classes):\n # get the set of unique cluster ids\n cluster_ids = np.unique(cluster_assignments)\n cluster_indices = {}\n\n # find out the index of data points for each cluster in the data set\n for cls_id in cluster_ids:\n cluster_indices[cls_id] ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
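Both one-liners above index into `homogeneity_completeness_v_measure`, so homogeneity and completeness come from the same computation. A small example of how the two metrics diverge, using the sklearn implementations:

```python
from sklearn.metrics import homogeneity_score, completeness_score

labels_true = [0, 0, 1, 1]
# A permutation of the labels is both homogeneous and complete:
print(homogeneity_score(labels_true, [1, 1, 0, 0]))   # 1.0
print(completeness_score(labels_true, [1, 1, 0, 0]))  # 1.0
# Over-splitting a class keeps homogeneity but breaks completeness:
print(homogeneity_score(labels_true, [0, 0, 1, 2]))   # 1.0
print(completeness_score(labels_true, [0, 0, 1, 2]))  # ~0.667
```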
Adjusted Mutual Information between two clusterings. Adjusted Mutual Information (AMI) is an adjustment of the Mutual Information (MI) score to account for chance. It accounts for the fact that the MI is generally higher for two clusterings with a larger number of clusters, regardless of whether there is actually more ... | def adjusted_mutual_info_score(
labels_true, labels_pred, *, average_method="arithmetic"
):
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no cluste... | [
"def adjusted_mutual_information(\n feature_list0: List[np.ndarray],\n feature_list1: List[np.ndarray],\n is_categorical_list0: List[bool],\n is_categorical_list1: List[bool],\n k: int = 3,\n estimate_method: str = 'larger_data',\n weight_feature: Optional[np.ndarray] = None,\n filter_featur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Normalized Mutual Information between two clusterings. Normalized Mutual Information (NMI) is a normalization of the Mutual Information (MI) score to scale the results between 0 (no mutual information) and 1 (perfect correlation). In this function, mutual information is normalized by some generalized mean of ``H(labels... | def normalized_mutual_info_score(
labels_true, labels_pred, *, average_method="arithmetic"
):
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
... | [
"def calculate_NMI(cluster_assignments, true_classes):\n\n return adjusted_mutual_info_score(cluster_assignments, true_classes)",
"def adjusted_mutual_info_score(\n labels_true, labels_pred, *, average_method=\"arithmetic\"\n):\n labels_true, labels_pred = check_clusterings(labels_true, labels_pred)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Orders product and displays message. | def order_product(request, product_id):
product = get_object_or_404(Product, pk=product_id)
customer = request.user.customer
Order.objects.create(customer=customer, product=product)
messages.success(request, 'The product has been added to the order!')
return HttpResponseRedirect(reverse('index')) | [
"def buy_product(self, product_id, label_summary, label_inserted_money):\n if not product_id:\n label_summary['text'] = \"Wprowadź numer produktu\"\n else:\n result = self.vending_machine.buy_product(product_id)\n displayed_text = self.get_displayed_text(result, produc... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Integrate f from a to b, using the Trapezoidal rule with n intervals. | def integrate(f, a, b, n=100):
x = linspace(a, b, n+1) # Coordinates of the intervals
h = x[1] - x[0] # Interval spacing
I = h*(sum(f(x)) - 0.5*(f(a) + f(b)))
return I | [
"def integrate(f, a, b, n=100):\n x = linspace(a, b, n+1) # Coordinates of the intervals\n h = x[1] - x[0] # Interval spacing\n I = h*(sum(f(x)) - 0.5*(f(a) + f(b)))\n return I",
"def integrate_range(fxn, a, b, n):\n\n # There are n trapezoids and therefore there are n+1 endpoints\n\n endp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
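The `integrate` document assumes `linspace` from NumPy. A self-contained version with a quick sanity check against a known integral (the trapezoidal rule's error shrinks as O(h²)):

```python
import numpy as np

def integrate(f, a, b, n=100):
    x = np.linspace(a, b, n + 1)   # endpoints of the n subintervals
    h = x[1] - x[0]                # uniform spacing
    return h * (np.sum(f(x)) - 0.5 * (f(a) + f(b)))

# The integral of sin on [0, pi] is exactly 2; n=100 lands nearby.
print(integrate(np.sin, 0, np.pi))  # ~1.99984
```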
Check if a string can be converted to a nonzero float. | def is_float(string):
try:
return float(string) != 0
except ValueError:
return False | [
"def is_float(s: str) -> bool:\n\n try:\n out = float(s)\n except:\n return False\n return True",
"def is_float(str1):\r\n isfloat = False\r\n if str1.count(\".\") == 1 or str1.replace(\".\", \"\").isdigit():\r\n isfloat = True\r\n return isfloat",
"def procheckString2floa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply quirks for `subprocess.Popen` to have standard behavior in a PyInstaller-frozen Windows binary. Returns dict[str, str or bool or None]: the additional arguments for `subprocess` calls. | def sp_args():
if sys.platform.startswith('win32'):
# Prevent Windows from popping up a command window on subprocess calls
startup_info = sp.STARTUPINFO()
startup_info.dwFlags |= sp.STARTF_USESHOWWINDOW
# Make Windows search the ``PATH``
environment = os.environ
else:
... | [
"def popen_wrapper(args, stdout_encoding='utf-8'):\n try:\n p = Popen(args, shell=False, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt')\n except OSError as err:\n raise InvalidCommand('Error executing %s' % args[0]) from err\n output, errors = p.communicate()\n return (\n outp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the command for opening document files with the standard application for its type (on Windows, Linux, and macOS). Returns str: the handler file name for opening documents. Raises FileNotFoundError: if no handler is found due to an unknown operating system. | def doc_handler():
if sys.platform.startswith('win32'):
return 'explorer.exe'
elif sys.platform.startswith('linux'):
return 'xdg-open'
elif sys.platform.startswith('darwin'):
return 'open'
else:
raise FileNotFoundError('Unknown operating system: File handler not found') | [
"def get_open_command(filepath):\n if 'windows' in OSNAME:\n opener = 'start'\n elif 'osx' in OSNAME or 'darwin' in OSNAME:\n opener = 'open'\n else:\n opener = 'xdg-open'\n return '{opener} {filepath}'.format(opener=opener, filepath=filepath)",
"def getAutoOpenFunction(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
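The typical use of the handler name is as the first argv element of a subprocess call. A short sketch (the file path is a placeholder):

```python
import subprocess
import sys

def doc_handler():
    if sys.platform.startswith('win32'):
        return 'explorer.exe'
    if sys.platform.startswith('linux'):
        return 'xdg-open'
    if sys.platform.startswith('darwin'):
        return 'open'
    raise FileNotFoundError('Unknown operating system: File handler not found')

# Hand the document to the OS default application; 'report.pdf' is a placeholder.
subprocess.run([doc_handler(), 'report.pdf'])
```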
Display a popup for errors in subroutines. | def subroutine_error_popup(subroutine, error, message):
# ===== Error Window Definition ===== #
error_layout = [
[sg.Text(an.ERROR_INTRO.format(subroutine))],
[sg.Text(message)],
[sg.Text(an.ERROR_OUTRO, text_color='red')],
[sg.Text('', font='default 10 italic', key='-COPY_DONE-'... | [
"def showError(errormessage):\r\n messagebox.showerror(\"WinRAT\", errormessage)",
"def display_error(title, error_message):\n messagebox.showerror(title=title, message=error_message)",
"def displayError(err):\n print(\"\\nError: %s.\" % err)\n displayUsage()",
"def pop_error(self, error):... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function transfers a csv to a dataframe | def csv_to_dataframe(csv):
data = pd.read_csv(csv,thousands='.', decimal=',', index_col=[0])
return data | [
"def make_dataframe(csv):\n try:\n dataframe = pd.read_table(csv, sep=\"\\s+|,\", engine=\"python\")\n except:\n error(\"{} does not exist or cannot be read\".format(csv),\n continue_exec=False)\n return dataframe",
"def prepare_df(args):\r\n path, column, replace_commas, r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function calculates the growth rate of each country and adds it to the DataFrame | def growth_rate(dataframe):
dataframe["Growth Rate"] = dataframe.Birthrate - dataframe.Deathrate | [
"def get_relative_growth(country):\n\n\n # Implementation...\n # ...\n # ...\n # ...",
"def get_max_changes_by_country(df: pd.DataFrame,\n start_year: str = \"1850\") -> pd.DataFrame:\n countries = df[\"Country\"].unique()\n max_values = []\n\n for country in cou... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If `resume==True` and the last checkpoint exists, resume from it, load all checkpointables (e.g. optimizer and scheduler), and update the iteration counter. Otherwise, load the model specified by the config (skip all checkpointables) and start from the first iteration. | def resume_or_load(self, resume=True):
checkpoint = self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
if resume and self.checkpointer.has_checkpoint():
self.start_iter = checkpoint.get("iteration", -1) + 1
# The checkpoint stores the training iteration that ... | [
"def resume_checkpoint(self, resume_path):\n resume_path = str(resume_path)\n self.logger.info(\"Loading checkpoint: %s ...\", resume_path)\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.monitor_best = checkpoint['monitor_best']\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. | def build_hooks(self):
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(self.optimizer, self.scheduler),
hooks.PreciseBN(
#... | [
"def build_hooks(self):\n cfg = self.cfg.clone()\n cfg.defrost()\n cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN\n\n hooks_list = [\n hooks.IterationTimer(),\n LRScheduler(self.optimizer, self.scheduler),\n hooks.PreciseBN(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build a list of writers to be used. By default it contains writers that write metrics to the screen, a json file, and a tensorboard event file respectively. If you'd like a different list of writers, you can overwrite it in your trainer. | def build_writers(self):
# Here the default print/log frequency of each writer is used.
return [
# It may not always print what you want to see, since it prints "common" metrics only.
CommonMetricPrinter(self.max_iter),
JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "me... | [
"def build_writers(self):\n # Assume the default print/log frequency.\n return [\n # It may not always print what you want to see, since it prints \"common\" metrics only.\n CommonMetricPrinterWithComet(self.max_iter), # 重载实现\n JSONWriter(os.path.join(self.cfg.OUTPU... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
When the config is defined for certain number of workers (according to ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of workers currently in use, returns a new cfg where the total batch size is scaled so that the perGPU batch size stays the same as the original ``IMS_PER_BATCH // REFERENCE_WORLD... | def auto_scale_workers(cfg, num_workers: int):
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == num_workers:
return cfg
cfg = cfg.clone()
frozen = cfg.is_frozen()
cfg.defrost()
assert (
cfg.SOLVER.IMS_PE... | [
"def _update_batch_size(self):\n candidate = None\n for op in self.operations:\n op_batch_size = getattr(op, \"batch_size\", None)\n if op_batch_size is None:\n continue\n if candidate:\n if op_batch_size != candidate:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
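The rescaling in `auto_scale_workers` keeps the per-GPU batch size fixed and applies the usual linear scaling rule to the learning rate and schedule. A worked sketch of the arithmetic, assuming the common Detectron2-style rules (a simplification; the real function also rescales warmup, eval period, and checkpoint period):

```python
def scale_solver(ims_per_batch, base_lr, max_iter, old_world_size, num_workers):
    """Rescale so the per-GPU batch size stays ims_per_batch // old_world_size."""
    assert ims_per_batch % old_world_size == 0
    scale = num_workers / old_world_size
    per_gpu = ims_per_batch // old_world_size
    return {
        "IMS_PER_BATCH": per_gpu * num_workers,    # total batch shrinks/grows
        "BASE_LR": base_lr * scale,                # linear LR scaling rule
        "MAX_ITER": int(round(max_iter / scale)),  # same number of epochs
    }

# Config written for 8 workers, now running on 4:
print(scale_solver(16, 0.1, 90_000, old_world_size=8, num_workers=4))
# {'IMS_PER_BATCH': 8, 'BASE_LR': 0.05, 'MAX_ITER': 180000}
```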
pads or crops the spectrogram as needed to make it the desired length desired_length is in bins (not ms) | def force_spectrogram_length(spec, desired_length):
if spec.shape[1] < desired_length:
to_add = desired_length - spec.shape[1]
spec = np.hstack((spec, np.zeros((512, to_add))))
elif spec.shape[1] > desired_length:
spec = spec[:, :desired_length]
return spec | [
"def fixed_len_spectrogram(file, adv_ms, len_ms, offset_s, specfmt=\"dB\",\n mel_filters_N=12):\n\n # Use the Endpointer class to determine the times associated with speech.\n endpoint = Endpointer(file, adv_ms, len_ms)\n [sfull, sfull_t, sfull_in] = spectrogram(file,adv_ms,len_ms... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
uses the bow_hist to compute a histogram | def bow_hist(spec, bow_hist):
return | [
"def histogram(*args):\n return _seb.histogram(*args)",
"def make_histogram(self): # connected to make histogram (btn_histogram)\n print(\"make hist\")\n# self.calculate_images()\n self.intensitys = np.linspace(0,10,10)\n self.intensitys2 = self.intensitys\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add an edge from source to target if it is new and valid. For the edge to be valid, new_attr must contain map/value pairs for ECs, degree, and PDCs for the nodes, ECs, and PDCs. If an edge from source to target is already present, the set of attributes with the lower PDC is kept. | def add_edge(self, source, target, new_attr):
self._assert_valid_attr(new_attr)
add_edge = DiGraph.add_edge  # Python 3: unbound methods are plain functions (was .im_func)
if not self.has_edge(source, target):
add_edge(self, source, target, new_attr)
else:
old_attr = self[source][target]
add_edge(self, sour... | [
"def update_edge(self, e):\n orig_prop = self._g.adj.get(\n e.source_id, {}).get(\n e.target_id, {}).get(\n e.label, None)\n if not orig_prop:\n self._add_edge(e)\n return\n self._g.adj[e.source_id][e.target_id][e.label].update(e.properties... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the edges in ebunch if they are new and valid. The docstring for add_edge explains what is meant by valid and how attributes for the same source and target are updated. | def add_edges_from(self, ebunch):
for (source, target, new_attr) in ebunch:
self.add_edge(source, target, new_attr) | [
"def add_edge(self, ed):\n self.edge.append(ed)\n\n\t# This one creates a new edge and adds it to the tree.",
"def add_edges(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"%%%%%%%%%% ADDING EDGES %%%%%%%%%%%%%\\n\\n\")\n\t\t\tfor v in self.G.nodes:\t\t\t\n\t\t\t\tfor w in self.G.nodes:\n\t\t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns extracted auth token from html | def get_token(html_content: str) -> str:
soup = BeautifulSoup(html_content, features='lxml')
return soup.find('input', attrs={'name': 'authenticity_token'})['value'] | [
"def extract_xsrf_token(html):\n return re.search(r'var ttoken = \"([a-f0-9]+)\";', html).group(1)",
"def _get_token(self):\n with requests.Session() as session:\n # This first request is to get redirected and get the token\n url = 'https://omsweb.public-safety-cloud.com/jtclientwe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
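The `get_token` document pulls a hidden CSRF field out of a form. A self-contained check with sample HTML (using the stdlib `html.parser` backend so no lxml install is needed):

```python
from bs4 import BeautifulSoup

html = '''
<form action="/login" method="post">
  <input type="hidden" name="authenticity_token" value="abc123">
</form>
'''
soup = BeautifulSoup(html, features='html.parser')
token = soup.find('input', attrs={'name': 'authenticity_token'})['value']
print(token)  # abc123
```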
Fetch project details referenced by project URL. | def fetch_project_details(self, url: str) -> Union[JsonType, Dict]:
with self.__session.get(url + ".json") as res:
if res.status_code in range(200, 299):
data = self.preprocess_data(res.json())
data['tasks'].append(
{ # Add README.md file :)
... | [
"def get_project_url(self):",
"def get_project(self, project_name, dataset_name):\n url = self.url() + \"/nd/resource/dataset/{}\".format(dataset_name)\\\n + \"/project/{}/\".format(project_name)\n req = self.remote_utils.get_url(url)\n\n if req.status_code is not 200:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a ResNetW model. | def resnetW(pretrained=False, **kwargs):
model = ResNet(SATBlock, [1,3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model | [
"def create_ResNet(self):\n \n resnet = ResNet50V2(include_top=False, weights='imagenet')\n \n dense_1 = Dense(128, activation='relu')\n dense_2 = Dense(128, activation='relu')\n dense_3 = Dense(1, activation='sigmoid')\n\n\n model = Sequential()\n model.add(I... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate relative humidity from drybulb temperature T (in degC) and dewpoint temperature Td (in degC), based on equations (4) and (5) (mode=0) or equations (2) and (3) (mode=1) from | def calc_RH_from_T_Td(T, Td, mode=0):
if mode == 0:
Tk = T + SHR_CONST_TKFRZ
Tdk = Td + SHR_CONST_TKFRZ
es = np.exp( -6096.9385 * Tk**(-1) + 21.2409642 - 2.711193e-2 * Tk + \
1.673952e-5 * Tk**2.0 + 2.433502 * np.log(Tk))
e = np.exp( -6096.9385 * Tdk**(-1) + 21.2409642 - 2.7... | [
"def compute_dewpoint(t, h):\n tempC = (t-32)*5/9 # Convert temperatyre from deg F to deg C\n rh = h / 100\n\n b = 18.678\n c = 257.14 # deg C\n \n gamma = math.log(rh) + (b*tempC)/(c + tempC)\n\n tdp = c * gamma / (b - gamma)\n\n tdp_F = (9/5) * tdp + 32 \n\n return tdp_F",
"def calcu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
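Whatever vapor-pressure formula is used, RH is the ratio e/e_s of actual to saturation vapor pressure evaluated at Td and T respectively. A sketch using the widely cited Magnus approximation (not the formula in the row above, whose source citation is truncated):

```python
import numpy as np

def rh_magnus(T, Td):
    """RH (%) from dry-bulb T and dewpoint Td, both in degC, via the Magnus
    approximation e_s(T) = 6.112 * exp(17.62*T / (243.12 + T)) in hPa."""
    es = 6.112 * np.exp(17.62 * T / (243.12 + T))   # saturation vapor pressure
    e = 6.112 * np.exp(17.62 * Td / (243.12 + Td))  # actual vapor pressure
    return 100.0 * e / es

print(rh_magnus(25.0, 15.0))  # ~53.9
```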
No. 1 tests collection for BodyStructure. | def test_bodystructure_1(base_settings):
filename = base_settings["unittest_data_dir"] / "bodystructure-example-fetus.json"
inst = bodystructure.BodyStructure.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "BodyStructure" == inst.resource_type
impl_bodystru... | [
"def test_bolg_body(self):\n\t\tself.assertIn(testbolg_body, self.test_bolg['body'])",
"def test_bodystructure_2(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"bodystructure-example-tumor.json\"\n inst = bodystructure.BodyStructure.parse_file(\n filename, content_type=\"applica... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
No. 2 tests collection for BodyStructure. | def test_bodystructure_2(base_settings):
filename = base_settings["unittest_data_dir"] / "bodystructure-example-tumor.json"
inst = bodystructure.BodyStructure.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "BodyStructure" == inst.resource_type
impl_bodystru... | [
"def test_bodystructure_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"bodystructure-example-fetus.json\"\n inst = bodystructure.BodyStructure.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"BodyStructure\" == inst.resource_typ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given an array of integers, find the pair of adjacent elements that has the largest product and return that product. -> int | def adjacentElementsProduct(inputArray):
n = len(inputArray) - 1
maxProd = inputArray[0]*inputArray[1]
for i in range(n):
prod = inputArray[i]*inputArray[i+1]
if prod > maxProd:
maxProd = prod
return maxProd | [
"def maxProductDifference(self, nums: List[int]) -> int:\n nums.sort()\n return nums[-1]*nums[-2]-nums[0]*nums[1]",
"def adjacentElementsProduct(inputArray):\n assert type(inputArray) == list\n answer = inputArray[0]*inputArray[1]\n if len(inputArray) > 2:\n for i in xrange(1,len(inp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
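An idiomatic equivalent pairs each element with its right neighbor via `zip` and takes the best product in one expression:

```python
def adjacent_elements_product(input_array):
    # Pair each element with its right neighbor and take the best product.
    return max(a * b for a, b in zip(input_array, input_array[1:]))

print(adjacent_elements_product([3, 6, -2, -5, 7, 3]))  # 21
```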
Gnome Sort / Stupid Sort | def gnome_sort(input_list):
i=1
while True:
if i < len(input_list)-1:
if input_list[i] >= input_list[i - 1]:
i += 1
if input_list[i] < input_list[i-1]:
input_list[i],input_list[i-1]=input_list[i-1],input_list[i]
i-=1
if i==0... | [
"def sort(self):",
"def selection_sort(array):\n pass",
"def shell_sort(data):\n if len(data) <= 1:\n return data\n\n gap = len(data) // 2\n while gap > 0:\n for i in range(gap, len(data)):\n last = data[i]\n j = i\n while j >= gap and data[j - gap] > l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
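The gnome-sort document above is cut off mid-condition. For reference, a complete version in the standard formulation (not necessarily the truncated original's exact control flow):

```python
def gnome_sort(input_list):
    i = 0
    while i < len(input_list):
        # Step forward while in order (or at the start); otherwise
        # swap the out-of-order pair and step back.
        if i == 0 or input_list[i] >= input_list[i - 1]:
            i += 1
        else:
            input_list[i], input_list[i - 1] = input_list[i - 1], input_list[i]
            i -= 1
    return input_list

print(gnome_sort([5, 2, 4, 1, 3]))  # [1, 2, 3, 4, 5]
```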
Get shape of weights for given conv_op_axes >>> _get_weights_shape(3, [4, 5, 6], 7, ('b', 0, 1, 2, 'c')) [3, 4, 5, 6, 7] >>> _get_weights_shape(3, [4, 5, 6], 7, ('b', 'c', 0, 1, 2)) [3, 7, 4, 5, 6] | def _get_weights_shape(out_channels, kernel_shape, in_channels, conv_op_axes):
# Use b 0 1 2 c shape and then shuffle it according to op axes
weights_shape_b_0_1_2_c = [out_channels, kernel_shape[0], kernel_shape[1],
kernel_shape[2], in_channels]
shuffle_arr = [['b', 0, 1, 2, 'c'].index(ax) for ax... | [
"def _get_kernel_and_weight_shape(self):\n ks = self.kernel_size\n w_shape = (self.out_channels, self.in_channels) + tuple((ks,))\n return ks, w_shape",
"def _get_weights_shape(self, kwargs):\n if kwargs['shape'] is not None:\n return kwargs['shape']\n else:\n if 'initial_valu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
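A self-contained sketch reproducing the axis shuffle from the doctest in the query:

```python
# Self-contained sketch of the axis shuffle; matches the doctest in the query.
def get_weights_shape(out_channels, kernel_shape, in_channels, conv_op_axes):
    base = [out_channels, *kernel_shape, in_channels]        # ('b', 0, 1, 2, 'c') order
    return [base[['b', 0, 1, 2, 'c'].index(ax)] for ax in conv_op_axes]

assert get_weights_shape(3, [4, 5, 6], 7, ('b', 0, 1, 2, 'c')) == [3, 4, 5, 6, 7]
assert get_weights_shape(3, [4, 5, 6], 7, ('b', 'c', 0, 1, 2)) == [3, 7, 4, 5, 6]
```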
This function initializes the transformer of the class. Rerunning this function will reset the transformer. | def initialize_transformer(self, rng):
assert self.irange is not None
self.transformer = make_conv_3d(
irange=self.irange,
input_space=self.input_space,
output_space=self.detector_space,
kernel_shape=self.kernel_shape,
kernel_stride=self.kernel... | [
"def setTransformAlgorithm(self, transformer) -> None:\n ...",
"def set_transformers(self, text_transformer, title_transformer):\n self.text_transformer = text_transformer\n self.title_transformer = title_transformer",
"def __init__(self):\n this = _coin.new_SoTransformerManip()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the output space of the ConvElemwise layer, taking the pooling operator and the hyperparameters of the convolutional layer into consideration. | def initialize_output_space(self):
if self.pool_type is not None:
dummy_batch_size = self.mlp.batch_size
if dummy_batch_size is None:
dummy_batch_size = 2
dummy_detector =\
sharedX(self.detector_space.get_origin_batch(dummy_batch_size))
... | [
"def __init__(self, channels: int, kernel_size: int, bias: bool=True) ->None:\n super(ConvolutionModule, self).__init__()\n assert (kernel_size - 1) % 2 == 0\n self.pointwise_conv1 = ScaledConv1d(channels, 2 * channels, kernel_size=1, stride=1, padding=0, bias=bias)\n self.deriv_balancer... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the index of the first argument found among the remaining arguments, where the index starts at 0 and increments for each additional argument. E.g. if 'Armed' is the first arg, followed by 'Disarmed' and then 'Armed', 1 is returned ('Disarmed' would return 0). | def maStrToIndex(*args):
for num, arg in enumerate(args):
if num != 0 and arg == args[0]:
return num-1
return None | [
"def get_first_path_index(args: Sequence[str]) -> int:\n index = 0\n try:\n while args[index].startswith('-'):\n index += 1\n for pattern in ('json|markdown|md', 'document|table'):\n if re.match(pattern, args[index]):\n index += 1\n except IndexError:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
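The docstring's example, restated as executable checks (function copied from the document cell above):

```python
# The docstring's example as executable checks.
def maStrToIndex(*args):
    for num, arg in enumerate(args):
        if num != 0 and arg == args[0]:
            return num - 1
    return None

assert maStrToIndex('Armed', 'Disarmed', 'Armed') == 1
assert maStrToIndex('Disarmed', 'Disarmed', 'Armed') == 0
assert maStrToIndex('Missing', 'Disarmed', 'Armed') is None
```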
First argument is the string to convert. Arguments that follow are pairs of strings and values (ints). The function returns the value corresponding to the string in the first argument. | def maStrToInt(*args):
for num, arg in enumerate(args):
if num % 2 == 1 and arg == args[0]:
return args[num+1]
return None | [
"def parse_pair(s):\n return tuple(int(x) for x in s.split(','))",
"def parse_to_integer_pair(input_str):\n num_regex=re.compile(r'[0-9]+')\n matches=num_regex.findall(input_str)\n if len(matches)==2:\n #Easy case. we found two number-looking things.\n return tuple([int(m) for m in matche... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
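The companion pairwise lookup, exercised with hypothetical state/value pairs (function copied from above):

```python
# Pairwise string-to-value lookup from above.
def maStrToInt(*args):
    for num, arg in enumerate(args):
        if num % 2 == 1 and arg == args[0]:
            return args[num + 1]
    return None

assert maStrToInt('Armed', 'Disarmed', 0, 'Armed', 1) == 1
assert maStrToInt('Unknown', 'Disarmed', 0, 'Armed', 1) is None
```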
Parse scan data into XML format | def xml_format(scan_output, directory):
date_time = datetime.now().strftime("%d-%m-%Y_Time_%H-%M-%S")
ip = scan_output.get("ip")
xml_file = f'IP_{ip}_DATE_{date_time}.xml'
xml_file = os.path.join(directory, xml_file)
root = ET.Element('scan_results')
date_time = ET.SubElement(root, 'date')
... | [
"def parse_raw(self):\n self.log.info('parse_raw')\n nxs_file_path = path.join(self.file_path, self.file_name)\n try:\n nxs_file = nxs.nxload(nxs_file_path, mode='r')\n except nxs.NeXusError:\n raise nxs.NeXusError('Sardana NeXus file \\'{:s}\\' does not exist!'.for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
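A minimal ElementTree sketch of the structure the function builds (the IP value here is a placeholder):

```python
# Minimal ElementTree sketch of the scan_results layout built above.
import xml.etree.ElementTree as ET
from datetime import datetime

root = ET.Element('scan_results')
ET.SubElement(root, 'date').text = datetime.now().strftime('%d-%m-%Y_Time_%H-%M-%S')
ET.SubElement(root, 'ip').text = '192.0.2.1'          # placeholder scan target
print(ET.tostring(root, encoding='unicode'))
```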
Check the Operating System and create a directory where the output of the scan will be saved | def check_platform():
directory = "scan_output"
mode = 0o666
if platform == "linux" or platform == "linux2": # linux
parent_directory = os.getcwd()
path = os.path.join(parent_directory, directory)
if os.path.exists(path):
return path
try:
os.mkdir(... | [
"def check_or_create_output_dir(self):\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)",
"def create_output_directory(args):\n if args.testrun:\n output_folder = args.outputpath+datetime.datetime.now().strftime('%Y_%m_%d_%Hh%M')+\"_TEST/\"\n output_log_p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
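The per-platform branching above can usually be collapsed; a pathlib-based sketch of the same intent:

```python
# Platform-agnostic sketch of the same "ensure scan_output exists" logic.
from pathlib import Path

def ensure_output_dir(name='scan_output'):
    path = Path.cwd() / name
    path.mkdir(mode=0o666, exist_ok=True)   # no-op if the directory already exists
    return str(path)
```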
Function that returns the forecast temperature value 3 hours from now | def temp_in_3_hours():
# let's select the second result, i.e. weather in 3 hours
temp_in_3 = temp_feels_like()[1].text
    return temp_in_3 | [
"def how_will_temp_be_in_3_hours():\n\n return None",
"def fetch_tmy3_hourly_temp_data(self):\n return fetch_tmy3_hourly_temp_data(self.usaf_id)",
"def temperature_in_celsius(T):\n return T - T_i",
"def get_temperature(self):\n self.get_reading()\n if self.raw_temperature < 0x3FFF:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function that returns a str saying whether the temperature will be colder, warmer or the same in 3 hours | def how_will_temp_be_in_3_hours():
return None | [
"def temp_in_3_hours():\n\n # let's select the second result, i.e. weather in 3 hours\n temp_in_3 = temp_feels_like()[1].text\n\n return None",
"def get_time_welcome_message():\n current_time = datetime.datetime.now()\n hour = current_time.hour\n\n if 6 < hour < 12:\n return 'May the sun ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the attributes to the ax_resp object requested in the ax_req object. If it is not possible to return them, raise MissingRequiredAttrs error | def __call__(self, ax_req, ax_resp, authnInterface, authnCtx):
log.debug('SQLAlchemyAXInterface.__call__ ...')
username = authnCtx.get(SQLAlchemyAXInterface.USERNAME_SESSION_KEYNAME)
if username is None:
raise AXInterfaceConfigError("No username set in session context")
re... | [
"def set_attributes_all_required(instance, attrs, res):\r\n for attr in attrs:\r\n attr_val = res.get(attr)\r\n # all attributes are required\r\n if not attr_val:\r\n print(attr)\r\n abort(400)\r\n setattr(instance, attr, attr_val)\r\n return instance",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator which checks that user is authorized to interact with bot. | def authorization_check(func):
@wraps(func)
def wrapper(*args, **kwargs):
update, ctx = args
try:
if update.message.chat.id not in ctx.bot.user_ids:
raise UserAuthError
return func(*args, **kwargs)
except UserAuthError:
ctx.bot._log.er... | [
"def isAuthorized(self, func, user):\n if user.isAdmin():\n return True\n elif func in perm_map and user.hasPermission(perm_map[func]):\n return True\n else:\n return False",
"def elevated_required(unauthorized):\n def decorator_wrapper(func):\n @wra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
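A stripped-down sketch of the same allow-list decorator pattern; `PermissionError` and the explicit ID list stand in for the bot's `UserAuthError` and registry:

```python
# Allow-list decorator sketch; PermissionError stands in for UserAuthError.
from functools import wraps

def authorization_check(allowed_ids):
    def decorator(func):
        @wraps(func)
        def wrapper(update, ctx, *args, **kwargs):
            if update.message.chat.id not in allowed_ids:
                raise PermissionError('user is not authorized')
            return func(update, ctx, *args, **kwargs)
        return wrapper
    return decorator
```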
Decorator which checks which camera id to use. | def camera_selection(func):
@wraps(func)
def wrapper(*args, **kwargs):
update, ctx = args
cam_id = re.split(CMD_CAM_ID_REGEX, update.message.text)[-1]
cam_meta = ctx.bot.cam_registry.get_conf(cam_id)
# Generate unique event id and remember update object
event_id = uuid.... | [
"def selectCamera(*args):\n\n pass",
"def _set_cameraType(self, *args) -> \"bool\" :\n return _core.Camera__set_cameraType(self, *args)",
"def get_camera(\n self,\n name: Optional[str] = None,\n uid: Optional[str] = None,\n ) -> Union[BaseCamera, Literal[False]]:\n\n if ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the SAML2 AuthnContext of the currently logged-in user's session. session_info is a dict like | def get_authn_ctx(session_info):
try:
return session_info['authn_info'][0][0]
except KeyError:
return None | [
"def _get_session_auth_info(_helper_cfg):\n _session_auth = {}\n _session_info = ['username', 'password']\n for _key in _session_info:\n if _key in _helper_cfg['connection']['session_auth']:\n _session_auth[_key] = _helper_cfg['connection']['session_auth'][_key]\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
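A usage sketch with a hypothetical pysaml2-style `session_info` shape (function copied from above):

```python
# Hypothetical session_info shape; a missing key is tolerated.
def get_authn_ctx(session_info):
    try:
        return session_info['authn_info'][0][0]
    except KeyError:
        return None

info = {'authn_info': [('urn:oasis:names:tc:SAML:2.0:ac:classes:Password', [])]}
assert get_authn_ctx(info) == 'urn:oasis:names:tc:SAML:2.0:ac:classes:Password'
assert get_authn_ctx({}) is None
```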
Segment the floormap into rooms and create a Region Adjacency Graph for the level. Many settings for decorating the graph are possible, by default the simplest form is returned. | def create_graph(floormap, return_dist=False, room_coordinates=False):
# Ensuring that floormap is always a boolean array
    floormap = floormap.astype(bool)  # np.bool is removed in modern NumPy
#floormap = rescale(floormap, 2)
dist = ndi.distance_transform_edt(floormap)
threshold = int(dist.max())
optimal_threshold = 0
number_... | [
"def path_planner(self):\n # Load occupancy grid\n self.load_map_from_disk()\n\n # Transform occupancy grid in array of binary values where 0 represents passable cell and 1 impassable cell\n self.map_grid_binary = np.zeros((self.x_size, self.y_size))\n for iy, ix in np.ndindex(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
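The segmentation hinges on the Euclidean distance transform; a tiny sketch of that step on a toy floormap:

```python
# Distance-transform step from above on a toy floormap (scipy.ndimage assumed).
import numpy as np
from scipy import ndimage as ndi

floormap = np.zeros((8, 8), dtype=bool)
floormap[1:7, 1:7] = True                       # one square room
dist = ndi.distance_transform_edt(floormap)     # distance to the nearest wall cell
assert int(dist.max()) == 3                     # centre sits three cells from a wall
```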
Create the level graph from the floormap and compute some topological features on the graph. | def topological_features(floormap, prepare_for_doom=False):
roommap, room_graph, dist = create_graph(floormap, return_dist=True, room_coordinates=prepare_for_doom)
room_props = regionprops(roommap)
for r in range(1, roommap.max() + 1):
# Room Size
room_graph.node[r]["area"] = room_props[r - ... | [
"def build_graph(self, msg):\n self.map = msg\n self.map_points = geodesy.wu_point.WuPointSet(msg.points)\n self.graph = makeGraph(msg)\n\n # process each feature marked as a route\n for feature in itertools.ifilter(is_route, self.map.features):\n oneway = is_oneway(fea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
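A tiny check of the per-room area lookup used above (skimage's `regionprops` API assumed):

```python
# Per-room area lookup via skimage.measure.regionprops (labels start at 1).
import numpy as np
from skimage.measure import regionprops

roommap = np.array([[1, 1, 0],
                    [1, 1, 0],
                    [0, 0, 2]])
props = regionprops(roommap)
assert props[0].area == 4 and props[1].area == 1
```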
Compute map quality metrics (entropy, encoding error, number of corners) for the provided sample. | def quality_metrics(sample, maps):
metrics = dict()
for m, mname in enumerate(maps):
hist = np.histogram(sample[:, :, m], bins=255, range=(0, 255), density=True)[0]
metrics['entropy_{}'.format(mname)] = entropy(hist)
if mname in ['floormap', 'wallmap']:
metrics['encoding_erro... | [
"def updateQuality(self): \n\t\tmethod = self.parameters[\"Method\"]\n\t\tif self.parameters[\"QualityValue\"]: \n\t\t\tquality = self.parameters[\"QualityValue\"]\n\t\t\tLogging.info(\"Setting quality to raw \", quality, kw = \"rendering\")\n\t\t\tif method in [TEXTURE_MAPPING]:\n\t\t\t\tLogging.info(\"Setting max... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
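The entropy term above reduces to a histogram plus `scipy.stats.entropy`; a sketch on a random channel:

```python
# Per-channel entropy as computed above (scipy.stats.entropy assumed).
import numpy as np
from scipy.stats import entropy

channel = np.random.randint(0, 256, size=(64, 64))
hist = np.histogram(channel, bins=255, range=(0, 255), density=True)[0]
print(entropy(hist))    # flatter histogram -> higher entropy
```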
Take as argument data with columns as features and rows as observations. Return eigenvectors and eigenvalues, both sorted by eigenvalue (variance). | def pca(data):
# Obtain covariance matrix
sigma = np.cov(data.T) # np.cov wants features as rows and observations as columns (so transpose the data)
# Obtain sorted eigenvalues and eigenvectors
eigvals, eigvect = np.linalg.eig(sigma)
isorted = eigvals.argsort()[::-1]
sorted_eigvals = eigvals[iso... | [
"def eig(S):\n\n eigenvalues, eigenvectors = np.linalg.eig(S)\n\n idx = eigenvalues.argsort()[::-1]\n eigenvalues = eigenvalues[idx]\n eigenvectors = eigenvectors[:, idx]\n\n return eigenvalues, eigenvectors # <-- EDIT THIS to return eigenvalues and corresp eigenvectors",
"def eig(S):\n eigenVa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
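A usage sketch of the eigendecomposition route above, end to end on random data:

```python
# Eigendecomposition PCA from above, on random data.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 3))       # 100 observations, 3 features
sigma = np.cov(data.T)                 # np.cov expects features as rows
eigvals, eigvects = np.linalg.eig(sigma)
order = eigvals.argsort()[::-1]        # descending variance
eigvals, eigvects = eigvals[order], eigvects[:, order]
assert eigvals[0] >= eigvals[-1]
```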
Obtains the loss function as the sum of the least squares among the clusters. | def get_loss(list_clusters):
loss = 0
    for cluster in list_clusters:  # the index from enumerate was unused
mu = np.mean(cluster, axis = 0)
loss_cluster = sum(np.linalg.norm((cluster - mu), axis=1)**2)
loss += loss_cluster
return loss | [
"def loss(self, X, labels):\n loss = 0\n for sample, label in zip(X, labels):\n loss += np.linalg.norm(sample -\n self.cluster_centers[int(label)])**2\n return loss",
"def kmeans_loss(data, n_clusters):\r\n data = np.array(data)\r\n data = da... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
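A one-cluster numeric check of the sum-of-squares loss defined above:

```python
# Two points at distance 1 from their mean -> loss of exactly 2.0.
import numpy as np

cluster = np.array([[0.0, 0.0], [2.0, 0.0]])      # mean is (1, 0)
mu = np.mean(cluster, axis=0)
loss = sum(np.linalg.norm(cluster - mu, axis=1) ** 2)
assert loss == 2.0
```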
Takes an n by m (observations by features) matrix and assigns clusters based on each point's distance from the centroids. Returns the cluster indexes if indexes_only is True, and the clusters themselves otherwise. | def kmeans_clustering(data, centroids_list, indexes_only=False):
## Compute distances ##
dist_lists = []
k = len(centroids_list)
# Obtain a list of distances for each cluster
for i in range(0,k):
dist_to_c = np.linalg.norm(data - centroids_list[i], axis=1)
dist_lists.append(dist_to_... | [
"def kmeans_clustering(data, centroids_list, indexes_only=False):\n ## Compute distances ##\n dist_lists = []\n k = len(centroids_list)\n # Obtain a list of distances for each cluster\n for i in range(0,k): \n dist_to_c = np.linalg.norm(data - centroids_list[i], axis=1)\n dist_lists.app... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
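The core of the assignment step is an argmin over per-centroid distances; a condensed sketch on toy 1-D data:

```python
# Assignment step: each observation goes to its nearest centroid.
import numpy as np

data = np.array([[0.0], [1.0], [9.0], [10.0]])
centroids = [np.array([0.5]), np.array([9.5])]
dists = np.stack([np.linalg.norm(data - c, axis=1) for c in centroids])
labels = dists.argmin(axis=0)            # index of the closest centroid per point
assert labels.tolist() == [0, 0, 1, 1]
```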
Given a dataset (observations by features matrix), find the optimal k clusters that minimize a least square loss function. | def kmeans_fit(data, k):
## Initialization ##
centroids_list = []
# Initialize centroids (just take the first points of the dataset)
for row in range(0,k):
mu = data[row,:]
centroids_list.append(mu)
# Assign clusters (based on distance from the centroids)
cluster_list = kmeans_... | [
"def kmeans(dataset, k):\n np.random.seed(45)\n #initialising the means randomly\n means = list(np.random.choice(dataset.shape[0],k, replace=False))\n means = dataset[means]\n prevmean = []\n #running until converged\n while True:\n #initialising the k empty clusters\n clusters = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
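A condensed fit loop sketching the same assign/update iteration (initializes with the first k points, as above, and assumes no cluster empties out):

```python
# Condensed assign/update loop for k-means.
import numpy as np

def kmeans_fit(data, k, iters=100):
    centroids = data[:k].astype(float).copy()    # init: first k points
    for _ in range(iters):
        d = np.stack([np.linalg.norm(data - c, axis=1) for c in centroids])
        labels = d.argmin(axis=0)
        new = np.array([data[labels == j].mean(axis=0) for j in range(k)])
        if np.allclose(new, centroids):          # converged
            break
        centroids = new
    return labels, centroids

labels, _ = kmeans_fit(np.array([[0.0], [1.0], [9.0], [10.0]]), 2)
assert labels.tolist() == [0, 0, 1, 1]
```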
Scale a surface based on a percentage of the height of a comparison surface. Preserves aspect ratio of the child. Returns the scaled surface and its rectangle | def scale(child_surface, comparison_surface, ratio):
child_rect = child_surface.get_rect()
child_ratio = child_rect.width / child_rect.height
# this is the value that the rectangle will be scaled to
comparison_height = comparison_surface.get_height()
# scale child rectangle
child_rect.height =... | [
"def scale(self, max_side: int):\n if self.scaled:\n return\n elif max(self.width, self.height) > max_side:\n ratio_w = float(max_side) / self.width\n ratio_h = float(max_side) / self.height\n w = self.width\n h = self.height\n\n self.w... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
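The geometry reduces to a couple of lines; a pure-math sketch of the aspect-preserving scale without pygame surfaces:

```python
# Aspect-preserving scale maths from above, without pygame.
def scaled_size(child_w, child_h, comparison_h, ratio):
    new_h = int(comparison_h * ratio)
    new_w = int(new_h * (child_w / child_h))   # preserve the child's aspect ratio
    return new_w, new_h

assert scaled_size(200, 100, 600, 0.5) == (600, 300)
```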
Accepts a customer parameters dict and overrides default args to create a DAG object | def create_dag(customer):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email': 'xyz@xyz.com',
'retries': 1,
'retry_delay': timedelta(minutes=5),
'start_date': datetime(2017, 1, 1, 0, 0),
'end_date': None
}
"""
This allows DAG pa... | [
"def __init__(__self__, *,\n deployment_duration_in_minutes: pulumi.Input[float],\n growth_factor: pulumi.Input[float],\n replicate_to: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n final_bake_time_in_minutes: ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
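A minimal per-customer DAG factory in the same spirit; the `customer` keys, DAG id scheme, and default schedule here are assumptions, not the row's actual code:

```python
# Hypothetical per-customer DAG factory; customer keys are illustrative only.
from datetime import datetime, timedelta
from airflow import DAG

def create_dag(customer):
    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'retries': 1,
        'retry_delay': timedelta(minutes=5),
        'start_date': datetime(2017, 1, 1),
    }
    default_args.update(customer.get('default_args', {}))    # per-customer overrides
    return DAG(dag_id='pipeline_{}'.format(customer['name']),
               default_args=default_args,
               schedule_interval=customer.get('schedule', '@daily'))
```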