query | document | negatives | metadata |
|---|---|---|---|
Assert predefined config path. | def _assert_predefined_config_path(
self,
framework: str,
domain: str,
domain_flavour: str,
expected_filename: str,
) -> None:
result = get_predefined_config_path(framework, domain, domain_flavour)
expected = os.path.join(
os.path.abspath(
... | [
"def test_config_path(setup):\n\n assert Log.CONFIG_FILE is not None",
"def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS",
"def test_expected_config(expectedconfig):\n expected = expect... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if development env is activated. | def test_is_development_env(self) -> None:
os.environ.update({"NC_MODE": "development"})
is_develop = is_development_env()
self.assertTrue(is_develop) | [
"def is_dev(self):\n return (self._environ.get('WIKIA_ENVIRONMENT') or 'dev') == 'dev'",
"def is_development():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')",
"def is_dev_env():\n return os.getenv(\"PYTEST_ENV\", \"dev\") == \"dev\"",
"def isDevelopmentServer():\n retur... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test release tag building. | def test_release_tag(self) -> None:
self.assertEqual("v3.14.15", release_tag()) | [
"def test_release_deployment_run(self):\n pass",
"def test_release_version():\n assert (\n RELEASE_TAG == f\"v{PROJECT_VERSION}\"\n ), \"RELEASE_TAG does not match the project version\"",
"def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(conte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Turn a URL into a PIL image. Can throw a timeout error. | def url2img(url : str, timeout = 1) -> Image:
response = requests.get(url, timeout = timeout)
return Image.open(BytesIO(response.content)) | [
"def download_pil_image(self, url):\r\n return Image.open(urlopen(url))",
"def download_image(url):\n buffer = BytesIO()\n download_from_url(url, buffer, pbar=False)\n buffer.seek(0)\n return Image.open(buffer)",
"def load_image(url):\n\tfd = urllib2.urlopen(url)\n\treturn StringIO.StringIO(f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
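Since `url2img` can raise a timeout error, callers will usually want to catch it. A minimal usage sketch (the URL and the 2-second timeout are illustrative, not from the dataset):

```python
import requests

try:
    # Hypothetical example URL; url2img is the function from the row above.
    img = url2img("https://example.com/photo.jpg", timeout=2)
    img.thumbnail((128, 128))  # PIL images support in-place downscaling
except requests.exceptions.Timeout:
    print("image download timed out")
```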
Goes through and removes all values in frames_rec and frames_proc that are outside of the window | def clean_window(self) -> None:
prune_before = time.time() - self.window_size
while self.frames_rec:
left = self.frames_rec.popleft()
if left[1] >= prune_before:
self.frames_rec.appendleft(left)
break
self.sum_frames_rec -= left[... | [
"def clear_restriction_details(self):\n\t\n\t\tif getattr(self,'new_seq_win_objs',None):\n\t\t\tfor obj in self.new_seq_win_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\t\tself.new_seq_win_objs={}\n\t\t\tself.donepos={}\n\t\t\t#\n\t\t\tfor obj in self.temp_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if we have a full window of information available, False otherwise | def have_window(self) -> bool:
return (
self._first_enc_at is not None
and (time.time() > self._first_enc_at + self.window_size)
) | [
"def has_windows(ds: Dataset) -> bool:\n return window_start in ds and window_stop in ds",
"def isWindowOpen(self):\n try:\n _ = self.browser.window_handles\n return True\n except:\n return False",
"def _can_render_now(self):\n # First check that no updat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Spawns the ffmpeg process | def _spawn_ffmpeg(self) -> None:
if self.ffmpeg_proc is not None:
raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '
+ f'{self.ffmpeg_proc} (not None)')
args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',
'-s', f'{self.... | [
"def run_ffmpeg(self, task):\n ffmpeg_file = task['file'].replace(\".ts\", \".mp4\")\n\n cmd = [\n ['ffmpeg', '-nostats', '-loglevel', 'quiet', '-y', '-i',\n task['file']],\n self.config['ffmpeg-flags'].split(),\n [ffmpeg_file]\n ]\n cmd = [it... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cleans up the ffmpeg process. This will wait for it to terminate | def _cleanup_ffmpeg(self) -> None:
self.ffmpeg_proc.communicate()
self.ffmpeg_proc = None | [
"def close(self):\n self.ffmpeg_process.stdin.close()\n self.ffmpeg_process.wait()",
"def async_stop_ffmpeg(self):\n return self.ffmpeg.close()",
"def stop(self):\n if not self.ffmpeg:\n raise RuntimeError(\"ffmpeg is not running\")\n self.ffmpeg.send_signal(signal.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Registers the specified queue-like object as something frames can be received from. Must have get_nowait and empty members. | def register_queue(self, queue) -> None:
if queue is None:
raise ValueError('queue is None')
if not hasattr(queue, 'empty'):
raise ValueError(f'queue {queue} is missing empty member')
if not hasattr(queue, 'get_nowait'):
raise ValueError(f'queue {queue} ... | [
"def push(self, obj):\n\n self.in_queue.append(obj)",
"def example(example_object, queue):\n queue.put(example_object)",
"def example(example_object, queue_object):\n queue_object.put(example_object)",
"def add(self, obj):\n self._queue.append(obj)",
"def queue_append(self, object):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks for items from each of the receive queues and pushes them onto the local memory dict. Returns the number of frames received | def check_queues(self) -> int:
nframes = 0
for queue in self.receive_queues:
if not queue.empty():
nframes += 1
frame, img_bytes = queue.get_nowait()
if frame < self.next_frame:
raise ValueError('received frame ... | [
"def queue_lengths(self):\r\n out = []\r\n for probe in self.__probes.values():\r\n if probe.complete():\r\n out.append(probe.queue_length)\r\n return out",
"def current_queues(petrol_stations):\n current_queues = {}\n for number_of_station in petrol_stations:\n in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Processes the next frame to the ffmpeg process if it is available. Returns True if we processed a frame, False if we did not. | def process_frame(self) -> bool:
if self.next_frame not in self.ooo_frames:
return False
img_bytes = self.ooo_frames.pop(self.next_frame)
for kb_start in range(0, len(img_bytes), self.block_size):
self.ffmpeg_proc.stdin.write(
img_bytes[kb_start:... | [
"def process(self, next_byte) -> bool:\r\n\r\n return_flag = False\r\n\r\n # decoding_str = 'utf-8' # If payload bytes are 0xaa etc they're invalid in utf-8\r\n decoding_str = 'iso-8859-1' # Shouldn't balk in the same way that utf-8 does\r\n\r\n # print('next_byte: ' + bytes([next_byte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to get the color of the dock | def _getDockColor(self, plane):
color = (0,0,0)
if plane.zAxis != -1:
color = self.globalAxis[plane.zAxis].color[0:3]
return color | [
"def GetDrawColor(self):\n ...",
"def tkColor(self):\n return (self.red/255.0, self.green/255.0, self.blue/255.0)",
"def get_color(self, workspace, output):\r\n if workspace['focused']:\r\n if output['current_workspace'] == workspace['name']:\r\n return self.focuse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper method to create default request meta. All 'SERP' and 'PRODUCT' requests need to implement this. It will propagate the meta information from the original/parent request to the child requests. | def create_default_request_meta(
response: Union[ScrapyTextResponse, ScrapyHttpResponse], original_url: Optional[str] = None
) -> Dict:
return {
"original_URL": original_url if original_url else response.url,
"category": response.meta.get("category"),
"gender": r... | [
"def pre_create_product(\n self,\n request: product_search_service.CreateProductRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[product_search_service.CreateProductRequest, Sequence[Tuple[str, str]]]:\n return request, metadata",
"def request_info(self, request):\n\n\t\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if a given bbUser object is stored in the database. Currently only checks if a user with the same ID is stored in the database, not if the objects are the same. | def userObjExists(self, user : bbUser.bbUser) -> bool:
return self.userIDExists(user.id) | [
"def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0",
"def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reset the stats for the user with the specified ID. | def reinitUser(self, id : int):
id = self.validateID(id)
# ensure the ID exists in the database
if not self.userIDExists(id):
raise KeyError("user not found: " + str(id))
# Reset the user
self.users[id].resetUser() | [
"def reset_user_dish(user_id):\n sess = tables.Session()\n user_obj = sess.query(tables.User).filter(tables.User.user_id == user_id).first()\n user_obj.curr_dish = None\n user_obj.quantity_dish = None\n sess.commit()",
"def reset_metric_stats(self):\n\n self.__stats_table = {}",
"def users... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a new bbUser object with the specified ID and add it to the database | def addUser(self, id : int) -> bbUser.bbUser:
id = self.validateID(id)
# Ensure no user exists with the specified ID in the database
if self.userIDExists(id):
raise KeyError("Attempted to add a user that is already in this bbUserDB")
# Create and return a new user
new... | [
"def create_user(user: User):\n db_session.add(user)\n db_session.commit()\n return user.id",
"def _add_user(self):\n user = User(email=\"email@test.com\", password=\"password\")\n db.session.add(user)\n db.session.commit()",
"def new(cls, user_id, **kwargs):\n return Dropbox.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If a bbUser exists in the database with the requested ID, return it. If not, create and store a new bbUser and return it. | def getOrAddID(self, id : int) -> bbUser.bbUser:
return self.getUser(id) if self.userIDExists(id) else self.addUser(id) | [
"def getUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n return self.users[id]",
"def addUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n # Ensure no user exists with the specified ID in the database\n if self.userIDExists(id):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove the bbUser object with the specified ID from the database. ⚠ The bbUser object is deleted from memory. | def removeUser(self, id : int):
id = self.validateID(id)
if not self.userIDExists(id):
raise KeyError("user not found: " + str(id))
del self.users[id] | [
"def deleteUser(self):\n db.session.delete(self)\n db.session.commit()",
"def delete_user(self):\n\n \tUser.user_list.remove(self)",
"def delete_user(self):\n User.users_list.remove(self)",
"def delete_user(self):\n\n User.user_list.remove(self)",
"def delete_user(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch the bbUser from the database with the given ID. | def getUser(self, id : int) -> bbUser.bbUser:
id = self.validateID(id)
return self.users[id] | [
"def get_user_by_id(id):\n\n\treturn User.query.get(id)",
"async def fetch_user(self, id: str):\n user = await self.http.get_user(id)\n return User(state=self.http, data=user)",
"def _get_user_by_id(self, _id):\n user_resp = self._db.Users(database_pb2.UsersRequest(\n request_typ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Serialise this bbUserDB into dictionary format. | def toDict(self, **kwargs) -> dict:
data = {}
# Iterate over all user IDs in the database
for id in self.getIds():
# Serialise each bbUser in the database and save it, along with its ID to dict
# JSON stores properties as strings, so ids must be converted to str first.
... | [
"def to_dict(self):\n user_info = {\n 'id': self.identifier,\n 'full_name': self.full_name,\n 'username': self.username,\n 'email': self.email,\n 'bio': self.bio,\n 'created_at': self.created_at,\n }\n return user_info",
"def g... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get summarising information about this bbUserDB in string format. Currently only the number of users stored. | def __str__(self) -> str:
return "<bbUserDB: " + str(len(self.users)) + " users>" | [
"def count_users():\n result = count_users_db()\n return str(result[0])",
"def get_stats(self):\n result = {\n 'datetime': dt.datetime.now().strftime('%d.%m.%Y %H:%M:%S'),\n 'total': db.session.query(User). \\\n count(),\n 'unverified': db.session.query... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct a bbUserDB from a dictionary-serialised representation; the reverse of bbUserDB.toDict() | def fromDict(cls, userDBDict : dict, **kwargs) -> bbUserDB:
# Instance the new bbUserDB
newDB = bbUserDB()
# iterate over all user IDs to spawn
for id in userDBDict.keys():
# Construct new bbUsers for each ID in the database
# JSON stores properties as strings, so... | [
"def toDict(self, **kwargs) -> dict:\n data = {}\n # Iterate over all user IDs in the database\n for id in self.getIds():\n # Serialise each bbUser in the database and save it, along with its ID to dict \n # JSON stores properties as strings, so ids must be converted to st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Screen with calendar for one month | def create_month_scr(self, month, toogle_today=False):
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={"top": 1... | [
"def calendar(request):\n\treturn render_to_response('calendar.html')",
"def calendar(self):\n j = 0\n k = 1\n for i in self.cal.itermonthdays(self.year, self.month):\n j += 1\n if (i == 0) or ():\n self.dayBut = tk.Button(\n self.window... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get day value from pressed button | def get_btn_value(self, inst):
self.active_date[0] = int(inst.text)
selected = [self.active_date[0], self.active_date[1], self.active_date[2]]
global selectedDates
if selected in selectedDates:
selectedDates.remove(selected)
else:
selectedDates.append(... | [
"def _get_day(self):\n return self.datetime.day",
"def get_day():\n return handle_invalid_inputs(question_4, days)",
"def day(sym, date):\n return get(sym, date, date)[0][1]",
"def on_Calendar_day_selected_double_click(self, widget):\n try:\n agno, mes, dia = variables.calendar.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Project a vector onto an L1 ball. | def project_L1_ball(x: "fasta.linalg.Vector", t: float) -> "fasta.linalg.Vector":
# By Moreau's identity, we convert to proximal of dual problem (L-inf norm)
return x - project_Linf_ball(x, t) | [
"def paddle_bounce(self):\n # TODO: Change this so it will rebound the ball back based on\n # when the ball struck the paddle. Above the center returns the\n # ball with a positive y, below the center returns with a -y.\n # P |/ y=+++\n # A |/ y=++\n # D |/ y=+\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The shrink (soft-thresholding) operator, which is also the proximal operator for the L1 norm. The shrink operator reduces the magnitudes of all entries in x by t, leaving them at zero if they're already less than t. | def shrink(x: np.ndarray, t: float) -> np.ndarray:
return np.sign(x) * np.maximum(np.abs(x) - t, 0) | [
"def convert_softshrink(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n threshold = _expr.const(op.attr(\"lambda\"), dtype=dtype)\n zeros = _op.zeros_like(x)\n out = _op.where(x < -threshold, x + threshold, zeros) + _op.where(\n x > threshold,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
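A quick numeric check of the shrink operator from the row above, assuming NumPy: entries with magnitude at most t collapse to zero, and larger entries move toward zero by t.

```python
import numpy as np

def shrink(x: np.ndarray, t: float) -> np.ndarray:
    # Soft-thresholding: reduce each magnitude by t, clamping at zero.
    return np.sign(x) * np.maximum(np.abs(x) - t, 0)

x = np.array([-3.0, -0.5, 0.0, 0.4, 2.0])
print(shrink(x, 1.0))  # [-2. -0.  0.  0.  1.]
```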
Build ResNet backbone and position embedding according to config | def build_backbone(config):
assert config.MODEL.BACKBONE in ['resnet50', 'resnet101'], "backbone name is not supported!"
backbone_name = config.MODEL.BACKBONE
dilation = False
train_backbone = not config.EVAL
return_interm_layers = False #TODO: impl case True for segmentation
position_embedding... | [
"def __init__(\n self,\n config, # the config is loaded from scratch later on anyways\n protstonkgs_model_type: str = PROTSTONKGS_MODEL_TYPE,\n lm_model_type: str = NLP_MODEL_TYPE,\n lm_vocab_size: int = 28996,\n prot_start_idx: int = 1024,\n prot_model_type: str = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the data for the supplied model with or without auxiliary data from the model. The model is needed as the order of the data depends on the order of the channels in the model. | def data(self, model, with_aux=True):
try:
observed_data = sum(
(self.observations[c] for c in model.config.channels), []
)
except KeyError:
log.error(
"Invalid channel: the workspace does not have observation data for one of the chann... | [
"def get_model_data(self):\n sen_info=self._get_sensor_info()\n cam_info=lib.is_GetCameraInfo(self.hcam)\n dll_ver=lib.is_GetDLLVersion()\n dll_ver=\"{}.{}.{}\".format((dll_ver>>24),(dll_ver>>16)&0xFF,dll_ver&0xFFFF)\n return self.ModelData(py3.as_str(sen_info.strSensorName),py3.a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This page shows detailed stats on an individual switch queried by serial number | def switch_info(serial):
detail = getSwitchDetail(serial)
intdetail = getInterfaceDetail(serial)
try:
raw_data = open(f"raw_output/{serial}.txt", "r").read().splitlines()
except:
raw_data = "None collected yet"
return render_template(
"detail.html",
title=serial,
... | [
"def getSwitchInfo():\n swDB = switchdb.DB()\n raw_info = swDB.getAllSummary()\n switchList = []\n for row in raw_info:\n row = list(row)\n switch = {}\n switch[\"name\"] = row[0]\n switch[\"serial\"] = row[1]\n switch[\"swver\"] = row[2]\n switch[\"ip\"] = row[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check DB for last runtime of backend script. This is published on the main page to see when stats were last updated | def getLastUpdate():
swDB = switchdb.DB()
lastupdate = swDB.getLastUpdate()
swDB.close()
return lastupdate | [
"def check_last_run_table(self, component):\n logging.info(\"Getting the last run time in seconds for component: {0}\".format(component))\n last_record_time = '2000-01-01 00:00:00'\n last_run = LastRun.objects.filter(component=component).values('last_run')\n for last_run in last_run:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Query DB for summary info on all switches currently monitored | def getSwitchInfo():
swDB = switchdb.DB()
raw_info = swDB.getAllSummary()
switchList = []
for row in raw_info:
row = list(row)
switch = {}
switch["name"] = row[0]
switch["serial"] = row[1]
switch["swver"] = row[2]
switch["ip"] = row[3]
switch["chec... | [
"def statistics(self):\n uri = common.genuri('lswitch', self.lswitch_uuid, 'lport', self.uuid,\n 'statistic')\n return super(LSwitchPort, self)._action(\"GET\", uri)",
"def get_switch_summary(self):\n switch_state = self._get_switch_state(None, None)\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call to DB to delete a device by serial number | def deleteDevice(serial):
swDB = switchdb.DB()
swDB.deleteBySerial(serial)
swDB.close() | [
"def db_delete_device_record(db_path, rec_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Does the user have the permission to publish data? | def has_perm_publish_data(user):
has_perm_publish(user, rights.PUBLISH_DATA) | [
"def has_perm_publish(user, codename):\n publish_perm = permissions_api.get_by_codename(codename)\n if not user.has_perm(\n publish_perm.content_type.app_label + \".\" + publish_perm.codename\n ):\n raise AccessControlError(\n \"The user doesn't have enough rights to publish.\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Can read list of data. | def can_read_list_data_id(func, list_data_id, user):
if user.is_superuser:
return func(list_data_id, user)
# check anonymous access
_check_anonymous_access(user)
list_data = func(list_data_id, user)
check_can_read_list(list_data, user)
return list_data | [
"def storage_can_read(self):\n return True",
"def test_read_data(self):\n pass",
"def read_data(self):\n pass",
"def check_can_read_list(document_list, user):\n if document_list.count() > 0:\n # exclude own data\n other_users_documents = document_list.exclude(user_id=str(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Can read data, given a query. | def can_read_data_query(
func,
query,
user,
workspace_filter=None,
user_filter=None,
order_by_field=DATA_SORTING_FIELDS,
):
# check anonymous access
_check_anonymous_access(user)
# update the query
query = _update_can_read_query(query, user, workspace_filter, user_filter)
# g... | [
"def read(self, query):\r\n t1 = time.time()\r\n if self.database in ['redshift', 'postgres']:\r\n ret = postgres_helper.fetchall(config=self.conf, sql=query)\r\n else:\r\n raise Exception(\"database not supported yet: '{}'\"\r\n .format(self.dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Can read data, given an aggregate query. | def can_read_aggregate_query(func, query, user):
if user.is_superuser:
return func(query, user)
# check anonymous access
_check_anonymous_access(user)
# update the query
query = _update_can_read_aggregate_query(query, user)
# get list of data
data = func(query, user)
return da... | [
"def can_read_data_query(\n func,\n query,\n user,\n workspace_filter=None,\n user_filter=None,\n order_by_field=DATA_SORTING_FIELDS,\n):\n # check anonymous access\n _check_anonymous_access(user)\n # update the query\n query = _update_can_read_query(query, user, workspace_filter, user... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update query with access control parameters. | def _update_can_read_query(
query, user, workspace_filter=None, user_filter=None
):
accessible_workspaces = _get_read_accessible_workspaces_by_user(user)
# update query with workspace criteria
query = django_raw_query.add_access_criteria(
query, accessible_workspaces, user, workspace_filter, us... | [
"def updatequery(self, query):\n self.query = query\n self._querydict = self.dict_from_query(query)",
"def update_query_params(self):\n\n params = dict(self.params)\n params['api_key'] = self.api_key\n # Luigi seems to use `normalize` to freeze (make hashable) a normal dict so c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get read accessible workspaces by user. | def _get_read_accessible_workspaces_by_user(user):
if not settings.CAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT and user.is_anonymous:
accessible_workspaces = []
else:
# workspace case
# list accessible workspaces
accessible_workspaces = [
workspace.id
for workspa... | [
"def get_accessible_spaces(user):\n if not user:\n return []\n obj_list = get_objects_for_user(user, 'access_space',Space)\\\n .order_by('-created_at')\n return obj_list",
"def get_own_spaces(user):\n if not user:\n return []\n own_spaces = []\n accessible_spacs = ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Can user write data in workspace. | def can_write_data_workspace(func, data, workspace, user):
return can_write_in_workspace(
func, data, workspace, user, rights.PUBLISH_DATA
) | [
"def _check_can_write_in_workspace(workspace, user):\n accessible_workspaces = (\n workspace_api.get_all_workspaces_with_write_access_by_user(user)\n )\n if workspace not in accessible_workspaces:\n raise AccessControlError(\n \"The user does not have the permission to write into t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the calculated_at of this StandardizedTierTier. | def calculated_at(self, calculated_at):
self._calculated_at = calculated_at | [
"def tier(self, tier):\n\n self._tier = tier",
"def scheduled_at(self, scheduled_at):\n\n self._scheduled_at = scheduled_at",
"def vat(self, vat):\n\n self._vat = vat",
"def started_at(self, started_at):\n\n self._started_at = started_at",
"def set_tier(self, tier):\n self... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the explanation of this StandardizedTierTier. | def explanation(self, explanation):
self._explanation = explanation | [
"def tier_explanation(self, tier_explanation):\n\n self._tier_explanation = tier_explanation",
"def tier(self, tier):\n\n self._tier = tier",
"def ShowTier(self, tier):\n self.ShowNothing()",
"def explanation(self, explanation):\n if explanation is None:\n raise ValueErr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function returns the number of files in the ISO as an integer | def NumberOfFilesInISO(XISOPath, XSystemUpdateFolder):
command='./extract-xiso -l '
if XSystemUpdateFolder == True:
command = command + '-s '
command = command + '"' + XISOPath + '"'
print(command)
commandOut = commands.getstatusoutput(command)
commandOut = commandOut[1].split('\n')
... | [
"def get_num_files(self):\r\n return self.nfile",
"def fileCount(self):\n pass",
"def evio_files_count(self):\n # the last file is something like: hd_rawdata_011410_055.evio\n if not self.evio_files:\n return None\n last_file = self.evio_last_file\n u_pos = l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a dataframe from a parquet file. | def parquet(path, *args, **kwargs):
try:
df = Spark.instance.spark.read.parquet(path, *args, **kwargs)
except IOError as error:
logging.error(error)
raise
return df | [
"def load_df(filename: str) -> dd.DataFrame:\n return dd.read_parquet(filename)",
"def read_parquet(\n filename,\n dataset_class=dataset.pandas_dataset.PandasDataset,\n expectation_suite=None,\n profiler=None,\n *args, **kwargs\n):\n df = pd.read_parquet(filename, *args, **kwargs)\n df = _... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns GP mean and variance in "scaled" space (same as gpflow model.predict_f with bounds applied) | def predict(self, x_scaled):
model_var_with_prior = self.mult_var_by_prior(x_scaled)
scaled_space_mean = self.y_scaler.transform(tf.reshape(tf.math.exp(self.log_prob(x_scaled)), [-1, 1]))
return scaled_space_mean, tf.reshape(model_var_with_prior, [-1, 1]).numpy() | [
"def predict_mean_and_var(self, Fmu, Fvar):\n gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)\n gh_w /= np.sqrt(np.pi)\n gh_w = gh_w.reshape(-1, 1)\n shape = tf.shape(Fmu)\n Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)]\n X = gh_x[None, :] * tf.sqrt(2.0 * ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect social account to existing account, if existing email found | def pre_social_login(self, request, sociallogin):
if sociallogin.is_existing:
return
email_addresses = sociallogin.email_addresses
for email in email_addresses:
try:
user_email = EmailAddress.objects.get(email__iexact=email.email)
except Ema... | [
"def connectUser(uname, email):",
"def email_taken(email):\n\n conn = get_connection()\n cur = conn.cursor()\n sql = \"select 1 from useraccount \" \\\n \"where email = %s\"\n cur.execute(sql, (email,))\n result = cur.fetchone()\n cur.close()\n\n # if account doesn't exist\n if re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initializes the object using the list of technology dictionaries that are copied and formatted. Takes an optional parameter for the datetime.date object of the last full BuiltWith scan. | def __init__(self, technologies_list, last_full_builtwith_scan_date=None):
self._technologies_by_name = {}
for technologies_dict in technologies_list:
copied_technologies_dict = copy.deepcopy(technologies_dict)
for name in DATETIME_INFORMATION_NAMES:
copied_te... | [
"def __init__(self, *args, **kwargs):\n #print(kwargs)\n self.data = []\n self.date = kwargs.get('date', date.today() - timedelta(days=1))\n #print(self.date)\n self.period = kwargs.get('period', 1)\n self.unitid = kwargs.get('unitid', '')\n self.unittype = kwargs.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lookup BuiltWith results for the given domain. If API version 2 is used and the get_last_full_query flag enabled, it also queries for the date of the last full BuiltWith scan. | def lookup(self, domain, get_last_full_query=True):
data = {}
try:
last_full_builtwith_scan_date = None
if self.api_version == 7 and isinstance(domain, list):
domain = ','.join(domain)
if self.api_version in [2, 7]:
last_updates_r... | [
"async def query(self, target_domain):\n await self.wait_registered()\n\n return await self.client.query(\n source_domain=self.domain,\n target_domain=target_domain,\n )",
"def query_domain():\n (frm, domains) = domain_query()\n return render_template('query/domain... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Does this lesson or any of its descendants need feedback? 0 = no feedback required; 1 = feedback not yet provided; 2 = feedback provided | def needs_feedback(lesson, course_id):
descendants = lesson.get_descendants(include_self=True)
provided = False
for descendant in descendants:
if descendant.feedback_required:
# If feedback is needed, check if already provided
try:
# pylint: disable=E1101
... | [
"def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return Fal... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get meta information about a lesson | def get_lesson_meta(lesson_id):
# pylint: disable=E1101
lesson_meta_list = (LessonMetaData.objects.filter(lesson=lesson_id)
.prefetch_related())
result = []
for item in lesson_meta_list:
result.append({
'description': item.description.description,
... | [
"def lesson(self):\n return self.section.lesson",
"def get_lesson_details_view(request, lesson_id):\n lesson = get_object_or_404(Lesson, pk=lesson_id)\n hours_and_minutes = get_hours_and_minutes(lesson)\n return render(request, 'get_lesson_detail.html', {'lesson': lesson, 'hours_and_minutes': hour... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get attachments for a lesson | def get_lesson_attachments(lesson_id):
# pylint: disable=E1101
lesson_attachments = Attachment.objects.filter(lesson=lesson_id)
result = []
for attachment in lesson_attachments:
url = attachment.attached_file.url
result.append({'title': attachment.title,
'url': url... | [
"def getAttachments(muscleObject):\n # Get muscle\n muscle = getMuscle(muscleObject)\n\n # Get attachments\n attachments = cmds.listConnections(muscle + '.attachment', s=True, d=False)\n if not attachments:\n raise Exception('No valid muscle attachments associated with muscleObject \"' + muscl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get list of lessons. If lesson_id is provided, then sublessons will be returned. | def get_lessons(course_id, lesson=None):
lesson_list = []
if lesson is None:
lesson_list = Lesson.objects.filter(
id__in=get_root_lesson_ids(course_id))
else:
lesson_list = lesson.get_children()
result = []
for lesson_item in lesson_list:
result.append({
... | [
"def get_lessons(lesson_id):\n url = '{0}?cat={1}'.format(BASE_URL, lesson_id)\n page = requests.get(url, verify=False)\n soup = BeautifulSoup(page.content)\n output = []\n\n for item in soup.find(id='playlist').findAll('dd'):\n video_id = item.find('a')['href'].split('=')[-1]\n title =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a CSRF token | def get_token(request: http.Request) -> str:
if hasattr(request, '_csrf_hook'):
return request._csrf_hook.get_token() | [
"def csrf_token(self):\n return generate_csrf_token()",
"def get_token(request):\n return request.app.settings['tangled.app.csrf.token']",
"def csrftoken(me):\n return me.s.cookies.get('csrftoken')",
"def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Change the CSRF token in use for a request; should be done on login for security purposes. | def rotate_token(request: http.Request):
if hasattr(request, '_csrf_hook'):
request._csrf_hook.rotate_token() | [
"def reset_token():\n request.csrf_cookie_needs_reset = True",
"def reset_csrf(self):\n \n csrf_token = _generate_csrf_token()\n self.request.session[self.csrf_session_key] = csrf_token\n return csrf_token",
"def _set_csrf_header(self):\n csrf = self.session.cookies.get(\"c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
decorator to coerce a generator to a list | def listify(gen: Callable[..., Union[Generator[T, None, None], AsyncGenerator[T, None]]]) -> Callable[..., List[T]]:
if inspect.isasyncgenfunction(gen):
@wraps(gen)
async def list_func(*args, **kwargs) -> List[Any]:
return [v async for v in gen(*args, **kwargs)]
elif inspect.isgene... | [
"def listify(gen):\n @wraps(gen)\n def patched(*args, **kwargs):\n return list(gen(*args, **kwargs))\n return patched",
"def listify(gen: Callable[..., Generator[T, None, None]]) -> Callable[..., List[T]]:\n\n @wraps(gen)\n def list_func(*args, **kwargs) -> List[Any]:\n return list(gen(*args, *... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
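A usage sketch of the synchronous branch of `listify` (the async branch behaves the same way but collects with `async for`); this mirrors the plain-generator variant shown in the negatives:

```python
from functools import wraps
from typing import Any, Callable, Generator, List, TypeVar

T = TypeVar("T")

def listify(gen: Callable[..., Generator[T, None, None]]) -> Callable[..., List[T]]:
    # Wrap a generator function so every call returns a fully realised list.
    @wraps(gen)
    def list_func(*args: Any, **kwargs: Any) -> List[T]:
        return list(gen(*args, **kwargs))
    return list_func

@listify
def squares(n: int) -> Generator[int, None, None]:
    for i in range(n):
        yield i * i

print(squares(4))  # [0, 1, 4, 9]
```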
Just lists out paths in env variable, one per line | def plist(self):
if self.val == None:
print("No such env variable ", self.val, " exists!")
else:
print("Listing for ", self.name)
for p in self.path_list: print(" ", p) | [
"async def _environment_paths() -> list[str]:\n env = await Get(EnvironmentVars, EnvironmentVarsRequest((\"PATH\",)))\n path = env.get(\"PATH\")\n if path:\n return path.split(os.pathsep)\n return []",
"def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the internal env val to ensure path_list & val are in sync | def pupdate(self):
try:
tmp = self.path_list[0]
except IndexError:
print("Empty value for env variable ", self.name)
return
for p in self.path_list[1:]:
tmp = tmp + ':' + p
self.val = tmp | [
"def process_env_update(self) -> None:",
"def update(self, env_obj):\n if env_obj:\n if isinstance(env_obj, EnvValues):\n for package_name, env_vars in env_obj.data.items():\n for name, value in env_vars.items():\n if isinstance(value, lis... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Process options based on legal operations & subcommands. Return sanitized cmds and arguments | def process_options(args):
subcmds = dict() # each key(cmd) can take on a val of 0, or 1
subcmds_wo_arg = [ 'clean', 'list' ]
subcmds_with_args = [ 'add', 'remove' ]
for cmd in subcmds_wo_arg:
subcmds[cmd] = 0
for cmd in subcmds_with_args:
subcmds[cmd] = 1
if (len(args) == 0)... | [
"def _check_args(cli, basecmd, extcmds):\n\n if len(extcmds) == 0:\n logger.critical(_('Error: clean requires an option: %s'),\n \", \".join(valid_args))\n raise dnf.cli.CliError\n\n for cmd in extcmds:\n if cmd not in valid_args:\n logger.critical(_('Err... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handle multiple requests, each expected to be a 4-byte length followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. | def handle(self):
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv... | [
"def handle(self):\n while True:\n try:\n chunk = self.connection.recv(4)\n if len(chunk) < 4:\n break\n slen = struct.unpack(\">L\", chunk)[0]\n chunk = self.connection.recv(slen)\n while len(chunk) < sl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
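For context, the sending side of this protocol frames each record as a big-endian 4-byte length followed by the pickled LogRecord attribute dict. A sketch of that framing (not the stdlib's `SocketHandler` itself, but the same wire format):

```python
import logging
import pickle
import struct

def frame_record(record: logging.LogRecord) -> bytes:
    # Pickle the record's attribute dict and prefix it with its byte length.
    payload = pickle.dumps(record.__dict__)
    return struct.pack(">L", len(payload)) + payload

rec = logging.LogRecord("demo", logging.INFO, __file__, 1, "hello", None, None)
data = frame_record(rec)
assert struct.unpack(">L", data[:4])[0] == len(data) - 4
```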
Plot mesh triangles on a given surface | def plotMesh(verts,tris):
x = verts[:,0]
y = verts[:,1]
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(x, y, tris, 'k-')
plt.title('Unstructured Mesh')
plt.xlabel('distance (m)')
plt.ylabel('distance (m)') | [
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = pl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot interpolated image of surface displacements, optionally show original points | def plotSurface(surfaceFile, comp=2, points=False, tris=False,
profile=False, ax=None, annotate=True, norm=None,xscale=1, yscale=1):
verts,data,tris = load_h5(surfaceFile)
if comp==3: #radial displacements
z = np.hypot(data[:,:,0], data[:,:,1]).flatten()
else:
z = data[:,:,c... | [
"def imshow_surface(self):\n plt.imshow(self.z)\n plt.colorbar()\n plt.show()",
"def plot_sources_interp(df_nn, rm_nn, zm_nn, rstd_nn, zstd_nn, nn_dist1_weight_avg, nn_tur_weight_avg, nn_dist1_weight_std, nn_tur_weight_std,\n df_oban1, rm_oban1, zm_oban1, rstd_oban1, zs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
For time-dependent runs, plot maximum displacements versus time | def plot_maxdisp_time(pointsh5, xscale=1e3, yscale=1e-2, tscale=3.1536e7,
adjustRadial=False):
coords,data,number,times = pu.load_h5_visco(pointsh5)
x = coords[:,0]
ur = np.hypot(data[:,:,0], data[:,:,1])
uz = data[:,:,2]
# Convert units & extract maximums for each timestep
... | [
"def plot_period_ensemble_max(forecast_grid, bmap, start_time, end_time, out_path,\n figsize=(10, 6), contours=np.concatenate([[1] + np.arange(5, 80, 5)]),\n cmap=\"inferno\"):\n cmap = plt.get_cmap(\"Paired\")\n colors = cmap(np.linspace(0, 0.5, cmap.N //... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert scale term to unit label | def get_unit(scale):
scale2unit = { 1e-9: 'nm',
1e-6: u'\N{MICRO SIGN}m', #or hex id (lookup): u'\u00B5'
1e-3: 'mm',
0.01: 'cm',
0.1:'dm',
1:'m',
1000:'km',
# time
... | [
"def point_scale_name(self):",
"def unit_scale(quantity):\n scales = {\n 'rate': 1.0,\n 'dt': 1.0,\n 'fluence': 1e39,\n 'peak': 1e38,\n }\n return scales.get(quantity, 1.0)",
"def to_axis_units(self, label, vals):\n if label in ['Hmolar', 'Smolar', 'Umolar', 'Dmolar',... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot profiles for each output/step0X folder on same figure | def plot_directory_profiles(path, outname=None, show=True, xscale=1, yscale=1,
xval='x', adjustRadial=True):
outdirs = np.sort(os.listdir(path))
plt.figure()
#labels=['homogeneous','1D layering', '3D tomography'] #xscale=1e-3, yscale=1e2
for i,outdir in enumerate(outdirs):
... | [
"def plot_morphism_output(data, outdir):\n\n # show the distributions for each variable separately\n for col in data.columns:\n ProfilePlotter._plot_1d(data[col], outfile = os.path.join(outdir, col + \".pdf\"))\n\n # later, maybe also show 2d plots etc.",
"def plot_profiles(self, f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot grid of surface displacement maps from each output/step folder. If normalize=True, use step01 colorbar for all images | def plot_directory_surface(path,figsize=(17,11), comp=2, nrow=1, norm=None,
cbar='each', cloc='top', outname=None, labels='1', show=True):
outdirs = np.sort(os.listdir(path))
nplots = len(outdirs)
ncol = np.ceil(nplots/nrow).astype(np.int)
fig = plt.figure(figsize=figsize)
grid = ImageGrid(fig,... | [
"def plot_colormaps():\n\n import matplotlib.pyplot as plt\n import numpy as np\n\n a = np.linspace(0, 1, 256).reshape(1, -1)\n a = np.vstack((a, a))\n\n nmaps = len(colormaps_list) + 1\n\n fig = plt.figure(figsize=(5, 10))\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Use gdal/osr to get lat/lon point location from georeferenced array indices | def ind2latlon(index, filePath):
# Load georeferencing
ds = gdal.Open(filePath)
proj = ds.GetProjection()
gt = ds.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(proj)
x0 = gt[0] #top left longitude
y0 = gt[3] #top left latitude
dx = gt[1] #pixel width
dy = gt[5... | [
"def lonlat_index(latitude, longitude, lat_bnds, lon_bnds):\n #handle 2D latitude array\n \n if len(latitude.shape)==2:\n lat1D=np.array(latitude[:,0], copy=True)\n else:\n lat1D=np.array(latitude, copy=True)\n \n #print lats\n #print lat_bnds\n #print np.where((lats >= lat_bnd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
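The core of `ind2latlon` is GDAL's affine geotransform. Assuming a north-up raster (zero rotation terms, which the code above also implicitly assumes), the index-to-coordinate math reduces to the following sketch; the function name is illustrative:

```python
def index_to_lonlat(gt, row, col):
    # gt = (x0, dx, rot_x, y0, rot_y, dy) as returned by ds.GetGeoTransform();
    # adding half a pixel gives the pixel-centre coordinate.
    lon = gt[0] + (col + 0.5) * gt[1]
    lat = gt[3] + (row + 0.5) * gt[5]
    return lon, lat
```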
Plot stress contours by extracting nearest stresses resolved on a point for 4 vertices per cell (tetrahedra) | def contour_stresses(matFile, infoFile, ax=0, esize=100):
# NOTE: some bug to work out here
vertices, cells, moduli, stress, strain = pu.load_h5_material(matFile, infoFile)
# NOTE: could get list of all elements that have a vertex on a particular surface
# or get list of all cells that have a centroid ... | [
"def contour_t(X,Y,Z,V=None,ax=None,**kwargs):\n ax=ax or plt.gca()\n if V is not None:\n cset = ax.contour(X,Y,Z,V,**kwargs)\n else:\n cset = ax.contour(X,Y,Z,**kwargs)\n \n # look at one layer:\n for i in range(len(cset.levels)):\n thresh = cset.levels[i]\n coll = cse... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the similarity of strings str1 and str2 via edit distance | def similarityByEdit(str1,str2):
dis = Levenshtein_Distance(str1,str2)
sim = (1-dis/max(len(str1),len(str2)))*0.9+0.1
return sim | [
"def editDistance(str1, str2):\n # Create an array of the changes needed to change s1 -> s2\n changes = [change for change in ndiff(str1, str2) if change[0] != ' ']\n distance = len(changes)\n return distance",
"def editing_distance(str1: str, str2: str) -> int:\r\n if not str1 and not str2:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
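The `Levenshtein_Distance` helper used by `similarityByEdit` isn't shown in the row; a minimal dynamic-programming version plugged into the same similarity formula might look like this (names are illustrative):

```python
def levenshtein_distance(s1: str, s2: str) -> int:
    # Classic DP over prefix lengths; prev[j] holds distances for s1[:i-1].
    prev = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1, 1):
        cur = [i]
        for j, c2 in enumerate(s2, 1):
            cur.append(min(prev[j] + 1,                # deletion
                           cur[j - 1] + 1,             # insertion
                           prev[j - 1] + (c1 != c2)))  # substitution
        prev = cur
    return prev[-1]

def similarity_by_edit(s1: str, s2: str) -> float:
    dis = levenshtein_distance(s1, s2)
    return (1 - dis / max(len(s1), len(s2))) * 0.9 + 0.1

print(similarity_by_edit("kitten", "sitting"))  # distance 3 -> ~0.614
```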
Compute the attribute similarity between a pair of entities. Procedure: iterate over all attributes of entity1, find the corresponding attribute in entity2, then compute the similarity of the attribute values; finally apply the attribute weights and sum the similarities over all attributes. | def entityAttrsSim(xueke,entity1,entity2,edit_threshold,bert_threshold):
    # Convert the attributes of both entities into key-value dicts
attribute1 = entity1[2]
attributes1 = attribute1.split(';')
attrs1 = {}
for attr in attributes1:
attr_list = attr.split(':')
if (len(attr_list) > 1):
attrs1[attr_list[0]] = ... | [
"def compareNamedEntities(self, ne1, ne2):\n score = 0.0\n for k1 in ne1:\n score += ne1[k1] * ne2[k1]\n return score",
"def _CalculateMatchWeight(\n self,\n concrete_fields: Set[EntityTypeField],\n canonical_fields: Set[EntityTypeField]\n ) -> float:",
"def match_attributes(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize the inference network, stream video to network, and output stats and video. | def infer_on_stream(args, client):
# Initialise the class
infer_network = Network()
# Set Probability threshold for detections
prob_threshold = args.prob_threshold
### TODO: Load the model through `infer_network` ###
infer_network.load_model(args.model,args.cpu_extension,args.device)
input_... | [
"def infer_on_stream(args, client):\n # Initialise the class\n plugin = Network()\n # Set Probability threshold for detections\n global prob_threshold\n prob_threshold = args.prob_threshold\n \n ### Variables used for inference\n single_image_mode = False # Flag for single images\n req_i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Workbook galleries supported by the template. | def galleries(self) -> pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]]:
return pulumi.get(self, "galleries") | [
"def galleries(self) -> pulumi.Output[Sequence['outputs.WorkbookTemplateGalleryResponse']]:\n return pulumi.get(self, \"galleries\")",
"def gallery(self) -> Optional[bool]:\n return pulumi.get(self, \"gallery\")",
"def get_galleries(self):\n data = self._get('get_gallery_list')\n ret... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get an existing WorkbookTemplate resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WorkbookTemplate':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WorkbookTemplateArgs.__new__(WorkbookTemplateArgs)
__props__.__d... | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_delete_executions: Optional[pulumi.Input[bool]] = None,\n content: Optional[pulumi.Input[str]] = None,\n created_by: Optional[pulumi.Input[str]] = None... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Workbook galleries supported by the template. | def galleries(self) -> pulumi.Output[Sequence['outputs.WorkbookTemplateGalleryResponse']]:
return pulumi.get(self, "galleries") | [
"def galleries(self) -> pulumi.Input[Sequence[pulumi.Input['WorkbookTemplateGalleryArgs']]]:\n return pulumi.get(self, \"galleries\")",
"def gallery(self) -> Optional[bool]:\n return pulumi.get(self, \"gallery\")",
"def get_galleries(self):\n data = self._get('get_gallery_list')\n re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Key value pair of localized gallery. Each key is the locale code of languages supported by the Azure portal. | def localized(self) -> pulumi.Output[Optional[Mapping[str, Sequence['outputs.WorkbookTemplateLocalizedGalleryResponse']]]]:
return pulumi.get(self, "localized") | [
"def photo_dict(phrase):\n switcher = {\n '병원 위치': 'https://maps.googleapis.com/maps/api/staticmap?center=37.507144,127.063737&zoom=16&size=640x480&markers=color:blue%7Clabel:S%7C37.507144,127.063737&key=AIzaSyCF-XXYf7IW1mkUZFeZF84BCcZdtC-z1M0',\n '병원 운영시간': 'http://gunn.pausd.org/sites/default/fil... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test SNMPv3 script utilizing Kirk's snmp_helper module | def main():
# Take path argument and list all text files
ip = '10.1.10.100'
a_user = 'cisco'
auth_key = 'cisco123'
encr_key = 'cisco123'
snmp_user = (a_user, auth_key, encr_key)
sw1 = (ip, 161)
sysDescr = '1.3.6.1.2.1.1.1.0'
sysObjectID = '1.3.6.1.2.1.1.2.0'
sysUpTime = '1.3.6.... | [
"def quickstart():\n snmp.quickstart()\n return 0",
"def handle_snmpconf():\n return 0",
"def snmp_v2(device,\n ip,\n mib_name,\n index=0,\n value=None,\n timeout=10,\n retries=3,\n community=\"private\",\n walk_cmd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connect to the API and test connection | def connect_api():
print("INFO: Checking API connection and credentials...")
conf = ConfigParser()
conf.read(os.path.join(os.path.abspath(
os.path.dirname(__file__)), '.', 'api.conf'))
client = CBWApi(conf.get('cyberwatch', 'url'), conf.get(
'cyberwatch', 'api_key'), conf.get('cyberwatch... | [
"def test_connection(self):\n\n self.connect_to_server()",
"def test( self ):\n url = \"http://\" + self.ip + \":\" + self.port + \"/Service/testConnection\"\n \n r = requests.get( url )\n if r.status_code != 200:\n print( \"/Service/testConnection - ERROR!\" ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Setup variables for SMTP | def setup_smtp():
print("INFO: Setting up SMTP variables...")
conf = ConfigParser()
conf.read(os.path.join(os.path.abspath(
os.path.dirname(__file__)), '.', 'smtp.conf'))
smtp = {
"server": conf.get('smtp', 'smtp_server'),
"login": conf.get('smtp', 'smtp_login'),
"passwor... | [
"def setup_email(self, host, _from, to):\n self._host_mail = host\n self._from_mail = _from\n self._to_mail = to",
"def __init__(self, smtp_server, smtp_user, smtp_password,\n smtp_port=25, is_with_tls=False):\n self.smtp_server = smtp_server\n self.smtp_port = s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Replace server list in file with recent one | def replace_file(servers):
print("INFO: Replacing server list in file with recent one...")
if os.path.exists(os.path.dirname(__file__) + '/communication_failure_list.txt'):
try:
os.remove(os.path.dirname(__file__) +
'/communication_failure_list.txt')
except OSEr... | [
"def load_server_list(filename):\n if not os.path.isfile(filename):\n return #ignore this error for now\n fo=open(filename,\"r\")\n rd=fo.read()\n fo.close()\n __load_server_list(rd)",
"def update_servers(self) -> None:\n test_servers = self.api.get_servers()\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find servers with status "Communication failure" and save them to a file | def find_communication_failure_servers(servers):
print('INFO: Finding servers with "Communication failure" status and saving result in file')
with open(os.path.dirname(__file__) + '/communication_failure_list.txt', 'w+') as file:
for server in servers:
if server.status == "server_update_comm... | [
"def find_recovered_servers(client):\n print(\"INFO: Determining recovered servers by comparing current servers with list in file...\")\n current_servers_list = []\n for server in client.servers():\n if server.status == \"server_update_comm_fail\":\n current_servers_list.append({\"id\": s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compare list of servers in file with current ones to find recovered servers | def find_recovered_servers(client):
print("INFO: Determining recovered servers by comparing current servers with list in file...")
current_servers_list = []
for server in client.servers():
if server.status == "server_update_comm_fail":
current_servers_list.append({"id": server.id})
... | [
"def replace_file(servers):\n print(\"INFO: Replacing server list in file with recent one...\")\n if os.path.exists(os.path.dirname(__file__) + '/communication_failure_list.txt'):\n try:\n os.remove(os.path.dirname(__file__) +\n '/communication_failure_list.txt')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Make an HTML list from server list for email | def create_body_html(client, server_list):
servers_html = ""
for server in server_list:
link = '<a href="{}/servers/{}">{}</a>'.format(
client.api_url, server.id, server.hostname)
html = """{}<br />""".format(link)
servers_html += html
return servers_html | [
"def build_html_list(self, items):\n\n html_list = \"<ul>\"\n for item in items:\n html_list += \"<li>\" + item + \"</li>\"\n html_list += \"</ul>\"\n\n return html_list",
"def make_html_list(items, tag=\"ul\"):\n return \"<{0}>\\n{1}</{0}>\".format(\n tag, \"\".jo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove a random item from the set, and return it | def pop_random(self):
rand_index = randint(0, len(self._list) - 1)
item = self._list[rand_index]
self.remove(item)
return item | [
"def popitem(self):\n all_items = self.items()\n removed_item = random.choice(all_items)\n self[removed_item[0]] = None\n return removed_item",
"def getRandom(self):\n n = len(self.keys)\n while n > 0:\n index = random.randint(0, n - 1)\n my_key = se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
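`pop_random` above pays O(n) for `remove` on a list. A common constant-time alternative keeps an index map and swaps the removed item with the last element; a sketch (not the class from the row, and it assumes hashable, unique items):

```python
import random

class RandomSet:
    # O(1) add/remove/pop_random via a list plus an item -> index map.
    def __init__(self):
        self._list = []
        self._index = {}

    def add(self, item):
        if item not in self._index:
            self._index[item] = len(self._list)
            self._list.append(item)

    def remove(self, item):
        i = self._index.pop(item)
        last = self._list.pop()
        if i < len(self._list):  # removed item wasn't the last one
            self._list[i] = last
            self._index[last] = i

    def pop_random(self):
        item = random.choice(self._list)
        self.remove(item)
        return item
```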
Generate a compressed name from keys wrt config. | def get_compressed_name_from_keys(config: Dict[str, Any],
keys: Tuple[Tuple[str]],
allow_missing: bool = True):
assert not isinstance(config, list), config
name = ''
for pre_keys in keys:
v = config
pre_keys_str = ''
missing = False
... | [
"def _make_pack_name(names):\n assert names\n tokens_in_names = [name.split('/') for name in names]\n common_prefix_tokens = []\n\n # Find the longest common prefix of tokens.\n while True:\n first_token_in_names = set()\n for tokens in tokens_in_names:\n if not tokens:\n break\n first_t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a sample of config. | def get_configuration_sample(config, root=True):
if isinstance(config, dict):
return {
k: get_configuration_sample(v, root=False)
for k, v in sorted(config.items())
}
elif isinstance(config, list):
if root:
return get_configuration_sample(
config[np.random.randint(len(con... | [
"def get_extra_config_sample():\n pass",
"def load_sample(filename, **ctx):\n filename = os.path.join(SAMPLE_DIR, filename)\n return parse_config(filename, ctx)",
"def sample(self):\n sample = self.hpo.propose()\n if sample is not None:\n sample = copy.deepcopy(sample)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test aperture_photometry when error has units (see 176). | def test_aperture_photometry_with_error_units():
data1 = np.ones((40, 40), dtype=float)
data2 = u.Quantity(data1, unit=u.adu)
error = u.Quantity(data1, unit=u.adu)
radius = 3
true_flux = np.pi * radius * radius
unit = u.adu
position = (20, 20)
table1 = aperture_photometry(data2, Circula... | [
"def test_aperture_photometry_with_error_units():\n data1 = np.ones((40, 40), dtype=np.float)\n data2 = u.Quantity(data1, unit=u.adu)\n error = u.Quantity(data1, unit=u.adu)\n radius = 3\n true_flux = np.pi * radius * radius\n unit = u.adu\n position = (20, 20)\n table1 = aperture_photometry... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that aperture_photometry does not modify the input data or error array when a mask is input. | def test_aperture_photometry_inputs_with_mask():
data = np.ones((5, 5))
aperture = CircularAperture((2, 2), 2.0)
mask = np.zeros_like(data, dtype=bool)
data[2, 2] = 100.0 # bad pixel
mask[2, 2] = True
error = np.sqrt(data)
data_in = data.copy()
error_in = error.copy()
t1 = aperture... | [
"def test_aperture_photometry_inputs_with_mask():\n data = np.ones((5, 5))\n aperture = CircularAperture((2, 2), 2.)\n mask = np.zeros_like(data, dtype=bool)\n data[2, 2] = 100. # bad pixel\n mask[2, 2] = True\n error = np.sqrt(data)\n data_in = data.copy()\n error_in = error.copy()\n t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test elliptical exact aperture photometry on a grid of pixel positions. | def test_ellipse_exact_grid(x, y, r):
data = np.ones((10, 10))
aperture = EllipticalAperture((x, y), r, r, 0.0)
t = aperture_photometry(data, aperture, method='exact')
actual = t['aperture_sum'][0] / (np.pi * r**2)
assert_allclose(actual, 1) | [
"def test_radial_elliptical_aperture():\n image = np.ones((50, 50))\n a = pf.radial_elliptical_aperture(position=(25, 25), r=10, elong=2, theta=45)\n area = a.do_photometry(image)[0]\n ellip_area = np.pi * 10 * 5\n assert abs(area - ellip_area)/ellip_area < 0.01\n del image, a",
"def test_radial... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
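test_ellipse_exact_grid leans on the identity that exact-method photometry of a constant unit image recovers the aperture area. A standalone check of the same identity with a circular aperture (the position and radius are arbitrary, provided the aperture fits inside the array):

import numpy as np
from photutils.aperture import CircularAperture, aperture_photometry

data = np.ones((10, 10))
aper = CircularAperture((4.5, 4.5), r=2.0)
tbl = aperture_photometry(data, aper, method='exact')
# On unit data the exact aperture sum equals the geometric area, pi * r**2.
assert np.isclose(tbl['aperture_sum'][0], np.pi * 2.0 ** 2)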
Regression test that nonfinite data values outside of the aperture mask but within the bounding box do not affect the photometry. | def test_nan_in_bbox():
data1 = np.ones((101, 101))
data2 = data1.copy()
data1[33, 33] = np.nan
data1[67, 67] = np.inf
data1[33, 67] = -np.inf
data1[22, 22] = np.nan
data1[22, 23] = np.inf
error = data1.copy()
aper1 = CircularAperture((50, 50), r=20.0)
aper2 = CircularAperture(... | [
"def test_aperture_photometry_inputs_with_mask():\n\n data = np.ones((5, 5))\n aperture = CircularAperture((2, 2), 2.0)\n mask = np.zeros_like(data, dtype=bool)\n data[2, 2] = 100.0 # bad pixel\n mask[2, 2] = True\n error = np.sqrt(data)\n data_in = data.copy()\n error_in = error.copy()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Regression test to check that scalar SkyCoords are added to the table as a length-1 SkyCoord array. | def test_scalar_skycoord():
data = make_4gaussians_image()
wcs = make_wcs(data.shape)
skycoord = wcs.pixel_to_world(90, 60)
aper = SkyCircularAperture(skycoord, r=0.1 * u.arcsec)
tbl = aperture_photometry(data, aper, wcs=wcs)
assert isinstance(tbl['sky_center'], SkyCoord) | [
"def test_lat_array(self):\n acq = acquisitions(LS5_SCENE1).get_acquisitions()[0]\n geobox = acq.gridded_geo_box()\n fid = create_lon_lat_grids(acq, depth=5)\n dataset_name = ppjoin(GroupName.LON_LAT_GROUP.value, DatasetName.LAT.value)\n lat = fid[dataset_name][:]\n ids = u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Getter of the user id. | def get_user_id(self):
return self.id_user | [
"def user_id(self):\n return self._user_id",
"def get_id(self):\n try:\n return unicode(self.user_id)\n except NameError:\n return str(self.user_id)",
"def user_id(self):\n return self._data['user_oid']",
"def get_user_id(self):\n res = self.qiita_clien... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Getter of ratings list for this user. | def get_ratings(self):
return self.ratings | [
"def ratings(self):\n return self._ratings",
"def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)",
"def user_ratings(user_id):\n return _fetch_records(f\"SELECT item_id, rating_type FROM ratings WHERE user_id... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to check if a movie has already been seen by the given user. | def check_movie_seen(self, id_movie):
        # Membership test on the set of movie ids this user has seen
        return id_movie in self.seen | [
"def has_seen(self, movie: str) -> bool:\n try:\n self.get_rating(movie)\n except NoSuchRating:\n return False\n return True",
"def new_watched_movie(username: str, movie_id: int) -> bool:\n with connection:\n all_movies = connection.execute(MOVIES_IDS, (movie_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Getter of movie id | def get_movie_id(self):
return self.id_movie | [
"def get_movie_id(self) -> str:\n return self.movie.id",
"def _get_movie_id(self, movie_id_link):\n #return re.sub('\\/movies\\/\\?id\\=', '', movie_id_link)\n return movie_id_link.split('id=')[1]",
"def get_imdb_id(self, movie):\n tmbd_movie = tmdb.Movies(movie.id)\n response... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Getter of ratings list for this movie. Pair (user, rating) | def get_ratings(self):
return self.ratings | [
"def user_ratings(user_id):\n return _fetch_records(f\"SELECT item_id, rating_type FROM ratings WHERE user_id = {user_id}\")",
"def ratings(self):\n return self._ratings",
"def ratings_usuarios(username, ratings):\n return list(filter(lambda x: x.username == username, ratings))",
"def get_ratings... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write similarities data to file | def write_similarities(self, data):
        # If the file already exists, skip writing and return None
if os.path.isfile(cfg.similarities):
return None
with open(cfg.similarities, 'wb') as similarities:
print("Storing data as serialized object...")
pickle.dump(... | [
"def record_similarities(similarities):\n writer = open(\"similarities.txt\", \"w\")\n for userA in similarities.keys():\n for userB in similarities[userA].keys():\n writer.write(userA + \"\\t\" + userB + \"\\t\" +\n str(similarities[userA][userB]) + \"\\n\")\n wri... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load similarities pickle data | def load_similarities(self):
        if not os.path.isfile(cfg.similarities):
            return None
        print("Serialized object exists. Reading from disk...")
        with open(cfg.similarities, 'rb') as file:
            data = pickle.load(file)
        return data | [
"def write_similarities(self, data):\n # If file is yet created, return data and do not create it again\n if os.path.isfile(cfg.similarities):\n return None\n\n with open(cfg.similarities, 'wb') as similarities:\n print(\"Storing data as serialized object...\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
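write_similarities and load_similarities together implement a compute-once, pickle-to-disk cache keyed on cfg.similarities. The same pattern as one generic helper; path and compute are placeholders, not names from the source:

import os
import pickle

def cached(path, compute):
    # Return pickled data from path, computing and storing it on first use.
    if os.path.isfile(path):
        with open(path, 'rb') as fh:
            return pickle.load(fh)
    data = compute()
    with open(path, 'wb') as fh:
        pickle.dump(data, fh, protocol=pickle.HIGHEST_PROTOCOL)
    return data

Used as, e.g., similarities = cached(cfg.similarities, build_similarity_matrix), where build_similarity_matrix is likewise a placeholder.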
Initializes our connectors by giving them a handler function. | def initConnectors(self):
def handlerFunc(message, responseFunc):
for h in self._handlers:
h.handleMessage(message, responseFunc)
getLogger(__name__).debug('Initializing %d connectors...' % len(self._connectors))
for c in self._connectors:
... | [
"def _init_connectors(self, config):\n self._logger.info(\"Init broker and feed connectors\")\n self._broker_connector = globals()[config[\"broker.connector\"]](config)\n self._feed_connector = globals()[config[\"feed.connector\"]](config)",
"def _setupConnector(self):\n if self._poolk... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read a pulse of SPI data on a pin that corresponds to the DYMO scale output protocol (12 bytes of data at about 14 kHz); timeout is in seconds | def get_scale_data(pin, timeout=1.0):
timestamp = time.monotonic()
with pulseio.PulseIn(pin, maxlen=96, idle_state=True) as pulses:
pulses.pause()
pulses.clear()
pulses.resume()
while len(pulses) < 35:
if (time.monotonic() - timestamp) > timeout:
rais... | [
"def getDHT11Data(pinNum):\n \n # set pin as output\n wiringpi.wiringPiSetupPhys() \n wiringpi.pinMode(pinNum, 1)\n\n # pull down for 20 ms = 20000 us\n wiringpi.digitalWrite(pinNum, 0) \n wiringpi.delayMicroseconds(20000)\n # pull up\n wiringpi.digitalWrite(pinNum, 1) \n\n # set pin a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
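The decode half of get_scale_data is cut off. Independent of the DYMO specifics, a captured pulse train like this is typically turned into bits by thresholding each pulse width and packing the bits into bytes; a generic sketch on plain integers (the 80-microsecond threshold and MSB-first order are illustrative assumptions, not taken from the source):

def pulses_to_bytes(pulse_widths_us, threshold_us=80):
    # Short pulse -> 1, long pulse -> 0; pack 8 bits per byte, MSB first.
    bits = [1 if width < threshold_us else 0 for width in pulse_widths_us]
    out = bytearray()
    for i in range(0, len(bits) - len(bits) % 8, 8):
        byte = 0
        for bit in bits[i:i + 8]:
            byte = (byte << 1) | bit
        out.append(byte)
    return bytes(out)

# e.g. pulses_to_bytes([30, 120, 30, 30, 120, 120, 30, 120]) == b'\xb2'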
encode convex hulls to network input format | def _encode_convex_hull(record):
max_encode_len = max(seq_len)
max_decode_len = max(seq_len) + 1 + 1
total_len = max_encode_len + max_decode_len
encoder_seq, hull = record
encoder_seq_len = len(encoder_seq)
# add new dimension for the [start] token
encoder_seq = [(0., *e) for e in encoder_seq]
# creat... | [
"def save_convex_hulls():\n\timport json\n\tfrom scipy.spatial import ConvexHull\n\tqhull = {}\n\tfor dom in ['lsl', 'lsp', 'mtl_lano']:\n\t\tx, y = GLSLio.get_scen(8, dom, ('X', 'Y'))\n\t\tpts = np.array([x,y]).T\n\t\tqh = ConvexHull( pts )\n\t\tqhull[dom] = pts[qh.vertices].tolist()\n\t\t#T = get_tesselation(dom)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stacks are comprised of multiple hosts, and each host may be located in a different cloud account. This method returns a map from the underlying driver implementation to the hosts running in that account. host_ids (list): a list of primary keys for the hosts we're interested in. Returns (dict): each key is a provider driver imple... | def get_driver_hosts_map(self, host_ids=None):
host_queryset = self.get_hosts(host_ids)
# Create an account -> hosts map
accounts = {}
for h in host_queryset:
accounts.setdefault(h.get_account(), []).append(h)
# Convert to a driver -> hosts map
result = {}
... | [
"def getDictHostsCompute(self, instances):\n hosts = {}\n for inst in instances:\n host = inst[\"host\"]\n if (host in hosts):\n hosts[host].append(inst[\"uuid\"])\n else:\n hosts[host] = [inst[\"uuid\"]]\n return hosts",
"def que... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Quick way of getting all hosts or a subset for this stack. host_ids (list): list of primary keys of hosts in this stack. Returns (QuerySet). | def get_hosts(self, host_ids=None):
if not host_ids:
return self.hosts.all()
return self.hosts.filter(id__in=host_ids) | [
"def get_hosts_subset(self, host_uuids):\n request_obj = {\n \"host_id_list\": host_uuids\n }\n api_response = self.api_topology.fetch_hosts_with_ids(\n body_fetch_hosts_with_ids=request_obj).to_dict()\n\n hosts = api_response.get(\"data\")\n return hosts",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
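The truncated tail of get_driver_hosts_map presumably walks the account -> hosts map and re-keys it by each account's driver. The same two-stage grouping on plain objects; get_account() appears in the source, while get_driver() on the account object is an assumption:

from collections import defaultdict

def group_hosts_by_driver(hosts):
    # Stage 1: bucket hosts per cloud account.
    by_account = defaultdict(list)
    for host in hosts:
        by_account[host.get_account()].append(host)
    # Stage 2: re-key each bucket by the account's driver implementation.
    by_driver = {}
    for account, account_hosts in by_account.items():
        by_driver.setdefault(account.get_driver(), []).extend(account_hosts)
    return by_driver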