| query | document | negatives | metadata |
|---|---|---|---|
test update_targets when host is dns | def test_update_targets_dns(opts):
host = "localhost"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == user + host
client._update_targets()
assert opts["tgt"]... | [
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_ta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test update_targets when no user defined | def test_update_targets_no_user(opts):
host = "127.0.0.1"
opts["tgt"] = host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == host
client._update_targets()
assert opts["tgt"] == host | [
"def test_otoroshi_controllers_adminapi_services_controller_update_service_targets(self):\n pass",
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test update_targets and expand_target when host is dns | def test_update_expand_target_dns(opts, roster):
host = "localhost"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == user + host
with patch(
"salt.roster.... | [
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test parse_tgt when user and host set on the ssh cli tgt | def test_parse_tgt(opts):
host = "localhost"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
assert not opts.get("ssh_cli_tgt")
client = ssh.SSH(opts)
assert client.parse_tgt["hostname"] == host
... | [
"def test_parse_tgt_no_user(opts):\n host = \"localhost\"\n opts[\"ssh_user\"] = \"ssh-usr\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert clien... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test parse_tgt when only the host set on the ssh cli tgt | def test_parse_tgt_no_user(opts):
host = "localhost"
opts["ssh_user"] = "ssh-usr"
opts["tgt"] = host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
assert not opts.get("ssh_cli_tgt")
client = ssh.SSH(opts)
assert client.parse_tgt["hostname"] =... | [
"def test_parse_tgt(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert client.parse_tgt[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
test "extra_filerefs" are not excluded from kwargs when preparing the SSH opts | def test_extra_filerefs(tmp_path, opts):
ssh_opts = {
"eauth": "auto",
"username": "test",
"password": "test",
"client": "ssh",
"tgt": "localhost",
"fun": "test.ping",
"ssh_port": 22,
"extra_filerefs": "salt://foobar",
}
roster = str(tmp_path /... | [
"def test_ssh_kwargs(test_opts):\n opt_key = test_opts[0]\n opt_value = test_opts[1]\n # Is the kwarg in salt.utils.parsers?\n in_parser = test_opts[2]\n\n opts = {\n \"eauth\": \"auto\",\n \"username\": \"test\",\n \"password\": \"test\",\n \"client\": \"ssh\",\n \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The main function used to produce a model ready for compression finetuning from an original PyTorch model and a configuration object. dummy_forward_fn | def create_compressed_model(
model: Module,
config: NNCFConfig,
compression_state: Optional[Dict[str, Any]] = None,
dummy_forward_fn: Callable[[Module], Any] = None,
wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] =... | [
"def compile(self):\n self.model_forward_run = self.train_model.init_model()",
"def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create compression algorithm builders by a given list of algorithm names. | def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:
algo_names = extract_algorithm_names(config)
return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init) | [
"def create_compression_algorithm_builder_from_algo_names(\n algo_names: List[str], config: NNCFConfig, should_init: bool\n) -> PTCompressionAlgorithmBuilder:\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORIT... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create compression algorithm builders by a given list of algorithm names. | def create_compression_algorithm_builder_from_algo_names(
algo_names: List[str], config: NNCFConfig, should_init: bool
) -> PTCompressionAlgorithmBuilder:
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(alg... | [
"def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init)",
"def make_alg(name, m, alg_params):\n if name =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper to call ``ir.actions.report.xml.render_report()``. | def render_report(cr, uid, ids, name, data, context=None):
registry = yuancloud.modules.registry.RegistryManager.get(cr.dbname)
return registry['ir.actions.report.xml'].render_report(cr, uid, ids, name, data, context) | [
"def render_report(self, cr, uid, res_ids, name, data, context=None):\n new_report = self._lookup_report(cr, name)\n if isinstance(new_report, (str, unicode)): # Qweb report\n # The only case where a QWeb report is rendered with this method occurs when running\n # yml tests originally written f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns timings for the parts where the video should be kept | def getSectionsOfNewVideo(silences, duration):
return [0.0] + silences + [duration] | [
"def get_talks_gt_one_hour(videos):\n return [ v for v in videos if \"H\" in v.duration ]",
"def extract_video_with_timming_file(video_path, fps, skip_frame, timming_file,\n trim_folder, offset):\n with open(timming_file, \"r\") as f:\n n = int(f.readline())\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove implicit resolvers for a particular tag Takes care not to modify resolvers in super classes. We want to load datetimes as strings, not dates, because we go on to serialise as json which doesn't have the advanced types of yaml, and leads to incompatibilities down the track. | def remove_implicit_resolver(cls, tag_to_remove):
if 'yaml_implicit_resolvers' not in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [
... | [
"def remove_implicit_resolver(cls, tag_to_remove):\n if 'yaml_implicit_resolvers' not in cls.__dict__:\n cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()\n\n for first_letter, mappings in cls.yaml_implicit_resolvers.items():\n cls.yaml_implicit_resolvers[first_let... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
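A minimal usage sketch for the resolver-removal recipe above, assuming `remove_implicit_resolver` is callable as a plain function (in its original context it may be bound as a classmethod); the loader subclass name is hypothetical:

```python
import yaml

class StringDatesLoader(yaml.SafeLoader):
    """Hypothetical SafeLoader subclass whose timestamps stay strings."""

# Drop the implicit resolver for the standard YAML timestamp tag so that
# dates load as str and the data stays JSON-serialisable.
remove_implicit_resolver(StringDatesLoader, "tag:yaml.org,2002:timestamp")

data = yaml.load("when: 2021-01-01", Loader=StringDatesLoader)
assert isinstance(data["when"], str)
```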
Try to get consent status for a single email address | def test_get_one(self, requests_mock, accepts_marketing):
matcher = requests_mock.post(
f'{settings.CONSENT_SERVICE_BASE_URL}'
f'{consent.CONSENT_SERVICE_PERSON_PATH_LOOKUP}',
json={
'results': [
{
'email': 'foo@bar.... | [
"def Get_applicant_status(self, email):\n status = None\n if email in self.Attendees:\n status = ApplicantStatus.Accepted\n elif email in self.Waitlist:\n status = ApplicantStatus.Waitlisted\n else:\n raise MissingAddressException(email)\n return s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to get consent status for a list of email addresses | def test_get_many(self, requests_mock, accepts_marketing, emails):
matcher = requests_mock.post(
f'{settings.CONSENT_SERVICE_BASE_URL}'
f'{consent.CONSENT_SERVICE_PERSON_PATH_LOOKUP}',
json={
'results': [
{
'email': ... | [
"def list_verified_email_addresses():\n pass",
"def Get_applicant_status(self, email):\n status = None\n if email in self.Attendees:\n status = ApplicantStatus.Accepted\n elif email in self.Waitlist:\n status = ApplicantStatus.Waitlisted\n else:\n ra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Try to update consent status | def test_update(self, requests_mock, accepts_marketing):
matcher = requests_mock.post(
f'{settings.CONSENT_SERVICE_BASE_URL}'
f'{consent.CONSENT_SERVICE_PERSON_PATH}',
json={
'consents': [
CONSENT_SERVICE_EMAIL_CONSENT_TYPE,
... | [
"def test_update_pre_authorise_account_consent_using_put(self):\n pass",
"def update_enquirer_consents(key: str, value: bool):\n consent.set_consent(key=key, value=value)\n logging.info(\"Updated enquirer consent\")",
"def test_accept_o_auth2_consent_request(self):\n pass",
"async def cons... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function retrieves all ACISAs | def RetrieveACISA():
db = DBConnector()
cur = db.cursor()
SQLcmd = "SELECT * FROM snaps.SNAPsLocation"
cur.execute(SQLcmd)
returnList = []
count = 0
for item in cur.fetchall():
count += 1
tmplist = [item[1], item[2], count, str(item[0])]
returnList.append(tmplist)
return returnList | [
"def accels(self):\n return self._accels",
"def get_acls():\n return config.get_cfg_storage(ID_ACL)",
"def get_all_saas_apps(self) -> list:\n return self._get(\"/spPortal/internetDb/serviceIdToSaasId/saasApps\")",
"def downloadACS(self):\n geoClassDict = {}\n for areaID in self.fips... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Method to calculate a stat over all time steps | def time_stat(self, stat="mean"):
# create cdo command and run it
cdo_command = f"cdo -tim{stat}"
run_this(cdo_command, self, output="ensemble") | [
"def method_compute_timestep(self):",
"def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the aggregated mean and stds. | def _get_aggregated_mean_std(self, means, stds, n):
mean = means.view(n, -1).sum(0) / n
std = (
stds.view(n, -1).sum(0) / n
+ ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
)
return mean.detach(), std.detach() | [
"def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Synchronize running_mean and running_var. Call this before eval. | def aggregate_stats(self):
if self.split_bn.track_running_stats:
(
self.bn.running_mean.data,
self.bn.running_var.data,
) = self._get_aggregated_mean_std(
self.split_bn.running_mean,
self.split_bn.running_var,
... | [
"def _synchronize_vars_using_mean(new_var: NestedMap,\n old_var: NestedMap) -> NestedMap:\n delta = new_var - old_var\n delta_mean = jax.lax.pmean(delta, axis_name=data_parallel_axis_name)\n updated_var = old_var + delta_mean\n return updated_var",
"def _for... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return duration in years | def duration(self):
if self.is_valid:
return relativedelta(self.expiry, datetime.date.today()).years
else:
return -1 | [
"def dyear(y):\n return timedelta(days=365*y)",
"def days_to_years(datum):\n return datum/DAYS_PER_YEAR",
"def years_since_birth(cls):\n yearsdelta = cls.seconds_since_birth() / SECS_IN_YEAR\n return yearsdelta",
"def unit_yr(self):\n return ((self.time_base * 60.0) * 24.0) * 365.0",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
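A quick worked check of the year arithmetic in the row above, using `dateutil`; the dates are made up:

```python
import datetime
from dateutil.relativedelta import relativedelta

today = datetime.date(2021, 6, 1)    # assumed "today" for the example
expiry = datetime.date(2024, 5, 30)

# relativedelta(...).years truncates: 2 years and ~11 months -> 2, not 3.
assert relativedelta(expiry, today).years == 2
```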
Transition from iceboot to domapp by uploading 'domappFile', uncompressing it and executing from iceboot. Load domapp FPGA first. | def uploadDomapp2(self, domappFile):
if not os.path.exists(domappFile): raise DomappFileNotFoundException(domappFile)
size = os.stat(domappFile)[ST_SIZE]
if size <= 0: return (False, "size error: %s %d bytes" % (domappFile, size))
# Load domapp FPGA
ok, txt = self.se("s\" domapp.... | [
"def load(self, app):\n self.shell(\"docker load < %s.gz\" % app)",
"def main(elf_file: click.File, ide_type: str, ide_path: click.Path) -> None:\n bin_file = f'{THIS_DIR}/flashloader.bin'\n cmd = get_elf_to_bin_command(ide_type, ide_path, elf_file, bin_file)\n if cmd is None:\n sys.exit(1)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function decorator for unittest test cases to specify test case timeout. | def timeout(time_limit):
class TimeoutException(Exception):
""" Subclass Exception to catch timer expiration during search """
pass
def handler(*args, **kwargs):
""" Generic handler to raise an exception when a timer expires """
raise TimeoutException("Test aborted due to timeo... | [
"def set_timeout(timeout):\n def decor(f):\n @functools.wraps(f)\n def inner(self, *args, **kwargs):\n self.useFixture(fixtures.Timeout(timeout, gentle=True))\n return f(self, *args, **kwargs)\n return inner\n return decor",
"def pytest_timeout_set_timer(item, sett... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
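The document in the row above is truncated; as an independent illustration only (not the row's actual implementation), here is a minimal signal-based sketch of the same idea, Unix-only and with hypothetical names:

```python
import functools
import signal

def sketch_timeout(time_limit):
    """Hypothetical stand-in: abort a test running longer than time_limit seconds."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            def handler(signum, frame):
                raise Exception("Test aborted due to timeout")
            old_handler = signal.signal(signal.SIGALRM, handler)
            signal.alarm(time_limit)  # time_limit must be an int of seconds
            try:
                return fn(*args, **kwargs)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        return wrapper
    return decorator
```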
Return counts of (total, unique) nodes visited | def counts(self):
return sum(self.counter.values()), len(self.visited) | [
"def count_unvisited(data):\n count = sum(n.count(\"n\") for n in data)\n return count",
"def visited(self):\n return sum(self.action_count)",
"def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)",
"def number_of_visits(self):\n return self.nodes(data=\"num... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get data from an Amarok database. We fetch rating and score as well as Amarok's unique id for the track to have more reliable syncing after the initial import. | def get_amarok_data(item, db):
if hasattr(item, 'amarok_uid') and item.amarok_uid:
condition = "REPLACE(uniqueid, 'amarok-sqltrackuid://', '') = '%s'" % MySQLdb.escape_string(item.amarok_uid)
else:
condition = "REPLACE(CONCAT_WS('/',lastmountpoint, rpath), '/./', '/') = '%s'" % MySQLdb.escape_s... | [
"def fetch_ratings():\n\n #if the DB is empty, fetch the data\n if (Rating.select().count() == 0):\n clean_and_update_ratings(clean=False)\n\n print('getting ratings from DB')\n date_labels, average_axis, count_axis = format_averages_for_chartjs(\n calc_ratings_over_time(\n requ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This is the method to call and analyze text with the supplied features | def analyze(self, features, text=None, url=None, html=None,
clean=True, xpath=None, fallback_to_raw=True,
return_analyzed_text=False, language=None):
body = {
'clean': clean,
'fallback_to_raw': fallback_to_raw,
'return_analyzed_text': return_an... | [
"def process_text(self):\n prpobj = Preprocess()\n refined_text = self.x_test.apply(prpobj.clean_data)\n tf1_old = pickle.load(open('vector.pkl', 'rb'))\n self.features = tf1_old.transform(refined_text)\n print(self.features)\n self.prediction()",
"def eval_text(self, fea... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Commit the change: make our own data identical to right's and sync it to the DB | def commit(self, right, proxy):
assert self._name == right._name
if right.is_dirty():
assert not self.is_dirty() or not self.is_valid()
        # sync to the DB
self._sync(proxy, right._state, right._data, self._data)
        # overwrite our own data with right's data
self._deepcopy(right)... | [
"def test_update_record(self):\n pass",
"def update(self, **validated_data):\n updated = self._provision(validated_data)\n if updated:\n try:\n db.session.commit()\n return True\n except Exception as error:\n db.session.rollba... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Crawl the content of the given pages. The idea: start from a given page, scrape its content, then find the "next chapter" link and continue, until the hyperlink behind "next chapter" is the novel's chapter-index link. | def crawl(base_url, start_url, file_name):
    # content of the current page
str_chapter_content = get_chapter_content(start_url)
append_to_file(str_chapter_content)
    # URL of the 'next chapter' (下一章) page
soup = BeautifulSoup(get_html(start_url), 'lxml')
next_url = soup.find('a', text = u'下一章')['href']
while next_url != base_u... | [
"def get_first_url(soup, wikipidia_base):\r\n global articls\r\n try:\r\n content=soup.find(id='mw-content-text').find(class_='mw-parser-output').find('p')\r\n for sp in soup.find(id='mw-content-text').find(class_='mw-parser-output').find('p').find_all('span'):\r\n sp.decompose()\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove ``>`` from the beginning of a line. | def clean(self, line):
m = self.RE.match(line)
if line.strip() == ">":
return ""
elif m:
return m.group(2)
else:
return line | [
"def clean(self, line):\r\n m = self.RE.match(line)\r\n if line.strip() == \">\":\r\n return \"\"\r\n elif m:\r\n return m.group(2)\r\n else:\r\n return line",
"def _stripstuffing(self, line):\n if line.startswith(u' '):\n return line[... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert ttyrec files to videos | def main(ctx, ttyrec, encoding, ibm, outfile, size, fps, font_size, font_file,
bold_font_file, info, info_all):
if ibm:
encoding = 'cp437'
fp, def_outfile = open_or_get(ttyrec)
try:
with fp:
updates = list(read_ttyrec(fp, encoding=encoding, errors='replace'))
except ... | [
"def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set the "entity_class_registry" field | def set_entity_class_registry(self, entity_class_registry):
self.entity_class_registry = entity_class_registry | [
"def register(self, entity):\n self.__initget_subregistry(entity.__class__).append(entity)",
"def register_class(self, entity_class):\n key = entity_class.__collection_name__\n\n if key not in self._registered_types:\n self._registered_types[key] = entity_class",
"def _extract_en... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract an entity class registry from one of the models of the inner SQLAlchemy query. This result of this function is used by several SQLAlchemy components during the extraction of the SQL query from a SQLAlchemy query. | def _extract_entity_class_registry(self):
for description in self.sa_query.column_descriptions:
if "entity" in description:
declarative_meta = description["entity"]
_class_registry = getattr(
declarative_meta, "_decl_class_registry", None)
... | [
"def query_to_models(query):\n return [\n d['expr'].class_\n for d in query.column_descriptions\n if isinstance(d['expr'], Mapper)\n ]",
"def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query",
"def get_for_protocolize(db_path, class_name, code):\n\tsessio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call f on each item in seq, calling inter() in between. | def interleave(inter, f, seq):
seq = iter(seq)
try:
f(next(seq))
except StopIteration:
pass
else:
for x in seq:
inter()
f(x) | [
"def applyToEach(l, f):\n for i in range(len(L)):\n L[i]=f(L[i])",
"def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])",
"def applyToEach(L, f):\n for i in range(len(L)):\n L[i] = f(L[i])",
"def iterate(f, x):\n while True:\n yield x\n x = f(x)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
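A tiny usage example for `interleave` above; the separator and the collector list are arbitrary:

```python
parts = []
interleave(lambda: parts.append(", "), parts.append, ["a", "b", "c"])
assert "".join(parts) == "a, b, c"
```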
Gets the device function name by translating a typed Python version to a templated cpp version. Python functions look like getVariableFloatArray6 and translate to getVariable. This function will detect and test against a set of known types and also extract the array length. This function returns None if the string is in... | def _deviceVariableFunctionName(self, tree, permitted_prefixes, allow_lengths = True):
cpp_func_name = ""
py_func = tree.attr
# extract function name start
for prefix in permitted_prefixes:
if py_func.startswith(prefix):
cpp_func_name = prefix
... | [
"def ggml_type_name(type: int) -> ffi.CData:\n ...",
"def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles a getMacroEnvironment function call (assuming it is correctly formatted, as checked with _deviceVariableFunctionName first) | def dispatchMacroEnvFunction(self, tree, tree_parent):
cpp_func_name = "getMacroProperty"
py_func = tree.attr
# extract type from function name
py_type = py_func[len(cpp_func_name):]
if py_type not in self._fgpu_types:
self.RaiseError(tree, f"'{py_type}' is not a vali... | [
"def test_get_environment_string(self):\n pass",
"def getEnv():",
"def get_environment_function_args(development_environment: DevelopmentEnvironment):\n return {\n \"vpc_config\": {\n \"security_group_ids\": [development_environment.security_group_id],\n \"subnet_ids\": de... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles arguments for a FLAME GPU device function. Arguments must use type hinting to be translated to cpp. | def dispatchFGPUDeviceFunctionArgs(self, tree):
# reset the locals variable stack
self._locals = ["pyflamegpu"]
# input message
first = True
annotation = None
for arg in tree.args.args:
# ensure that there is a type annotation
if not arg.annotation... | [
"def __call__(self, *args):\n args_ = []\n for arg, arg_t in zip(args, self._fn.argtypes):\n if hasattr(arg, \"ctypes\"):\n if arg.size == 0:\n # TODO eliminate unused arguments from kernel\n arg_ = arg_t(0.0)\n else:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A message iterator call may be a simple one (e.g. message_in(x, y, z)) or a call to a member (e.g. message_in.wrap()). Using this function avoids the global call dispatcher, which may accept member function calls on things that are not iterators. | def dispatchMessageIteratorCall(self, tree):
# simple case not a member function just an iterator with arguments
if isinstance(tree.func, ast.Name):
self.write(f"FLAMEGPU->{tree.func.id}")
if isinstance(tree.func, ast.Attribute) :
if isinstance(tree.func.value, ast.Name):... | [
"def walk(self):\n for msg in self.msg.walk():\n yield MessageWrapper(msg)",
"def test_dispatch_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A very limited set of function calls to members is supported, so these are fully evaluated here. t_parent is the Call ast object, required if the argument needs to be modified (i.e. in the case of macro environment properties). Function calls permitted are: pyflamegpu.function, a supported function call, e.g. pyflamegpu.ge... | def dispatchMemberFunction(self, t, t_parent):
# it could be possible that the Call object has no value property e.g. a()()
if not hasattr(t, "value"):
self.RaiseError(t, f"Function call is in an unsupported format.")
# Nested member functions (e.g. x.y.z())
if isinstance(t.... | [
"def _Call(self, t):\n # check calls but let attributes check in their own dispatcher\n funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var] # message_input variable is a valid function name as certain message types have arguments on iterator\n if isinstance(t.func, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks the decorators of the function definition, which must be either 'pyflamegpu.agent_function', 'pyflamegpu.agent_function_condition' or 'pyflamegpu.device_function'. Each is then processed in a different way using a specific dispatcher. Function calls are actually checked and only permitted (or user defined) functio... | def _FunctionDef(self, t):
self.write("\n")
# check decorators
if len(t.decorator_list) != 1 or not isinstance(t.decorator_list[0], ast.Attribute):
self.RaiseError(t, "Function definitions require a single pyflamegpu decorator of either 'pyflamegpu.agent_function', 'pyflamegpu.agent_... | [
"def visit_functiondef(self, node: nodes.FunctionDef) -> None:\n annotations = _get_all_annotations(node)\n if self._ignore_function(node, annotations):\n return\n\n # Check that common arguments are correctly typed.\n for arg_name, expected_type in _COMMON_ARGUMENTS.items():\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Two types of for loop are supported: 1) a message for loop, in which case the format requires an iterator using the named pyflamegpu function argument 'message_in'; 2) a range-based for loop with 1 to 3 arguments, which is converted into a C-style loop | def _For(self, t):
# if message loop then process differently
if isinstance(t.iter, ast.Name):
if t.iter.id == self._input_message_var:
self.dispatchMessageLoop(t)
else:
self.RaiseError(t, "Range based for loops only support message iteration using... | [
"def dispatchMessageIteratorCall(self, tree):\n # simple case not a member function just an iterator with arguments\n if isinstance(tree.func, ast.Name):\n self.write(f\"FLAMEGPU->{tree.func.id}\")\n if isinstance(tree.func, ast.Attribute) :\n if isinstance(tree.func.value... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A very limited set of attributes is supported, so these are fully evaluated here. Other places where attribute type expressions may occur will also evaluate them fully rather than recursively calling this function. The only attributes supported are pyflamegpu.attribute, a supported attribute, e.g. pyflamegpu.ALIVE. This will b... | def _Attribute(self,t):
# Only a limited set of globals supported
func_dict = None
# pyflamegpu singleton
if isinstance(t.value, ast.Name):
if t.value.id == "pyflamegpu":
if t.attr in self.fgpu_attrs:
# proceed
... | [
"def testAttributeIteration(self):\n kind = self.view.findPath('//Schema/Core/Kind')\n self.assert_(kind is not None)\n\n # Test iterating over literal attributes\n literalAttributeNames = ['classes'] \n for i in kind.iterAttributeValues(valuesOnly=True):\n self.failUnl... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Some basic checks are undertaken on calls to ensure that the function being called is either a builtin or a defined device function. A special dispatcher is required. | def _Call(self, t):
# check calls but let attributes check in their own dispatcher
funcs = self._device_functions + self.pythonbuiltins + [self._input_message_var] # message_input variable is a valid function name as certain message types have arguments on iterator
if isinstance(t.func, ast.Name... | [
"def careful_call(fn):",
"def check_functions_are_resolved(kernel):\n from loopy.symbolic import SubstitutionRuleExpander\n subst_expander = SubstitutionRuleExpander(kernel.substitutions)\n\n for insn in kernel.instructions:\n if isinstance(insn, MultiAssignmentBase):\n unresolved_calls... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function to visualize pymatgen Structure objects in a Jupyter notebook using the chemview package. | def quick_view(structure, bonds=True, conventional=False, transform=None, show_box=True, bond_tol=0.2, stick_radius=0.1):
s = structure.copy()
if conventional:
s = SpacegroupAnalyzer(s).get_conventional_standard_structure()
if transform:
s.make_supercell(transform)
atom_types = [i.symb... | [
"def test_ipython_repr_no_nglview(self):\n molecule = Molecule().from_smiles(\"CCO\")\n molecule._ipython_display_()",
"def jupyter():",
"def _show_structure(\n self,\n view,\n structure_id,\n text,\n fingerprint,\n ligand,\n feature_name,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove the sprite from all lists and cancel the update event. | def remove_from_sprite_lists(self):
super().remove_from_sprite_lists()
# It is very important to call this to prevent potential
# issues such as crashes or excess memory use from failed
# garbage collection.
pyglet.clock.unschedule(self.update) | [
"def remove(self, sprite):\n\n for layer in self.layers:\n try:\n layer.remove(sprite)\n except ValueError:\n pass",
"def remove(self, sprite):\r\n self.sprites.remove(sprite)",
"def remove_sprites(self, *sprites):\r\n with self.lock:\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates input data for tests using a preprocessed standard star and its calibration files. The raw files will be downloaded and saved inside the path stored in the `$DRAGONS_TEST/raw_inputs` directory. Processed files will be stored inside a new folder called "dragons_test_inputs". The subdirectory structure should refle... | def create_inputs_recipe():
module_name, _ = os.path.splitext(os.path.basename(__file__))
path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)
os.makedirs(path, exist_ok=True)
os.chdir(path)
os.makedirs("inputs/", exist_ok=True)
print('Current working directory:\n {:s}'.format(os.g... | [
"def create_inputs_recipe():\n import os\n from astrodata.testing import download_from_archive\n from geminidr.gmos.tests.spect import CREATED_INPUTS_PATH_FOR_TESTS\n from recipe_system.utils.reduce_utils import normalize_ucals\n from recipe_system.reduction.coreReduce import Reduce\n\n associated... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function checks that the ordering of the samples matches between the expression file and the metadata file. This ordering is used for calculating DEGs. | def compare_and_reorder_samples(expression_file, metadata_file):
# Check ordering of sample ids is consistent between gene expression data and metadata
metadata = pd.read_csv(metadata_file, sep="\t", header=0, index_col=0)
metadata_sample_ids = metadata.index
expression_data = pd.read_csv(expression_fi... | [
"def compare_internal_file_order(files: list):\n\n def list_equal(somelist: list) -> bool:\n return all(somelist[0] == x for x in somelist)\n\n data = []\n for file in files:\n with open(file, \"r\") as f:\n data.append([line.split(\",\")[0] for line in f.readlines()])\n\n lengt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function reads in Pseudomonas pathway data from `pathway_DB_filename`, formats it, and outputs it to `out_filename` in order to be used in GSEA_analysis.R | def format_pseudomonas_pathway_DB(pathway_DB_filename, local_dir, out_filename):
# Read in pathway data
pa_pathway_DB = pd.read_csv(
pathway_DB_filename,
names=["pathway id", "num genes", "genes"],
sep="\t",
header=None,
)
# Drop extra column
pa_pathway_DB.drop(colum... | [
"def database_get_pathway_information(path_results, list_organisms):\n path_results = Path(path_results)\n # Cycle organisms\n for id_organism in list_organisms:\n # Preallocation\n dict_organism = {}\n list_compound = []\n list_gene = []\n list_reactions = []\n li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the hash of a parsed JSON value using the given hash object. This function does not hash the JSON text; it hashes the object tree that is the result of parsing a string in JSON format. Hashables (JSON objects) are hashed entry by entry in order of the lexicographical ordering on the keys. Iterables are hashed ... | def hash_json(hash_obj, value):
try:
items = iter(list(value.items( )))
except AttributeError:
# Must check for string before testing iterability since strings are iterable
if isinstance( value, str ):
_hash_string( hash_obj, value )
else:
try:
... | [
"def _hash(self, value, get_val, get_child):\n hasher = getattr(hashlib, self.hash_func)\n children = get_child(value)\n\n # If leaf node\n if len(children) < 1:\n return hasher(get_val(value)).hexdigest()\n\n h = hasher()\n for child in children:\n # ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
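The document in the row above is truncated, so here is an independent minimal sketch of the scheme it describes (dict entries hashed in lexicographic key order, other iterables element by element, strings directly); the helper name is hypothetical:

```python
import hashlib

def sketch_hash_json(hash_obj, value):
    """Hash the parsed-JSON object tree, not its textual JSON form."""
    if isinstance(value, dict):
        for key in sorted(value):          # lexicographic key order
            hash_obj.update(key.encode())
            sketch_hash_json(hash_obj, value[key])
    elif isinstance(value, str):           # strings before iterability checks
        hash_obj.update(value.encode())
    elif isinstance(value, (list, tuple)): # iterables, element by element
        for item in value:
            sketch_hash_json(hash_obj, item)
    else:                                  # numbers, booleans, None
        hash_obj.update(repr(value).encode())

h = hashlib.sha256()
sketch_hash_json(h, {"b": [1, 2], "a": "x"})
print(h.hexdigest())
```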
Remove selected items from the tree. Because the data is stored separately, we also need to deal with it, but deleting the matching items from the data list and updating all of the data indexes is a bit of a headache, so we just make them empty. | def remove_treeItem(browser, tree):
items = tree.selectedItems()
for item in items:
if item.listIndex: # Only dataset items have a listIndex
browser.ui.workingDataTree.dataItems[item.listIndex] = []
sip.delete(item) | [
"def unselectAll(self):\n\t\tself.tree.UnselectAll()",
"def on_removeButton_clicked(self):\n for itm in self.subrepositories.selectedItems():\n self.__removed.append(itm.text())\n row = self.subrepositories.row(itm)\n self.subrepositories.takeItem(row)\n del itm"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Clone h5 item. Useful for Drag & Drop | def clone_item(item):
i = h5Item(item.text(0))
i.path = item.path
i.listIndex = item.dataIndex
i.originalIndex = item.originalIndex
i.data = item.data
return i | [
"def clone(self, *args, **kwargs):\r\n overrides = kwargs.setdefault(\"overrides\", {})\r\n overrides.setdefault(\"name\", \"Cloned: {0}\".format(self.name))\r\n return super(Tag, self).clone(*args, **kwargs)",
"def copy(self):\n new_h5 = FileHDFio(file_name=self.file_name, h5_path=sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Helper function to convert SPARQL results into a Pandas data frame. | def get_sparql_dataframe(query, service = "https://query.wikidata.org/sparql"):
sparql = SPARQLWrapper(service)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
result = sparql.query()
processed_results = json.load(result.response)
cols = processed_results['head']['vars']
out = []
f... | [
"def get_sparql_dataframe(service, query):\n sparql = SPARQLWrapper(service)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n result = sparql.query()\n\n processed_results = json.load(result.response)\n cols = processed_results['head']['vars']\n\n out = []\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
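A usage sketch for the row above, assuming its truncated tail builds the DataFrame from `cols` and `out` (as the matching negative suggests); it hits the live Wikidata endpoint, so it needs network access:

```python
query = """
SELECT ?item ?itemLabel WHERE {
  ?item wdt:P31 wd:Q146 .   # instances of "house cat"
  SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
}
LIMIT 5
"""
df = get_sparql_dataframe(query)   # default service is the Wikidata endpoint
print(df.columns.tolist())         # expected: ['item', 'itemLabel']
```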
Build a set of resources that are available at a given time. It might make more sense to work based on a given restricted resource set. | def avail(self, time, resource_group):
    a = set()
    for r in resource_group.resources:
        # TODO: availability check unimplemented; add r to `a` when it is free at `time`.
        pass
    return a | [
"def _filter_resources_by_age(self, resources: [], resource_age_minutes: int):\n all_resources = []\n for resource in resources:\n if resource_age_minutes:\n start = self._to_utc_datetime(resource.updated_on)\n end = datetime.utcnow().replace(tzinfo=pytz.UTC)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test to see if the mongodb client logger can persist a log entry to the database | def test_mongo_logging_client_persists_log():
error_message = "This is a test message."
logger = LoggingService(console_output=True)
result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))
logger.log(LogEntry(LogLevel.WARN, __name__, error_message))
logger.log(LogEntry(LogLevel.INFO... | [
"def test_logging_to_mongo(self):\n assert ml.MongoClient is MockMongoClient\n\n handler = ml.MongoHandler(level=logging.DEBUG)\n self.assertIsInstance(handler.connection, MockMongoClient)\n\n # Ensure there is nothing in the database.\n self.assertEqual(handler.collection.count()... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
All horizontal squares from the piece's point of view. Returns a list of relative movements up to the board's bound. | def horizontals(self):
horizontal_shifts = set(izip_longest(map(
lambda i: i - self.x, range(self.board.length)), [], fillvalue=0))
horizontal_shifts.discard((0, 0))
return horizontal_shifts | [
"def get_moves_for_square(self, square):\n (x,y) = square\n\n # determine the color of the piece.\n color = self[x][y]\n\n # skip empty source squares.\n if color==0:\n return []\n\n # search all possible directions.\n moves = []\n for direction in ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
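A quick worked check of the shift arithmetic in `horizontals` above, spelled with Python 3's `itertools.zip_longest` (the row uses the Python 2 era `izip_longest`); the board width and piece position are made up:

```python
from itertools import zip_longest

board_length, x = 3, 1  # assumed: a 3-wide board, piece at x == 1
shifts = set(zip_longest((i - x for i in range(board_length)), [], fillvalue=0))
shifts.discard((0, 0))
assert shifts == {(-1, 0), (1, 0)}  # one square left, one square right
```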
All vertical squares from the piece's point of view. Returns a list of relative movements up to the board's bound. | def verticals(self):
vertical_shifts = set(izip_longest([], map(
lambda i: i - self.y, range(self.board.height)), fillvalue=0))
vertical_shifts.discard((0, 0))
return vertical_shifts | [
"def find_all_verticals(board: Board) -> Set[Vertical]:\n verticals = set()\n\n for row in range(1, len(board.state[0]) - 1, 2):\n for col in range(len(board.state[0][0])):\n upper = Square(row, col)\n lower = Square(row + 1, col)\n\n if board.is_empty(upper) and board.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return list of relative movements allowed. | def movements(self):
raise NotImplementedError | [
"def possible_moves(self):\n legal_move_list = []\n for car_name, car in self.__cars.items():\n possible_moves = self.__possible_moves_helper(car)\n for move in possible_moves:\n cell_to_move = car.movement_requirements(move)[0]\n if not self.cell_co... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
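`movements` in the row above is an abstract hook; a self-contained sketch of how a concrete piece might override it (the base class here is a hypothetical stub standing in for the unshown one):

```python
class Piece:
    def movements(self):
        """Return list of relative movements allowed."""
        raise NotImplementedError

class King(Piece):
    def movements(self):
        # The eight one-square relative shifts around the piece.
        return [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                if (dx, dy) != (0, 0)]

assert len(King().movements()) == 8
```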
Return the cached territory occupied by the piece. | def territory(self):
cache_key = (
self.board.length, self.board.height, self.uid, self.index)
if cache_key not in self.territory_cache:
vector = self.compute_territory()
self.territory_cache[cache_key] = vector
else:
vector = self.territory_cache[... | [
"def get_tile_from_cache(self, x, y):\n\n if not self.map.is_on_map(x, y):\n return Tile.EMPTY\n\n return self.tile_cache[y + SPAWN_MARGIN][x + SPAWN_MARGIN]",
"def compute_territory(self):\n # Initialize the square occupancy vector of the board.\n vector = self.board.new_ve... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the territory reachable by the piece from its current position. Returns a list of boolean flags over squares indexed linearly, where True means the square is reachable. | def compute_territory(self):
# Initialize the square occupancy vector of the board.
vector = self.board.new_vector()
# Mark current position as reachable.
vector[self.index] = True
    # List all places reachable by the piece from its current position.
for x_shift, y_shift... | [
"def find_moves(self):\n\n from itertools import product\n free_position = self.find_free()\n return [list(free_position+i) for i in [[0,1],[1,0],[-1,0],[0,-1]] if tuple(i+free_position) in product(range(self.size),repeat=2)]",
"def locations_of_pieces_with_valid_moves(active_player, board):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate M3U file for the given software into out_dir | def generate(software, out_dir, suffix, dry_run):
m3u_filename = software.name + (suffix if suffix else '') + '.m3u'
if not dry_run:
m3u_fd = open(os.path.join(out_dir, m3u_filename), 'w')
for i in software.images():
image_rel_path = os.path.relpath(i.path, out_dir)
if not dry_run... | [
"def create_m3u(self):\n\n m3u = \"#EXTM3U\\n\"\n for i in self.file_tag_map:\n m3u += \"#EXTINF:-1,%s - %s\\n\" % (self.tracks[i][0], \\\n self.tracks[i][1]) \n m3u += \"%s\\n\" % self.file_tag_map[i][1]\n\n return self.write_file(m3u, os.path.join... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate M3U file for the list of softwares into out_dir | def generate_all(softwares, out_dir, suffix, dry_run):
if not dry_run:
if not out_dir.exists():
out_dir.mkdir(parents=True)
multi_images_softwares = (x for x in softwares if x.nb_images() > 1)
for i in multi_images_softwares:
try:
generate(i, out_dir, suf... | [
"def generate(software, out_dir, suffix, dry_run):\n m3u_filename = software.name + (suffix if suffix else '') + '.m3u'\n\n if not dry_run:\n m3u_fd = open(os.path.join(out_dir, m3u_filename), 'w')\n\n for i in software.images():\n image_rel_path = os.path.relpath(i.path, out_dir)\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively parses XML contents to a Python dict. We assume that `object` tags are the only ones that can appear multiple times at the same level of a tree. | def recursive_parse_xml_to_dict(xml):
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in... | [
"def recursive_parse_xml_to_dict(xml):\r\n if not xml:\r\n return {xml.tag: xml.text}\r\n result = {}\r\n for child in xml:\r\n child_result = recursive_parse_xml_to_dict(child)\r\n if child.tag != 'object':\r\n result[child.tag] = child_result[child.tag]\r\n else:\r\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
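A usage example for the row above, assuming its truncated tail appends repeated `object` children to a list and returns `{xml.tag: result}` (as the matching negative suggests):

```python
import xml.etree.ElementTree as ET

root = ET.fromstring(
    "<annotation>"
    "<filename>img.jpg</filename>"
    "<object><name>cat</name></object>"
    "<object><name>dog</name></object>"
    "</annotation>"
)
result = recursive_parse_xml_to_dict(root)
# 'object' tags are collected into a list; other tags map to a single value.
assert result["annotation"]["filename"] == "img.jpg"
assert result["annotation"]["object"][0]["name"] == "cat"
```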
Key to sort hosts / domains alphabetically, by domain name. | def domain_sort_key(domain):
import re
domain_expr = r'(.*\.)?(.*\.)(.*)' # Eg: (www.)(google.)(com)
domain_search = re.search(domain_expr, domain)
if domain_search and domain_search.group(1):
# sort by domain name and then everything left of
# Eg: google, com, www
domain_valu... | [
"def subdomain_sorting_key(hostname):\n parts = hostname.split('.')[::-1]\n if parts[-1] == 'www':\n return parts[:-1], 1\n return parts, 0",
"def sort_domains(domains):\n domains.sort(domain_comparator)",
"def condense_hostname(hostname, top_level_domains=None):\n if top_l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
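A usage sketch for `domain_sort_key` above, assuming the truncated tail returns the reordered (domain, TLD, subdomain) tuple its comments describe:

```python
hosts = ["mail.example.org", "example.com", "www.example.com"]
# Sorting on the key groups hosts by domain name first, so example.com
# and www.example.com end up adjacent regardless of their subdomains.
print(sorted(hosts, key=domain_sort_key))
```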
Draw and label a cube. `edges` is a list of numbers between 1 and 12, specifying which of the 12 cube edges to draw | def draw_cube(ax, xy, size, depth=0.3,
edges=None, label=None, label_kwargs=None, **kwargs):
if edges is None:
edges = range(1, 13)
x, y = xy
y -= size # set left/up corner as the first (0,0) for one cube
# first plot background edges
if 9 in edges:
ax.plot([x + d... | [
"def print_cube(self):\r\n for i in range(0, 6):\r\n print(self.faces[i])\r\n for j in range(0, n):\r\n print(self.faces[i].colours()[4*j:4*j + n])\r\n #print(self.faces[i].colours())\r",
"def print_cube(self) -> None:\n for face in range(0, 6):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validates the .workflow file. | def validate_syntax(self):
resolves_present = False
uses_present = False
if not self.wf.get('workflow', None):
pu.fail('A workflow block must be present\n')
else:
for _, wf_block in dict(self.wf['workflow']).items():
if wf_block.get('resolves', Non... | [
"def workflow_validate(ctx, file):\n logging.debug('command: {}'.format(ctx.command_path.replace(\" \", \".\")))\n for p in ctx.params:\n logging.debug('{param}: {value}'.format(param=p, value=ctx.params[p]))\n try:\n load_reana_spec(click.format_filename(file))\n click.echo(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
normalize the dictionary representation of the workflow | def normalize(self):
# modify from this:
#
# "workflow": {
# "test-and-deploy": {
# "resolves": "deploy"
# }
# }
#
# to this:
#
# "workflow": {
# "name": "test-and-deploy",
# "on": "push"... | [
"def normalise_workflow(workflow_dict):\n normalise_process(workflow_dict)\n if not 'steps' in workflow_dict:\n exit_perm_fail(\"No steps in Workflow\")\n\n if isinstance(workflow_dict['steps'], dict):\n new_steps = []\n for step_id, step in workflow_dict['steps'].items():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A GHA workflow is defined by specifying edges that point to the previous nodes they depend on. To make the workflow easier to process, we add forward edges. We also obtain the root nodes. | def complete_graph(self):
root_nodes = set()
for name, a_block in self.wf['action'].items():
a_block['name'] = name
for n in a_block.get('needs', []):
if not self.wf['action'][n].get('next', None):
self.wf['action'][n]['next'] = set()
... | [
"def _bfs_forward(self, start_node):\n visited = {node: (False) for node in self.layer_names}\n queue = [start_node]\n visited[start_node] = True\n while queue:\n node = queue.pop(0)\n if node != start_node:\n input_nodes = self.g.predecessors(node)\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
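A standalone sketch of the forward-edge idea in the row above (not the row's exact code): GHA blocks name their dependencies via `needs`, so we invert those edges into `next` sets and collect blocks without `needs` as roots; the workflow literal is made up:

```python
actions = {
    "build":  {},
    "test":   {"needs": ["build"]},
    "deploy": {"needs": ["test"]},
}

root_nodes = set()
for name, block in actions.items():
    for dep in block.get("needs", []):
        # Forward edge: dep -> name, mirroring the backward `needs` edge.
        actions[dep].setdefault("next", set()).add(name)
    if not block.get("needs"):
        root_nodes.add(name)

assert root_nodes == {"build"}
assert actions["build"]["next"] == {"test"}
```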
Factory of ActionRunner instances, one for each action | def instantiate_runners(self):
for _, a in self.wf['action'].items():
if 'docker://' in a['uses']:
a['runner'] = DockerRunner(
a, self.workspace, self.env,
self.quiet, self.debug, self.dry_run)
continue
if 'shub://'... | [
"def ActionFactory(body=None, exception=None):\n # type: (Optional[Dict[six.text_type, Any]], Optional[Exception]) -> Type[Action]\n class TestAction(Action):\n def run(self, request): # type: (EnrichedActionRequest) -> Dict[six.text_type, Any]\n if exception:\n raise excepti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generator of stages. A stage is a list of actions that can be executed in parallel. | def get_stages(self):
current_stage = self.wf['root']
while current_stage:
yield current_stage
next_stage = set()
for n in current_stage:
next_stage.update(self.wf['action'][n].get('next', set()))
current_stage = next_stage | [
"def stages(self):\n return StageManager(session=self._session)",
"def gen_stages(stages_def: List[StageDefinition]) -> List[RunbrickPyStage]:\n stages_dict: Dict[str, RunbrickPyStage] = {}\n stages_list: List[RunbrickPyStage] = []\n\n for sdef in stages_def:\n curr_stage = RunbrickPyStage(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Runs the singularity action | def run(self, reuse=False):
build = True
if 'shub://' in self.action['uses']:
image = self.action['uses']
build = False
elif './' in self.action['uses']:
image = 'action/' + os.path.basename(self.action['uses'])
singularityfile_path = os.path.join(... | [
"def run(self, simulation):",
"def singularity_start(self, image):\n env_vars = self.action.get('env', {})\n\n for s in self.action.get('secrets', []):\n env_vars.update({s: os.environ[s]})\n\n for e, v in self.env.items():\n env_vars.update({e: v})\n\n env_vars.u... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates the image name from the image url. | def generate_image_name(self, image):
return image.replace('shub://', '').replace('/', '-') + '.simg' | [
"def get_generated_image_name(full_image_url):\r\n\r\n logging.debug('get_generated_image_name({})'.format(full_image_url))\r\n\r\n image_name = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\n image_extension = full_image_url.split(\".\")[-1]\r\n image_name = image_name + \".\" + image_extension\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check whether an instance exists or not. | def singularity_exists(self):
instances = Client.instances(quiet=self.quiet)
for instance in instances:
if self.pid in instance.name:
return True
return False | [
"def instance_exists(self, instance):\n pass",
"def check_instance_exists(self):\n return CloudInstances.objects.filter(customer_id=self.customer.customer_id, instance_id=self.instance['id'],\n region=self.instance['region']).exists()",
"def exists(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts a singularity instance based on the image. | def singularity_start(self, image):
env_vars = self.action.get('env', {})
for s in self.action.get('secrets', []):
env_vars.update({s: os.environ[s]})
for e, v in self.env.items():
env_vars.update({e: v})
env_vars.update({'HOME': os.environ['HOME']})
#... | [
"def start_instance(instanceName=None):\n pass",
"def start_ssm(self, ssm_image):\n pass",
"def start_instance(InstanceId=None):\n pass",
"def _start_instance(self, instance):\n self.log.info(\"Starting instance %s\", instance.id)\n instance.start()",
"def test_launch_instance_fro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Load FOIA SBA datasets | def load_sba_datasets(dbm, direc):
foia_504_1991_present = pd.read_excel(direc + 'FOIA - 504 (FY1991-Present).xlsx')
foia_7a_1991_1999 = pd.read_excel(direc + 'FOIA - 7(a) (FY1991-FY1999).xlsx', skiprows=1)
foia_7a_2000_2009 = pd.read_excel(direc + 'FOIA - 7(a)(FY2000-FY2009).xlsx', skiprows=1)
foia_7a_... | [
"def load_dataset(f):\n return load_graphs(f)",
"def load_kiba_dataset():\n trainn_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'KIBAtest', 'folds', 'train_fold_setting1.txt')))\n train_fold = []\n for e in zip(*trainn_fold):\n for ee in e:\n tra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
We use the FMI standard to extract the correct set of config_params, inputs, and outputs. We look into the "causality" attribute for each variable in the model description | def _extract_sim_config_from_fmi_std(self):
print("\n---- Looking to see if FMU model description contains required 'causality' type definitions ----")
sim_config_params = []
sim_inputs = []
sim_outputs = []
sim_other_vars = []
for variable in self.model_descrip... | [
"def doParametersOfInterest(self):\n\n print 'XsecCorrelation creating POI'\n # --- Signal Strength and BSM contribution as POI --- \n self.modelBuilder.doVar(\"mu_8TeV[1,0,3]\")\n self.modelBuilder.doVar(\"mu_7TeV[1,0,3]\")\n #self.modelBuilder.doVar(\"epsBSM[0,-1,1]\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dump the sim's config_params, inputs, and outputs to a YAML file. By default, we overwrite the main YAML config file. | def _dump_config_to_yaml_file(self,
sim_config_params = None,
sim_inputs = None,
sim_outputs = None,
sim_other_vars = None,
is_aux_yaml = False):
... | [
"def _save_configuration_to_yml(self):\n data = self.get_configuration_data()\n timestamp = self.model.timestamp\n with open(os.path.join(CHECKPOINTS_DIR, timestamp, 'config_{}.yml'.format(timestamp)), 'w') as outfile:\n yaml.dump(dict(data), outfile, default_flow_style=False)",
"d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a string with the sim's config_params, inputs, and outputs for the model | def _get_sim_config_str(self):
log = "[FMU Validator] The set of configuration_parameters, inputs, and outputs defined is the following:\n"
log += "\n{}: {}".format("Sim Config Params -- Brain Config ", self.sim_config_params)
log += "\n{}: {}".format("Sim Inputs -- Brain Act... | [
"def get_config():\n return ImSimConfiguration()",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def test_parse_config_file_model_outputs(self):\n model = SingleResidual()\n model.eval()\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove non-alphanumeric characters from variable names to make them valid for Bonsai interaction. | def _clean_non_alphanumeric_chars(self):
for i,variable in enumerate(self.model_description.modelVariables):
clean_name = re.sub(r'[^a-zA-Z0-9_]', '', variable.name)
if clean_name != variable.name:
log = "Sim variable '{}' has been renamed to '{}' ".format(variable.name,... | [
"def remove_special_characters(self, txt: str) -> str:",
"def rmchars(value):\n value = re.sub(\"[^A-Za-z0-9.-]+\", \"\", value)\n return value",
"def sanitize(text):\n text = str(text).strip().replace(' ', '_')\n return re.sub(r'(?u)[^-\\w.\\/]', '', text)",
"def removeSpecialChars(self) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Template for simulating FMU models for Bonsai integration. Note, it calls FMUSimValidation to validate the model when first instantiated. | def __init__(
self,
model_filepath: str,
fmi_version: str = FMI_VERSION,
start_time = START_TIME,
stop_time = STOP_TIME,
step_size = STEP_SIZE,
user_validation: bool = False,
use_unzipped_model: bool = False,
):
# validate simulation: config_v... | [
"def __set_fmu__(self, fmu_file, result_handler, solver, atol, rtol, verbose):\n if self.fmu is None:\n \n # TODO:\n # See what can be done in catching the exception/propagating it\n self.fmu = pyfmi.load_fmu(fmu_file)\n \n # Get the optio... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Close model and remove unzipped model from temporary folder. | def close_model(self):
# Ensure model has been initialized at least once
self._model_has_been_initialized("close_model")
# terminate fmu model
# - avoids error from calling self.fmu.terminate if termination has already been performed
self._terminate_model()
# f... | [
"def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None",
"def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None",
"def cleanUp(self):\r\n # Close any... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
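A sketch of the close-and-clean-up pattern, assuming the model was unzipped into a temporary directory; the `ModelWrapper` class and its attributes are illustrative stand-ins for the FMU wrapper.

```python
import shutil
import tempfile

class ModelWrapper:
    def __init__(self):
        # Unzip target: a throwaway directory created per instance.
        self.unzip_dir = tempfile.mkdtemp(prefix="fmu_")
        self.terminated = False

    def close_model(self):
        # Guard against double termination before releasing the files.
        if not self.terminated:
            self.terminated = True  # the real wrapper calls fmu.terminate() here
        shutil.rmtree(self.unzip_dir, ignore_errors=True)
```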
Get a list of all variables in the sim (removing duplicates, if any). Note, the list is cached the first time this method is called. | def get_all_var_names(self):
if hasattr(self, "all_var_names"):
return self.all_var_names
# Append all variables in model (defined in YAML).
aux_all_var_names = []
aux_all_var_names.extend(self.sim_config_params)
aux_all_var_names.extend(self.sim_inputs)
aux... | [
"def collect_variables(self):\n variables = []\n for eq in self:\n variables.extend(eq.collect_variables())\n\n # Make the list items unique.\n variables = list(set(variables))\n\n return variables",
"def get_all_variables(self):\n return []",
"def vars(self)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
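A compact sketch of the dedup-and-cache behavior, assuming the three variable sets are lists of strings; `dict.fromkeys` removes duplicates while preserving first-seen order.

```python
class SimConfig:
    def __init__(self, config_params, inputs, outputs):
        self.sim_config_params = config_params
        self.sim_inputs = inputs
        self.sim_outputs = outputs

    def get_all_var_names(self):
        # Cache the result on first call, as the original does via hasattr.
        if not hasattr(self, "all_var_names"):
            combined = self.sim_config_params + self.sim_inputs + self.sim_outputs
            # dict.fromkeys drops duplicates while keeping first-seen order.
            self.all_var_names = list(dict.fromkeys(combined))
        return self.all_var_names
```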
Get var indices for each var name provided in the list. | def _var_names_to_indices(self, var_names: List):
if type(var_names) is not type([]):
# Return empty array if input is not 'list' type
print("[_var_names_to_indices] Provided input is not of type list.")
return []
indices_array = []
names_array = []
... | [
"def indices_of_var(v):\n name = v.varName\n indices = name[2:].split(',')\n i, j = int(indices[0]), int(indices[1])\n return i, j",
"def _get_indices(vars):\n indices = set()\n for ix in [set(var.indices) for var in vars]:\n indices = indices.union(set(ix))\n return li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
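A minimal sketch of the name-to-index mapping, assuming a master list of variable names to index into; unknown names are skipped rather than raising.

```python
from typing import List

def var_names_to_indices(all_var_names: List[str], var_names: List[str]) -> List[int]:
    # Reject non-list input, echoing the guard in the original method.
    if not isinstance(var_names, list):
        print("[var_names_to_indices] Provided input is not of type list.")
        return []
    # Map each requested name to its position in the master list.
    return [all_var_names.index(name) for name in var_names if name in all_var_names]
```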
Get a unique id for the instance name (identifier). | def _get_unique_id(self):
now = datetime.now()
u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))
return "instance" + str(u_id) | [
"def get_identifier(self):\n return _get_instance_id_from_arn(self.arn)",
"def instance_id(self) -> str:\n return pulumi.get(self, \"instance_id\")",
"def unique_instance_id(self):\n return self._uuid",
"def instance_identifier(self):\n return self._instance_identifier",
"def get... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
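The document derives the id from datetime fields, which can collide when two instances are created within the same second. A common alternative (an assumption, not the original approach) is a UUID suffix:

```python
import uuid

def get_unique_id() -> str:
    # uuid4 gives a collision-resistant hex string regardless of creation time.
    return "instance" + uuid.uuid4().hex
```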
Make sure both elements are in bond_len_dict, and return the bond length | def check_bond_len(dict, el_a, el_b):
if el_a in dict:
if el_b in dict[el_a]:
return dict[el_a][el_b]
print()
print(el_a + " and " + el_b + " bond length currently unsupported. Add value to the csv file.")
sys.exit() | [
"def check_bond_length(mol: Dict) -> Tuple[bool, Optional[List[str]]]:\n\n li_len = 2.8\n bond_length_limit = {\n # H\n (\"H\", \"H\"): 0.74,\n (\"H\", \"H\"): None,\n (\"H\", \"C\"): 1.09,\n (\"H\", \"O\"): 0.96,\n (\"H\", \"F\"): 0.92,\n (\"H\", \"P\"): 1.44,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
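A sketch of the lookup with one deliberate change: it tries the element pair in both orders, since bond lengths are symmetric — the original only checks `dict[el_a][el_b]`.

```python
import sys

def check_bond_len(bond_len_dict, el_a, el_b):
    # Bond lengths are symmetric, so try the pair in both orders.
    for first, second in ((el_a, el_b), (el_b, el_a)):
        if first in bond_len_dict and second in bond_len_dict[first]:
            return bond_len_dict[first][second]
    print("{} and {} bond length currently unsupported. "
          "Add value to the csv file.".format(el_a, el_b))
    sys.exit()
```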
Check which atoms are within bonding range | def bond_checker(atom, dict, bond_dict):
bound = []
for item, values in dict.items():
bond_range = check_bond_len(bond_dict, atom[0], values["element"]) + 0.2
if distance_checker(atom[1:], values["coor"]) <= bond_range:
bound.append(item)
return bound | [
"def bond_check(atom_distance,min_length=0,max_length=1.5):\n \n if atom_distance > min_length and atom_distance <= max_length:\n return True\n else:\n return False",
"def overlaps(self, atom, check_up_to, get_all_overlapping_atoms=True):\n if (check_up_to == 0):\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
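A sketch of the bonding-range check, reusing `check_bond_len` from the previous sketch; the 0.2 tolerance matches the document, and the `(element, x, y, z)` atom layout is inferred from the `atom[0]` / `atom[1:]` accesses.

```python
import math

def distance(a, b):
    # Euclidean distance between two 3D coordinates.
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

def bond_checker(atom, atom_dict, bond_dict, tolerance=0.2):
    # atom_dict maps atom id -> {"element": ..., "coor": [x, y, z]}.
    bound = []
    for atom_id, values in atom_dict.items():
        bond_range = check_bond_len(bond_dict, atom[0], values["element"]) + tolerance
        if distance(atom[1:], values["coor"]) <= bond_range:
            bound.append(atom_id)
    return bound
```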
Takes an atom dict and writes it to an .xyz file named filename inside foldername under ../Created_QD | def dict2file(dict, filename, foldername):
if foldername:
if not os.path.exists("../Created_QD/" + foldername):
os.makedirs("../Created_QD/" + foldername)
file = open("../Created_QD/" + foldername + "/" + filename + ".xyz", "w")
else:
file = open("../Created_QD/" + filename +... | [
"def write_xyz(self, atoms, filename=None):\n if filename is None:\n raise ValueError(\"write_xyz: Must give a path to the output file\")\n else:\n with open(filename, \"w\") as f:\n f.write(str(len(atoms)) + \"\\n\\n\")\n for atom in atoms:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
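A sketch of the .xyz writer with the directory handling folded into `os.makedirs(..., exist_ok=True)`; the `{"element": ..., "coor": ...}` value layout is an assumption carried over from the surrounding snippets.

```python
import os

def dict_to_xyz(atom_dict, filename, foldername=None):
    # Resolve the output directory under ../Created_QD, creating it if needed.
    base = os.path.join("..", "Created_QD")
    target_dir = os.path.join(base, foldername) if foldername else base
    os.makedirs(target_dir, exist_ok=True)
    path = os.path.join(target_dir, filename + ".xyz")
    with open(path, "w") as f:
        # .xyz format: atom count, blank comment line, then "element x y z" rows.
        f.write(str(len(atom_dict)) + "\n\n")
        for values in atom_dict.values():
            x, y, z = values["coor"]
            f.write("{} {} {} {}\n".format(values["element"], x, y, z))
```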
Finds the atom at the origin in a dict and returns its id | def base_atom(dict):
for atom, values in dict.items():
xyz = values["coor"]
if xyz[0] == xyz[1] == xyz[2] == 0:
return atom | [
"def find_demand_id(demand_dict, vn_id, fvr_id, svr, nbr):\n #print vn_id, fvr_id, svr, nbr\n for demand_id in demand_dict:\n if vn_id == demand_dict[demand_id]['vn_id'] and \\\n fvr_id == demand_dict[demand_id]['fnode_id'] and \\\n svr == demand_dict[demand_id]['svr'] and \\\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
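The document compares float coordinates to zero with `==`, which is fragile once coordinates come from arithmetic. A tolerance-based variant (an assumption, not the original) is:

```python
import math

def base_atom(atom_dict, tol=1e-9):
    # Compare each coordinate against zero with an absolute tolerance
    # instead of exact float equality.
    for atom_id, values in atom_dict.items():
        if all(math.isclose(c, 0.0, abs_tol=tol) for c in values["coor"]):
            return atom_id
    return None  # explicit miss instead of falling off the loop
```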
Converts the strings 'y' and 'n' to a boolean | def y2true(text):
while True:
if text == 'y':
return True
elif text == 'n':
return False
else:
text = input("Wrong input, try again: ") | [
"def b2YN(val):\n if val : return 'Y'\n else : return 'N'",
"def y_n(ch):\r\n chs = ['yes', 'y', 'no', 'n']\r\n ch = check(ch, chs)\r\n\r\n if ch == 'yes' or ch == 'y':\r\n return True\r\n return False",
"def str_to_bool(s):\n if len(s) > 0 and s[0] in \"yYtT1\":\n return True... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
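The same prompt-until-valid behavior can be written without the `while True` / `else` ladder; a minimal sketch:

```python
def y2true(text: str) -> bool:
    # Re-prompt until the user types exactly 'y' or 'n'.
    while text not in ("y", "n"):
        text = input("Wrong input, try again: ")
    return text == "y"
```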
Returns a matrix of map tiles along with the map width and height | def createTiles():
Renderer.Clear()
map = []
w, h = len(testmap[0]), len(testmap)
x, y = 0, 0
for row in testmap:
for char in row:
map.append(makeTile(char, x, y))
x += 1
y += 1
x = 0
return map, w, h | [
"def generate_base_image_matrix():\n image_matrix = []\n\n for y in range(MAP_SIZE):\n row = []\n for x in range(MAP_SIZE):\n row.append(COLOUR_WATER)\n image_matrix.append(row)\n\n return image_matrix",
"def generate_tiles(self):\n array = []\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
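The document actually builds a flat list plus width and height. A nested-list variant that matches the "matrix" wording (the `make_tile` stub is a placeholder for the original `makeTile`):

```python
def make_tile(char, x, y):
    # Placeholder tile constructor; the original makeTile builds a renderable tile.
    return {"char": char, "x": x, "y": y}

def create_tiles(testmap):
    # Row-major matrix: tiles[y][x] corresponds to testmap[y][x].
    tiles = [[make_tile(char, x, y) for x, char in enumerate(row)]
             for y, row in enumerate(testmap)]
    return tiles, len(testmap[0]), len(testmap)
```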
This method parses poetic movements as specified in the movements_to_scrape list, follows each movement link, and yields a request using the parse_movement method | def parse(self, response):
movements_to_scrape = ["Beat","Black Arts","Black Mountain","Conceptual Poetry","Concrete Poetry",
"Confessional Poetry","Contemporary","Dark Room Collective","Formalism","Futurism",
"Harlem Renaissance","Jazz Poetry","Lang... | [
"def parse_movement(self, response):\n movement_name = response.meta['movement_name']\n movement_url = response.meta['movement_url']\n\n sresponse = scrapy.Selector(response)\n\n #Because each movement page contains a table that has maximum of ten rows, we need to go to the next page\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
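A minimal Scrapy sketch of the follow-and-delegate pattern these spider methods use; the spider name, start URL, and CSS selector are placeholders, not taken from the original spider.

```python
import scrapy

class PoetrySpider(scrapy.Spider):
    name = "poetry"  # illustrative
    start_urls = ["https://example.org/movements"]  # placeholder URL

    def parse(self, response):
        # Follow each movement link and hand the page to parse_movement,
        # passing context through meta as the original does.
        for link in response.css("a.movement::attr(href)").getall():  # assumed selector
            yield response.follow(
                link,
                callback=self.parse_movement,
                meta={"movement_url": response.urljoin(link)},
            )

    def parse_movement(self, response):
        pass  # stub; the real method builds a PoetItem per table row
```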
This method looks at each movement page and creates a new PoetItem for each poet found in the page's table | def parse_movement(self, response):
movement_name = response.meta['movement_name']
movement_url = response.meta['movement_url']
sresponse = scrapy.Selector(response)
#Because each movement page contains a table that has maximum of ten rows, we need to go to the next page
#in or... | [
"def parse_poet(self, response):\n item = response.meta['item']\n\n sresponse = scrapy.Selector(response)\n poetdata = sresponse.xpath('//div[@class=\"view-content\"]')\n\n #TODO: Clear empty strings from poet item fields\n\n item['poet_basicbio'] = poetdata[0].xpath('div/span//te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method scrapes data (bio, URL of all poems) from each poet page to continue creating the poet item | def parse_poet(self, response):
item = response.meta['item']
sresponse = scrapy.Selector(response)
poetdata = sresponse.xpath('//div[@class="view-content"]')
#TODO: Clear empty strings from poet item fields
item['poet_basicbio'] = poetdata[0].xpath('div/span//text()').extract(... | [
"def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method parses the poems found in the page of all poems available for a specific poet. The poet poems URL is the foreign key to the poets collection | def parse_poet_poems(self, response):
poet_poems_url = response.meta['poet_poems_url']
sresponse = scrapy.Selector(response)
#like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next
# page in order to extract all of the poems associat... | [
"def parse_poet(self, response):\n item = response.meta['item']\n\n sresponse = scrapy.Selector(response)\n poetdata = sresponse.xpath('//div[@class=\"view-content\"]')\n\n #TODO: Clear empty strings from poet item fields\n\n item['poet_basicbio'] = poetdata[0].xpath('div/span//te... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method parses each poem on the poem pages and finally yields the poem items | def parse_poet_poem(self, response):
poemitem = response.meta['poemitem']
sresponse = scrapy.Selector(response)
poemitem['poem_text'] = sresponse.xpath('//div[@property = "content:encoded"]//text()').extract()
poemitem['poem_copyright'] = sresponse.xpath('//div[@class = "poem-credit"]//p... | [
"def parse_poet_poems(self, response):\n poet_poems_url = response.meta['poet_poems_url']\n\n sresponse = scrapy.Selector(response)\n\n #like the movement pages, this page contains a table that has maximum of ten rows, we need to go to the next\n # page in order to extract all of the poe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a coroutine that does nothing, for when no sleep is needed. | async def no_sleep_coro():
pass | [
"def __call__(self, request):\n # Store coroutine to suppress 'unawaited' warning message\n self._unawaited_coroutine = asyncio.sleep(0)\n return self._unawaited_coroutine",
"def nothing():\n while True:\n yield",
"def awaitable(obj):\n yield from asyncio.sleep(0)\n return o... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A replacement sleep for Windows. Note that unlike `time.sleep` this may sleep for slightly less than the specified time. This is generally not an issue for Textual's use case. In order to create a timer that _can_ be cancelled on Windows, we need to create a timer and a separate event, and then we wait for either of th... | def sleep(secs: float) -> Coroutine[None, None, None]:
# Subtract a millisecond to account for overhead
sleep_for = max(0, secs - 0.001)
if sleep_for < 0.0005:
# Less than 0.5ms and its not worth doing the sleep
return no_sleep_coro()
timer = kernel32.CreateWait... | [
"def sleep(timeout, cancel_event):\n if cancel_event:\n cancel_event.wait(timeout=timeout)\n if cancel_event.isset():\n return False\n else:\n time.sleep(timeout)\n return True",
"def sleep(time: float):\n global canSleeped\n\n if (canSleeped):\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
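A pared-down, Windows-only sketch of the waitable-timer mechanism using a single handle (so it is not cancellable — cancellation needs the second event handle and `WaitForMultipleObjects`, as the document shows). The kernel32 calls are real Win32 APIs; the helper name is illustrative.

```python
import ctypes
from ctypes.wintypes import LARGE_INTEGER

INFINITE = 0xFFFFFFFF
kernel32 = ctypes.windll.kernel32  # Windows only

def win_sleep(secs: float) -> None:
    # Manual-reset waitable timer with no name and default security.
    timer = kernel32.CreateWaitableTimerW(None, True, None)
    # Due time is in 100-nanosecond units; a negative value means
    # "relative to now" rather than an absolute time.
    due = LARGE_INTEGER(int(-secs * 10_000_000))
    kernel32.SetWaitableTimer(timer, ctypes.byref(due), 0, None, None, False)
    try:
        kernel32.WaitForSingleObject(timer, INFINITE)
    finally:
        kernel32.CloseHandle(timer)
```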
Sets the cancel event so we know we can stop waiting for the timer. | def cancel_inner():
kernel32.SetEvent(cancel_event) | [
"def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()",
"def event_cancel(self):\n self.state = 'cancel'",
"def cancel(self, timerHandler):\n # TODO: thi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function responsible for waiting for the timer or the cancel event. | def wait_inner():
if (
kernel32.WaitForMultipleObjects(
2,
ctypes.pointer((HANDLE * 2)(cancel_event, timer)),
False,
INFINITE,
)
== WAIT_FAILED
):
time_... | [
"async def wait_for_cancel(self):\n await self._cancel",
"def sleep(timeout, cancel_event):\n if cancel_event:\n cancel_event.wait(timeout=timeout)\n if cancel_event.isset():\n return False\n else:\n time.sleep(timeout)\n return True",
"def _cancel(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check that {sysproid}.{appname}.{cell} is running. | def _test_app_running(self, running_set, sysproid, cell, appname):
full_app_name = '%s.%s.%s' % (sysproid, appname, cell)
self.assertIn(full_app_name, running_set) | [
"def check_running():\n name = None\n if KEY_PLATFORM == KEY_WINDOWS:\n name = \"fabricator.exe\"\n elif KEY_PLATFORM == KEY_LINUX:\n name = \"fabricator\"\n if name is None:\n raise FatalEchoException(\"Tool should be running on either windows or linux\")\n\n for pid in psutil.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is called during a move's `action_done`. It'll actually move a quant from the source location to the destination location, and unreserve if needed in the source location. This method is intended to be called on all the move lines of a move. This method is not intended to be called when editing a `done` move... | def _action_done(self):
# First, we loop over all the move lines to do a preliminary check: `qty_done` should not
# be negative and, according to the presence of a picking type or a linked inventory
# adjustment, enforce some rules on the `lot_id` field. If `qty_done` is null, we unlink
... | [
"def _action_done(self):\n\n # First, we loop over all the move lines to do a preliminary check: `qty_done` should not\n # be negative and, according to the presence of a picking type or a linked inventory\n # adjustment, enforce some rules on the `lot_id` field. If `qty_done` is null, we unlin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The response iterable as a write-only stream. | def stream(self):
return ResponseStream(self) | [
"def response_as_stream(self) -> Any:\n raise NotImplementedError # pragma: no cover",
"def getOutputStream(self):\r\n self._setHeaders()\r\n return self._response.getOutputStream()",
"def response_handling(self) -> global___Snippet.StreamingResponseHandling:",
"def __iter__(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns any live games currently in progress, as reported by the API | def _get_live_games(self):
response = requests.get(self._get_score_url())
if response.status_code == 200:
return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state] | [
"async def get_games(self):\n return await self._get_request(f'{self.base_url}/api/v2/games')",
"def get_games(self):\n r = requests.get(self.get_base_url() + GAMES_PATH)\n return r.json()",
"async def fetch_games(self):\n return await self.http.get_game_list()",
"def get_all_games... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
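A standalone sketch of the polling call, assuming the endpoint returns `{"games": [{"status": {"state": ...}, ...}]}` as the document implies; the timeout and the empty-list fallback for non-200 responses are additions.

```python
import requests

def get_live_games(score_url, desired_state="live"):
    response = requests.get(score_url, timeout=10)
    if response.status_code == 200:
        return [g for g in response.json()["games"]
                if g["status"]["state"] == desired_state]
    return []  # non-200 responses yield no games rather than None
```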