| query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | negatives (list, lengths 19–20) | metadata (dict) |
|---|---|---|---|
Returns the result of processing the given tsurgeon operations on the given trees. Returns a list of modified trees (i.e., the result is already processed). | def process_trees(trees, *operations):
request = build_request(trees, operations)
result = send_tsurgeon_request(request)
return [from_tree(t)[0] for t in result.trees] | [
"def process_trees(tree):\n name_target = tree[:-9].replace('trees/all_', '').replace('trees/pure_', '').replace('trees/recomb_', '')\n\n with open(tree, 'r') as check_tree:\n tree_txt = check_tree.read() \n\n if (tree_txt == 'not enough genomic information\\n'): \n return [name_target, np.N... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply configuration settings to celery application instance. | def configure_celery_app(celery_app: Celery) -> None:
configuration: ConfigParser = load_config_file()
celery_app.conf.update(
broker_url=configuration.get(section="celery", option="broker_url", fallback="redis://localhost:6379/0"), # noqa: E501
enable_utc=configuration.getboolean(section="cele... | [
"def init_app(self, app):\n # Instantiate celery and read config\n super(Celery, self).__init__(app.name,\n broker=app.config['CELERY_BROKER_URL'])\n # Update the config\n self.conf.update(app.config)",
"def init_celery(celery, app):\n celery.conf... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Apply settings from configuration file to flask application instance. | def configure_flask_application(application: Flask, settings_override: Dict = None) -> None:
configuration: ConfigParser = load_config_file()
server_name = configuration.get(section="flask", option="server_name", fallback="") # noqa: E501
if server_name:
application.config["SERVER_NAME"] = server_... | [
"def init_configuration(self):\n\n self.app.config.from_envvar('SETTINGS')",
"def add_settings(app):\n try:\n app.host = config[\"server\"][\"host\"]\n app.port = config[\"server\"][\"port\"]\n app.debug = config[\"server\"][\"debug\"]\n except KeyError:\n sys.exit(\"Config fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wrap a function or data for execution. Returns a `Task` without running the function. This `Task` can be used as an argument for other deferred functions to build a call graph. The call graph can then be executed by an `Executor`. `fun` can also be noncallable data, in which case the resulting `Task` will evaluate to t... | def defer(fun, *args, **kwargs):
if not callable(fun) and not isinstance(fun, Task):
        # return non-functions verbatim
return Task(_identity, [fun], {})
else:
return Task(fun, args, kwargs) | [
"def task(func):\n def task_wrapper(*args, **kwargs):\n return spawn(func, *args, **kwargs)\n return task_wrapper",
"def function(\n self,\n fn,\n *,\n args=(),\n kwargs=None,\n env: Optional[EnvType] = None,\n cwd: Optional[GenericPath] = None,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Schedule a task for later execution. The task is saved to the `{directory}/todo` directory. Use `run` to execute all the tasks in the `{directory}/todo` directory. If you want, you can attach metadata to the task, which you can retrieve as `task.metadata` after the task has been run. | def schedule(self, task, metadata=None):
if self._noschedule:
return
task.errorvalue = None
task.returnvalue = None
task.metadata = metadata
taskfilename = (str(uuid()) + '.pkl')
with (self._directory / 'todo' / taskfilename).open('wb') as f:
di... | [
"def run(self, task_name):\n if task_name not in celery_app.tasks:\n logger.error(\n \"{}: No task named '{}' is registered\".format(\n self.__name__, task_name))\n return\n\n logger.info(\n \"{}: Starting to schedule {} for each activ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute all tasks in the `{directory}/todo` directory. All tasks are executed in their own processes, and `run` makes sure that no more than `nprocesses` are active at any time. If `print_errors=True`, processes will print full stack traces of failing tasks. Since these errors happen on another process, this will not ... | def run(self, nprocesses=4, print_errors=False, save_session=False, autokill=None):
if save_session:
dill.dump_session(self._directory / 'session.pkl')
class TaskIterator:
def __init__(self, parent, todos, save_session):
self.parent = parent
self... | [
"def run_tasks(stdout):\n tasks = Task.objects.filter(time__lte=timezone.now() + timedelta(minutes=30), active=True)\n stdout.write(\"Working on {} tasks\".format(len(tasks)))\n for task in tasks.all():\n status = execute_task(task)\n if status == \"OK\":\n task.active = False\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Wait while `nprocesses` are running and return finished tasks. | def _finish_tasks(self, nprocesses, autokill):
while len(self._processes) >= nprocesses:
for file, proc in list(self._processes.items()):
if proc.poll() is not None:
task = self._retrieve_task(file)
try:
stdout, _ = proc... | [
"def _wait(self, timeout=None):\n if not self._processes:\n return\n start = time.time()\n while True:\n try:\n if all(not process.is_alive() for process in self._processes):\n # All the workers are dead\n return\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yield all tasks in `{directory}/todo`. | def todo_tasks(self):
for todo in (self._directory / 'todo').iterdir():
with todo.open('rb') as f:
yield dill.load(f) | [
"def done_tasks(self):\n for done in (self._directory / 'done').iterdir():\n with done.open('rb') as f:\n try: # safeguard against broken tasks:\n yield dill.load(f)\n except EOFError as err:\n print(f'skipping {done.name} ({err})... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yield all tasks in `{directory}/done`. | def done_tasks(self):
for done in (self._directory / 'done').iterdir():
with done.open('rb') as f:
try: # safeguard against broken tasks:
yield dill.load(f)
except EOFError as err:
print(f'skipping {done.name} ({err})') | [
"def todo_tasks(self):\n for todo in (self._directory / 'todo').iterdir():\n with todo.open('rb') as f:\n yield dill.load(f)",
"def fail_tasks(self):\n for fail in (self._directory / 'fail').iterdir():\n with fail.open('rb') as f:\n yield dill.load... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Yield all tasks in `{directory}/fail`. | def fail_tasks(self):
for fail in (self._directory / 'fail').iterdir():
with fail.open('rb') as f:
yield dill.load(f) | [
"def run_failed_cases(fail_dir='failed'):\n for path in Path(fail_dir).glob('**/*'):\n result = run_case(path)[1]\n print(str(path), result)",
"def iter_failed(self):\n for awsnexradfile in self.failed:\n yield awsnexradfile",
"def done_tasks(self):\n for done in (self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Remove `{directory}` and all todo/done/fail tasks. | def clean(self, clean_todo=True, clean_done=True, clean_fail=True):
def remove(dir):
if dir.exists():
for f in dir.iterdir():
f.unlink()
dir.rmdir()
if clean_todo:
remove(self._directory / 'todo')
if clean_fail:
... | [
"def clean_targets(task, dryrun):\n for target in sorted(task.targets, reverse=True):\n if os.path.isfile(target):\n print(\"%s - removing file '%s'\" % (task.name, target))\n if not dryrun:\n os.remove(target)\n elif os.path.isdir(target):\n if os.li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute `infile` and produce `outfile`. If `sessionfile` is given, load session from that file. Set `do_print` or `do_raise` to `True` if errors should be printed or raised. | def run_task(infile, outfile, sessionfile, do_print, do_raise):
if sessionfile:
dill.load_session(Path(sessionfile))
with infile.open('rb') as f:
task = dill.load(f)
try:
start_time = time.perf_counter()
task.returnvalue = evaluate(task)
task.errorvalue = None
... | [
"def play(quiet, session_file, shell, speed, prompt, commentecho, commentformat):\n run(session_file.readlines(),\n shell=shell,\n speed=speed,\n quiet=quiet,\n test_mode=TESTING,\n prompt_template=prompt,\n commentecho=commentecho,\n commentformat=commentformat)"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Execute a `task` and calculate its return value. `evaluate` walks the call chain to the `task`, and executes all the code necessary to calculate the return values. No `task` is executed more than once, even if several `PartOfTasks` lead to the same original `Task`. This is a recursive function that passes its state in... | def evaluate(task, known_results=None):
# because pickling breaks isinstance(task, Task)
    if 'Task' not in task.__class__.__name__:
return task
if known_results is None:
known_results = {}
if task._id not in known_results:
if task.__class__.__name__ in ['TaskItem', 'TaskAttribu... | [
"def evaluate_task_result(task_ex, task_spec, result):\n\n if result.is_error():\n return {\n 'error': result.error,\n 'task': {task_ex.name: result.error}\n }\n\n # Expression context is task inbound context + action/workflow result\n # accessible under key task name ke... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Search LDAP according to key parameters | def search(key=None, value=None):
if key and value:
result = LDAP_CONN.search_s(
LDAP_TOP_DN,
ldap.SCOPE_ONELEVEL,
filterstr='({0}={1})'.format(key, value)
)
elif not key and not value:
result = LDAP_CONN.search_s(
... | [
"def _ldap_search(self, query, attrlist=None):\n import ldap\n conn = ldap.initialize(self._conf['ldap_uri'])\n try: \n conn.simple_bind_s(self._conf['ldap_binddn'], self._conf['ldap_bindpw'])\n except ldap.LDAPError, e:\n print e\n raise\n except ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a log entry with timestamp `timestamp` to the LogDeque. Inserts in chronological order from latest to oldest. | def add(self, timestamp: datetime, entry: LogLine):
if len(self.entries) == 0:
self.entries.appendleft((timestamp, entry))
return self
i = 0
curr_entry_time, _ = self.entries[0]
while timestamp < curr_entry_time:
i += 1
if i > len(self.ent... | [
"def add_to_log(cls, timestamp, hours, minutes, seconds, notes):\n timestamp = LogEntry.normalize_timestamp(timestamp)\n # If/While the timestamp is already in the log...\n while timestamp in cls._log:\n # Resolve collision by incrementing it by one second.\n timestamp = c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return all entries added to the LogDeque since the given time `since_time`. Exploits the latest-to-oldest ordering of entries to avoid unnecessary iteration. | def peek(self, since_time: datetime) -> list:
peeked_entries: deque = deque()
for timestamp, entry in self.entries:
entry_was_on_or_after_requested_time = since_time <= timestamp
if entry_was_on_or_after_requested_time:
peeked_entries.append(entry)
el... | [
"def get_new_objects_since(query_set, last_update_time):\n\tif last_update_time == '0':\n\t\treturn query_set.all().order_by('created_at')\n\n\treturn query_set.filter(created_at__gt=last_update_time).order_by('created_at')",
"def pop_tasks_till_timestamp(self, timestamp):\n tasks = []\n next_task =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save something into the logger's current state. Provide an arbitrary number of keyword arguments with numerical values. | def store(self, **kwargs):
for k, v in kwargs.items():
if not(k in self.logger_dict.keys()):
self.logger_dict[k] = []
self.logger_dict[k].append(v) | [
"def save_kwargs(self, kwargs: dict) -> None:\n d = kwargs.copy()\n d[\"eps\"] = self.eps\n d[\"torch_dtype\"] = self.torch_dtype\n d[\"importance_sampler\"] = self.importance_nested_sampler\n save_to_json(d, os.path.join(self.output, \"config.json\"))",
"def dump(self, **kwargs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Objective function generator for a particle swarm optimization task. Functions are randomly generated based on a sum of 3 exponential functions and a sinusoidal function. | def generate_an_objective_function(x_min, x_max):
if (x_max <= x_min):
        raise Exception('x_max should be strictly greater than x_min.')
interval_dist = np.abs(x_max - x_min)
print(interval_dist)
m1 = np.random.randint(low= x_min, high = x_min + np.random.uniform(low=0.01, high=0.4)*inte... | [
"def main():\n\n # SETUP OBJECTIVE FUNCTION ############################\n\n if objective_func == \"sphere\":\n # Sphere ##########################\n from ailib.optimization.function.sphere import Function\n #f = Function(1)\n f = Function(2)\n #f = Function(10)\n\n elif ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
has_lab_access() -> bool. Is this user allowed access to the lab? Users with lab access can add/modify/remove machines and update other physical lab information. | def has_lab_access(self):
return True | [
"def isUserAllowed(self):\n security = getSecurityManager()\n portal = getToolByName(self, 'portal_url').getPortalObject()\n return security.checkPermission(permissions.USE_LINK_MANAGEMENT,\n portal)",
"def has_access(self, user):\n with TRN:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns the filename of a randomly chosen image in dir | def random_file(dir):
images = [f for f in listdir(dir) if isimage(f)]
return choice(images) | [
"def random_file(dir):\r\n images = [f for f in listdir(dir) if isimage(f)]\r\n return choice(images)",
"def random_picture(self, place):\n random_filename = random.choice([\n x for x in os.listdir(place)\n if os.path.isfile(os.path.join(place, x))\n ])\n return ra... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy all parameters of a Resnet model from a pretrained model (except for the last fully connected layer) typeBlock == "BasicBlock" for resnet18 and resnet34 or "Bottleneck" for resnet50, resnet101 and resnet152 | def copyFeaturesParametersResnet(net, netBase, nbBlock1, nbBlock2, nbBlock3, nbBlock4, typeBlock="Bottleneck"):
if typeBlock not in ["BasicBlock", "Bottleneck"]:
print ('error in the block name, choose "BasicBlock", "Bottleneck"')
return
print ("copy net.conv1", net.conv1)
net.conv1.weight... | [
"def resnet34_v2(pretrained=False, **kwargs):\n model = ResNetV2(BasicBlockV2, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model",
"def resnet56(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [9,9,9], **kwargs)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize Saif Mohammad NRC Wordlevel Emotion Lexicon featurizer | def __init__(self, lexicon_path=nrc_emotion_lexicon_path):
self._id = 'NRCEmotionWordlevel'
self._lexicon_map = self.create_lexicon_mapping(lexicon_path) | [
"def __init__(self):\n nltk.download(\"vader_lexicon\", quiet=True)\n self.vader = SentimentIntensityAnalyzer()",
"def init(self, trainfiles):\n for filepaths in trainfiles:\n\n # load files and tokenize words in sentences\n with open(filepaths, \"r\") as text:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The file used in this test has only 19 diagnostics records in the second set. Twenty are expected. | def test_too_few_diagnostics_records(self):
log.debug('===== START TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')
# Test the telemetered version
log.debug('----- TELEMETERED -----')
with open(os.path.join(RESOURCE_PATH, 'too_few_20140813.velpt.log'), 'rb') as file_handle:
num_... | [
"def test_general_report_parsing(self):\n reportTypes = ['URINE MICROBIOLOGY',\n 'BLOOD CULTURE MICROBIOLOGY',\n 'CATHETER TIP MICROBIOLOGY',\n 'CEREBROSPINAL FLUID MICROBIOLOGY',\n 'MICROBIOLOGY FROM SUPERFICIAL SITES',\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The file used in this test has 21 diagnostics records in the second set. Twenty are expected. | def test_too_many_diagnostics_records(self):
log.debug('===== START TEST TOO MANY DIAGNOSTICS RECORDS =====')
# Test the telemetered version
log.debug('----- TELEMETERED -----')
with open(os.path.join(RESOURCE_PATH, 'too_many_20140813.velpt.log'), 'rb') as file_handle:
num_... | [
"def test_full(self):\n with open(os.path.join(RESOURCE_PATH, 'unit_514-2014-351-2-0.mrg'), 'rU') as file_handle:\n parser = GliderParser(self.config, file_handle, self.exception_callback)\n\n particles = parser.get_records(40)\n # requested more than are available in file, s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempt to build a parser with a bad configuration. | def test_bad_configuration(self):
log.debug('===== START TEST BAD CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, '20140813.velpt.log'), 'rb') as file_handle:
with self.assertRaises(ConfigurationException):
parser = VelptAbDclParser(self._bad_parser_config,
... | [
"def test_is_not_valid_parser_error(self):\n class MySchema(Schema):\n foo = IntOption()\n\n def mock_parse_all(self):\n assert False\n\n schema = MySchema()\n config = StringIO(\"[__main__]\\nfoo = 5\")\n parser = SchemaConfigParser(schema)\n parser.p... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The file used in this test has a record with a bad checksum. This results in 49 particles being retrieved instead of 50. The standard 20140813.velpt.log was used; the checksum of the third velocity record was corrupted to make it fail. | def test_bad_velocity_checksum(self):
log.debug('===== START TEST FOUND BAD VELOCITY CHECKSUM =====')
# Test the telemetered version
log.debug('----- TELEMETERED -----')
with open(os.path.join(RESOURCE_PATH, 'bad_velocity_checksum_20140813.velpt.log'), 'rb') as file_handle:
... | [
"def test_bad_data(self):\n\n # the first data record in this file is corrupted and will be ignored\n # we expect the first 2 particles to be the metadata particle and the\n # intrument particle from the data record after the corrupted one\n with open(os.path.join(RESOURCE_PATH, '1107941... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uploads task files in dirpath with prefix to UHRS; each hit will be judged by num_judges workers | def upload_tasks_from_folder(self, task_group, dirpath, prefix="",
consensus_thresh=0.0, num_judges=5, priority=1000):
if self.state != State.IDLE:
raise Exception("cannot upload from state {}".format(self.state))
self.state = State.UPLOAD_START
task_... | [
"def put_jobs(base_path=None):\n logger.info(\"Begin to put jobs in queue......\")\n base_path = os.path.abspath(base_path)\n files = os.listdir(base_path)\n queueLock.acquire()\n for f in files:\n serial = f\n f = os.path.join(base_path, f)\n d = {\"file\": f, \"serial\": serial... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Difference from Trend: simple difference of close from trend values | def difference_from_trend(position: pd.DataFrame, trend: list) -> list:
diff_from_trend = []
for i, trend_val in enumerate(trend):
diff_from_trend.append(np.round(position['Close'][i] - trend_val, 3))
return diff_from_trend | [
"def test_trend_down(self):\n self.assertEquals(self.data_item.compute_trend(19.9), -1)",
"def test_trend_up(self):\n self.assertEquals(self.data_item.compute_trend(20.1), 1)",
"def test_trend_same(self):\n self.assertEquals(self.data_item.compute_trend(20), 0)",
"def to_trend(avg):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Construct a Unet generator | def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
# REW: 先求最底层的跨层连接,再逐渐往上层去 大神级代码啊!
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=Non... | [
"def unet_generator(size=img_width, in_ch=img_depth, out_ch=img_depth, nf=64):\n max_nf = 8 * nf\n def block(x, s, nf_in, use_batchnorm=True, nf_out=None, nf_next=None):\n assert s>=2 and s%2==0\n if nf_next is None:\n nf_next = min(nf_in*2, max_nf)\n if nf_out is None:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test adding 2D dask image. | def test_dask_2D(make_napari_viewer):
viewer = make_napari_viewer()
da.random.seed(0)
data = da.random.random((10, 15))
viewer.add_image(data)
assert np.all(viewer.layers[0].data == data) | [
"def test_dask_nD(make_napari_viewer):\n viewer = make_napari_viewer()\n\n da.random.seed(0)\n data = da.random.random((10, 15, 6, 16))\n viewer.add_image(data)\n assert np.all(viewer.layers[0].data == data)",
"def test_cli_add_image_data():\n pth = io3d.datasets.join_path(\n \"medical\",... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test adding nD dask image. | def test_dask_nD(make_napari_viewer):
viewer = make_napari_viewer()
da.random.seed(0)
data = da.random.random((10, 15, 6, 16))
viewer.add_image(data)
assert np.all(viewer.layers[0].data == data) | [
"def test_dask_2D(make_napari_viewer):\n viewer = make_napari_viewer()\n\n da.random.seed(0)\n data = da.random.random((10, 15))\n viewer.add_image(data)\n assert np.all(viewer.layers[0].data == data)",
"def test_basic_add(self):\n data_loader = self.get_data_loader()\n\n count = 0\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test adding 2D zarr image. | def test_zarr_2D(make_napari_viewer):
viewer = make_napari_viewer()
data = zarr.zeros((200, 100), chunks=(40, 20))
data[53:63, 10:20] = 1
# If passing a zarr file directly, must pass contrast_limits
viewer.add_image(data, contrast_limits=[0, 1])
assert np.all(viewer.layers[0].data == data) | [
"def test_zarr_nD(make_napari_viewer):\n viewer = make_napari_viewer()\n\n data = zarr.zeros((200, 100, 50), chunks=(40, 20, 10))\n data[53:63, 10:20, :] = 1\n # If passing a zarr file directly, must pass contrast_limits\n viewer.add_image(data, contrast_limits=[0, 1])\n assert np.all(viewer.layer... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test adding nD zarr image. | def test_zarr_nD(make_napari_viewer):
viewer = make_napari_viewer()
data = zarr.zeros((200, 100, 50), chunks=(40, 20, 10))
data[53:63, 10:20, :] = 1
# If passing a zarr file directly, must pass contrast_limits
viewer.add_image(data, contrast_limits=[0, 1])
assert np.all(viewer.layers[0].data ==... | [
"def test_zarr_2D(make_napari_viewer):\n viewer = make_napari_viewer()\n\n data = zarr.zeros((200, 100), chunks=(40, 20))\n data[53:63, 10:20] = 1\n # If passing a zarr file directly, must pass contrast_limits\n viewer.add_image(data, contrast_limits=[0, 1])\n assert np.all(viewer.layers[0].data =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test adding 2D xarray image. | def test_xarray_2D(make_napari_viewer):
viewer = make_napari_viewer()
np.random.seed(0)
data = np.random.random((10, 15))
xdata = xr.DataArray(data, dims=['y', 'x'])
viewer.add_image(data)
assert np.all(viewer.layers[0].data == xdata) | [
"def test_xarray_nD(make_napari_viewer):\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n data = np.random.random((10, 15, 6, 16))\n xdata = xr.DataArray(data, dims=['t', 'z', 'y', 'x'])\n viewer.add_image(xdata)\n assert np.all(viewer.layers[0].data == xdata)",
"def test_zarr_2D(make_nap... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test adding nD xarray image. | def test_xarray_nD(make_napari_viewer):
viewer = make_napari_viewer()
np.random.seed(0)
data = np.random.random((10, 15, 6, 16))
xdata = xr.DataArray(data, dims=['t', 'z', 'y', 'x'])
viewer.add_image(xdata)
assert np.all(viewer.layers[0].data == xdata) | [
"def test_xarray_2D(make_napari_viewer):\n viewer = make_napari_viewer()\n\n np.random.seed(0)\n data = np.random.random((10, 15))\n xdata = xr.DataArray(data, dims=['y', 'x'])\n viewer.add_image(data)\n assert np.all(viewer.layers[0].data == xdata)",
"def test_zarr_nD(make_napari_viewer):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method finds all object files mentioned in each section. It classifies every object file into a specific software module. It then creates a dictionary of modules. The data of each module key is another dictionary that has object files as keys and a list of [section, size, sub_section, address] lists | def analyse_modules(section_data_dict):
modules = {}
seen_sections = OrderedDict()
for section in section_data_dict:
for [sub_section, address, size, object_name] in section_data_dict[section]["contents"]:
sw_module = classify_by_object_file(object_name)
if sw_module not in m... | [
"def updateSectionMap(self):\n self.sectionMap={}\n BLOCKSIZE=512\n #TODO: modify struct to be 'iilb' or something\n sections=self.fileReadSectionKeys(76,KEYS_SECTIONS,fixedOffsetBytes=16)\n for sectionName in sections.keys():\n blockIndex, entrySize, entryCount = secti... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get step size for given total item size and batch size. | def get_step_size(total_items, batch_size):
return math.ceil(total_items / batch_size) | [
"def get_step_size(total_items, batch_size):\n return int(np.ceil(total_items / batch_size))",
"def step_size(self) -> Timedelta:\n assert self._step_size is not None, \"No step size provided\"\n return self._step_size",
"def get_stepsize(self) -> float:\n return self.m_stepsize",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Boolean to indicate if customer deceased date is populated | def is_deceased(deceased_date: str) -> bool:
return False if deceased_date == '' else True | [
"def has_due_date(self):\n return True if self.due_date is not None else False",
"def can_charge(customer):\n if customer.date_purged is not None:\n return False\n if customer.default_source:\n return True\n return False",
"def _check_date(self, cr, uid, ids):\n for deleg in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Absolute arrears balance for an account divided by the regular monthly payment amount. Rounded to 1 decimal place. The minimum value should be 0. | def months_in_arrears(
arrears_balance: float,
regular_payment_amount: float) -> float:
    if regular_payment_amount == 0 or math.isnan(regular_payment_amount):
return 0
else:
return max(round(arrears_balance/regular_payment_amount, 1), 0) | [
"def minimum_monthly_payment(balance, mrate):\r\n return balance*mrate",
"def minpayment1(balance,annualInterestRate):\n \n epsilon=1\n monthlyPayment= -10\n \n endBalance=1\n ncount=0\n while endBalance>0:\n monthlyPayment += 10 \n endBalance=FinalBalance(balance,annu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a Y/N string field to boolean | def yn_bool(yn_flag: str) -> bool:
return True if yn_flag.upper() == 'Y' else False | [
"def yn(value: bool) -> str:\n return \"Y\" if value else \"N\"",
"def str_to_bool(attr: str) -> bool:\n if attr != \"True\" and attr != \"False\":\n raise ValueError(\n \"The attribute is not a string representation of a Python\"\n \"bool ('True' or 'False')\"\n )\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add a boolean flag to argparse parser. | def boolean_flag(parser, name, default=False, help=None):
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest) | [
"def boolean_flag(\n parser, name: str, default: bool = False, help: str = None\n) -> None:\n dest = name.replace(\"-\", \"_\")\n parser.add_argument(\n \"--\" + name,\n action=\"store_true\",\n default=default,\n dest=dest,\n help=help\n )\n parser.add_argument(\"-... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run command, return output as string. | def run_command(cmd):
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
return output.decode("ascii") | [
"def run(command):\n return os.popen(command).read()",
"def command_with_output(command: str) -> str:\n output = subprocess.run(\n command, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n ).stdout\n return f\"# {command}\\n\\n{output}\"",
"def run(command):\n os.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns list of available GPU ids. | def list_available_gpus():
output = run_command("nvidia-smi -L")
# lines of the form GPU 0: TITAN X
gpu_regex = re.compile(r"GPU (?P<gpu_id>\d+):")
result = []
for line in output.strip().split("\n"):
m = gpu_regex.match(line)
assert m, "Couldnt parse "+line
result.append(int(... | [
"def list_visible_gpu_types() -> List[str]:\n cmd = [\"nvidia-smi\", \"-L\"]\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n check=True)\n device_strs = res.stdout.decode(\"utf-8\").splitlines()\n devices = [x.split(\":\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns map of GPU id to memory allocated on that GPU. | def gpu_memory_map():
output = run_command("nvidia-smi")
gpu_output = output[output.find("GPU Memory"):]
# lines of the form
# | 0 8734 C python 11705MiB |
memory_regex = re.compile(r"[|]\s+?(?P<gpu_id>\d+)\D+?(?P<pid>\d+).+[ ](?P<gpu_memory>\d+)Mi... | [
"def get_gpu_mem(gpu_id=0):\n py3nvml.py3nvml.nvmlInit()\n gpu_handle = py3nvml.py3nvml.nvmlDeviceGetHandleByIndex(gpu_id)\n gpu_mem_info = py3nvml.py3nvml.nvmlDeviceGetMemoryInfo(gpu_handle)\n gpu_utilization_info = py3nvml.py3nvml.nvmlDeviceGetUtilizationRates(gpu_handle)\n gpu_mem = {}\n gpu_me... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles unsupported media requests with 415_UNSUPPORTED_MEDIA_TYPE | def mediatype_not_supported(error): # pragma: no cover
app.logger.warning(str(error))
return (
jsonify(
status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
error="Unsupported media type",
message=str(error),
),
status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
) | [
"def check_content_type(media_type):\n content_type = request.headers.get(\"Content-Type\")\n if content_type and content_type == media_type:\n return\n app.logger.error(\"Invalid Content-Type: %s\", content_type)\n abort(\n status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n f\"Content-Type ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve a single Customer. This endpoint will return a Customer based on its id | def get(self, customer_id):
app.logger.info("Request to Retrieve a customer with id [%s]", customer_id)
customer = Customer.find(customer_id)
if not customer:
raise NotFound("404 Not Found: Customer with the id was not found.")
return customer.serialize(), status.HTTP_200_OK | [
"def get_customer(self):\n if self.customer_id:\n return self.client.customers.get(self.customer_id)",
"async def get_customer_by_id(self,id):\n async with self._db.acquire() as conn:\n data= await conn.execute(Customer.select().where((Customer.c.customer_id == id)))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Delete a Customer. This endpoint will delete a Customer based on the id specified in the path | def delete(self, customer_id):
app.logger.info('Request to Delete a customer with id [%s]', customer_id)
customer = Customer.find(customer_id)
if customer:
customer.delete()
return '', status.HTTP_204_NO_CONTENT | [
"def delete_customer(customer_id): # noqa: E501\n db = get_db()\n\n try:\n _id = ObjectId(customer_id)\n except Exception:\n return 'Not found', 404\n cust = db['Customer'].find_one({\"_id\": _id})\n if cust is None:\n return 'Not found', 404\n result = db['Customer'].delete_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Update a Customer. This endpoint will update a Customer based on the body that is posted | def put(self, customer_id):
app.logger.info('Request to Update a customer with id [%s]', customer_id)
customer = Customer.find(customer_id)
if not customer:
api.abort(status.HTTP_404_NOT_FOUND, "Customer with id '{}' was not found.".format(customer_id))
app.logger.debug('Payl... | [
"def update_customer(body, customer_id): # noqa: E501\n if connexion.request.is_json:\n body = RequestCustomer.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'",
"def update_customer(customer_id='', data=None, api_key=''):\n url = 'https://www.asaas.com/api/v2/custo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a Customer. This endpoint will create a Customer based on the data in the body that is posted | def post(self):
app.logger.info('Request to Create a Customer')
customer = Customer()
app.logger.debug('Payload = %s', api.payload)
customer.deserialize(api.payload)
customer.create()
app.logger.info('Customer with new id [%s] saved!', customer.id)
location_url = ... | [
"def CreateCustomer(self, request, context):\n\n customers.append(request)\n\n return customer_pb2.CustomerResponse(id=request.id, success=True)",
"def test_create_customer_with_no_name(self):\n new_customer = {\"password\": \"bar\",\n \"first_name\": \"value1\", \"last_name\":... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Suspend a Customer. This endpoint will suspend a customer based on its ID | def put(self, customer_id):
app.logger.info("Request to suspend customer with id: %s", customer_id)
customer = Customer.find(customer_id)
if not customer:
raise NotFound("Cus...tomer with id '{}' was not found.".format(customer_id))
customer.active = False
customer.up... | [
"def suspend_customers_services():\n service = get_service_instance()\n config = service.config\n now = timezone.now()\n invoicing_config = InvoicingConfig.objects.all()[0]\n connection = mail.get_connection()\n try:\n connection.open()\n except:\n logger.error(u\"Connexion error\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new constituent type to this syntax tree. If the given constituent is already in this tree, nothing happens. | def add_constituent(self, constituent: str):
if constituent not in self.constituents:
self.constituents.append(constituent) | [
"def add_node(self,node_type):\n #Our start node is more specific than this... Need to have another validation method\n if node_type not in node_types:\n raise Exception('node type must be one of greent.node_types')\n self.definition.node_types.append(node_type)",
"def put(self, l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a new rule to this syntax tree. The given constituent and subconstituents are added to the tree if they are not already present. A rule is used to define how a constituent should be broken down into other constituents when forming a sentence. A word is selected when a constituent cannot be broken down any further. | def add_rule(self, constituent: str, subconstituents: list):
self.add_constituent(constituent)
for sub in subconstituents:
self.add_constituent(sub)
self.rules.append({'constituent': constituent,
'subconstituents': subconstituents}) | [
"def add(self, rule):\r\n self.insertRule(rule, index=None)",
"def addRule(self, state, char, target):\n try:\n st = self._states[state]\n except:\n raise ValueError(\"Undefined state '\" + state + \"'\", 5)\n try:\n self._states[target]\n except... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts the prompting process. | def start(msg, window, path, callback):
if not path.endswith(os.sep):
path += os.sep
path = expanduser(path)
map_window_to_ctx[window.id()] = PromptContext(msg, path, callback)
window.run_command('dired_prompt') | [
"def start(self):\n while self.line:\n split_line = self.line.strip().split(' ')\n self._handle_cmd(split_line[0], split_line[1:])\n self._prompt_for_cmd()\n self.line = sys.stdin.readline()",
"def prompt(self):\n\t\t_globals._console.write(f'{self.prompt_str} ')... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns True if the current value is a complete directory name without a trailing separator, and there are no other possible completions. | def _needs_sep(self, path):
if not isdir(path) or path.endswith(os.sep):
return False
partial = basename(path)
path = dirname(path)
if any(n for n in os.listdir(dirname(path)) if n != partial and n.startswith(partial) and isdir(join(path, n))):
# There are oth... | [
"def isDir(self):\r\n if self._path=='':\r\n return True\r\n return os.path.isdir(self._path)",
"def is_directory_name(name):\n return name.endswith(posixpath.sep)",
"def _is_incremental_dir(directory):\n walker = os.walk(directory)\n root, dirs, files = next(walker)\n asser... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Split the path into the directory to search and the prefix to match in that directory. If the path is completely invalid, (None, None) is returned. | def _parse_split(self, path):
prefix = ''
if not path.endswith(os.sep):
prefix = basename(path)
path = dirname(path)
if not isdir(path):
return (None, None)
return (path, prefix) | [
"def _process_path_prefix(path_prefix):\n _validate_path(path_prefix)\n if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):\n raise ValueError('Path prefix should have format /bucket, /bucket/, '\n 'or /bucket/prefix but got %s.' % path_prefix)\n bucket_name_end = path_prefix.find('/', 1)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Extract a list of athlete activities from Strava API. | def list_activities(self, page=1, per_page=30, **kwargs):
if self.credentials["expires_at"] <= time.time():
self._refresh_credentials()
api_response = []
try:
api_instance = swagger_client.ActivitiesApi()
api_instance.api_client.configuration.access_token = ... | [
"def get_athlete_details(an_athlete):\n athlete_results = [] \n url = base_url + '/v3/athletes/{0}'.format(an_athlete)\n r = requests.get(url, headers=extra_headers)\n results = r.json()\n athlete_results = {'athlete_id': an_athlete,\n 'athlete_sex': results.get('sex'),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
XML ElementTree node creation helper function. | def _create_new_node(doc, tag, parent=None):
if parent is None:
parent = doc.getroot() # what if there is no root?
if parent is None:
element = ElementTree.Element(tag)
# noinspection PyProtectedMember
doc._setroot(element)
return element
else:
return Elemen... | [
"def createNode(nodeIdentifier, owner, config):",
"def create_element(doc, parent, tag, value=None, attributes=None):\n ele = doc.createElement(tag)\n parent.appendChild(ele)\n if value:\n text = doc.createTextNode(u\"%s\" % value)\n ele.appendChild(text)\n if attributes:\n [ele.s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
XML ElementTree text node creation helper function | def _create_text_node(doc, tag, value, parent=None):
node = _create_new_node(doc, tag, parent=parent)
node.text = value
return node | [
"def createTextNode(text):\n return Text(text)",
"def _text(data):\n\n # The following initialization approach requires Python 2.3 or higher.\n t = xml.dom.minidom.Text()\n t.data = data\n\n return t",
"def create_sub_child(root, node_name, text=None, **kwargs):\n sub_child = etree.Sub... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the numeric format string for the given attribute. | def set_numeric_format(self, attribute, format_string):
# Extend this to include format function capabilities. Maybe numeric_format is not the right name?
if attribute not in self._fields:
raise ValueError('attribute {} is not permitted for class {}'.format(attribute, self.__class__.__name__... | [
"def num_format(self, name, format):\n self._formats[name] = super().add_format({'num_format': format})",
"def set_number_format(self, kind=\"float\", *args, **kwargs):\n if kind==\"float\":\n formatter=format.FloatFormatter(*args,**kwargs)\n elif kind==\"int\":\n format... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a formatting function for the given attribute. This will default to `str` if no other option is presented. | def _get_formatter(self, attribute):
entry = self._numeric_format.get(attribute, None)
if isinstance(entry, string_types):
fmt_str = '{0:' + entry + '}'
return fmt_str.format
elif callable(entry):
return entry
else:
return str | [
"def format(stringArg=\"string\"):\n pass",
"def safer_getattr(object, name, default=None, getattr=getattr):\n if name in ('format', 'format_map') and (\n isinstance(object, str) or\n (isinstance(object, type) and issubclass(object, str))):\n raise NotImplementedError(\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform the basic validity check on the direct attributes with no recursive checking. Returns bool True if all requirements AT THIS LEVEL are satisfied, otherwise False. | def _basic_validity_check(self):
all_required = True
for attribute in self._required:
present = (getattr(self, attribute) is not None)
if not present:
logging.error(
"Class {} is missing required attribute {}".format(self.__class__.__name__, a... | [
"def isValid(self,root):\n if not Vocabulary.isValid(self,root):\n return(False)\n for a in root.getiterator():\n if a==root:\n continue\n uid = a.attrib['type']\n if not a.attrib.has_key('group'):\n printError(self.__class__,in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Perform a recursive validity check on all present attributes. Returns bool True if requirements are recursively satisfied BELOW THIS LEVEL, otherwise False. | def _recursive_validity_check(self):
def check_item(value):
if isinstance(value, (Serializable, SerializableArray)):
return value.is_valid(recursive=True)
return True
valid_children = True
for attribute in self._fields:
val = getattr(self, at... | [
"def _basic_validity_check(self):\n\n all_required = True\n for attribute in self._required:\n present = (getattr(self, attribute) is not None)\n if not present:\n logging.error(\n \"Class {} is missing required attribute {}\".format(self.__class... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets a bytes array, which corresponds to the xml string in utf8 encoding, identified as using the namespace given by `urn` (if given). | def to_xml_bytes(self, urn=None, tag=None, check_validity=False, strict=DEFAULT_STRICT):
if tag is None:
tag = self.__class__.__name__
etree = ElementTree.ElementTree()
node = self.to_node(etree, tag, check_validity=check_validity, strict=strict)
if urn is not None:
... | [
"def to_byte_array(self, http_string): # NOTHING TO BE DONE HERE ############\r\n return bytes(http_string, \"UTF-8\")",
"def to_byte_array(self, http_string): # NOTHING TO BE DONE HERE ############\r\n return bytes(http_string, \"UTF-8\")\r\n\r\n #########################################... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets an xml string with utf8 encoding, identified as using the namespace given by `urn` (if given). | def to_xml_string(self, urn=None, tag=None, check_validity=False, strict=DEFAULT_STRICT):
return self.to_xml_bytes(urn=urn, tag=tag, check_validity=check_validity, strict=strict).decode('utf-8') | [
"def encodeXMLName(name):\n namespace, name = name\n if namespace is None: return name.encode(\"utf-8\")\n return (u\"{%s}%s\" % (namespace, name)).encode(\"utf-8\")",
"def get_xml_encoding(source):\n with get_xml_iterator(source) as iterator:\n start, tag, data, pos = next(iterator)\n i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create from an array type object. | def from_array(cls, array):
raise NotImplementedError | [
"def _create_array_of_type(t):\n if t in _array_types:\n return _array_types[t]()\n\n array_type_name = 'ArrayOf%s' % t\n array_type = type(array_type_name, (DataObject,), {})\n\n def __init__(self):\n super(array_type, self).__init__(array_type_name)\n setattr(self, t, [])\n\n s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generates feature matrix of shape [num_samples, num_timesteps, num_features] | def generate_feature_matrix(waveform):
# Output after framing has shape:
# floor((audio_length + 2 * pad_length - frame_length) / hop_length) + 1
# Librosa takes pad_length = floor(frame_length / 2).
pad_length = samples_per_frame // 2
num_timesteps = (waveform.shape[0] + 2 * pad_length - samples_p... | [
"def _generate_feature_matrix(self):\n\n mask = None\n if self.training:\n # generate a randomized mask where 1 represents voxels used for training\n # the mask needs to be binary, where the value 1 is considered as a voxel which is to be loaded\n # we have following l... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The JSON Schema validator. If `fastjsonschema` is not installed, this will raise a RuntimeError. | def validator(self) -> Callable: # pragma: nocover
if fastjsonschema:
return fastjsonschema.compile(self.primitive())
raise RuntimeError(
"Can't compile validator, 'fastjsonschema' is not installed."
) | [
"def schema_validator(self, data, schema_path):\n\t\timport jsonschema\n\n\t\tschema = load_data(schema_path)\n\t\ttry:\n\t\t\tjsonschema.validate(data, schema)\n\t\texcept jsonschema.ValidationError as e:\n\t\t\tself.validation_error(e.message, e.path)",
"def validate(self) -> Tuple[bool, Optional[ValidationErro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Watches a Cloud Formation stack for events and returns a Generator for consuming the events. | def watch(self, fetch):
seen = set()
if fetch:
# fetch previous events
initial_events = self.get_events()
for e in initial_events:
yield e
seen.add(e.event_id)
# start looping and dump the new events
complete = False
... | [
"def read(self):\n while True:\n # wait for events 5s max to detect changes in the devices map\n rlist, _, _ = select.select(\n list(self.devices.values()), [], [], 5\n )\n for device in rlist:\n # check the device is still valid in ca... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Variable object. label: the variable name (e.g. x0); varType: the type of variable (e.g. Pose3); N: the number of particles to use (e.g. 50); userLabels: user-specified labels to describe the variable | def __init__(self, label, varType, N=50, userLabels=['']):
self.label = label
self.varType = varType
self.N = N
self.userLabels = userLabels | [
"def variable_label(var_name: str, description: str) -> OrderedDict:\n return OrderedDict([\n ('@varname', var_name),\n ('#text', description)\n ])",
"def createvar(ds, name, varlabel, format, measlevel):\n \n try:\n index = ds.varlist[name].index\n ds.varlist[index].type =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
MOVE SHIPS THAT COULD BE IN BATTLE WITHIN THE NEXT FIVE TURNS. FIRST, PLACE IN A HEAP BASED ON DISTANCE FROM THE CLOSEST ENEMY SECTION, THEN MOVE EACH ONE, FROM CLOSEST TO ENEMY FIRST (TO AVOID COLLISION) | def move_battling_ships(MyMoves):
battle_heap = []
## GET SHIPS TO BE MOVED
# for k, v in MyMoves.myMap.ships_battling.items():
# if len(v) > 0:
# handled_ships.update(v)
## FASTER WAY THAN LOOPING ABOVE
## BASICALLY COMBINING ALL SET FROM THE DICTIONARY
#handled_ships = se... | [
"def move_battle_heap(MyMoves, battle_heap):\n while battle_heap:\n section_distance, enemy_distance, ship_id, target_coord, over_thrust, strong_enough, enemy_val = heapq.heappop(battle_heap)\n\n if ship_id not in MyMoves.myMap.ships_moved_already:\n\n ship_coords = MyMoves.myMap.data_sh... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CHECK A SECTION, BASED ON COORDS PROVIDED, IF IT'S STRONG ENOUGH TO ATTACK FORWARD | def check_if_strong_enough(MyMoves, middle_coord):
## GET ACTUAL COORDS/DISTANCE OF THE ENEMY
value = MyMoves.myMatrix.matrix[MyMoves.myMap.my_id][0] ## 1 IS FOR HP MATRIX
# v_enemy = MyCommon.get_section_with_padding(value, ship_coords, MyCommon.Constants.ATTACKING_RADIUS, 0)
v_enemy = MyCommon.get_s... | [
"def legal(ini, fin):\n \"\"\"To check that, we move the piece at ini to fin without displaying and check whether that configuration puts\n the piece's king under check.\"\"\"\n\n if not on_attack(ini, fin):\n return False\n piece_at_fin = ch[fin[0]][fin[1]]\n move(ini, fin)\n if ch[fin[0]]... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
MOVE SHIPS ACCORDING TO THE HEAP PROVIDED. IF SHIPS ARE IN IMMINENT BATTLE, WE DETERMINE IF WE ARE MOVING FORWARD BASED ON WHETHER WE ARE STRONG ENOUGH (DETERMINED EARLIER) OR IF THE SHIP IS DYING (SO JUST KAMIKAZE) | def move_battle_heap(MyMoves, battle_heap):
while battle_heap:
section_distance, enemy_distance, ship_id, target_coord, over_thrust, strong_enough, enemy_val = heapq.heappop(battle_heap)
if ship_id not in MyMoves.myMap.ships_moved_already:
ship_coords = MyMoves.myMap.data_ships[MyMoves... | [
"def move_battling_ships(MyMoves):\n battle_heap = []\n\n ## GET SHIPS TO BE MOVED\n # for k, v in MyMoves.myMap.ships_battling.items():\n # if len(v) > 0:\n # handled_ships.update(v)\n\n ## FASTER WAY THAN LOOPING ABOVE\n ## BASICALLY COMBINING ALL SET FROM THE DICTIONARY\n #han... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GET CLOSEST SECTION WITH ENEMY. CAN SET DOCKED ONLY TO FIND CLOSEST DOCKED ENEMY SHIPS | def closest_section_with_enemy(MyMoves, ship_id, docked_only=False, move_now=False):
def get_closest_section_enemy(MyMoves, least_distance, closest_section):
for section in MyMoves.myMap.sections_with_enemy:
distance = MyMoves.EXP.sections_distance_table[ship_section][section[0]][section[1]]
... | [
"def check_if_strong_enough(MyMoves, middle_coord):\n\n ## GET ACTUAL COORDS/DISTANCE OF THE ENEMY\n value = MyMoves.myMatrix.matrix[MyMoves.myMap.my_id][0] ## 1 IS FOR HP MATRIX\n # v_enemy = MyCommon.get_section_with_padding(value, ship_coords, MyCommon.Constants.ATTACKING_RADIUS, 0)\n v_enemy = MyCo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compute the length of longest substring with at most k distinct characters. | def lengthOfLongestSubstringKDistinct(self, s, k):
if len(s) < 2:
return len(s)
if k < 0: return -1
# Define two pointers
fast = slow = 0
# Define a hash table for storing appeared elements and their index.
# The size of the hash table is k.
... | [
"def find_long_substr_len(self, s):\r\n if not s:\r\n return 0\r\n\r\n l = 0\r\n n = len(s)\r\n p = float(\"-inf\")\r\n\r\n for r in range(n):\r\n # Calculate target and actual number of unique characters\r\n t = r - l + 1\r\n x = len(se... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Copy content of filename_from to opened file_to. | def write_from(file_to, filename_from):
file_from = open(filename_from, "r")
file_to.write(file_from.read())
file_from.close() | [
"def copy_file(self, from_filename, to_filename, mode=None):\n with open(from_filename.format(**self.env), 'r') as f:\n self.create_file_with_content(to_filename, f.read(), mode=mode)",
"def copy_file(self, path: str, filename: str, new_path: str, new_filename: str = None):\n new_filename... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Open file but when debug is true, return stdout | def open_file(filename, debug, mode):
if debug:
return sys.stdout
else:
return open(filename, mode) | [
"def file_debug(level, line):\n\n\tif level <= g.FILE_DEBUG_LEVEL:\n\t\tprint(line)",
"def open_file(name, *args, **kwargs):\n if name == '-':\n return os.fdopen(sys.stdout.fileno(), *args, **kwargs)\n return open(name, *args, **kwargs)",
"def debug_cli():",
"def _maybe_open(filename):\n if fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if a number is prime using primes_table, which is updated by the top-level primes_sieve_generator | def is_prime(number, primes_table):
if number > 1:
if number == 2:
return True
if number % 2 == 0:
return False
max_test = int(math.sqrt(number) + 1)
for current in primes_table:
if current > max_test:
return True
if num... | [
"def primep(n):\n return fermat_test(n) and miller_rabin_test(n)",
"def isprime_generator_lazy(number):\n if number < 2: return False\n if number == 2: return True\n if number % 2 == 0: return False\n return not any(\n number % p == 0 \n for p in range(3, int(math.sqrt(number)) + 1, 2... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Insert data into the root or create a root node | def insert(self, data):
if self.root:
self.root.set_data(data)
else:
self.root = Node()
self.root.set_data(data) | [
"def insert(self, data):\n if self.root is None:\n self.root = BST.Node(data)\n else:\n self._insert(data, self.root) # Start at the root",
"def insert(self, data):\n # check if node exists in the tree already\n if self.search(data) is None:\n self.root = self._insert(self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
for setting the left child of the node | def set_left(self, node):
self.left_child = node | [
"def set_left_child(self, left):\n self.left = left",
"def set_left(self, left):\n self.left_child = left",
"def _set_left(self, left_child):\n self._left = left_child;",
"def set_left(self, node):\n if isinstance(node, BSNode) or node is None:\n self.left_node = node\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
for setting the right child of the node | def set_right(self, node):
self.right_child = node | [
"def set_right_child(self, right):\n self.right = right",
"def set_right(self, right):\n self.right_child = right",
"def set_right(self, node):\n if isinstance(node, BSNode) or node is None:\n self.right_node = node\n if node is not None:\n node.set_prev... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
returns the left child of the current node | def get_left(self):
return self.left_child | [
"def get_left_child(self):\n if (self.left_child != None):\n return self.left_child\n return None",
"def get_left_subtree(self):\r\n return self.node[self.LEFT]",
"def get_leftmost_child(self) -> Node or None:\n if len(self.children) == 0: return None\n return self.children... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return a new `DataSaver` with the same `arg_picker` and `learner`. | def new(self) -> DataSaver:
return DataSaver(self.learner.new(), self.arg_picker) | [
"def make_datasaver(learner_type, arg_picker):\n return functools.partial(_ds, learner_type, arg_picker)",
"def load_dataloader(args, train, valid):\n dataloader = {\n 'train': DataLoader(train, shuffle=True, batch_size=args.train_batch, num_workers=args.workers),\n 'val': DataLoader(valid, shu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a `DataSaver` of a `learner_type` that can be instantiated with the `learner_type`'s keyword arguments. | def make_datasaver(learner_type, arg_picker):
return functools.partial(_ds, learner_type, arg_picker) | [
"def new(self) -> DataSaver:\n return DataSaver(self.learner.new(), self.arg_picker)",
"def fetch_dataloader(types, data_dir, params):\n dataloaders = {}\n\n for split in ['train', 'test', 'val']:\n if split in types:\n path = os.path.join(data_dir, split)\n\n if split in ['t... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize StepMash, pass arguments to Mash superclass. | def __init__(self, **kwargs):
super(StepMash, self).__init__(**kwargs) | [
"def __init__(self, slide_iterator, workflow, workflow_kwargs, **kwargs):\n default_attr = dict()\n default_attr.update(kwargs)\n super(Workflow_runner, self).__init__(default_attr=default_attr)\n\n # set attribs\n self.workflow = workflow\n self.workflow_kwargs = workflow_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add up all water to return the total volume. Go through all the steps of this mash, adding up all water additions. Also add water ingredients and any water from a preceding mash. | def total_water(self):
total_water = self.total_type('water', 'gal') # ingredients
if (self.import_property('total_water', 'start_water')):
total_water += self.property('start_water')
for step in self.steps:
if ('type' in step and step['type'] == 'infuse'
... | [
"def boil_water(*args):\n init.game_state.inventory.items[\"water_bottle_safe\"][\"Quantity\"] += \\\n init.game_state.inventory.items[\"water_bottle_unsafe\"][\"Quantity\"]\n init.game_state.inventory.items[\"water_bottle_unsafe\"][\"Quantity\"] = 0",
"def bake_breads(amount, inventory, recipe):\n fo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the total time that this mash takes to complete. This is just the sum of the times of the individual steps in the mash. | def total_time(self):
t = timedelta()
for step in self.steps:
if ('time' in step):
t += self.parsetime(step['time'])
return(t) | [
"def totalTime(self):\n return time.time()-self.start",
"def total_time(self):\n return sum(map(lambda s: s.duration, self.states))",
"def get_time_spent(self):\n return # osid.calendaring.Duration",
"def total_time_secs(self):\n return self.exec_time_secs + self.loading_time_secs... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write out a time sequence of all steps, including decoctions. The goal is to create a useful presentation of all steps on a single timeline, including working out any implied rests where decoction steps take longer than the explicit steps in the main mash. | def steps_str(self, start_time=timedelta(), n=0, indent=''):
# Go through the sequence of steps in this mash and any decoctions
# taken to populate the set of stages for each.
stages = {}
self.find_stages(stages)
# Now set up pointers into each set a stages
mashes = sorte... | [
"def dump(self):\n f = open(self.filepath, 'w')\n self.prelude(f)\n # First, sum up all times within each task\n for task in self.tasks_ordered:\n dur = self.total_times.get(task, \"UNKNOWN\")\n print >>f, \"Task '{0}':\".format(task)\n\n if dur == \"UNKN... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a list for this mash's stages in the main stages dict. | def find_stages(self, stages, mash_name='_main', start_time=timedelta()):
stage = []
stages[mash_name] = stage
#
t = start_time
vol = Quantity('0gal')
temp = Quantity()
num = 0
for step in self.steps:
num += 1
type = step['type']
... | [
"def stages(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"stages\"),\n )",
"def get_lifecycle_stage_list(self):\n response = self.client.get(self.client.get_url())\n\n results = {}\n for item in response.json(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
copy the input file to the work directory with gzip compression, then delete the original file | def __init__(self, input_file: T, work_directory: WorkDirectory):
self.__path = work_directory.path.joinpath(f'{input_file.name}.gz')
with open(input_file.path, 'rb') as source_file, gzip.open(self.__path, 'wb') as work_file:
shutil.copyfileobj(source_file, work_file)
logger.debu... | [
"def gzip_compress(file_in, file_out=None, rm_file_in=True):\n try:\n if not file_out:\n file_out = '.'.join([file_in, 'gz'])\n with open(file_in, 'rb') as fh_in:\n with gzip.open(file_out, 'wb') as fh_out:\n shutil.copyfileobj(fh_in, fh_out)\n if os.acce... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get the top J-Categories of all time by summing the total appearances of a category and dividing by 5 (since each category appears 5 times unless it's Final Jeopardy, which isn't super important to figure out with this analysis) | def top_categories(df, n):
common_topics = df['J-Category'].value_counts()[:n]
common_topics
common_cats = pd.DataFrame(common_topics).reset_index().rename(
columns={"index": "J-Category", "J-Category": "Counts"})
counts = common_cats['Counts'].apply(lambda x: x / 5)
pd.Series(co... | [
"def get_top20(teams, category, ascend):\n\tteams_sorted = teams.sort_values(by = [category], ascending = ascend)\n\ttop20 = pd.DataFrame(teams_sorted.head(20), columns = ['TeamName', category])\n\treturn top20",
"def top_actors():\n reader = initialize_reader()\n actor_list = [{\"actor\": row[10], \"scored... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
make a bar graph to show the top n categories from the top_categories function | def graph_top_categories(df, color, save=False):
fig, ax = plt.subplots(1, 1, figsize=(6, 4), dpi=200)
ax.bar(common_cats.index, common_cats['Counts'], color=color)
ax.set_ylabel("Number of Episodes", fontsize=14)
# ax.set_title("Top 10 J-Categories", fontsize=14)
ax.set_xlabel("J-Categories", fonts... | [
"def top_categories(df, n):\n common_topics = df['J-Category'].value_counts()[:n]\n common_topics\n common_cats = pd.DataFrame(common_topics).reset_index().rename(\n columns={\"index\": \"J-Category\", \"J-Category\": \"Counts\"})\n counts = common_cats['Counts'].apply(lambda x: x / 5... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call munkicommon error msg handler | def display_error(msg, *args):
munkicommon.display_error('Munkireport: %s' % msg, *args) | [
"def error(msg):\n message(msg, flag='e')",
"def _encountered_error(self, event):\n self._log.error(vlc.libvlc_errmsg())\n self.next()",
"def errReceived(self, data):\n self.log.error(data)",
"def error(self, message):\n pass",
"def set_error_message(self,key, *args):\n#------... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call munkicommon warning msg handler | def display_warning(msg, *args):
munkicommon.display_warning('Munkireport: %s' % msg, *args) | [
"def warning(msg):\n _syslog(syslog.LOG_WARN, msg)",
"def warning(self, msg, *args, **kwargs):\n self.write(msg, level='WARNING', *args, **kwargs)",
"def warning(msg):\n message(msg, flag='w')",
"def warning(msg):\r\n sys.stderr.write(msg+\"\\n\")",
"def warning(msg):\n global logger\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Call munkicommon detail msg handler | def display_detail(msg, *args):
munkicommon.display_detail('Munkireport: %s' % msg, *args) | [
"def handle_inform(self, msg):\n print msg",
"def handle_reply(self, msg):\n print msg",
"async def handle_msg(self, msg):\n msg = msg.split('|')\n ignore_msg = ['', 'customgroups', 'formats', 'j']\n if len(msg) < 2 or msg[1] in ignore_msg:\n return\n elif ms... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns uptime in seconds, or -1 on failure, on BSD (including OS X). | def get_uptime():
try:
libc = ctypes.CDLL('libc.dylib')
except OSError:
return -1
if not hasattr(libc, 'sysctlbyname'):
# Not BSD
return -1
# Determine how much space we need for the response
sz = ctypes.c_uint(0)
libc.sysctlbyname('kern.boottime', None,... | [
"def uptime():\n if __boottime is not None:\n return time.time() - __boottime\n\n return _uptime_linux()",
"def get_uptime():\n\n now = datetime.datetime.utcnow()\n uptime = None\n\n # boot timestamp exists + boot-time timesync in update check successful on this boot + webui has time synced ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets a preference. See munkicommon.py for details. | def set_pref(pref_name, pref_value):
CFPreferencesSetValue(
pref_name, pref_value, BUNDLE_ID,
kCFPreferencesAnyUser, kCFPreferencesCurrentHost)
CFPreferencesAppSynchronize(BUNDLE_ID)
print "set pref"
try:
CFPreferencesSetValue(
pref_name, pref_value, BUNDLE_ID,
... | [
"def user_preference(self, user_preference):\n\n self._user_preference = user_preference",
"def set(key, value, description=\"\"):\n p = Preference.select(Preference.q.pref_key == key)\n if p.count() == 0:\n Preference(pref_key=key, \n pref_value=value,\n pr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run scripts in directory scriptdir. runtype is passed to the script. If abort is True, a nonzero exit status will abort munki. submitscript is put at the end of the scriptlist. | def rundir(scriptdir, runtype, abort=False, submitscript=''):
if os.path.exists(scriptdir):
from munkilib import utils
# Directory containing the scripts
parentdir = os.path.basename(scriptdir)
display_detail('# Executing scripts in %s' % parentdir)
# Get all files in scri... | [
"def handle_script_abort_delete():\n if not script_exec_lock.locked():\n script_exec_lock.acquire()\n\n if _RUNNING_SCRIPT:\n _RUNNING_SCRIPT.kill(UIAbortException())\n script_exec_lock.release()\n Sessions.add_event(\"script:aborted\", None)",
"def end_pbs_script(opts, run_dir, output_d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieves the current version of the BiGG database and returns it as a Version. | def database_version():
response = requests.get(BASE_URL + "database_version")
response.raise_for_status()
data = response.json()
return Version(data[BIGG_MODELS_VERSION], data[API_VERSION], data[LAST_UPDATED]) | [
"def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]",
"def get_database_revision():\n config = _get_database_config()\n return config.version",
"def get_virus_database_version(self):\n return self._run_and_parse(\n '/VERSION',\n regexp='AV Engine Version: (?P<d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download models from BiGG. You can choose to save the file or to return the JSON data. | def download_model(model_id, file_format="json", save=True, path="."):
if save:
response = requests.get("http://bigg.ucsd.edu/static/models/%s.%s" % (model_id, file_format), stream=True)
response.raise_for_status()
with open(os.path.join(path, "%s.%s" % (model_id, file_format)), "wb") as mo... | [
"def download_model():\n # path = '/home/tomas/code/tomasaltilio/Food_Detective/ResNET_acc32'\n path = 'gs://food-models-le-wagon/ResNET_acc32/'\n model = models.load_model(path)\n return model",
"def download_model_files(args):\n\n log(args, f\"Downloading model files\")\n\n dstpath = pathlib.P... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Lists all models available in BiGG and returns them as a DataFrame. | def list_models():
response = requests.get(BASE_URL + "models/")
response.raise_for_status()
data = response.json()
LOGGER.info("Found %i models", data[RESULTS_COUNT])
models = DataFrame(columns=["bigg_id", "metabolites", "reactions", "genes", "organism"])
for i, d in enumerate(data[RESULTS]):... | [
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def models(self):\n \n if self._models is None:\n self._models = self._make_request(\"models\").json()\n \n return self._models",
"def show_all_models():\n query_set... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |