query (string, lengths 9-9.05k) | document (string, lengths 10-222k) | negatives (list, lengths 19-20) | metadata (dict)
|---|---|---|---|
View all the images in the dataset on a 3-by-X grid. | def view_images(dataset, size):
images, labels = dataset
assert images.shape[0] == labels.shape[0]
num_images = images.shape[0]
num_cols = 3
num_rows = np.ceil(num_images / num_cols).astype("int")
plt.figure(figsize=size)
for i in range(num_images):
image = images[i]
label =... | [
"def display_three_train_images(train_dataset):\r\n plt.figure(figsize=(10, 14))\r\n for input_images, _ in train_dataset.take(1):\r\n for i in range(3):\r\n plt.subplot(3, 1, i+1)\r\n plt.imshow(np.squeeze(input_images[i]), cmap='gray')\r\n plt.show()",
"def draw_dataset(dat... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
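The `view_images` document cell above is truncated; a minimal completion sketch of the 3-column grid viewer it describes, an illustrative reconstruction under the snippet's own names rather than the original code:

```python
import numpy as np
import matplotlib.pyplot as plt

def view_images(dataset, size):
    # dataset is an (images, labels) pair of equal-length arrays
    images, labels = dataset
    num_cols = 3
    num_rows = int(np.ceil(images.shape[0] / num_cols))
    plt.figure(figsize=size)
    for i in range(images.shape[0]):
        plt.subplot(num_rows, num_cols, i + 1)
        plt.imshow(np.squeeze(images[i]), cmap='gray')
        plt.title(str(labels[i]))
        plt.axis('off')
    plt.show()
```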
Normalises and reshapes the images in the dataset. | def normalise(dataset):
# Scale images to the [0, 1] range
dataset = dataset.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
return np.expand_dims(dataset, -1) | [
"def normalize_dataset(self):",
"def normalize_images(data, blend_cat, Args):\n im = data['X_train']['blend_image']\n std = np.std(im)\n mean = np.mean(im)\n data['X_train']['blend_image'] = (im - mean) / std\n data['X_val']['blend_image'] = (data['X_val']['blend_image'] - mean) / std\n data['X_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks the date and time, and then decides if a shift from master to slave (or vice versa) is needed. If necessary, makes the shift. | def main():
date = time.gmtime().tm_mday
if date == 1 or date == 2: # in case it missed once
# shift from slave to master, checking to ensure it hasn't already happened
status = check_status()
if status == 'slave':
slave_to_master()
elif status == 'master':
... | [
"def check_shift(self, shift):\n start_time = shift.start_time.astimezone(tz=shift.user.userprofile.timezone)\n end_time = shift.end_time.astimezone(tz=shift.user.userprofile.timezone)\n # shift_date = start_time.date() if start_time.date() == end_time.date() else None\n\n # The code bel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check the status of the application, i.e., whether it is running on the master or slave. Also check to see if there are any issues, like the web dyno on the slave running, or both workers running etc. | def check_status():
# assume no web dynos on master - there should never be a web dyno on master
r = req.get(f"{MASTER_API_URL}/formation/worker", headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Couldn't get master worker formation")
print(r.status_code, ":", r.text)
... | [
"def check_master_vpn_worker(self):\n LOG.info(\"Checking master/vpn\")\n success = True\n if not self.configurations[0].get(\"masterInstance\") or self.configurations[0].get(\"vpnInstance\"):\n success = False\n for configuration in self.configurations[1:]:\n if no... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shift the process from master to slave, shifting data as needed. | def master_to_slave():
print("Shifting from master to slave")
stop_master_worker()
setup_slave_web()
prepare_push()
push_to_slave()
stop_slave_web()
start_slave_worker()
print("DONE!") | [
"def slave_to_master():\n print(\"Shifting from slave to master\")\n stop_slave_worker()\n setup_slave_web()\n pull_from_slave()\n commit_pull_to_db()\n stop_slave_web()\n start_master_worker()\n print(\"DONE!\")",
"def slave_operation():\n # receive data from master node\n x = comm.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Shift the process from slave to master, shifting data as needed. | def slave_to_master():
print("Shifting from slave to master")
stop_slave_worker()
setup_slave_web()
pull_from_slave()
commit_pull_to_db()
stop_slave_web()
start_master_worker()
print("DONE!") | [
"def master_to_slave():\n print(\"Shifting from master to slave\")\n stop_master_worker()\n setup_slave_web()\n prepare_push()\n push_to_slave()\n stop_slave_web()\n start_slave_worker()\n print(\"DONE!\")",
"def become_master(slave_process, old_name):\n s = socket.socket()\n s.bind(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up the web server on the slave, then checks it. | def setup_slave_web():
print("Starting slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the web dyno on slave")
print(r.text)
return False
#wait a bit for the web pr... | [
"def webserver_start():\n run(_webserver_command())",
"def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())",
"def setup():\n # Setup requires root privleges\n env.user = \"root\"\n env.disable_known_hosts = True\n jmeter_version=\"2.13\"\n\n c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stops the web process on the slave. | def stop_slave_web():
print("Stopping slave web")
r = req.patch(f"{SLAVE_API_URL}/formation/web", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the web dyno on slave")
print(r.text)
return False
#wait a bit for the web proc... | [
"def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)",
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.code... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts the worker process on the master. | def start_master_worker():
print("Starting master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on master")
print(r.text)
return False
#wait a b... | [
"def __start_local_master():\n import params\n component_name = __get_component_name()\n\n __setup_hdfs_dirs()\n\n utils.exec_hawq_operation(\n hawq_constants.START,\n \"{0} -a -v\".format(component_name),\n not_if=utils.chk_hawq_process_status_cmd(params.hawq_master_address_port, component... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stops the worker process on the master. | def stop_master_worker():
print("Stopping master worker")
r = req.patch(f"{MASTER_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=MASTER_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on master")
print(r.text)
return False
#wait a bit... | [
"def stop_slave_worker():\n print(\"Stopping slave worker\")\n r = req.patch(f\"{SLAVE_API_URL}/formation/worker\", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)\n if r.status_code != req.codes.ok:\n print(\"Unable to stop the worker dyno on slave\")\n print(r.text)\n return False\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Starts the worker process on the slave. | def start_slave_worker():
print("Starting slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_1, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to start the worker dyno on slave")
print(r.text)
return False
#wait a bit fo... | [
"def __initiate_slave_node(self):\n master_cmd = self.__spark_installation_path + '/sbin/start-slave.sh spark://' + self.__host_ip + ':7077'\n print(master_cmd)\n #os.chdir(self.__home_dir)\n output = os.system(master_cmd)\n if output != 0:\n raise Exception(\"Terminati... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stops the worker process on the slave. | def stop_slave_worker():
print("Stopping slave worker")
r = req.patch(f"{SLAVE_API_URL}/formation/worker", json=API_PAYLOAD_0, headers=SLAVE_API_HEADERS)
if r.status_code != req.codes.ok:
print("Unable to stop the worker dyno on slave")
print(r.text)
return False
#wait a bit for ... | [
"def stop_slave(instance):\n stmt = \"stop slave\"\n instance.get_connection()\n instance.execute_stmt(connection=instance.connection, stmt=stmt)\n if is_slave_running(instance):\n print \"Error: unable stop slave replication.\"\n return 1",
"def Stop(self):\n if self.child_pid:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uses the current cursor position, which is in a code view, and gets the corresponding instruction address that is associated with the code. Returns the start of the function if unable to calculate one. | def get_src_to_inst(self) -> int:
# get the Qt document
doc: QCodeDocument = self.document()
# get the current position of the cursor
cursor = self.textCursor()
pos = cursor.position()
# get the node at the associated cursor position
current_node = doc.get_stmt... | [
"def _next_code_addr_core(self):\n\n next_addr = self._next_unscanned_addr()\n if next_addr is None:\n return None\n\n start_addr = next_addr\n\n while True:\n string_length = self._scan_for_printable_strings(start_addr)\n if string_length:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate a checksum for the given path. Will eventually use this to ensure config has changed before reloading. | def checksum(path):
with open(path, 'rb') as f:  # read bytes: hashlib.md5 rejects str on Python 3
return md5(f.read()).digest() | [
"def file_checksum(path):\n\n with abort_if_file_changes_during_read(path):\n m = hash_implementation()\n\n with open(path, 'rb') as f:\n for chunk in read_in_chunks(f, io.DEFAULT_BUFFER_SIZE):\n m.update(chunk)\n\n return m.hexdigest()",
"def get_checksum(path: U... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
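`checksum` above hashes the whole file in one read; a hedged streaming variant under the same interface, so large configs never sit fully in memory (the chunk size is an arbitrary choice):

```python
import hashlib

def checksum(path, chunk_size=65536):
    # Feed the file to md5 in fixed-size chunks instead of one big read
    h = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.digest()
```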
Set up inotify if requested. | def _setup_inotify(self, flag):
i = None
if flag:
try:
import inotify.adapters
except ImportError:
raise AssertionError(
'cannot use inotify, package not installed')
else:
i = inotify.adapters.Inot... | [
"def SetupFileWatcher(filename, cb):\n wm = pyinotify.WatchManager()\n handler = FileEventHandler(wm, filename, cb)\n asyncnotifier.AsyncNotifier(wm, default_proc_fun=handler)",
"def __init__(self, watch_manager, default_proc_fun=None, map=None):\n if default_proc_fun is None:\n default_proc_fun = pyin... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create watch directory if it does not exist. | def _setup_watch(self, watch):
assert not isfile(watch), 'watch dir is a file'
if pathexists(watch):
return watch
os.makedirs(watch)
if self.chown:
try:
os.chown(watch, *self.chown)
except OSError:
pass # Non-fatal
... | [
"def watchDirectory(self, path):\n pass",
"def _create_file_dir(self):\n path = os.path.dirname(self.filename)\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n time.sleep(1)\n self._create_file_dir... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reload configuration. If a reload command is given, run that; otherwise, signal the process with HUP. | def reload_command(self):
if self.reload is None:
if not self.check_command():
LOGGER.info('Command dead, restarting...')
self.start_command(wait_for_config=False)
else:
LOGGER.info('Sending HUP signal...')
self.process.sen... | [
"def reload_config():\n subprocess.run([SUPERVISOR_CMD, \"reload\"])",
"def reload(kwargs=None):\n sudo(\"supervisorctl restart %(name)s\" % kwargs)",
"def reload_daemon():\n subprocess.run([\n 'systemctl',\n 'daemon-reload'\n ], check=True)",
"def _reload(self):\n try:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return False if command is dead, otherwise True. | def check_command(self):
return self.process is not None and self.process.poll() is None | [
"def dead(self):\n self.tamagotchi.update_status(TimeKeeper.time_check())\n if self.tamagotchi.health == 0:\n return True\n else:\n return False",
"def is_dead(self):\n return self.health <= 0",
"def is_dead(self):\n return self.hp <= 0",
"def is_dead(s... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get unique list of new config files in watch dir. | def get_config(self):
config = set()
while True:
filenames = self.get_config_files()
for fn in filenames:
if fn not in self.watch_names:
filenames.remove(fn)
if fn in config:
filenames.remove(fn)
... | [
"def getPreviousApplicationSettingsDirsByTime() -> List[java.io.File]:\n ...",
"def _get_cfg_list():\n config.update({'sensors': {}})\n conf_files = filter(lambda x: x.endswith('.yaml') and not x.startswith('unicon-setting'),\n os.listdir(path=f\"{cpath}\"))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Kill the running command. | def kill(self):
if self.process is not None:
LOGGER.info('Killing command...')
self.process.kill()
self.process = None | [
"def kill(self):\n\n self.proc.kill()",
"def kill(self):\n self._stop_proc(signal.SIGKILL)",
"def kill_subprocess(self):\n try:\n self.process.kill()\n except OSError:\n pass\n return",
"def kill(self):\n self.rpc_process.kill()",
"def stop(sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Backs up entire configuration. | def backup_config(self):
prev_config = set()
for src in self.config:
dst = '%s.prev' % src
LOGGER.debug('Backing up %s to %s', src, dst)
try:
shutil.copy(src, dst)
except IOError as e:
if e.errno != errno.ENOENT:
... | [
"def backup_config():\n copy(CONFIG_FILE, CONFIG_FILE + '.bak')",
"def backupCfg( self ):\n self.handle.sendline( \"\" )\n self.handle.expect( self.prompt )\n self.handle.sendline( \"cp %s%s %s%s.backup\" % (self.switchDirectory, self.conf, self.switchDirectory, self.conf) )\n self.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Backup old config, write new config, test config, HUP or restore. | def test_and_swap(self, config):
LOGGER.info('Attempting to apply new configuration')
backup = self.backup_config()
# We have backed up ALL config files (not just the ones we might
# replace). If any error occurs from here out, we will need to restore
# our config, so we will use... | [
"def backup_config():\n copy(CONFIG_FILE, CONFIG_FILE + '.bak')",
"def backup_current_config():\n LOGGER.debug(\"Backing up current config\")\n\n backup(VIMRC)\n backup(VIMDIR)\n #backup(BASHRC)\n #backup(ZSHRC)\n backup(PYLINTRC)",
"def _backup_config(config_file, bak_path=None):\n try:\n if... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot and save histograms from predicted steerings and real steerings. Arguments | def make_and_save_histogramsX(pred_steerings, real_steerings,
img_name = "histogramsX.png"):
pred_steerings = np.array(pred_steerings)
real_steerings = np.array(real_steerings)
max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))
min_h = np.minimum(np.min(pred_... | [
"def make_and_save_histogramsY(pred_steerings, real_steerings,\n img_name = \"histogramsY.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot and save histograms from predicted steerings and real steerings. Arguments | def make_and_save_histogramsY(pred_steerings, real_steerings,
img_name = "histogramsY.png"):
pred_steerings = np.array(pred_steerings)
real_steerings = np.array(real_steerings)
max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))
min_h = np.minimum(np.min(pred_... | [
"def make_and_save_histogramsX(pred_steerings, real_steerings,\n img_name = \"histogramsX.png\"):\n pred_steerings = np.array(pred_steerings)\n real_steerings = np.array(real_steerings)\n max_h = np.maximum(np.max(pred_steerings), np.max(real_steerings))\n min_h = np.minimum(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot and save confusion matrix computed from predicted and real labels. Arguments | def plot_confusion_matrix(real_labels, pred_prob, classes,
normalize=False,
img_name="confusion.png"):
real_labels = np.array(real_labels)
# Binarize predicted probabilities
pred_prob = np.array(pred_prob)
pred_labels = np.zeros_like(pred_prob)
... | [
"def confusion_matrix_plot(y_true, y_pred) -> None:\n from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix\n\n cm = confusion_matrix(y_true, y_pred)\n plot = ConfusionMatrixDisplay(confusion_matrix=cm).plot()\n plot.ax_.set_title(\"Confusion Matrix\")",
"def plot_print_confusion_matrix... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Discover and add a Tasmota sensor. | async def async_discover_sensor(tasmota_entity, discovery_hash):
async_add_entities(
[
TasmotaSensor(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
) | [
"async def async_add_binary_sensor(mac):\n if USB_MOTION_ID in api_stick.devices[mac].features:\n _LOGGER.debug(\"Add binary_sensors for %s\", mac)\n async_add_entities([USBBinarySensor(api_stick.devices[mac])])\n\n # Register services\n platform.async_register_ent... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Installs the OpenMPI package on the VM. | def _Install(vm):
vm.Install('build_tools')
vm.Install('wget')
vm.RemoteCommand('wget %s -P %s' % (MPI_URL, INSTALL_DIR))
vm.RemoteCommand('cd %s && tar xvfz %s' % (INSTALL_DIR, MPI_TAR))
make_jobs = vm.NumCpusForBenchmark()
shared_lib_command = ('--enable-shared' if FLAGS.openmpi_enable_shared
... | [
"def _Install(vm):\n version_to_install = FLAGS.openmpi_version\n if not version_to_install:\n return\n current_version = GetMpiVersion(vm)\n if current_version == version_to_install:\n return\n\n first_dot_pos = version_to_install.find('.')\n second_dot_pos = version_to_install.find('.', first_dot_pos ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Installs the OpenMPI package on the VM. | def AptInstall(vm):
vm.RobustRemoteCommand('sudo apt-get {}'.format(REMOVE_MPI_CMD))
_Install(vm) | [
"def _Install(vm):\n version_to_install = FLAGS.openmpi_version\n if not version_to_install:\n return\n current_version = GetMpiVersion(vm)\n if current_version == version_to_install:\n return\n\n first_dot_pos = version_to_install.find('.')\n second_dot_pos = version_to_install.find('.', first_dot_pos ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Uninstalls the OpenMPI package on the VM. | def _Uninstall(vm):
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MPI_DIR)) | [
"def _Uninstall(vm):\n vm.RemoteCommand('cd {0} && sudo make uninstall'.format(GetMpiDir()))",
"def Uninstall(vm):\n vm.RemoteCommand('rm -rf tpu')",
"def Uninstall(_):\n # No clean way to uninstall everything. The VM will be deleted at the end\n # of the test.\n pass",
"def AptUninstall(vm):\n _Uninsta... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method provides the ability to set the order book's deep (depth) on the fly. If any of deep's parameters (bid_count or ask_count) is < 0, the method raises the custom ChangeOrderBookDeepError exception. | def set_deep(self, deep: Deep) -> None:
def is_deep_invalid(var: Deep):
return not isinstance(var, Deep) \
or False in [str(value).isdigit() for value in deep.__dict__.values()] \
or deep.bid_count < 0 \
or deep.ask_count < 0
# Exit ... | [
"def test_overflow_bids_market_default_depth(new_order_book: Callable[[], OrderBook]) -> NoReturn:\n book = new_order_book\n\n for _ in range(book.depth):\n book.add_offer('bids', 1, 1)\n\n assert book.depth == len(book.bids)\n assert not book.asks\n\n # try to put 21th lot into bids\n with... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Private method that provides the ability to sort the orders by price | def __sort_orders_by_price(self):
self.orders = sorted(self.orders, key=lambda o: o.price, reverse=True) | [
"def test_sorting_descending_by_price():",
"def test_ordering_by_price_desc(self):\n request = self.factory.get('/api/v1/cars', {'distance': 10000,\n 'ordering': '-price'})\n response = CarAdViewSet.as_view({'get': 'list'})(request)\n self.as... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method provides the ability to find an order by id and reject it. | def reject_order(self, order: Order) -> None:
order = self.get_order_by_id(order.id)
order.status = OrderStatus.REJECT | [
"def _on_order_not_found(self, msg):\r\n parts = msg[\"id\"].split(\":\")\r\n oid = parts[1]\r\n self.debug(\"### got 'Order not found' for\", oid)\r\n # we are now going to fake a user_order message (the one we\r\n # obviously missed earlier) that will have the effect of\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method provides the ability to fill an order in the order book; filling means the order is completed. | def fill_order(self, order: Order) -> None:
order = self.get_order_by_id(order.id)
order.status = OrderStatus.FILL | [
"def onOrderFilled(self, broker_, order):\n pass",
"def sync_completed_order(self):\n self.completed_order = yield get_private('complete_order', {'currency': self.currency})",
"def mark_completed(self, order_id=None):\n self.status = \"paid\"\n if order_id and not self.order_id:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts the auto-incremented id of a ShortURL object into a short-URL hash | def encode(shorturl_id: int) -> str:
short_resource = []
while shorturl_id > 0:
character_index = shorturl_id % BASE
short_resource.append(CHARACTER_SPACE[character_index])
shorturl_id //= BASE
return "".join(short_resource[::-1]) | [
"def _create_url_hash(url):\n try:\n url_obj = Urlshort.objects.create(hash_value=\"\", original_url=\"\")\n url_obj.save()\n url_obj = Urlshort.objects.latest(\"id\")\n\n url_hash = _url_id_encode(url_obj.id)\n url_encoded = url.encode(\"utf8\")\n Urlshort.objects.filte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
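`encode` above is a plain base-N conversion; for illustration, a sketch of the inverse `decode` with a round-trip check. `CHARACTER_SPACE` and `BASE` are names from the snippet, but their values here are assumptions:

```python
import string

CHARACTER_SPACE = string.ascii_letters + string.digits  # assumed alphabet
BASE = len(CHARACTER_SPACE)

def encode(shorturl_id: int) -> str:
    short_resource = []
    while shorturl_id > 0:
        shorturl_id, character_index = divmod(shorturl_id, BASE)
        short_resource.append(CHARACTER_SPACE[character_index])
    return "".join(short_resource[::-1])

def decode(short_resource: str) -> int:
    # Inverse of encode: fold each character's alphabet index back in
    shorturl_id = 0
    for char in short_resource:
        shorturl_id = shorturl_id * BASE + CHARACTER_SPACE.index(char)
    return shorturl_id

assert decode(encode(12345)) == 12345
```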
Overrides save() to first save and obtain the id, then computes the short-URL hash and saves it to the model within a transaction context | def save(self, **kwargs):
res = super().save(**kwargs)
short_path_component = encode(res.id)
self.validated_data["short_path_component"] = short_path_component
return super().save(**kwargs) | [
"def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None):\n # If the short url wasn't specified\n if not self.short_url:\n # We pass the model instance that is being saved\n self.short_url = cr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Attempt to place mover into contents. Returns True on success, False otherwise. | def contain(self, mover):
# Check if mover can exit old location
old_location = mover.location
if(not old_location):
return False
if(not old_location.allow_exit(mover)):
return False
# Check if mover can enter current location
if(not self.allow_ent... | [
"def mover(self, mapa):\n # Obter nova posicao da cabeça \n prox_posicaoX = (self.corpo[CABECA][X] + self.vel[X]) % mapa.tamanho[X]\n prox_posicaoY = (self.corpo[CABECA][Y] + self.vel[Y]) % mapa.tamanho[Y]\n prox_posicao = [[prox_posicaoX, prox_posicaoY]]\n # Adicionar cabeca ao i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if the mover is permitted to enter the room | def allow_entry(self, mover):
return True | [
"def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n T... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Determines if the mover is permitted to exit the room. | def allow_exit(self, mover):
return True | [
"def check_exit(self, position, direction):\n if self.get_room((position[0] + direction[0], position[1] + direction[1])):\n return True\n return False",
"def is_exit(self, state):\n if state.cell.x == self.maze.exit_x and state.cell.y == self.maze.exit_y:\n return True\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called after the mover has entered the room. | def entered(self, mover):
pass | [
"def exited(self, mover):\n pass",
"def after_turn(self):\n pass",
"def game_over(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_lose_label)\n Clock.schedule_once(self.goto_menu, 5)",
"def leaving(self):\n pass",
"def complete_room(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called after the mover has exited the room. | def exited(self, mover):
pass | [
"def leave(self):\n pass",
"def leaving(self):\n pass",
"def end_game(self) -> None:\n pass",
"def game_ended(self):\n\t\tpass",
"def after_turn(self):\n pass",
"def entered(self, mover):\n pass",
"def on_leave(self, room, user):\n pass",
"def _leave(self, *ar... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a tree from a list. The first element is the root value; the others are child nodes (values or subtrees). | def construct(lst):
t = Tree()
t.root = lst[0]
for node in lst[1:]:
if isinstance(node, list):
t.nodes.append(construct(node))
else:
t.nodes.append(node)
return t | [
"def _make_tree(pddl_list):\n\n root = PDDL_Tree(pddl_list[0])\n\n for child in pddl_list[1:]:\n if isinstance(child, list):\n if len(child) == 0:\n root.add_child(PDDL_Tree(PDDL_Tree.EMPTY))\n else:\n subtree = PDDL_Tree._... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
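`construct` above relies on a `Tree` class the row never shows; a minimal sketch of that assumed class plus a usage example:

```python
class Tree:
    # Assumed shape: a root value and a list of child values/subtrees
    def __init__(self):
        self.root = None
        self.nodes = []

def construct(lst):
    t = Tree()
    t.root = lst[0]
    for node in lst[1:]:
        t.nodes.append(construct(node) if isinstance(node, list) else node)
    return t

t = construct(['a', 'b', ['c', 'd'], 'e'])
assert t.root == 'a' and t.nodes[0] == 'b' and t.nodes[1].root == 'c'
```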
convert list of dicts to ndarray of type np.float32 | def dicts2ndarray(data_dicts):
# NEVER make any assumption about the order of .keys() return
aps = [ap for ap in data_dicts[0].keys() if ap != 'tag']
aps.sort()
data_num = len(data_dicts)
data_len = len(data_dicts[0][aps[0]])
ndary = np.zeros([data_num, len(aps), data_len], dtype=np.float32)
... | [
"def dict2array(datadict):\n data = np.zeros(len(datadict.keys()), len(datadict[datadict.keys()[0]]))\n idx = 0\n for key in datadict.keys():\n data[i] = np.asarray(datadict[key], dtype=float32)\n\n return data",
"def translate_pandas_to_numpy(data_list:list) -> list:\n list_size = len(data_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Assembles a list of circuits into a qobj which can be run on the backend. | def assemble_circuits(circuits, run_config=None, qobj_header=None, qobj_id=None):
qobj_header = qobj_header or QobjHeader()
run_config = run_config or RunConfig()
if isinstance(circuits, QuantumCircuit):
circuits = [circuits]
userconfig = QobjConfig(**run_config.to_dict())
experiments = []
... | [
"def assemble_circuits(circuits, qobj_id=None, qobj_header=None, run_config=None):\n qobj_config = QasmQobjConfig()\n if run_config:\n qobj_config = QasmQobjConfig(**run_config.to_dict())\n\n # Pack everything into the Qobj\n experiments = []\n max_n_qubits = 0\n max_memory_slots = 0\n f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert all config sections to have unique names. Adds unique suffixes to config sections for compatibility with configparser. | def unique_config_sections(config_file):
section_counters = defaultdict(int)
output_stream = io.StringIO()
with open(config_file) as fin:
for line in fin:
if line.startswith('['):
section = line.strip().strip('[]')
_section = section + '_' + str(section_co... | [
"def find_unique_keys(base_config, comp_config, base_name):\n unique_keys = []\n unique_sections = []\n\n for section in base_config:\n if str(section) == 'DEFAULT':\n continue #.cfg has DEFAULT key, we do not use\n if not comp_config.has_section(section):\n unique_label... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The last value for the W array is correct | def test_W_end(self):
self.assertAlmostEqual(attempt.W[-1], 9.494852380803035) | [
"def new_w_vector():\n return [1] * 201",
"def null_w(self, t):\n\n B = self.B_w\n u = self.u_w(t)\n\n null = np.matmul(B, u)\n\n return np.reshape(null, (2 * self.vec_num_points, ))",
"def uw(self):\n return sm.unitvec(self.w)",
"def _build_n_w(self):\n\n F = np.z... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The last value for the Z array is correct | def test_Z_end(self):
self.assertAlmostEqual(attempt.Z[-1], 41.47999849170943) | [
"def get_z(self):\n return self._z",
"def z(self):\r\n return self.unif[2]",
"def relu(z):\r\n\r\n a = np.maximum(z, 0)\r\n\r\n return a",
"def z(self):\n return self.coords[2]",
"def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]",
"def de_addressi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The maxIndex variables are correct | def test_maxIndex(self):
self.assertEqual(attempt.maxIndexZ, 113)
self.assertEqual(attempt.maxIndexW, 134) | [
"def maxQualifiedIndex(self, indices):\n entry = self.getConfig()\n # the leader keep its own record updated to the newest\n indices[self.datacenter_id] = len(self.log) - 1\n # print('!!!!!', indices)\n if entry['config'] == 'single':\n return sorted([indices[x] for x i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that the mount point is owned by swift | def is_ug_swift(d, r):
stats = os.stat(d.mount)
uid = stats.st_uid
gid = stats.st_gid
user = pwd.getpwuid(uid).pw_name
group = grp.getgrgid(gid).gr_name
if user == group == 'swift':
return True
else:
r.msgkey('user', user)
r.msgkey('group', group)
return Fals... | [
"def is_mount_point(self):\n return os.path.ismount(self._full_name)",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def validate_permissions(pod_obj):\n\n cmd_output = pod_obj.exec_cmd_on_pod(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks the relevant swift mount points and disk usage | def main():
results = []
results.extend(check_mounts())
results.extend(diskusage())
return results | [
"def _mock_disk_usage(self, blocks, avail, frsize=1024) -> None:\n mock_statvfs_patcher = patch(\"eden.cli.doctor.os.statvfs\")\n mock_statvfs = mock_statvfs_patcher.start()\n self.addCleanup(lambda: mock_statvfs.stop())\n statvfs_tuple = collections.namedtuple(\"statvfs\", \"f_blocks f_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sort variables based on their rank and shift. Note that this relies on all variables having a unique rank. | def sort_variables(variables):
return tuple(sorted(variables, key=lambda v: (v.rank, v.shift))) | [
"def sort(self, varnames):\n varnames = self._find_vars(varnames, unique=True, empty_ok=False)\n var_ind_list = list(map(self._varlist.index, varnames))\n new_srtlist = var_ind_list + [None]*(self._nvar - len(varnames))\n if self._srtlist == new_srtlist:\n return\n sort... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a set of criteria, find the matching variable(s). | def get_matching(variables, strict=True, single=True, **criteria):
matching = []
for var in variables:
for crit_name, crit_info in criteria.items():
if getattr(var, crit_name) == crit_info:
continue
else:
break
else:
matching.ap... | [
"def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_va... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Match variable to VariableFactory using rank, name, and units. | def match_factory(variable, factories):
if not isinstance(factories, tuple):
factories = (factories,)
for factory in factories:
if (
variable.rank == factory.rank
and variable.name == factory.name
and variable.units == factory.units
):
ret... | [
"def _variable_factory(self, var):\n try:\n variable = MOM6Variable(var, self.fh, **self.initializer)\n except TypeError:\n variable = self.fh.variables[var][:]\n return variable",
"def createVariable(self, name: unicode, offset: int, dataType: ghidra.program.model.data.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the lags for a given VariableFactory. | def get_variable_lags(var_factory):
if var_factory in shifted_variables:
return lags
return (0,) | [
"def lags(fs):\n\n return type(fs)(f.lag for f in fs)",
"def get_shifted_variables(var_factory):\n shifted = []\n for lag in get_variable_lags(var_factory):\n shifted.append(var_factory[lag])\n return tuple(shifted)",
"def get_valid_lags(binned_spiketrain_i, binned_spiketrain_j):\n\n b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get all possible shifted variables given a VariableFactory. | def get_shifted_variables(var_factory):
shifted = []
for lag in get_variable_lags(var_factory):
shifted.append(var_factory[lag])
return tuple(shifted) | [
"def get_variable_lags(var_factory):\n if var_factory in shifted_variables:\n return lags\n return (0,)",
"def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)",
"def basis(self, index, vars = None) :\n if index == self.__i... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the path to our major ld.so symlink (which allows us to change which ld.so we are actively using without patching a bunch of binaries). | def ld_linux_path(root):
return os.path.join(root, 'lib', 'ld-linux-xpkg.so') | [
"def _find_ld_version():\n if sys.platform == 'darwin':\n return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)\n else:\n return _find_exe_version('ld -v')",
"def get_linked_libpython():\n if is_windows():\n return\n libdl = ctypes.CDLL(ctypes.util.find_library(\"dl\"))\n lib... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Diagonal distance: h_diagonal(n) = min(abs(n.x - goal.x), abs(n.y - goal.y)); h_straight(n) = abs(n.x - goal.x) + abs(n.y - goal.y); h(n) = D_diagonal * h_diagonal(n) + D_straight * (h_straight(n) - 2 * h_diagonal(n)) | def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):
start_x = start.x
start_y = start.y
goal_x = goal.x
goal_y = goal.y
h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))
h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)
h = d_diagnoal * h_diagona... | [
"def dist_between(current, neighbor,d_diagnoal,d_straight):\n start_x = current.x\n start_y = current.y\n goal_x = neighbor.x\n goal_y = neighbor.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Diagonal distance: h_diagonal(n) = min(abs(n.x - goal.x), abs(n.y - goal.y)); h_straight(n) = abs(n.x - goal.x) + abs(n.y - goal.y); h(n) = D_diagonal * h_diagonal(n) + D_straight * (h_straight(n) - 2 * h_diagonal(n)) | def dist_between(current, neighbor,d_diagnoal,d_straight):
start_x = current.x
start_y = current.y
goal_x = neighbor.x
goal_y = neighbor.y
h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))
h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)
h = d_diagnoal * h_... | [
"def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):\n start_x = start.x\n start_y = start.y\n goal_x = goal.x\n goal_y = goal.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_diagnoa... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
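The two heuristic rows above share one diagonal-distance formula; a self-contained sketch with a worked check. The weights D_diagonal = sqrt(2) and D_straight = 1 are a common choice on 8-connected grids, not values taken from the rows:

```python
import math

def diagonal_distance(nx, ny, gx, gy, d_diagonal=math.sqrt(2), d_straight=1.0):
    # h(n) = D_diagonal * h_diagonal + D_straight * (h_straight - 2 * h_diagonal)
    h_diagonal = min(abs(nx - gx), abs(ny - gy))
    h_straight = abs(nx - gx) + abs(ny - gy)
    return d_diagonal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal)

# (0, 0) -> (3, 1): one diagonal step plus two straight steps
assert abs(diagonal_distance(0, 0, 3, 1) - (math.sqrt(2) + 2)) < 1e-12
```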
Convert the path from grid coordinates to real coordinates, e.g. grid index 21 -> 2.15 (sx = ix * reso + reso / 2) | def convertGridPathToReal(pathInGrid, sx, sy, gx, gy, grid_reso = 0.1):
pathInReal = (pathInGrid * grid_reso + grid_reso / 2)
stepNum = pathInReal.shape[1]
# Replace head and tail
pathInReal[:, 0] = [sx, sy]
pathInReal[:, 0] = [sx, sy]
pathInReal[:, stepNum - 1] = [gx, gy]
pathInReal[:, ste... | [
"def solve_path(grid: List, path: List[Step]):\n current_xy = (int(len(grid) / 2), int(len(grid) / 2))\n for step in path:\n\n x, y = current_xy\n direction = step[0]\n distance = int(step[1:4])\n print(f'Traveling {distance} steps {readable_direction(direction)} from (x = {x}, y =... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
One-dimensional exponential cutoff power law derivative with respect to parameters | def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff... | [
"def func_full_exp(x, c1, c2, c3, c4, c5, c6, c7):\n x = np.power(10, x)\n thermalCore = c1 * np.sqrt(x) * np.exp(-c2 * x)\n a = map(lambda y: 0 if y < c5 else 1, x)\n b = map(lambda y: 0 if y < c6 else 1, x)\n #b1 = map(lambda y: 1 - y, b)\n a = np.array(a)\n b = np.array(b)\n b1 = 1.0 - b\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
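The derivatives in the row above belong to this model function; a sketch of the evaluate counterpart (the snippet appears to follow astropy's one-dimensional exponential-cutoff power law):

```python
import numpy as np

def evaluate(x, amplitude, x_0, alpha, x_cutoff):
    # f(x) = amplitude * (x / x_0)**(-alpha) * exp(-x / x_cutoff);
    # d_amplitude in fit_deriv above is exactly f(x) / amplitude
    return amplitude * (x / x_0) ** (-alpha) * np.exp(-x / x_cutoff)
```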
Generate strong password to add to csv file and clipboard. | def generate_pw():
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
password = ''.join(random.choice(chars) for i in range(16))
pyperclip.copy(password)
print('Password copied to clipboard.')
return password | [
"def generate_password(self):\n raise NotImplementedError()\n # self.passvoid = 'cde'",
"def generate_readback_password():\n # Generate 16 character random password.\n pw = ''.join(chr(random.randint(0, 255)) for i in range(8)).encode('hex')\n\n # Write password to secret file.\n with op... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add new account to pw.csv and generate a strong password. | def main(script):
try:
# ensure user entered account name and user name
account_name = sys.argv[1]
user_name = sys.argv[2]
except IndexError:
print('python add_pw.py [account name] [user name]')
else:
# read in csv file
pw_file = open('pw.csv')
pw_obje... | [
"def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)",
"def add_pass(input_master_pass, input_service, input_email, input_pass):\n f = load_key(input_master_pass)\n encrypted_service = str(f.encrypt(input_service), 'utf-8')\n encrypte... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add Site Static Resource Directory | def addMobileStaticResourceDir(self, dir: str) -> None:
self.__rootMobileResource.addFileSystemRoot(dir) | [
"def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function is for viewing the plot of your cost history. | def plot_cost_history(alpha, cost_history):
cost_df = pandas.DataFrame({
'Cost_History': cost_history,
'Iteration': range(len(cost_history))
})
return ggplot(cost_df, aes('Iteration', 'Cost_History')) + geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha ) | [
"def plot_history(self, cost_history):\n x_val = [i for i in range(len(cost_history))]\n fig, ax = plt.subplots()\n ax.plot(x_val, cost_history)\n ax.set_xlabel(\"Number of iterations\")\n ax.set_ylabel(\"Cost\")\n ax.set_title(\"Cost history of logistic regression\")\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Searches inside the index for umbra3d | def search_umbra(text):
result = _search_blog('umbra3d', text)
_print_results(result)
return result | [
"def clustering_dbscan_o3d():\n pass",
"def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create an embed with the lyrics | def _lyrics_embed(colour, page: Dict[str, Any], data: Dict[str, Any]) -> discord.Embed:
title = [
x.get("value")
for x in data.get("names")
if x.get("language") == LANGUAGE_MAP.get(page["cultureCode"])
]
em = discord.Embed(
title=title[0] if title ... | [
"async def lyrics(self, ctx: commands.Context, *, song_name: str):\n try:\n client = await self.obtain_client()\n except AttributeError:\n await ctx.send(\"Not key for KSoft.Si has been set, ask owner to add a key.\")\n return\n try:\n music_lyrics = ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fuse conv and bn into one module. | def _fuse_conv_bn(conv, bn):
conv_w = conv.weight
conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
bn.running_mean)
factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
conv.weight = nn.Parameter(conv_w *
factor.reshape([conv.out_channels, 1,... | [
"def fuse_conv_bn(module):\n last_conv = None\n last_conv_name = None\n\n for name, child in module.named_children():\n if isinstance(child,\n (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):\n if last_conv is None: # only fuse BN that is after Conv\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recursively fuse conv and bn in a module. During inference, the functionality of batch norm layers is turned off; only the mean and var along channels are used, which exposes the chance to fuse it with the preceding conv layers to save computations and simplify network structures. | def fuse_conv_bn(module):
last_conv = None
last_conv_name = None
for name, child in module.named_children():
if isinstance(child,
(nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
if last_conv is None: # only fuse BN that is after Conv
continue... | [
"def _fuse_conv_bn(conv, bn):\n conv_w = conv.weight\n conv_b = conv.bias if conv.bias is not None else torch.zeros_like(\n bn.running_mean)\n\n factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)\n conv.weight = nn.Parameter(conv_w *\n factor.reshape([conv.out_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
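A self-contained check of the fusion identity the two rows above rely on: fold the BN statistics into the conv's weight and bias, then compare against running conv and BN in sequence. This is a sketch of the math, not the rows' exact code:

```python
import torch
import torch.nn as nn

def fuse(conv, bn):
    # w' = w * gamma / sqrt(var + eps)
    # b' = (b - mean) * gamma / sqrt(var + eps) + beta
    with torch.no_grad():
        factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
        bias = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean)
        conv.weight = nn.Parameter(conv.weight * factor.reshape(-1, 1, 1, 1))
        conv.bias = nn.Parameter((bias - bn.running_mean) * factor + bn.bias)
    return conv

conv, bn = nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)
bn.running_mean.uniform_(-1, 1)    # randomize statistics so the check is non-trivial
bn.running_var.uniform_(0.5, 1.5)
conv.eval(), bn.eval()
x = torch.randn(2, 3, 16, 16)
expected = bn(conv(x))
assert torch.allclose(fuse(conv, bn)(x), expected, atol=1e-5)
```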
Updates this configuration object from a dictionary. | def update_from_dict(self, dct):
if not dct:
return
all_props = self.__class__.CONFIG_PROPERTIES
for key, value in six.iteritems(dct):
attr_config = all_props.get(key)
if attr_config:
setattr(self, key, value)
else:
... | [
"def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)",
"def update_config(cls, **kwargs):\n for key, val in kwargs.items():\n setattr(cls, key, val)",
"def _update_config(self, config_dict):\n for key in config_dict.keys():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merges list-based attributes into one list including unique elements from both lists. When ``lists_only`` is set to ``False``, updates dictionaries and overwrites single-value attributes. The resulting configuration is 'clean', i.e. input values converted and validated. If the conversion is not possible, a ``ValueError``... | def merge(self, values, lists_only=False):
if isinstance(values, self.__class__):
self.merge_from_obj(values, lists_only=lists_only)
elif isinstance(values, dict):
self.merge_from_dict(values, lists_only=lists_only)
else:
raise ValueError("{0} or dictionary ex... | [
"def removeDupes(self):\n \n for key in self.attr_dict_list[0].keys():\n self.attrs[key] = set()\n\n for d in self.attr_dict_list:\n for attr in d:\n self.attrs[attr].add(tuple(d[attr].items()))\n\n for key in self.attrs:\n temp = [dict(t) ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates a copy of the current instance. | def copy(self):
return self.__class__(self) | [
"def copy(self):\n return self.__copy__()",
"def clone(self):\r\n import copy\r\n return self._wrap(copy.copy(self.obj))",
"def copy(self):\n return Sample(**self.__dict__)",
"def __copy__(self):\n trait_data = self.__getstate__()\n inst = self.__class__.create(trait_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cleans the input values of this configuration object. Fields that have gotten updated through properties are converted to configuration values that match the format needed by functions using them. For example, for list-like values it means that input of single strings is transformed into a single-entry list. If this conv... | def clean(self):
all_props = self.__class__.CONFIG_PROPERTIES
for prop_name in self._modified:
attr_config = all_props.get(prop_name)
if attr_config and attr_config.input_func:
self._config[prop_name] = attr_config.input_func(self._config[prop_name])
self.... | [
"def clean(self, value):\n value = self.validate_to_python(value)\n self.run_validators(value)\n return value",
"def _clean_inputs(self, inputs):\n return inputs",
"def _trans_format(self):\n config_dict = vars(self._config)\n for item, value in config_dict.items():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Whether the current object is 'clean', i.e. has no non-converted input. | def is_clean(self):
return not self._modified | [
"def __bool__(self):\n return self.is_valid",
"def _clean( self ):\n\t\tself.__is_dirty = False",
"def clean(self):\n if not self.is_input and not self.is_output:\n raise ValidationError(\"TransformationXput with pk={} is neither an input nor an output\".format(self.pk))\n if sel... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the current world size (number of distributed processes). | def world_size() -> int:
return dist.get_world_size() if dist.is_initialized() else 1 | [
"def get_world_size(self):\n return self.WORLD_SIZE",
"def get_data_parallel_world_size():\n return torch.distributed.get_world_size(group=get_data_parallel_group())",
"def _get_data_parallel_world_size():\n global mpu\n if mpu is not None:\n return mpu.get_data_parallel_world_size()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Decorator that only runs the function on the process with rank 0. | def rank_zero_only(fn):
def wrapped(*args, **kwargs):
if rank() == 0:
return fn(*args, **kwargs)
return wrapped | [
"def rank_zero_fn(fn: Callable[..., TReturn]) -> Callable[..., Optional[TReturn]]:\n\n @wraps(fn)\n def wrapped_fn(*args: Any, **kwargs: Any) -> Optional[TReturn]:\n if get_global_rank() == 0:\n return fn(*args, **kwargs)\n return None\n\n return wrapped_fn",
"def parallel_functi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
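A usage sketch for the `rank_zero_only` decorator above; `log_metrics` is hypothetical, and `rank()` is assumed to be the module's own helper seen in the wrapper:

```python
@rank_zero_only
def log_metrics(step: int, loss: float) -> None:
    # Runs on the rank-0 process only; other ranks skip the call (return None)
    print(f"step {step}: loss {loss:.4f}")

log_metrics(100, 0.0317)  # prints once on rank 0, silent elsewhere
```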
Equivalent to print, but only runs on the process with rank 0. | def print_rank_zero(*args, **kwargs) -> None:
print(*args, **kwargs) | [
"def print_from_rank_zero(msg, output_channel='stdout'):\n if is_rank_zero():\n print(msg)",
"def r_print(*args):\n if comm.rank == 0:\n print('ROOT:', end=' ')\n for i in args:\n print(i, end=' ')\n # noinspection PyArgumentList\n print()",
"def print_on_node... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Identifies uncorrelated samples and updates the arrays of the reduced potential energy and dhdlt, retaining data entries of these samples only. 'sta' and 'fin' are the starting and final snapshot positions to be read; both are arrays of dimension K. | def uncorrelate(sta, fin, do_dhdl=False):
if not P.uncorr_threshold:
if P.software.title()=='Sire':
return dhdlt, nsnapshots, None
return dhdlt, nsnapshots, u_klt
u_kln = numpy.zeros([K,K,max(fin-sta)], numpy.float64) # u_kln[k,m,n] is the reduced potential energy of uncorrelated sample inde... | [
"def msms_det(temp_pick_dict, data_dict):\n t=time.time()\n # prep input\n data_list, temp_list, dt_ot_list = [], [], []\n for net_sta, [temp, norm_temp, dt_list] in temp_pick_dict.items():\n if net_sta not in data_dict: continue\n data, norm_data = data_dict[net_sta][1:3]\n data_li... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots the free energy change computed using the equilibrated snapshots between the proper target time frames (f_ts and r_ts) in both forward (data points are stored in F_df and F_ddf) and reverse (data points are stored in R_df and R_ddf) directions. | def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):
fig = pl.figure(figsize=(8,6))
ax = fig.add_subplot(111)
pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)
pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)
for dire in ['top', 'right']:
ax.spines[dire... | [
"def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots the free energy differences evaluated for each pair of adjacent states for all methods. The layout is approximately 'nb' bars per subplot. | def plotdFvsLambda2(nb=10):
x = numpy.arange(len(df_allk))
if len(x) < nb:
return
xs = numpy.array_split(x, len(x)/nb+1)
mnb = max([len(i) for i in xs])
fig = pl.figure(figsize = (8,6))
width = 1./(len(P.methods)+1)
elw = 30*width
colors = {'TI':'#C45AEC', 'TI-CU... | [
"def plot_comparison():\n fig,ax = plt.subplots(figsize=(7,3),ncols=2)\n fig.suptitle('TIM4 difference for distributed stiffness')\n for i in np.arange(200,1200,200):\n UIC60props = UIC60properties()\n tim4 = Timoshenko4(UIC60props,0.5)\n tim4el = Timoshenko4eb(UIC60props,Pad(K = i*10*... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plots the ave_dhdl array as a function of the lambda value. If (TI and TI-CUBIC in methods) plots the TI integration area and the TI-CUBIC interpolation curve, elif (only one of them in methods) plots the integration area of the method. | def plotTI():
min_dl = dlam[dlam != 0].min()
S = int(0.4/min_dl)
fig = pl.figure(figsize = (8,6))
ax = fig.add_subplot(1,1,1)
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bott... | [
"def plot(self, kind, det_thrsh=.999, band_conf=.95, det_conf=None,\n methods=None, title=True, filetype='png', alpha=.3, shade=True,\n scale=1., hide_data=False, legend=True, xlim=None, ylim=None,\n rollwindow=2e-26, rollcolor=None, band=True, bestonly=False,\n suffi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Searches for winning sequence in columns. | def check_columns(self, win: list) -> bool:
for row in range(self.size):
column = [self.tags[x][row] for x in range(self.size)]
for j in range(len(column) - len(win) + 1):
if win == column[j:j+self.win_condition]:
return True | [
"def check_columns():\n global game_still_going\n # Check if any of the rows have all the same value.\n column1 = board[0] == board[3] == board[6] != '_'\n column2 = board[1] == board[4] == board[7] != '_'\n column3 = board[2] == board[5] == board[8] != '_'\n # If any column does have a match, the... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check for a winning sequence in all possible diagonals that are at least as long as the winning condition. | def check_diagonals(self, win: list) -> bool:
for i in range(self.size - self.win_condition + 1):
# [x x ]
# [ x x ]
# [ x x]
# [ x]
diagonal = []
x = i
y = 0
for j in range(self.size - i):
... | [
"def __checkDiagonalsRL(self):\n for i in range(self.board.cols - self.winCondition, self.board.cols - 1):\n counter = [0, 0]\n for j in range(i + 1):\n position = (j * self.board.cols) + i - j\n counter = self.check(position, counter)\n if c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the board is fully packed with figures, i.e. whether the tags array is full. | def full_board(self) -> bool:
counter = 0
for column in self.tags:
if None in column:
counter += 1
return counter == 0 | [
"def is_full(board):\r\n return False",
"def is_full(board):\n return False",
"def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True",
"def is_full(self):\n print(\"Implem: c... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks for empty spaces in the tags list. If an empty space is found, its coordinates are packed into a tuple and appended to a new list. | def check_for_moves(self) -> list:
avail_moves = []
for x in range(self.size):
for y in range(self.size):
if self.tags[x][y] is None:
avail_moves.append((x, y))
return avail_moves | [
"def empty_space(array):\n global l\n global empty_coordinates_list\n #This is not needed anymore.\n # def x(x):\n # if x == None:\n # x = int(randint(0,30))\n # return x\n\n # def y(y):\n # if y == None:\n # y = int(randint(0,30))\n # return y\n #... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Function ties together the class's functionality and performs the AI's move. It recursively calls the minimax algorithm and, after finding the best move, adds the tag to the tags list. | def bot_handle_move(self) -> None:
best_value = -INFINITY # default best value for maximizing player (bot in this app is a maximizing player)
available_moves = self.check_for_moves() # for more info check the minimax algorithm theory
depth = int(1.4*self.size - self.win_... | [
"def move(self):\n value,move_Location,search_nodes=self._Min_Max_decision();\n\n #print(value)\n #print(move_Location)\n print(\"Minimax algorithm has generated: \"+str(search_nodes)+\" search nodes for deciding a move\")\n print(\"Computer has decided move:\" + str(move_Location... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
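The row above describes the standard minimax loop: enumerate available moves, recurse with alternating players, keep the best score for the maximizing bot. A self-contained 3x3 sketch of that flow; the board encoding and scoring are illustrative assumptions, not the row's classes:

```python
import math

LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
         (0, 3, 6), (1, 4, 7), (2, 5, 8),
         (0, 4, 8), (2, 4, 6)]

def winner(board):
    # Return 'X' or 'O' if a line is complete, else None
    for i, j, k in LINES:
        if board[i] is not None and board[i] == board[j] == board[k]:
            return board[i]
    return None

def minimax(board, maximizing):
    # Score a position from the bot's ('O', maximizing) point of view
    w = winner(board)
    if w == 'O':
        return 1
    if w == 'X':
        return -1
    if all(cell is not None for cell in board):
        return 0  # draw
    scores = []
    for i in range(9):
        if board[i] is None:
            board[i] = 'O' if maximizing else 'X'
            scores.append(minimax(board, not maximizing))
            board[i] = None  # undo the trial move
    return max(scores) if maximizing else min(scores)

def bot_move(board):
    # The bot maximizes: try every free cell, keep the best-scoring one
    best_value, best_move = -math.inf, None
    for i in range(9):
        if board[i] is None:
            board[i] = 'O'
            value = minimax(board, maximizing=False)
            board[i] = None
            if value > best_value:
                best_value, best_move = value, i
    board[best_move] = 'O'
    return best_move

board = ['X', 'O', 'X',
         'X', 'O', None,
         None, None, None]
print(bot_move(board))  # 7 -- completes the (1, 4, 7) column for 'O'
```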
Workqueue element site restriction check (same as workRestrictions) | def testPassesSiteRestriction(self):
# test element ala MonteCarlo
ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"], SiteBlacklist=["T1_US_FNAL"])
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CERN"))
... | [
"def testPassesSiteRestrictionLocationFlags(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
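The whitelist/blacklist rule these assertions exercise can be reproduced standalone; this is an illustrative reimplementation inferred from the test, not WMCore's actual passesSiteRestriction (the final assertion is an inferred expectation, not copied from the source):

```python
def passes_site_restriction(site, whitelist, blacklist):
    # Blacklist always rejects; a non-empty whitelist rejects anything it omits
    if site in blacklist:
        return False
    if whitelist and site not in whitelist:
        return False
    return True

whitelist, blacklist = ["T1_IT_CNAF", "T2_DE_DESY"], ["T1_US_FNAL"]
assert not passes_site_restriction("T1_US_FNAL", whitelist, blacklist)
assert not passes_site_restriction("T2_CH_CERN", whitelist, blacklist)
assert passes_site_restriction("T2_DE_DESY", whitelist, blacklist)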
Workqueue element site restriction check (same as workRestrictions) | def testPassesSiteRestrictionLocationFlags(self):
# test element ala MonteCarlo
ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"], SiteBlacklist=["T1_US_FNAL"])
self.assertFalse(ele.passesSiteRestriction("T1_US_FNAL"))
self.assertFalse(ele.passesSiteRestriction("T2_CH_CER... | [
"def testPassesSiteRestriction(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestriction(\"T2_C... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Workqueue element data location check, using the input and PU data location flags | def testPossibleSitesLocationFlags(self):
ele = WorkQueueElement(SiteWhitelist=["T1_IT_CNAF", "T2_DE_DESY"])
# test element with InputDataset and no location, but input flag on
ele['Inputs'] = {"/MY/BLOCK/NAME#73e99a52": []}
ele['NoInputUpdate'] = True
self.assertItemsEqual(poss... | [
"def testPassesSiteRestrictionLocationFlags(self):\n # test element ala MonteCarlo\n ele = WorkQueueElement(SiteWhitelist=[\"T1_IT_CNAF\", \"T2_DE_DESY\"], SiteBlacklist=[\"T1_US_FNAL\"])\n self.assertFalse(ele.passesSiteRestriction(\"T1_US_FNAL\"))\n self.assertFalse(ele.passesSiteRestr... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test run get_most_volatile() with stock prices from a file. | def test_run(filename='prices.csv'):
prices = pd.read_csv(filename, parse_dates=['date'])
print("Most volatile stock: {}".format(get_most_volatile(prices))) | [
"def get_most_expensive_cars(table):\n cur, con = database.connect_to_database()\n query = \"SELECT t.* FROM \" + table + \" t WHERE t.price = \\\n (select max(subt.price) from \" + table + \" subt);\"\n return pandas.read_sql_query(query, con)",
"def download_all():\r\n f = open('stock_symbols... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
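get_most_volatile itself is not shown; a plausible sketch, assuming the CSV has date, ticker, and price columns and defining volatility as the standard deviation of daily log returns per ticker:

```python
import numpy as np
import pandas as pd

def get_most_volatile(prices):
    # Pivot to one column per ticker, then compare log-return volatility
    pivot = prices.pivot(index='date', columns='ticker', values='price')
    log_returns = np.log(pivot / pivot.shift(1))
    return log_returns.std().idxmax()

prices = pd.DataFrame({
    'date': pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-03'] * 2),
    'ticker': ['A'] * 3 + ['B'] * 3,
    'price': [100.0, 101.0, 100.0, 100.0, 150.0, 60.0],
})
assert get_most_volatile(prices) == 'B'
```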
True if expires is not equal to orig_expires. | def updated(self):
return self.expires != self.orig_expires | [
"def test_expires(self):\n # We aren't bother going to test the actual time in expires, that\n # way lies pain with broken tests later.\n up = self.get(self.good_data)\n hdrs = dict(up.get_headers(1))\n lm = datetime(*utils.parsedate_tz(hdrs['Last-Modified'])[:7])\n exp = d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Called to move/remove one item. Returns True if the item was purged, False if it was moved to self.new_expiry. | def remove_one(self):
    item = self.expiry.pop(0)
    if item.updated:
        # Refreshed since it was queued: re-queue instead of purging
        self.new_expiry.append(item)
        return False
    del self.index[item.target]
    return True
"def consume(self, item, index):\n\n slot = self.holdables[index]\n\n # Can't really remove things from an empty slot...\n if slot is None:\n return False\n\n if slot.holds(item):\n self.holdables[index] = slot.decrement()\n return True\n\n return ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Purge expired and oldest entries from the cache. An entry is purged when it is older than TTL or when the total number of entries exceeds MAX_ASSOCS. | def purge(self):
if not self.index:
return
now = time()
while self.expiry[0].orig_expires <= now or len(self.index) > MAX_ASSOCS:
self.remove_one()
if not self.expiry:
if not self.index:
return
self.... | [
"def _clean_cache(self):\r\n query = _AppEngineUtilities_Cache.all()\r\n query.filter('timeout < ', datetime.datetime.now())\r\n results = query.fetch(self.max_hits_to_clean)\r\n db.delete(results)\r\n #for result in results:\r\n # result.delete()\r",
"def purge(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
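A runnable sketch tying purge to the remove_one/updated logic above; Entry and the MAX_ASSOCS value here are illustrative stand-ins for the module's own types and constants:

```python
from dataclasses import dataclass
from time import time

MAX_ASSOCS = 512   # illustrative cap; the real constant lives elsewhere in the module

@dataclass
class Entry:
    target: str
    expires: float
    orig_expires: float

def purge(index, expiry, new_expiry):
    # Drop entries past their original expiry, or trim when over MAX_ASSOCS;
    # entries refreshed since queuing are re-queued rather than purged.
    now = time()
    while expiry and (expiry[0].orig_expires <= now or len(index) > MAX_ASSOCS):
        item = expiry.pop(0)
        if item.expires != item.orig_expires:   # the "updated" check from above
            new_expiry.append(item)
        else:
            index.pop(item.target, None)

stale = Entry('a', expires=0.0, orig_expires=0.0)
fresh = Entry('b', expires=time() + 60, orig_expires=0.0)
index, expiry, new_expiry = {'a': stale, 'b': fresh}, [stale, fresh], []
purge(index, expiry, new_expiry)
assert 'a' not in index and new_expiry == [fresh]
```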
Get the common letters of two words | def get_common_letters(word1: str, word2: str) -> str:
common = ''
for x, y in zip(word1, word2):
if x == y:
common += x
return common | [
"def common_chars(string1, string2):\n \n common = Counter(string1.casefold()) & Counter(string2.casefold())\n return sum(common.values())",
"def num_common_letters(goal_word, guess):\n \"*** YOUR CODE HERE ***\"\n a=get_list(goal_word)\n b=get_list(guess)\n i=0\n num=0\n while i<len(a)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
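The zip pairing makes the comparison strictly positional; an equivalent one-liner with a usage example:

```python
def get_common_letters(word1: str, word2: str) -> str:
    # Only letters at the same index in both words count
    return ''.join(x for x, y in zip(word1, word2) if x == y)

assert get_common_letters("flower", "flight") == "fl"
assert get_common_letters("abc", "xyz") == ""
```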
Tests that filtering plans by metal level matches only Silver-level plans. | def test_filtering_plans_by_metal_level_matches_only_silver(self):
silver_plan_inputs = [
{
'plan_id': '05276NA2900195',
'state': 'MI',
'metal_level': 'Silver',
'rate': '283.39',
'rate_area': '1'
},
... | [
"def test_instrument_inventory_filtering():\n filt = 'GR150R'\n data = mm.instrument_inventory('niriss',\n add_filters={'filter': filt},\n return_data=True)\n\n filters = [row['filter'] for row in data['data']]\n\n assert all([i == filt... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
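A plausible shape for the filter under test — keep only plans whose metal_level is Silver. The function name and the Gold fixture are hypothetical; only the Silver record comes from the test above:

```python
def filter_silver_plans(plans):
    # Field names match the test fixtures
    return [plan for plan in plans if plan['metal_level'] == 'Silver']

plans = [
    {'plan_id': '05276NA2900195', 'state': 'MI', 'metal_level': 'Silver',
     'rate': '283.39', 'rate_area': '1'},
    {'plan_id': '99999NA0000001', 'state': 'MI', 'metal_level': 'Gold',
     'rate': '401.10', 'rate_area': '1'},   # hypothetical non-Silver record
]
assert [p['plan_id'] for p in filter_silver_plans(plans)] == ['05276NA2900195']
```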
Test that the zipcode data is cleaned properly and contains only unique rate areas. | def test_clean_zipcode_data_is_unique(self):
input = {
'11111': [('NY', '5')],
'22222': [('WI', '2')],
'33333': [('WI', '2'), ('NY', '5')],
'44444': [('WI', '2'), ('WI', '2')],
'55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],
'66666': [('... | [
"def test_zip_detail_bad(self):\n city, state = get_city_and_state('99990')\n self.assertEqual('', city)\n self.assertEqual('', state)",
"def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcod... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
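A hedged sketch of the cleaning step this test exercises — collapse each zipcode's (state, rate_area) pairs to unique values; the real function may differ in ordering or return type:

```python
def clean_zipcode_data(raw):
    # Deduplicate each zipcode's rate areas; sorting keeps the output stable
    return {zipcode: sorted(set(areas)) for zipcode, areas in raw.items()}

cleaned = clean_zipcode_data({'44444': [('WI', '2'), ('WI', '2')],
                              '33333': [('WI', '2'), ('NY', '5')]})
assert cleaned['44444'] == [('WI', '2')]
assert cleaned['33333'] == [('NY', '5'), ('WI', '2')]
```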
Tests that the plan rate data is cleaned properly and returned with sorted unique values for each rate area. | def test_clean_plan_rates_sorts_and_makes_data_unique(self):
input = {
('IN', '1'): [
'304.5',
'422.28',
'386.79',
'382.7',
'332.21',
'422.28',
'382.7'
],
('SD', '... | [
"def test_clean_zipcode_data_is_unique(self):\n\n input = {\n '11111': [('NY', '5')],\n '22222': [('WI', '2')],\n '33333': [('WI', '2'), ('NY', '5')],\n '44444': [('WI', '2'), ('WI', '2')],\n '55555': [('WI', '2'), ('WI', '2'), ('NY', '5')],\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
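A matching sketch for the rate-cleaning step — deduplicate and sort each area's rates. Sorting with key=float is an assumption; the fixture values happen to sort the same way lexically:

```python
def clean_plan_rates(raw):
    # Unique rates per area, sorted numerically
    return {area: sorted(set(rates), key=float) for area, rates in raw.items()}

cleaned = clean_plan_rates({('IN', '1'): ['304.5', '422.28', '386.79', '382.7',
                                          '332.21', '422.28', '382.7']})
assert cleaned[('IN', '1')] == ['304.5', '332.21', '382.7', '386.79', '422.28']
```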
Tests that a zipcode with a single matching rate area is properly mapped to a rate. | def test_zipcode_is_successfully_mapped(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'11111': [('NY', '5')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = '294.87'
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
... | [
"def test_only_five_digit_zipcodes_match(self):\n\n incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']\n non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]\n cleaned_zipcode_data_input = {'11111': [('NY', '5')]}\n cleaned_plan_data_input = {('NY', '... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
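The expected value makes the rule visible: '294.87' is the second entry of the sorted unique rates, i.e. the second-lowest-cost silver plan. A hypothetical reimplementation inferred from this and the following tests (missing or ambiguous rate areas yield ''; the explicit 5-digit format validation exercised later is omitted here):

```python
def retrieve_slcsp_for_zipcode(zipcode, zipcode_data, plan_rates):
    areas = zipcode_data.get(zipcode, [])
    if len(areas) != 1:              # missing or ambiguous rate area -> no answer
        return ''
    rates = plan_rates.get(areas[0], [])
    if len(rates) < 2:
        return ''
    return rates[1]                  # rates arrive sorted & unique: second lowest

assert retrieve_slcsp_for_zipcode(
    '11111',
    {'11111': [('NY', '5')]},
    {('NY', '5'): ['294.44', '294.87', '339.6']},
) == '294.87'
```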
Tests that if no matching rate is found for a zipcode, an empty string is returned instead per the exercise instructions. | def test_no_rate_found_is_empty_string(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'22222': [('NH', '12')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
... | [
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that a rate is not returned when a zipcode is given in a format that is not 5 digits. | def test_only_five_digit_zipcodes_match(self):
incorrect_zipcodes = ['1', 'abcdef', '123ab', '12345-6789', 'abc-def']
non_string_zipcodes = [1, [123, 143], {'test': '123'}, 344.234, True]
cleaned_zipcode_data_input = {'11111': [('NY', '5')]}
cleaned_plan_data_input = {('NY', '5'): ['294... | [
"def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False",
"def validate_zipcode(zipcode):\n return re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that an empty string is returned if no plan areas exist for a given zipcode. | def test_empty_string_returned_if_no_plan_areas_exist(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'11111': []}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
slcsp_rate = retrieve_slcsp_for_zipcode(
zipcode,
... | [
"def test_empty_string_returned_if_too_many_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Tests that an empty string is returned if more than one plan area exists for a given zipcode. | def test_empty_string_returned_if_too_many_plan_areas_exist(self):
zipcode = '11111'
cleaned_zipcode_data_input = {'11111': [('WI', '9'), ('NY', '5')]}
cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}
expected = ''
slcsp_rate = retrieve_slcsp_for_zipcode(... | [
"def test_empty_string_returned_if_no_plan_areas_exist(self):\n\n zipcode = '11111'\n cleaned_zipcode_data_input = {'11111': []}\n cleaned_plan_data_input = {('NY', '5'): ['294.44', '294.87', '339.6']}\n\n expected = ''\n\n slcsp_rate = retrieve_slcsp_for_zipcode(\n zip... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |