query | document | negatives | metadata |
|---|---|---|---|
Accepts a list of items and a rectangular distance DataTable. The item indexes match those of the distance_table dims. We will sort the items s.t. for each item the next one is the nearest one not already chosen. | def greedy_distance_sort(distance_table, items_to_sort):
ret = [items_to_sort[0]]
while len(ret) < len(distance_table.dims):
# find item nearest to the last one in the list
last_item_index = items_to_sort.index(ret[-1])
distances = distance_table.get_points(distance_table.dims[last_item_index])
... | [
"def match_ref_items(ref_items, items, diff_fun, tol):\n assert len(items) == len(ref_items), 'items and ref_items must be same len'\n assert type(items) is np.ndarray, 'items must be a numpy array'\n assert type(ref_items) is np.ndarray, 'ref_items must be a numpy array'\n\n diff = diff_fun(ref_items, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
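A minimal, self-contained sketch of the greedy nearest-neighbor ordering described above, with a plain 2-D list standing in for the DataTable API (which is only partially shown); the function name and sample data are illustrative.

```python
# Hedged sketch of the greedy nearest-neighbor sort above; distances[i][j]
# is the distance between items[i] and items[j].
def greedy_distance_sort_sketch(distances, items):
    ret = [items[0]]
    remaining = set(range(1, len(items)))
    while remaining:
        last = items.index(ret[-1])
        # pick the not-yet-chosen item nearest to the last chosen one
        nearest = min(remaining, key=lambda j: distances[last][j])
        ret.append(items[nearest])
        remaining.remove(nearest)
    return ret

# Illustrative data: 'c' is closest to 'a', so it is chosen second.
d = [[0, 5, 1],
     [5, 0, 4],
     [1, 4, 0]]
print(greedy_distance_sort_sketch(d, ['a', 'b', 'c']))  # ['a', 'c', 'b']
```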
Returns the empty list of outputs for this module. | def get_outputs(self):
return [] | [
"def outputs(self) -> List[Node]:\n return self._outputs",
"def get(cls) -> typing.List[Output]:\n return cls._outputs",
"def list_output_modules(self):\n try:\n return self._send_command(self._client.list_output_modules)\n except AttributeError:\n return ()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The view method of this module draws the control panel and the histograms. We need at least one input to be able to draw something. | def view(self, tables):
if not tables:
return View(self, 'No tables to show.')
self.widgets.color.guess_or_remember(('histogram text', tables), ['name'])
self.widgets.text.guess_or_remember(('histogram colors', tables), ['name'])
self.widgets.shift.guess_or_remember(('histogram shift', tables... | [
"def draw(self, view):\n super().draw()",
"def show_plot(self, e):\n plots = plotting.Input(self.name)\n #if user pick in Plot Units section 'Rate' and 'Spectrum', plot:\n if self.var.get() == 'Rate':\n if e == 'spec':\n plots.plot_spectrum_rate()\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds an externally defined loss to collection of losses. | def add_loss(loss):
tf.add_to_collection(LOSSES, loss) | [
"def add_loss(self, losses, inputs=None):\n if context.in_eager_mode():\n raise RuntimeError('Layer.add_loss not supported in Eager mode.')\n losses = _to_list(losses)\n if not losses:\n return\n self._losses += losses\n if inputs is not None:\n inputs = _to_list(inputs)\n if not in... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a tensor whose value represents the total loss. Notice that the function adds the given losses to the regularization losses. | def get_total_loss(add_regularization_losses=True, name="total_loss"):
losses = get_losses()
if add_regularization_losses:
losses += get_regularization_losses()
return tf.add_n(losses, name=name) | [
"def _compute_regular_loss(self):\n regular_loss = self._l2_loss() + self._l1_loss() + self._cross_l_loss()\n return tf.reduce_sum(regular_loss)",
"def get_total_loss(splits=[''], collection='outputs', with_summaries=True, verbose=0): \n losses = []\n for split in splits:\n full_lo... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return logp function that accepts dictionary with unused variables as input | def loose_logp(model, vars):
return model.compile_fn(
model.logp(vars=vars, sum=False),
inputs=model.value_vars,
on_unused_input="ignore",
) | [
"def log_op():\n prepare_logs(scalars)\n return {\"scalars\": scalars}",
"def log_wealth_optim(f, pnl):\n return -np.mean(np.log(1 + f * pnl))",
"def log_p_m_x(log_Bs, myTheta):\n print(\"TODO\")",
"def log_gap(function_name):\n def return_function(*args):\n if isinstance... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Some sanity checks on split line (that time and clk are integers). | def check_cycle_line(self, line):
try: time = int(line[0])
except (ValueError, IndexError): self.logger.die('first col (time) in sim output not int')
try: clk = int(line[1], 2)
except (ValueError, IndexError): self.logger.die('second col (clk) in sim output not bin')
self.logger.info('emulator pass, time=%s, clk=%s' % (tim... | [
"def _parseTimeLine(lineno, line):\n m = re.match(RE_ALL, line)\n if m is None:\n return InvalidTimeTrackingEntry(\n lineno=lineno,\n line=line,\n errorMessage=\"Bad format\")\n else:\n trigram = m.group('trigram')\n\n dt = m.group('date')\n try:... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parse a message response sent by the Redis datastore on a subscribed channel. | def parse_response(self):
return self._subscription.parse_response() | [
"def channelMessageReceived(self, channel, message, subchannel):",
"def __unpack__(self):\n for item in self.pubsub.listen():\n message = item.get('data')\n if item['type'] == 'message':\n yield message",
"def _parse(self):\n\t\t\n\t\tself.reply_msg = MessageHandler.f... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the file descriptor used for passing to the select call when listening on the message queue. | def get_file_descriptor(self):
return self._subscription.connection and self._subscription.connection._sock.fileno() | [
"def get_channel_handler_by_fd(self, sock_fileno):\r\n with self.channel_fds_lock:\r\n if self.channel_fds.get(sock_fileno):\r\n return self.channel_fds[sock_fileno].handler\r\n return None",
"def get_connection_poller():\r\n if hasattr(select, \"epoll\"):\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Begins a new game by taking in a list of territories (ters) and a list of civilizations (civs). Plays out a series of turns and ends the game when one player reaches the victory-point target. | def New_Game(board, players, victory):
for civ in players:
print(civ.start)
for home in civ.start:
for ter in board:
if home==ter:
ter.owner=civ
turn = 1
return take_turn(board,players,turn,victory,orders=[]) | [
"def start(self):\r\n adventure = input(\"Would you like to go to the beach or the forest?\")\r\n if adventure == \"beach\":\r\n Beach(self.player).start()\r\n elif adventure == \"forest\":\r\n Forest(self.player).start()\r\n else:\r\n print(\"Hmmm, I don... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes in an old board and a list of 3-term tuples that describe player investments. It then calculates which player had the greatest investment in each territory and changes owners. Finally, it counts up the values of each territory and supplies the owner with the appropriate points. | def eval_orders(board,orders):
contests = []
orders = [player for player in orders if player] #drop players with no orders; removing from a list while iterating over it skips entries
for ter in board:
for player in orders:
for order in player:
territory=order[0]
contender = order[2]
... | [
"def formWorstLineup(team):\n players = list(map(lambda Batter: Batter.transitionMatrixSimple(), team.batters))\n averagePlayer = team.averagePlayer().transitionMatrixSimple()\n availablePositions = set(range(9))\n worstLineup = [team.averagePlayer()] * 9\n for bestRemaining in range(4):\n wor... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates Relative Synonymous Codon Usage (RSCU) weights: w_ij = RSCU_ij / RSCU_i,max | def compute_rscu_weights(df_codcount):
aa_groups = df_codcount.groupby('Amino_Acid')
aa = df_codcount['Amino_Acid'].unique() #make a list of all amino acids to iterate over
df_list = []
for a in aa:
d=aa_groups.get_group(a)
d['RSCU'] = d['Obs_Freq'].values/d['Obs_Freq'].mean() #o... | [
"def ricci_scalar(self):\n R_0101 = self.covariant_riemann()\n q_inv = self.induced_metric(inverse=True)\n return 2 * R_0101 * (q_inv[0,0]*q_inv[1,1] - q_inv[0,1]**2)",
"def rs(self):\n return self.rads/self.rad_schw",
"def shear_Reuss(self):\r\n s = self.Sij\r\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
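A hedged pandas sketch of the same RSCU computation: RSCU_ij is a codon's observed frequency divided by the mean over its synonymous codons, and the weight is RSCU_ij / RSCU_i,max. The groupby().transform() form replaces the per-amino-acid loop in the snippet; the Amino_Acid/Obs_Freq/RSCU column names follow the snippet, the Weight column name and sample data are mine.

```python
import pandas as pd

def compute_rscu_weights_sketch(df_codcount):
    df = df_codcount.copy()
    # RSCU_ij: observed frequency over the mean of the synonymous group
    df['RSCU'] = df['Obs_Freq'] / df.groupby('Amino_Acid')['Obs_Freq'].transform('mean')
    # w_ij = RSCU_ij / RSCU_i,max within each amino acid group
    df['Weight'] = df['RSCU'] / df.groupby('Amino_Acid')['RSCU'].transform('max')
    return df

codons = pd.DataFrame({
    'Codon': ['GCT', 'GCC', 'AAA', 'AAG'],
    'Amino_Acid': ['Ala', 'Ala', 'Lys', 'Lys'],
    'Obs_Freq': [30, 10, 25, 75],
})
print(compute_rscu_weights_sketch(codons))  # GCT: RSCU=1.5, Weight=1.0
```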
Constructs a version of 'fn' that applies to smaller batches. | def batchify(fn, chunk):
if chunk is None:
return fn
def ret(inputs):
return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0)
return ret | [
"def batch_apply(fn, inputs, batch_size):\n\n batched_inputs, pad_size = batchify(inputs, batch_size)\n results = np.concatenate([fn(batch) for batch in batched_inputs])\n if pad_size:\n results = results[:-pad_size]\n return results",
"def batch_apply_func_to_df(df, func, batch_size=250):\n in_dfs = [d... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
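A short usage sketch for `batchify` as defined above; the linear layer is just a stand-in for any callable that maps (N, d_in) to (N, d_out) row by row.

```python
import torch

net = torch.nn.Linear(8, 4)               # stand-in for any per-row function
net_chunked = batchify(net, chunk=1024)    # batchify as defined above
out = net_chunked(torch.randn(5000, 8))    # ceil(5000/1024) = 5 forward passes
print(out.shape)                           # torch.Size([5000, 4])
```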
Render rays in smaller minibatches to avoid OOM. | def batchify_rays(num_frames, rays_flat, chunk=1024 * 32, **kwargs):
all_ret = {}
for i in range(0, rays_flat.shape[0], chunk):
ret = render_rays(num_frames, rays_flat[i : i + chunk], **kwargs)
for k in ret:
if k not in all_ret:
all_ret[k] = []
all_ret[k].... | [
"def startRayTracing(self):\n\n final = numpy.zeros((self.imageplane.getHeight(), self.imageplane.getWidth(), 3))\n\n cpu_count = multiprocessing.cpu_count()\n\n stepSize = int(self.imageplane.getHeight()/cpu_count)\n\n processes = []\n managers = []\n workers = []\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Write a DataMap to a location in the data directory. If root is a string, then the saving is restricted to what's inside that key. The result is flattened such that the root field doesn't exist in the output. If root is a data map, then fields also within the base map are omitted If fields are given, only fields within... | def save_data_map(self, location, data_map, *, root=None, fields=None, lang='en'):
location = self.get_data_path(location)
result = {}
if not root and not fields:
raise Exception("Either a root (string or dictionary) or a list of fields " +
"must be given when persi... | [
"def save_split_data_map(self, location, base_map, data_map, key_field, lang='en'):\n location = self.get_data_path(location)\n\n # Split items into buckets separated by the key field\n split_data = collections.OrderedDict()\n for entry in data_map.values():\n base_entry = bas... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Writes a DataMap to a folder as separated json files. The split occurs on the value of key_field. Fields that exist in the base map are not copied to the data maps | def save_split_data_map(self, location, base_map, data_map, key_field, lang='en'):
location = self.get_data_path(location)
# Split items into buckets separated by the key field
split_data = collections.OrderedDict()
for entry in data_map.values():
base_entry = base_map[entry... | [
"def writeSplitDatasetToJson(dataset:typings.dataset,split:bool): \n for index,key in enumerate(dataset):\n os.makedirs('dataset',exist_ok=True)\n if (key != \"__status\" and key!='__invalid'):\n os.makedirs('dataset/{}'.format(key), exist_ok=True)\n os.makedirs('dataset/dataset_txt/{}'.format(key... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read subnets from config file or if not set try to get them from default VPC | def _provide_subnets(self):
if not self.cfg.aws.subnet:
logging.debug("Subnets are not provided")
# Try to get subnet from default VPC or VPC set in aws-vpc config parameter
vpc = self._provide_vpc()
if vpc:
subnet_list = vpc.subnets.all()
... | [
"def load_excluded_subnets():\n \n control_networks = list()\n with open('/etc/map/testbed-control-subnets.txt') as f:\n for line in f:\n subnet = ipaddress.ip_network(line.strip())\n control_networks.append(subnet)\n\n return control_networks",
"def test_vmware_service_re... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a boto3 Vpc object for the configured VPC or, if none is configured, the default VPC for the configured region; return None if neither is available. | def _provide_vpc(self):
if self.vpc_id:
if self.vpc_id.lower() == 'none':
return None
return self.ec2.Vpc(self.vpc_id)
vpcs = list(self.ec2.vpcs.filter(Filters=[{'Name':'isDefault', 'Values':['true']}]))
if len(vpcs) > 0:
logging.debug(f'Defaul... | [
"def get_vpc_info(self):\n if not self.base['cluster'].get('vpc'):\n res = self.ec2.describe_vpcs()\n self.base['cluster']['vpc'] = [vpc['VpcId'] for vpc in res['Vpcs'] if vpc['IsDefault']][0]\n logger.info('No vpc selected, using default vpc')\n logger.info(self.b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find role for AWS ECS instances. | def _get_instance_role(self) -> str:
# if instance role is set in config, return it
if self.cfg.aws.instance_role:
logging.debug(f'Instance role provided from config: {self.cfg.aws.instance_role}')
return self.cfg.aws.instance_role
# check if ecsInstanceRole is present ... | [
"def find_role(cls, keyword):\n return _CompilerRole.find(keyword)",
"def cmd_role_get(self, args):\n role_id = args[0]\n self._get_obj(role_id, 'role')",
"def cloudwatch_logs_role_arn(self) -> str:\n return pulumi.get(self, \"cloudwatch_logs_role_arn\")",
"def _get_elastic_ip_node... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find AWS Batch service role. | def _get_batch_service_role(self):
# if batch service role is set in config, return it
if self.cfg.aws.batch_service_role:
logging.debug(f'Batch service role provided from config: {self.cfg.aws.batch_service_role}')
return self.cfg.aws.batch_service_role
# check if ecsIn... | [
"def _get_job_role(self):\n if self.cfg.aws.job_role:\n job_role = self.cfg.aws.job_role\n logging.debug(f'Using Batch job role provided from config: {job_role}')\n return job_role\n else:\n logging.debug('Batch job role will be created by cloudformation')\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find AWS Batch job role. | def _get_job_role(self):
if self.cfg.aws.job_role:
job_role = self.cfg.aws.job_role
logging.debug(f'Using Batch job role provided from config: {job_role}')
return job_role
else:
logging.debug('Batch job role will be created by cloudformation')
... | [
"def _get_batch_service_role(self):\n # if batch service role is set in config, return it\n if self.cfg.aws.batch_service_role:\n logging.debug(f'Batch service role provided from config: {self.cfg.aws.batch_service_role}')\n return self.cfg.aws.batch_service_role\n\n # che... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Find AWS EC2 Spot Fleet role. | def _get_spot_fleet_role(self):
if self.cfg.aws.spot_fleet_role:
role = self.cfg.aws.spot_fleet_role
logging.debug(f'Using Spot Fleet role provided from config: {role}')
return role
else:
logging.debug('Spot Fleet role will be created by cloudformation')
... | [
"def find_role(cls, keyword):\n return _CompilerRole.find(keyword)",
"def _get_instance_role(self) -> str:\n\n # if instance role is set in config, return it\n if self.cfg.aws.instance_role:\n logging.debug(f'Instance role provided from config: {self.cfg.aws.instance_role}')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a tuple of BLAST database basename, path (if applicable), and label suitable for job name. Gets user provided database from configuration. For custom database finds basename from full path, and provides correct path for db retrieval. For standard database the basename is the only value provided by the user, and... | def _get_blastdb_info(self) -> Tuple[str, str, str]:
db = self.cfg.blast.db
db_path = 'None'
if db.startswith('s3://'):
#TODO: support tar.gz database
bname, key = parse_bucket_name_key(db)
if not self.dry_run:
try:
bucket =... | [
"def get_database_uri(self, label: str = \"default\") -> str:\n return self.config[\"databases\"][label]",
"def guess_database(args):\n return _guess_database_file(args.gtf, args.database)",
"def get_db_name(self):\n return self.config.get(\"db\", \"name\")",
"def _getDBName(self):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a list of batch job ids | def get_job_ids(self) -> List[str]:
# we can only query for job ids by jobs states which can change
# between calls, so order in which job states are processed matters
ids = defaultdict(int)
logging.debug(f'Retrieving job IDs from job queue {self.job_queue_name}')
for sta... | [
"def job_ids(self):\n return [elem[\"id\"] for elem in self.all()]",
"def get_job_ids(self, offset=0, length=-1):\n start = offset\n if length >= 0:\n end = offset + (length - 1)\n else:\n end = length\n return [as_text(job_id) for job_id in\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save batch job ids in a metadata file in S3 | def upload_job_ids(self) -> None:
bucket_name, key = parse_bucket_name_key(f'{self.results_bucket}/{ELB_METADATA_DIR}/{ELB_AWS_JOB_IDS}')
bucket = self.s3.Bucket(bucket_name)
bucket.put_object(Body=json.dumps(self.job_ids).encode(), Key=key) | [
"def put_s3_batch(data, bucket, prefix):\n # Pickle data\n data = pickle.dumps(data)\n\n # s3 setup\n s3 = boto3.resource(\"s3\")\n\n # timestamp = str(time.time()).replace('.', '')\n filename = f\"{prefix}.pickle\"\n obj = s3.Object(bucket, filename)\n obj.put(Body=data)\n\n return filen... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Save query length in a metadata file in S3 | def upload_query_length(self, query_length: int) -> None:
if self.dry_run: return
bucket_name, key = parse_bucket_name_key(f'{self.results_bucket}/{ELB_METADATA_DIR}/{ELB_AWS_QUERY_LENGTH}')
bucket = self.s3.Bucket(bucket_name)
bucket.put_object(Body=str(query_length).encode(), Key=key) | [
"def _save(self, s3_prefix):\n bucket_name, prefix = split_s3_path(s3_prefix)\n bucket = self.s3_conn.get_bucket(bucket_name)\n self._compute_percentages()\n self.stats['last_updated'] = datetime.now().isoformat()\n key = boto.s3.key.Key(\n bucket=bucket,\n n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve the list of AWS Batch job IDs from AWS. First it tries to get them from S3; if that isn't available, it gets the data from the AWS Batch APIs. | def _load_job_ids_from_aws(self):
with NamedTemporaryFile() as tmp:
bucket_name, key = parse_bucket_name_key(os.path.join(self.results_bucket, ELB_METADATA_DIR, ELB_AWS_JOB_IDS))
bucket = self.s3.Bucket(bucket_name)
try:
bucket.download_file(key, tmp.name)
... | [
"def get_job_ids(self) -> List[str]:\n # we can only query for job ids by jobs states which can change\n # between calls, so order in which job states are processed matters\n ids = defaultdict(int) \n logging.debug(f'Retrieving job IDs from job queue {self.job_queue_name}')\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Removes ancillary data from the end user's result bucket | def _remove_ancillary_data(self, bucket_prefix: str) -> None:
bname, _ = parse_bucket_name_key(self.results_bucket)
if not self.dry_run:
s3_bucket = self.s3.Bucket(bname)
s3_bucket.objects.filter(Prefix=bucket_prefix).delete()
else:
logging.debug(f'dry-run: wo... | [
"def clear_report_results(self):",
"def delete(self,*args, **kwargs):\n for item in self.get_resultitem_dict().values():\n item.delete()\n super(Result, self).delete(*args, **kwargs)",
"def clear(self):\n del self.results\n self.results = list()",
"def remove_data(self, ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Iterate over cloudformation stack events and extract error messages for failed resource creation or deletion. Cloudformation stack object must already be initialized. | def _get_cloudformation_errors(self) -> List[str]:
# cloudformation stack must be initialized
assert self.cf_stack
messages = []
for event in self.cf_stack.events.all():
if event.resource_status == 'CREATE_FAILED' or \
event.resource_status == 'DELETE_FAIL... | [
"def get_errors(exc):\n while True:\n yield exc\n exc = getattr(exc, \"__cause__\") or getattr(exc, \"__context__\")\n if exc is None:\n return",
"def _diff_stack(self, stack: Stack, **_: Any) -> Status:\n if self.cancel.wait(0):\n return INTERRUPTED\n\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This constructor assigns the Car object to the car attribute and the parking slot number to the parking_slot attribute of the class. | def __init__(self, car, parking_slot):
self.car = car
self.parking_slot = parking_slot | [
"def __init__(self, name, length, location, orientation):\r\n # Note that this function is required in your Car implementation.\r\n # However, is not part of the API for general car types.\r\n self.__name = name\r\n self.__length = length\r\n self.__location = location\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is used to return the vehicle registration number of the car. | def get_vehicle_registration_number(self):
return self.car.get_registration_number() | [
"def get_registration_number(self):\n return self._registration_number",
"def car_slot_no_for_registration_no(self, reg_id):\n\n if reg_id not in self.cars_parked:\n return \"Not found\"\n\n slot_number = 0\n\n for node in self.slots_occupied:\n if not node:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is used to return the age of the driver, driving the car. | def get_driver_age(self):
return self.car.get_driver_age() | [
"def get_age(self):\n if self.basics['death']:\n return self.basics['death'] - self.basics['birth']\n else:\n return datetime.datetime.now().year - self.basics['birth']",
"def age(self):\n age = _f.getage(self.pvec)\n return age",
"def age(self) -> int:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is used to return the parking slot number in which the car is parked. | def get_parking_slot(self):
return self.parking_slot | [
"def car_slot_no_for_registration_no(self, reg_id):\n\n if reg_id not in self.cars_parked:\n return \"Not found\"\n\n slot_number = 0\n\n for node in self.slots_occupied:\n if not node:\n continue\n if node.reg_id == reg_id:\n slot_... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
nc: a neo4j_connect object; start: start stage (short_form_id string); end: end stage (short_form_id string). Returns a list of intermediate stages. | def expand_stage_range(nc, start, end):
stages = [start, end]
statements = [
'MATCH p=shortestPath((s:FBDV {short_form:"%s"})<-[:immediately_preceded_by*]-(e:FBDV {short_form:"%s"})) RETURN extract(x IN nodes(p) | x.short_form)' % (start, end)]
r = nc.commit_list(statements)
stages.append(r[0]['... | [
"def get_stages(self):\n def resolve_intersections(stage):\n \"\"\"Removes actions from a stage that creates\n conflict between the selected stage candidates.\"\"\"\n actions_to_remove = set()\n for a in stage:\n if self.action[a].get('next', None):\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Plot filled region between `y1` and `y2`. This function works exactly the same as matplotlib's fill_between, except that it also plots a proxy artist (specifically, a rectangle of 0 size) so that it can be added to a legend. | def fill_between(x, y1, y2=0, ax=None, **kwargs):
ax = ax if ax is not None else plt.gca()
ax.fill_between(x, y1, y2, **kwargs)
p = plt.Rectangle((0, 0), 0, 0, **kwargs)
ax.add_patch(p)
return p | [
"def fill_between(x1, y1, x2, y2, x_axis_label=None, y_axis_label=None,\n x_axis_type='linear', y_axis_type='linear',\n title=None, plot_height=300, plot_width=450,\n fill_color='#1f77b4', line_color='#1f77b4', show_line=True,\n line_width=1, fill_alph... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
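A usage sketch: the zero-size rectangle returned above acts as a proxy handle, so the shaded band can be listed in a legend (older matplotlib versions did not create a legend handle for fill_between itself). The data here is illustrative.

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 100)
# fill_between as defined above; kwargs go to both the band and the proxy
p = fill_between(x, np.sin(x) - 0.2, np.sin(x) + 0.2, color='C0', alpha=0.3)
plt.legend([p], ['sin(x) band'])  # the proxy rectangle supplies the handle
plt.show()
```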
Function to save the CSV file uploaded via the HTML page to MySQL (read into a pandas DataFrame first). | def upload():
if request.method == 'POST':
files = request.files['file']
# files.save('test.csv')
data = pd.read_csv(files)
obj = insert_to_mysql.MysqlIo()
msg = obj.write_to_db(data)
return render_template('message.html', msg=msg) | [
"def _saveCSV( self ):",
"def upload_output(df_output):",
"def filedownload(df):\r\n\r\n csv = df.to_csv(index=False)\r\n b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions\r\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"model_performance.csv\">Download CSV Fi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method will check whether going to the next or previous month will change the year, and returns the corrected month and year. Parameters: month -- the month to check; year -- the year you are checking. Returns: (month, year) -- the corrected value of each. | def monthConversion(month,year):
if(month > 12):
year+=1
month = 1
if(month < 1):
year-=1
month = 12
return month,year | [
"def prev_month(month, year):\n prev_month = (month - 1) % 13\n prev_year = year\n if prev_month == 0:\n prev_month = 12\n prev_year -= 1\n return (prev_month, prev_year)",
"def _check_year(self):\n if (self.last_team_update[0] < date.today().year):\n self.last_team_upd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
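Worked examples of the wrap-around behavior described above:

```python
print(monthConversion(13, 2023))  # (1, 2024)  - month after December
print(monthConversion(0, 2023))   # (12, 2022) - month before January
print(monthConversion(6, 2023))   # (6, 2023)  - unchanged within the year
```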
Calculate rapidity as a function of pseudorapidity, transverse momentum and mass | def rapidity(eta, pt, m):
return np.log((np.sqrt(m**2 + pt**2 * np.cosh(eta)**2) + pt * np.sinh(eta)) / np.sqrt(m**2 + pt**2)) | [
"def momentum_resolution(p) :\n return 0.005",
"def _get_continuum_mass_estimate(self):\n\n pass",
"def calculate_speed_enhancement_factor(particle_mass, molecular_mass):\n\n return ((particle_mass + molecular_mass) / particle_mass) ** 0.5",
"def calc_motive(self):\n # For brevity, \"dimensionle... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
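Written out, the one-liner implements the standard kinematic relation, using E = sqrt(m^2 + p_T^2 cosh^2 eta) (since |p| = p_T cosh eta) and p_z = p_T sinh eta:

```latex
y = \ln\!\left(\frac{\sqrt{m^{2} + p_T^{2}\cosh^{2}\eta} \;+\; p_T\sinh\eta}{\sqrt{m^{2} + p_T^{2}}}\right)
```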
Configures all the main window's menus | def _setupMenues(self):
self._menues["file"] = qt.QPopupMenu(self)
self.menuBar().insertItem('&File',self._menues["file"])
self._actions["exit-faraday"].addTo(self._menues["file"]);
self.menuBar().insertSeparator()
self._menues["shell"] = qt.QPopupMenu(self)
se... | [
"def _initMenu(self):\n #--- Menu Project ---#\n self.mi_newProject.setShortcut(\"Ctrl+Shift+N\")\n self.mi_newProject.triggered.connect(self.on_miNewProject)\n self.mi_loadProject.setShortcut(\"Ctrl+Shift+L\")\n self.mi_loadProject.triggered.connect(self.on_miLoadProject)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets up the main toolbar | def _setupMainToolbar(self):
self._actions["new_shell"].addTo(self.main_toolbar)
self._actions["toggle-hosttree"].addTo(self.main_toolbar)
self._actions["toggle-logconsole"].addTo(self.main_toolbar)
self._actions["maximize-shell"].addTo(self.main_toolbar)
self._actions["clear-ho... | [
"def create_toolbars(self):\n self.create_tools()\n self.plugin_manager.create_toolbars()",
"def __init__(self, *args):\n _aui.AuiToolBar_swiginit(self,_aui.new_AuiToolBar(*args))",
"def create_tool_bar(self):\n edit_tool_bar = self.addToolBar('Edit')\n\n tool_group = QActionG... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method is to be able to handle custom events in order to show custom dialogs or pop ups | def customEvent(self, event):
if event.type() == EXCEPTION_ID:
self.showExceptionDialog(event.text, event.callback, event.exception_objects)
elif event.type() == SHOWDIALOG_ID:
self.showSimpleDialog(event.text, event.level)
elif event.type() == SHOWPOPUP_ID:
... | [
"def event(self,ev):\n if ev.type()==QtCore.QEvent.User:\n ErrorDialog.postError(ev.error)\n return True\n return QtWidgets.QWidget.event(self,ev)",
"def showWindow(self, sender):",
"def onOk(self, ev):\n self.EndModal(wx.ID_OK)",
"def OnAbout(self, event):\n\t\tdial... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
fetch the trading fees for a market | async def fetch_trading_fee(self, symbol: str, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.privateGetFeeInfo(self.extend(request, params))
#
# {
# "ma... | [
"async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.privateGetFees(params)\n #\n # {\n # \"maker_fee_rate\": \"0.0050\",\n # \"taker_fee_rate\": \"0.0050\",\n # \"usd_volume\": \"43806.92\"\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all the metadata of the given type associated with the parent batch. | def get_child_batch_metadata(parent_batch_id, metadata_type):
response = batch_execution_metadata_table.query(
IndexName="ParentBatchIdIndex",
KeyConditionExpression=Key(Attributes.PARENT_BATCH_ID).eq(parent_batch_id),
)
items = []
for item in response["Items"]:
if item[Attribut... | [
"def get_child_batch_metadata_all(\n parent_batch_id,\n):\n response = batch_execution_metadata_table.query(\n IndexName=\"ParentBatchIdIndex\",\n KeyConditionExpression=Key(Attributes.PARENT_BATCH_ID).eq(parent_batch_id),\n )\n\n return response[\"Items\"]",
"def metadata(self):\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches a batch execution metadata record by its batch_id. | def get_batch_metadata(batch_id):
response = batch_execution_metadata_table.get_item(
Key={
Attributes.BATCH_ID: batch_id,
},
)
return response["Item"] if "Item" in response else None | [
"def get_child_batch_metadata_all(\n parent_batch_id,\n):\n response = batch_execution_metadata_table.query(\n IndexName=\"ParentBatchIdIndex\",\n KeyConditionExpression=Key(Attributes.PARENT_BATCH_ID).eq(parent_batch_id),\n )\n\n return response[\"Items\"]",
"def get_execution(self, exe... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all metadata associated with parent batch without filtering | def get_child_batch_metadata_all(
parent_batch_id,
):
response = batch_execution_metadata_table.query(
IndexName="ParentBatchIdIndex",
KeyConditionExpression=Key(Attributes.PARENT_BATCH_ID).eq(parent_batch_id),
)
return response["Items"] | [
"def get_child_batch_metadata(parent_batch_id, metadata_type):\n response = batch_execution_metadata_table.query(\n IndexName=\"ParentBatchIdIndex\",\n KeyConditionExpression=Key(Attributes.PARENT_BATCH_ID).eq(parent_batch_id),\n )\n\n items = []\n for item in response[\"Items\"]:\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the status of a given batch_execution_job_id | def update_batch_status(
batch_execution_job_id,
status,
):
response = batch_execution_metadata_table.update_item(
Key={
Attributes.BATCH_ID: batch_execution_job_id,
},
UpdateExpression="set #st=:s",
ExpressionAttributeValues={
":s": status,
},... | [
"def update_batch_status(batch_id, status, error_message=\"\"):\n\n response = batch_execution_metadata_table.update_item(\n Key={\n Attributes.BATCH_ID: batch_id,\n },\n UpdateExpression=\"set #st=:s, #errorMessage=:message\",\n ExpressionAttributeValues={\":s\": status, \... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
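The `#st` placeholder in these snippets exists because attribute names such as `Status` collide with DynamoDB's reserved words, so the (truncated) calls presumably also pass ExpressionAttributeNames. A hedged, self-contained sketch of such a call; the table and attribute names are assumptions.

```python
import boto3

table = boto3.resource('dynamodb').Table('BatchExecutionMetadata')  # assumed name
table.update_item(
    Key={'BatchId': 'batch-123'},
    UpdateExpression='set #st=:s',
    # '#st' aliases the real attribute, avoiding reserved-word collisions
    ExpressionAttributeNames={'#st': 'BatchStatus'},  # assumed attribute
    ExpressionAttributeValues={':s': 'COMPLETE'},
)
```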
Updates the step token of the given batch. Used for async waiting in step function invocations. | def update_batch_step_token(
batch_id,
step_token,
):
response = batch_execution_metadata_table.update_item(
Key={
Attributes.BATCH_ID: batch_id,
},
UpdateExpression="set #st=:s",
ExpressionAttributeValues={
":s": step_token,
},
Express... | [
"def step_pointer(caller, step=1):\r\n ptr = caller.ndb.batch_stackptr\r\n stack = caller.ndb.batch_stack\r\n nstack = len(stack)\r\n if ptr + step <= 0:\r\n caller.msg(\"{RBeginning of batch file.\")\r\n if ptr + step >= nstack:\r\n caller.msg(\"{REnd of batch file.\")\r\n caller.nd... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts new batch metadata for the input to the first-level job | def insert_batch_metadata_input(
batch_id, parent_batch_id, down_sampling_rate, input_manifest, batch_status
):
dynamo_db_item = {
Attributes.BATCH_ID: batch_id,
Attributes.DOWN_SAMPLING_RATE: Decimal(str(down_sampling_rate)),
Attributes.BATCH_METADATA_TYPE: BatchMetadataType.HUMAN_INPU... | [
"def _set_up_new_batch(self, *_):\n self.batch = []",
"def batch_insert_push(self, batch_data):\n data = self.data\n for key, value in batch_data.items():\n data[key] = value",
"def _insert_metadata(self, metadata):\n kwargs = {\n 'output': self.output,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates the status of a given batch_id | def update_batch_status(batch_id, status, error_message=""):
response = batch_execution_metadata_table.update_item(
Key={
Attributes.BATCH_ID: batch_id,
},
UpdateExpression="set #st=:s, #errorMessage=:message",
ExpressionAttributeValues={":s": status, ":message": error_m... | [
"def update_batch_status(\n batch_execution_job_id,\n status,\n):\n response = batch_execution_metadata_table.update_item(\n Key={\n Attributes.BATCH_ID: batch_execution_job_id,\n },\n UpdateExpression=\"set #st=:s\",\n ExpressionAttributeValues={\n \":s\":... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Append new down_sample location to the existing batchId | def update_batch_down_sample_location(
batch_id,
down_sample_location,
):
response = batch_execution_metadata_table.update_item(
Key={
Attributes.BATCH_ID: batch_id,
},
UpdateExpression="set #attr_down_sample_location=:down_sample_location",
ExpressionAttributeVal... | [
"def append(self, ch):\n self.batch.append(ch)\n if 'defrag_collection_est_size' not in ch:\n self.trust_batch_estimation = False\n self.batch_size_estimation += self.chunk_size_estimation\n else:\n self.batch_size... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Marks a whole batch as failed, including all children batches. | def mark_batch_and_children_failed(
batch_id,
error_message="",
):
update_batch_status(batch_id, BatchStatus.INTERNAL_ERROR, error_message=error_message)
items = get_child_batch_metadata_all(batch_id)
for item in items:
mark_batch_and_children_failed(item["BatchId"], error_message) | [
"def _handle_unsuccessful_batch(self, batch_id: str, batch: Optional[Dict] = None):\n if not batch:\n # We have to fetch the batch again as the kafka event doesn't currently include the\n # resource details\n try:\n batch = data_loader_helper.batch_get_resource... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Collects all possible information about the database, its tables, their columns and indexes into a DatabaseSchema object. | def schematize (self):
schema = DatabaseSchema (self.getDatabaseName ())
tableNames = self.getTableNames ()
for tableName in tableNames:
schema.addTable (self.schematizeTable (tableName))
return schema | [
"def _backcompute_schema(self, cursor):\n raw_stats_types = self.connection.tables()\n if not raw_stats_types:\n raise weewx.UninitializedDatabase(\"Uninitialized stats database\")\n # Some stats database have schemas for heatdeg and cooldeg (even though\n # they are not used)... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compiles the information provided into a ColumnSchema object. | def schematizeColumn (self, columnName, dataType, isNullable, extra):
column = ColumnSchema (columnName, dataType, isNullable, extra)
return column | [
"def _rebuild_internal_schema(self):\n\t\tself.columns = OrderedDict()\n\t\tself.primary_cgroup = None\n\n\t\tfor cgroup, schema in self._cgroups.iteritems():\n\t\t\tfor colname, dtype in schema['columns']:\n\t\t\t\tassert colname not in self.columns\n\t\t\t\tself.columns[colname] = ColumnType()\n\t\t\t\tself.colum... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetches the MySQL database connection assigned to this schematizer. Override this method to define a different way to provide a connection for the schematizer. | def getConnection (self):
return self._mysqlConnection | [
"def get_connection(self):\n return MySQLHelper.create_db_connection()",
"def get_database_connection():\n return connection",
"def get_mysql_con():\n\tfrom PySQLPool import getNewPool, getNewConnection, getNewQuery\n\tfrom settings import MYSQL_USER, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_DB\n\tgetNewPool... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Fetch the constraint associated with the given index, if there is one. | def getIndexConstraint (self, tableName, indexName):
constraint = None
cursor = self.getConnection ().cursor ()
cursor.execute ("""
select constraint_type
from information_schema.table_constraints
where
table_schema = %s and
table_name = %s an... | [
"def __getitem__(self, key):\n for constraint in self.constraints:\n name = getattr(constraint, 'name', None)\n if name is not None and name == key:\n return constraint\n try:\n found = constraint[key]\n except (KeyError, TypeError):\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function determines the direction in which a line was crossed. The procedure is based on a cross product; squared terms and magnitudes are ignored, only the sign of the cross product is used. | def line_cross_direction(self, p0, p1, p2) :
# print( 'line_cross_direction' )
u = np.array(p0) - np.array(p1)
v = np.array(p1) - np.array(p2)
v = v[::-1]
v[0] *= -1
uv_mag_sqr = np.sum(u*v)
return np.sign( uv_mag_sqr ) | [
"def ifLineCross(line2p, Line2p):\r\n downCrs = (line2p[0] > Line2p[0]) and (line2p[1] < Line2p[1])\r\n upCrs = (line2p[0] < Line2p[0]) and (line2p[1] > Line2p[1])\r\n if downCrs:\r\n return -1 # line2p down cross Line2p\r\n elif upCrs:\r\n return 1 # line2p up cross Line2p\r\n else:\... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
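The reverse-and-negate trick above dots u with a 90-degree rotation of v, v-perp = (-v_y, v_x), which equals the signed 2-D cross product up to the sign convention, so np.sign of it gives the crossing direction:

```latex
u \cdot v^{\perp} = u_x(-v_y) + u_y v_x = -(u_x v_y - u_y v_x) = -(u \times v)_z
```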
This function tests to see if the car has crossed any of the check points based on the car travelling from it's old position, pos_old, to it's new position, pos_new. | def crossed_check_point_test(self, pos_old, pos_new) :
for line in self.check_point_lines :
value = self.crossed_line_test(pos_old, pos_new, line)
if value != 0 :
return value
return 0 | [
"def obs_check(self, q_tree, x_new, label_new, obs_check):\n\n if q_tree[0][0] != x_new[0] and q_tree[0][1] != x_new[1]:\n return False\n\n if (q_tree[0], x_new) in obs_check.keys():\n return obs_check[(q_tree[0], x_new)]\n\n if (x_new, q_tree[0]) in obs_check.keys():\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Transfer CA certificate for an upgrade to a subcloud Returns the next state in the state machine on success. Any exceptions raised by this method set the strategy to FAILED. | def perform_state_action(self, strategy_step):
if self.subcloud_type == consts.SYSTEM_MODE_SIMPLEX:
return self.next_state
self.info_log(strategy_step, "Start transferring CA certificate...")
retry_counter = 0
while True:
try:
sysinv_client = se... | [
"def perform_state_action(self, strategy_step):\n\n self.info_log(strategy_step, \"Performing simplex upgrade for subcloud\")\n\n subcloud_sysinv_client = None\n subcloud_barbican_client = None\n try:\n subcloud_sysinv_client = self.get_sysinv_client(\n strategy... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if an array contains the element 0 | def arr_has_0_check(int_arr) -> bool:
return 0 in int_arr | [
"def iszero(self):\n return all((v == 0 for v in self.b))",
"def is_all_negative(arr):\n for e in arr:\n if e >= 0:\n return False\n return True",
"def is_zero_vector(nablaG):\n\n result = True\n for i in range(len(nablaG)):\n if not((isinstance(nablaG[i], numbers.Num... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instructions displayed to the user per the recipients file. | def recitals_recipients(self):
print("Loading recipient data")
print("Important, you must have the following header convention (names can be anything) "
"with the corresponding data to use this program:")
print('Email To | CC (Can be blank) | Subject | Body | Attachment path with E... | [
"def recitals_sender(self):\n print(\"Loading the sender data.\")\n print(\"Please provide the file path for the following on the 2nd row: \")\n print(\"SMTP server | SMTP Port | Your_Email | Your_Password \"\n \"(Leave blank if not needed) | optional: Signature path in .html forma... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instructions displayed to the user per the sender file. | def recitals_sender(self):
print("Loading the sender data.")
print("Please provide the file path for the following on the 2nd row: ")
print("SMTP server | SMTP Port | Your_Email | Your_Password "
"(Leave blank if not needed) | optional: Signature path in .html format.")
pri... | [
"def inform(msg: str):\n # Dynamic user update messages\n print(\" %-80s\" % msg, end=\"\\r\", flush=True)",
"def display_text(self):\n\n print(\"\\n\" * 100)\n print(\"Help MacGyver (M) to escape !\\n\")\n print(\"Controls:\\n\")\n print(\" Z\")\n print(\"Q S D\\n\")... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If the sizes of disk offerings are not configurable and there are no disk offerings with the requested size, an exception should be thrown. | def test_create_volume_no_noncustomized_offering_with_size(self):
location = self.driver.list_locations()[0]
self.assertRaises(
LibcloudError,
self.driver.create_volume,
'vol-0', location, 11) | [
"def showErrorDiskSize(self):\n ButtonChoiceWindow(self.__screen, ERROR_DISK_SIZE.localize(),\n ERROR_DISK_SIZE_MSG.localize(),\n buttons=[(OK.localize(), 'ok')],\n width=50)",
"def __check_size__(self, size):\n # size mus... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
S1, S2, and Target are the windowed dataframes. Counts the number of samples for each class label, generates the data distribution plot, and saves it to the provided directory | def plot_training_data_distribution_multi_source(s1, s2, target, activity_list, save_dir):
unique, counts = np.unique(target, return_counts=True)
target_dict = dict(zip(unique, counts))
target_samples = np.fromiter(target_dict.values(), dtype=float).astype(int)
unique, counts = np.unique(s1, retur... | [
"def display_class_distributions(labels_path='labels', class_map):\n labels_path = labels_path if labels_path.endswith('/') else labels_path + '/'\n \n txt_labels = sorted(glob(f'{labels_path}*.txt'))\n labels_df = pd.concat((pd.read_csv(file, sep=\" \", header=None) for file in txt_labels if os.path.ge... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates accuracy for a given level of perturbation (=epsilon) based on the dataset provided through dataset_name. Applies gaussian perturbation to the samples in the dataset according to the given level of perturbation. The accuracy is calculated from the perturbed dataset and the resulting predictions of the model. | def get_accuracy(gauss_eps, *args):
modelf, data, dataset_name = args
gauss_eps = gauss_eps
perturb_generator = PerturbationGenerator(
dataset_name=dataset_name,
gaussian_noise_eps=[gauss_eps]
)
logits, labels = modelf.logits(
data,
perturb_generator,
"genera... | [
"def accuracy(tree, dataset):\n\n right = 0\n for data in dataset:\n res = tree.predict(data)\n if abs(res - data[-1]) <= 0.5:\n right += 1\n return right / dataset[:, 0].size",
"def estimate_epsilons(\n modelf,\n data,\n dataset_name,\n n_classes,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Optimizes levels of gaussian perturbation (=epsilons) based on a list of target accuracies. The target accuracies are calculated with interpolation between min & max. The data is perturbed with gaussian noise at a certain level (=epsilon). The model's accuracy for the perturbed data is calculated. The levels of perturb... | def estimate_epsilons(
modelf,
data,
dataset_name,
n_classes,
number_perturbation_levels,
accuracy_deviation_acceptable,
accuracy_deviation_acceptable_last_step,
gauss_eps_start=0.05,
opt_delta_gauss_eps=0.5):
start_time = time.time()
# ca... | [
"def fit(self):\n # if self.verbose == 1:\n # print ('The list of all perturbation with its probability: \\n')\n # for perturb in range(len(self.p_list)):\n # print('%s perturbation with probability of: %s \\n' %(self.p_list[perturb], self.p_prob[perturb]))\n #p_cu... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Stores levels of gaussian perturbation (=epsilons) to a file. | def store_epsilons(
model_path,
epsilons,
modelf,
data,
data_name,
filename="optimized_epsilons"):
resulting_accuracy_list = []
for gauss_eps in epsilons:
resulting_accuracy_list.append(get_accuracy(gauss_eps, modelf,
... | [
"def add_gaussian_noise(path, sigma):\n \n data = np.loadtxt(path + '/output_data.txt').view(complex)\n noise = np.random.normal(0,sigma,data.size)\n noise = np.reshape(noise, data.shape)\n noised_data = data + noise\n np.savetxt(path + '/noised_data.txt', noised_data.view(float))\n return nois... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test an ir transceiver. | async def test_ir_transceiver(
hass: HomeAssistant,
ir_transceiver: Sensor,
receive_message: Callable[[str], None],
transport_write: MagicMock,
) -> None:
entity_id = "remote.ir_transceiver_1_1"
state = hass.states.get(entity_id)
assert state
assert state.state == "off"
# Test tur... | [
"def test_connection(self):\n for sender in self.senders:\n sender.test_connection()",
"def _testBee(self, source):\n pass\n source.send('at',command='vr')\n return source.wait_read_frame()",
"def device_test(self):\n # Create a MPI packet object\n mpi_packet... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the elevation matrix by reading the elevation text file | def load_elevation_matrix():
with open("mpp.txt") as f:
elevation_matrix = [elevation_matrix.split() for elevation_matrix in f]
for i in range(5):
for row in elevation_matrix:
del row[-1]
return elevation_matrix | [
"def print_elevation_ME():\n dem = ee.Image('USGS/SRTMGL1_003')\n xy = ee.Geometry.Point([86.9250, 27.9881])\n elev = dem.sample(xy, 30).first().get('elevation').getInfo()\n print('Mount Everest elevation (m):', elev)\n return",
"def elevation_plot_data(self):\n if self.elevations is None:\n... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Connects to a PostgreSQL service through DSN. | def connect_dsn(self, dsn):
if self.debug:
print("Connecting to PostgreSQL")
try:
self.con = I_sql.connect(dsn)
if self.debug:
print("Connected to PostgreSQL")
self.con.set_isolation_level(I_sql.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
... | [
"def _get_conn(self, dsn):\n return psycopg2.connect(dsn)",
"def setup_postgres():\n conn = psycopg2.connect(\"postgresql://python:{}@{}:5432/kin\".format(PYTHON_PASSWORD, POSTGRES_HOST))\n logging.info('Successfully connected to the database')\n return conn",
"def open(self):\n\n conn_st... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deletes an entire schema from a database. | def deleteSchema(self, schema):
if self.schemaTarget == schema:
self.schemaTarget = ""
return self.query("DROP SCHEMA IF EXISTS {}", (), schema) | [
"def delete_schema(self):\n try:\n os.remove(self.schema_shelf_file)\n except OSError:\n pass",
"def _drop_schema(schema_name, database):\n database.engine.execute(f'DROP USER {schema_name}')",
"def drop_schemas(self) -> None:\n self.drop_schema()\n for servi... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Inserts fields into a table. | def insert(self, table, fields):
field_keys = ', '.join(fields.keys())
_fields = '\',\''.join(fields.values())
return self.query("INSERT INTO {} ({}) VALUES ({})", (field_keys, _fields), table) | [
"def insert_into_table(db_conn, field_names_list, field_values_list, table_name):\n with db_conn, db_conn.cursor() as cursor:\n execute_values(cursor, \"\"\"INSERT INTO {tbl_name}({field_names_list})\n VALUES %s\"\"\".format(tbl_name=table_name,\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates rows within a table that match the given where clause, using the fields given. | def update(self, table, where, fields):
whereClasues = ' AND '.join(where)
_resolvedFields = []
for key in fields.keys():
_resolvedFields.append(key + " = '" + fields[key] + "'")
_resolvedFieldsToStr = ', '.join(_resolvedFields)
return self.query(... | [
"def update_data(self, table_name,\n updated_fields,\n updated_values,\n condition_fields,\n condition_values):\n s = [\"{0}={1}\".format(f, v) for f,v in zip(updated_fields, updated_values)]\n update_str = \",\".join(s)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if a Schema Exists within the targeted Database. | def checkIfSchemaExists(self, name):
result = self.query(
"""
SELECT EXISTS(
SELECT schema_name FROM information_schema.schemata WHERE schema_name = {}
);
""", (name))
value = str(result[0]).replace("(", "").replace(")", "").replace(",",... | [
"def checkExistence_DB(self):\n DBlist = self.client.list_database_names()\n if self.DB_NAME in DBlist:\n # print(f\"DB: '{self.DB_NAME}' exists\")\n return True\n # print(f\"DB: '{self.DB_NAME}' not yet present OR no collection is present in the DB\")\n return Fals... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the debugger mode. When enabled, all SQL commands are displayed in the console. | def setDebugMode(self, debugMode):
self.debug = debugMode | [
"def set_debug_mode(self, mode):\n self._debug_mode = mode\n self.config.debug = mode",
"def set_debug(cls, on=True):\n cls.debug = on",
"def setDebugMode(self, debug):\n return _core.CGPkronSum_setDebugMode(self, debug)",
"def SetDebugMode(self, debug):\n self.config.set(\"... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the number of rows affected by the last execution. | def getAffectedRowsCount(self):
return self.affectedRows | [
"def rowcount(self):\n self._check_that_read_query_was_issued()\n return self._delegate.rowcount",
"def _get_rowCount(self) -> \"int\" :\n return _core.TableCommandInput__get_rowCount(self)",
"def total_rows_count(self) -> int:\n return pulumi.get(self, \"total_rows_count\")",
"def... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Gets the ID of the last Row in the last Execution. | def getLastRowID(self):
return self.lastRowID | [
"def lastrowid(self):\n if self._last is not None:\n return self._last\n with self._statement.getGeneratedKeys() as rs:\n if rs.isClosed():\n return self._last\n last = []\n while rs.next():\n last.append(rs.getLong(1))\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run srw in ``cfg_dir`` The files in ``cfg_dir`` must be configured properly. | def run(cfg_dir):
with pkio.save_chdir(cfg_dir):
_run_elegant() | [
"def run_srun():\n import sys\n import subprocess\n from batchSettings import independent_srun_args\n script_name = sys.argv[0]\n args = independent_srun_args + [\"python\", script_name, \"all\"]\n logging.debug(\"Calling srun with the following arguments: %s\", args)\n subprocess.run([\"srun\"] + args)",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read Closed-End Fund (CEF) price and Net Asset Value (NAV) quotes from CSV files. CEF tickers and file names are taken from the user-defined dict CEF_DATA_SOURCES | def read_raw_cef_data(cef_symbol):
cef = pd.read_csv(CEF_DATA_SOURCES[cef_symbol][0])
cef = cef[["timestamp", "close"]]
cef.columns = [DATE_COL_NAME, PRICE_COL_NAME]
cef[NAV_COL_NAME] = pd.read_csv(CEF_DATA_SOURCES[cef_symbol][1])["close"]
cef[DATE_COL_NAME] = pd.to_datetime(cef[DATE_COL_NAME])
cef = cef.sort_val... | [
"def _open_convert_csv_files(self):\n comb_index = None\n for e in self.feeds:\n self.symbol_data[e] = {}\n self.latest_symbol_data[e] = {}\n\n for s in self.feeds[e]:\n csv_filename = get_ohlcv_file(e, s, self.timeframe, self.start_date, self.end_date)\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculating Zscore of the value in the cell [df[DATE_COL_NAME]==period_end_date, col_name] from period_start_date to period_end_date. DataFrame should contain column DATE_COL_NAME and col_name. | def calculate_zscore(df, col_name, period_start_date, period_end_date):
data = df.loc[
(df[DATE_COL_NAME] >= period_start_date) & (df[DATE_COL_NAME] <= period_end_date), col_name]
curr_value = df.loc[df[DATE_COL_NAME] == period_end_date, col_name].values[0]
average_value = data.mean()
std = data.std()
zscore = (... | [
"def calculate_trailing_residual_zscores(df_input, regressor_col_name, simulation_begin_date, calc_period):\n\tdf = df_input.copy()\n\tall_dates = df[DATE_COL_NAME]\n\tdates = df.loc[df[DATE_COL_NAME] >= simulation_begin_date, DATE_COL_NAME].reset_index(drop=True)\n\tfor date in dates:\n\t\tperiod_start_date = find... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
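The statistic the truncated line computes is the ordinary z-score of the period-end value against the window's mean and standard deviation:

```latex
z = \frac{x_{\mathrm{end}} - \mu_{[\mathrm{start},\,\mathrm{end}]}}{\sigma_{[\mathrm{start},\,\mathrm{end}]}}
```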
Calculating holding period return from period_start_date to period_end_date. DataFrame should contain column DATE_COL_NAME and col_name. | def calculate_return(df, col_name, period_start_date, period_end_date):
base_value = df.loc[df[DATE_COL_NAME] == period_start_date, col_name].values[0]
curr_value = df.loc[df[DATE_COL_NAME] == period_end_date, col_name].values[0]
price_return = (curr_value - base_value) / base_value * 100
df.loc[df[DATE_COL_NAME] =... | [
"def compute_df(real_stock_data, period_utils):\n df = real_stock_data.df\n\n months = period_utils.months\n gross_returns = (1 + df['m_return']).rolling(months, 1).apply(np.prod) - 1\n gross_returns = gross_returns.shift(-months)\n\n returns = period_utils.annualized_returns(gros... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Select an existing period-start date in an ordered list of dates | def find_valid_period_start_date(dates, date, period):
period_start_date = date - period
period_dates = dates[dates >= period_start_date]
first_date = period_dates.iloc[0]
return first_date | [
"def start_period(self, date, period):",
"def starting_date(self) -> datetime:\n return min([x.starting_date for x in self.subaccounts])",
"def get_dates(start, end):\n\n files = []\n\n while start <= end:\n p = start\n start += timedelta(days=1)\n files.append(p)\n\n return... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculating trailing calc_period zscores for residuals = y - y_predicted from simulation_begin_date to the last date; the regressand is always Price Returns. | def calculate_trailing_residual_zscores(df_input, regressor_col_name, simulation_begin_date, calc_period):
df = df_input.copy()
all_dates = df[DATE_COL_NAME]
dates = df.loc[df[DATE_COL_NAME] >= simulation_begin_date, DATE_COL_NAME].reset_index(drop=True)
for date in dates:
period_start_date = find_valid_period_st... | [
"def value_factor_backtest_monthly(real_yields, price_data, zscore_lookback, z_score_smoothening, n_securities, long_short, sample_start, sample_end):\n #Calculate Z-Score of Real Yields\n ry_zscore = (real_yields - real_yields.rolling(260*zscore_lookback).mean())/real_yields.rolling(260*zscore_lookback).std(... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
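One plausible reading of a single loop iteration, assuming an ordinary least-squares fit of price return on the regressor (the actual regression call is truncated above):

import numpy as np

def residual_zscore(window, y_col, x_col):
    # Z-score of the latest residual from an OLS fit of y on x over the window.
    x, y = window[x_col].values, window[y_col].values
    slope, intercept = np.polyfit(x, y, 1)   # one-factor linear regression
    residuals = y - (slope * x + intercept)  # residual = y - y_predicted
    return (residuals[-1] - residuals.mean()) / residuals.std()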
Processing raw input CEF data, which may take some time. Calculates the factors used in the analysis and saves the data to a new csv file. This method should be called only if we made changes to TRAIN_DATA_RATIO, AVERAGES_CALC_PERIOD, START_DATE, etc. | def calculate_cef_data(symbol, analysis_data_period, calc_period):
print("Processing data ... Please wait ... for 'Data processing finished!' indication below!")
df = read_raw_cef_data(symbol)
dates = df[DATE_COL_NAME].reset_index(drop=True)
end_date = str(dates.values[-1]).split("T")[0]
end_date = datetime.strpti... | [
"def process(self):\n\n print(\n f\"Transit processing all lightcurves data product: {self.all_lightcurves_dataproduct}\"\n )\n\n self.extract_and_save_lightcurves()\n\n self.create_best_light_curve_and_fit_image()",
"def _common_read(csv_file, raters):\n \n # Import a... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Running a trade simulation on the given time series based on the residual z-score. The simulation takes long and short positions, one position at a time. | def run_residual_trade_simulation(trade_simul_data, zscore_buy_long=-1.5, zscore_cover_long=-0.5, zscore_sell_short=1.5,
zscore_cover_short=0.5):
data = trade_simul_data[[DATE_COL_NAME, PRICE_COL_NAME, RESIDUAL_ZSCORE_COL_NAME]]
trades = pd.DataFrame(
columns=[DATE_COL_NAME, PRICE_... | [
"def _run(prices, options, verbose=True, get_invested_value=None):\n # For each stock, calculate the running yearly volatility:\n sigmas = Simulation._calculate_sigmas_wrapper(prices)\n\n if verbose:\n print(\"Finished calculating the yearly sigmas.\")\n\n # Then extract the v... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
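The entry/exit logic implied by the threshold parameters reduces to a small state machine; a sketch (the fill and P&L bookkeeping in the truncated body is omitted):

def next_position(position, z, buy_long=-1.5, cover_long=-0.5, sell_short=1.5, cover_short=0.5):
    # One position at a time: 'flat', 'long' or 'short'.
    if position == "flat":
        if z <= buy_long:
            return "long"   # residual unusually negative: price cheap vs. model
        if z >= sell_short:
            return "short"  # residual unusually positive: price rich vs. model
    elif position == "long" and z >= cover_long:
        return "flat"       # mean reversion played out, exit
    elif position == "short" and z <= cover_short:
        return "flat"
    return position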
Classifies the given pattern. | def classify(self, pattern):
import numpy
# Net input u = w . x, computed as a 1x1 matrix product.
w = numpy.array([self.weights])
x = numpy.array([pattern])
u = numpy.dot(w, x.transpose())
# Pass the net input through the activation function.
return self.actvf(u)
"def add_pattern(self, pattern_name, pattern_class):\n if pattern_name not in self.config['pattern']:\n self.config['pattern'][pattern_name] = {}\n self.pattern[pattern_name] = pattern_class(\n self.config['pattern'][pattern_name],\n self.config['system']\n )",
... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
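A minimal demonstration of the weighted-sum-plus-activation pattern above, assuming a Heaviside step as the activation function (the class's actvf is not shown):

import numpy

step = lambda u: (u >= 0).astype(int)  # assumed activation
weights, pattern = [2.0, -1.0], [1.0, 1.0]
u = numpy.dot(numpy.array([weights]), numpy.array([pattern]).transpose())
print(step(u))  # u = 2*1 - 1*1 = 1, so the step fires: [[1]]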
Handles the error when the server fails to find a sequence. Function expects a client state object, a username, and the sequence id that raises the exception. Function shall return the state from GET /Elixys/state. | def handle_sequence_not_found(client_state, username, sequence_id):
client_state = getCurrentClientState(username)
current_app.logger.debug("Failed to find sequence: " + str(sequence_id) + \
"Client state: " + str(client_state))
# Was it the sequence that the user is currently on?
if clien... | [
"def handle_invalid_sequence(username, sequence_id):\n current_app.logger.error(\"Cannot run invalid sequence (\" +\n str(sequence_id) + \"\\nUser:\" + str(username))\n return {\"type\":\"error\", \"description\":\"Invalid sequence\"}",
"def testBadStateid(t, env):\n c = env.c1\n c.init_con... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles the error when the server fails to find a component. Function expects a client state, username and component id that generated the exception. Function returns the state from GET /Elixys/state. | def handle_component_not_found(client_state, username, component_id):
current_app.logger.debug("Failed to find component " + str(component_id))
# Was it the component that the user is currently on?
if client_state["componentid"] == component_id:
# Yes
sequence_id = 0
try:
... | [
"def handle_sequence_not_found(client_state, username, sequence_id):\n client_state = getCurrentClientState(username)\n\n current_app.logger.debug(\"Failed to find sequence: \" + str(sequence_id) + \\\n \"Client state: \" + str(client_state))\n\n # Was it the sequence that the user is currently ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles the error when the server fails to find a reagent. Function expects a client state, username and reagent id. Function shall redirect user to previous screen on client. Function returns the state from GET /Elixys/state. | def handle_reagent_not_found(client_state, username, reagent_id):
current_app.logger.debug("Failed to find reagent " + str(reagent_id))
# This error should only occur if the user has
# the sequence they are currently viewing delete out from
# under them. Redirect them to the last Select Sequence scree... | [
"def error():\n try:\n status = int(request.environ['REDIRECT_STATUS'])\n except:\n # if there's an exception, it means that a client accessed this directly;\n # in this case, we want to make it look like the endpoint is not here\n return api_404_handler()\n msg = 'Unknown erro... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Handles the error when the user attempts to run an invalid sequence | def handle_invalid_sequence(username, sequence_id):
current_app.logger.error("Cannot run invalid sequence (" +
str(sequence_id) + "\nUser:" + str(username))
return {"type":"error", "description":"Invalid sequence"} | [
"def handle_seq_abort():\n global _RUNNING_SEQ\n\n with Sessions.current() as session: # noqa: F841\n if _RUNNING_SEQ:\n _RUNNING_SEQ.kill()\n _RUNNING_SEQ = None\n log.info(\"Sequence aborted by user\")\n Sessions.add_event(\"seq:err\", \"Sequence aborted b... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Output file name for the given neutron component in the output directory of the job directory. This has to be unique so that different components don't write to the same output file. | def outputfilename(component):
f = '%s-%s.out' % (component.__class__.__name__, component.componentname)
return f | [
"def name_file(self, output_filename):\n return self.output_path / output_filename",
"def _make_output_file_path_unique(self, run_name: str, op_name: str,\n output_file: str) -> str:\n if not output_file.startswith(\"/tmp/\"):\n return outp... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
>>> get_relative_change(10, 0) 999.0 >>> get_relative_change(10, 1) 9.0 >>> get_relative_change(10, 5) 1.0 >>> get_relative_change(10, 10) 0.0 >>> get_relative_change(10, 15) 0.5 >>> get_relative_change(10, 20) 1.0 >>> get_relative_change(10, 100) 9.0 >>> get_relative_change(0, 10) 999.0 >>> get_relative_change(0, 0) 0... | def get_relative_change(val1, val2):
assert val1 >= 0, val1
assert val2 >= 0, val2
if val1 == 0:
val1 = EPSILON
if val2 == 0:
val2 = EPSILON
if val1 > val2:
return 1 - val1 / float(val2)
return val2 / float(val1) - 1 | [
"def getRelativeFieldChange(self):\n\t\treturn self.relative_field_change",
"def pct_change():\n original_value = bank_of_rick.original_value\n current_total_value = sum(total_value())\n return 100 * (current_total_value - original_value) / original_value",
"def fracChange(new, control, deltaT):\n r... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
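The zero guards rely on a module-level EPSILON that is not shown; with an assumed EPSILON of 0.01 the (0, 10) case works out as follows:

EPSILON = 0.01  # assumed; the module constant is not shown above
val1, val2 = EPSILON, 10.0     # get_relative_change(0, 10) after the guard
print(round(val2 / val1 - 1))  # -> 999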
This function creates a ray transfer matrix for propagation over a distance d | def distance(d):
arr01 = array([
[1, d],
[0, 1]
], float)
print("The ray transfer matrix for your setup at d distance is", )
print(arr01) | [
"def reconstruction_d_matrix(self):\n\n d_matrix = np.zeros_like(self.d_matrix)\n for n_step in np.arange(self.nsteps):\n transfer_matrix = self.transfer_matrix_from_simframe(os.path.abspath(os.path.join(os.getcwd(),\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
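Applying the free-space matrix to a ray column vector (height y, angle theta) shows the expected behaviour: after a distance d the height grows by d*theta while the angle is unchanged. A small check:

from numpy import array

d = 2.0
M = array([[1, d], [0, 1]], float)
ray = array([[1.0],   # y = 1 (height above the axis)
             [0.1]])  # theta = 0.1 rad (paraxial angle)
print(M @ ray)        # -> [[1.2], [0.1]]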
This function creates a ray transfer matrix for a thin lens with focal length f | def thinlens(f):
arr02 = array([
[1, 0],
[-1/f, 1]
])
print("The ray transfer matrix for your thin lens of focal lenth f is", )
print(arr02) | [
"def refflatmirror():\n arr05 = array([\n [1, 0],\n [0, 1]\n ], float)\n print(\"The ray transfer matrix for reflaction in a flat interface is \")\n print(arr05)",
"def traject(origin0,tetaOrigin0):\r\n\r\n #cf to \"Analytic ray curv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
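System matrices compose right-to-left, so elements can be chained by matrix multiplication. For the classic 2f-2f imaging setup (propagate 2f, thin lens of focal length f, propagate 2f) the product is [[-1, 0], [-1/f, -1]]: an inverted image at unit magnification. A check with f = 1:

from numpy import array

f = 1.0
D = array([[1, 2 * f], [0, 1]], float)   # free space, d = 2f
L = array([[1, 0], [-1 / f, 1]], float)  # thin lens
print(D @ L @ D)                         # -> [[-1, 0], [-1, -1]]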
This function creates a ray transfer matrix for refraction at a flat interface, where n1 = initial refractive index and n2 = final refractive index | def refraflat(n1, n2):
arr03 = array([
[1, 0],
[0, n1/n2]
], float)
print("The ray transfer matrix for refraction in a flat interface is ")
print(arr03) | [
"def refflatmirror():\n arr05 = array([\n [1, 0],\n [0, 1]\n ], float)\n print(\"The ray transfer matrix for reflaction in a flat interface is \")\n print(arr05)",
"def refracurvemirror(Re):\n arr06 = array([\n [1,... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function creates a ray transfer matrix for reflection from a flat mirror. You don't need to provide any input here. | def refflatmirror():
arr05 = array([
[1, 0],
[0, 1]
], float)
print("The ray transfer matrix for reflaction in a flat interface is ")
print(arr05) | [
"def refracurvemirror(Re):\n arr06 = array([\n [1, 0],\n [-2/Re, 1]\n ], float)\n print(\"The ray transfer matrix for refraction at a curved mirror is \")\n print(arr06)",
"def refraflat(n1 ,n2):\n arr03 = array([\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This function creates a ray transfer matrix for reflection from a curved mirror. You just need to enter the effective radius of curvature Re in the brackets. | def refracurvemirror(Re):
arr06 = array([
[1, 0],
[-2/Re, 1]
], float)
print("The ray transfer matrix for refraction at a curved mirror is ")
print(arr06) | [
"def refflatmirror():\n arr05 = array([\n [1, 0],\n [0, 1]\n ], float)\n print(\"The ray transfer matrix for reflaction in a flat interface is \")\n print(arr05)",
"def traject(origin0,tetaOrigin0):\r\n\r\n #cf to \"Analytic ray curv... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
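A curved mirror of radius Re behaves like a thin lens of focal length Re/2; the two matrices coincide:

from numpy import array, allclose

Re = 4.0
mirror = array([[1, 0], [-2 / Re, 1]], float)
lens = array([[1, 0], [-1 / (Re / 2), 1]], float)
assert allclose(mirror, lens)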
This function creates a ray transfer matrix for a single prism. You just need to enter k, d, n, where k = cos(a)/cos(b) (you have to calculate this), a = the angle of refraction (in radians), b = the angle of incidence (in radians), d = prism path length, n = refractive index of the prism material | def sinpri(k, d, n):
arr07 = array([
[k, d/(n*k)],
[0, 1/k]
], float)
print("The ray transfer matrix for a single prism is: ")
print(arr07) | [
"def traject(origin0,tetaOrigin0):\r\n\r\n #cf to \"Analytic ray curve tracing for outdoor sound propagation\"\r\n #to understand what follows in this function\r\n #the formulation of the ray traject in the article is true for\r\n #the local orthonormal coordinates (r,h) were h is the axis directed\r\n ... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
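The expansion factor k follows from Snell's law at the entrance face, sin(b) = n*sin(a), so a = asin(sin(b)/n). A sketch with illustrative numbers only:

from math import asin, cos, sin, radians

n, b = 1.5, radians(30)  # glass prism, 30 degree angle of incidence
a = asin(sin(b) / n)     # angle of refraction inside the prism
k = cos(a) / cos(b)      # ~1.09 beam expansion factor
sinpri(k, 1.0, n)        # d = 1.0 path length, chosen arbitrarily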
This function creates a ray transfer matrix for a multiple-prism expander. You just need to enter n, B in the brackets, where n = the number of prisms you are using and B = the total optical propagation distance of the multiple-prism expander. After entering the first two arguments, the program will ask you to enter the value ... | def mulpri(n, B):
M = 1
for i in range(n):
k = float(input("Enter the beam expansion factor, k=", ))
M = k*M
arr08 = array([
[M, B],
[0, 1/M]
], float)
print("The ray transfer matrix for a multiple prism is... | [
"def sinpri(k, d, n):\n arr07 = array([\n [k, d/(n*k)],\n [0, 1/k]\n ], float)\n print(\"The ray transfer matrix for a single prism is: \")\n print(arr07)",
"def generate_k_star_system(n, k):\n dag = np.zeros((n,n))\n r = int(math... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
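Because the interactive input() loop is awkward to script or test, a non-interactive variant that accepts the expansion factors directly may be useful; a sketch building the same matrix:

from numpy import array

def mulpri_from_factors(B, factors):
    # Same matrix as mulpri, but with the per-prism expansion factors
    # passed as a list instead of prompted for one by one.
    M = 1.0
    for k in factors:
        M *= k
    arr = array([[M, B], [0, 1 / M]], float)
    print("The ray transfer matrix for a multiple prism is: ")
    print(arr)
    return arr

mulpri_from_factors(0.3, [1.2, 1.1])  # two-prism expander, M = 1.32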
Test _SystemWriter utility class | def test_system_writer(self):
class BaseWriter(object):
def flush(self):
return 'flush called'
def write_comment(self, comment):
return 'write comment ' + comment
s = modelcif.dumper._SystemWriter(BaseWriter(), {}, {})
# These methods are... | [
"def test_generate_global_info(self):\n\t\tassert False, \"Write Test\"",
"def write_tool_methods():\n if not os.path.exists(TOOLS_PATH):\n with open(TOOLS_PATH, 'w+'):\n pass\n template_str = 'from ..utils import cfl\\nimport os\\nimport subprocess\\nimport tempfile as tmp\\n\\n'\n tem... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test ChemCompDumper with invalid value for ccd | def test_chem_comp_dumper_bad_ccd(self):
system = modelcif.System()
c1 = ihm.NonPolymerChemComp('C1', name='C1')
c1.ccd = 'garbage'
e1 = modelcif.Entity([c1])
system.entities.append(e1)
dumper = modelcif.dumper._ChemCompDumper()
self.assertRaises(KeyError, _get... | [
"def test_create_cpd_info():\n\tdf_master = pd.DataFrame(['C([C@@H]1[C@H]([C@@H]([C@H](C(O1)O)O)O)O)O',\n\t\t 'C([C@@H]1[C@@H]([C@@H]([C@H]([C@H](O1)O)O)O)O)O',\n\t\t 'C([C@H]([C@H]([C@@H](C(=O)CO)O)O)O)O',\n\n'C[C@@H]1CC[C@H]2C[C@@H](/C(=C/C=C/C=C/[C@H](C[C@H](C(=O)[C@@H]([C@@H](/C(=C/[C@H](C(=O)C[C@H](OC(=O)[C@@H... | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |