Columns: query (string, 9 to 9.05k chars); document (string, 10 to 222k chars); negatives (list of 19 to 20 items); metadata (dict)
Send some status about the bots
async def status(self, msg, *args):
    content = self.get_status()
    await msg.channel.send(**{
        'content': content,
        'reference': msg.to_reference(),
        'mention_author': True,
    })
[ "def send_robot_status(self, robot_status):\n self.robot_status_sender.send(robot_status)", "async def _site_status_channel(self, ctx: commands.Context):", "def statusJogo():\n sendToAll(\"\\nPalavra: {} \\nLetras erradas: {}\".\\\n format(palavraOculta, letrasUtilizadas))", "async def st...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Back up output to local path.
def backup_output_path(self):
    backup_path = TaskOps().backup_base_path
    if backup_path is None:
        return
    FileOps.copy_folder(TaskOps().local_output_path, backup_path)
[ "def backup_to_file(path, *options, env={}, do_async=False, pipe_stdout=None):\n\treturn backup(\"--output-file\", path, \\\n\t\t\t\"--namespace\", lib.NAMESPACE, \\\n\t\t\t\"--verbose\", \\\n\t\t\t*options,\n\t\t\tenv=env, do_async=do_async, pipe_stdout=pipe_stdout)", "def backup(self):\n ds = self.datash...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Output all records for a given step.
def output_step_all_records(self, step_name, desc=True, weights_file=True, performance=True):
    records = self.all_records
    logging.debug("All records in report, records={}".format(self.all_records))
    records = list(filter(lambda x: x.step_name == step_name, records))
    logging.debug("Filter s...
[ "def run(self):\n print('#' + '\\t'.join(OutputRecord.get_header_fields()),\n file=self.args.output_tsv)\n for chunk in chunk_by_query(self.sam_file, expand_xa=True):\n #print('STARTING CHUNK', file=sys.stderr)\n if not chunk:\n continue # ignore empt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dump report to file.
def dump(self):
    try:
        _file = FileOps.join_path(TaskOps().step_path, "reports.csv")
        FileOps.make_base_dir(_file)
        data = self.all_records
        data_dict = {}
        for step in data:
            step_data = step.serialize().items()
            for k, v in st...
[ "def write_report(self):\n pass", "def write_report(cls, filepath, filename, report, is_train):\n\n full_filepath = cls._generate_filepath(filepath, filename,'.report',\n is_train)\n print full_filepath\n cls._write_to_file(full_filepath, r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test integrity of lensfuncs
def test_integrity_of_lensfuncs():
    ra_source, dec_source = [120.1, 119.9, 119.9], [41.9, 42.2, 42.2]
    id_source, z_sources = [1, 2, 3], [1, 1, 1]
    galcat = GCData([ra_source, dec_source, z_sources, id_source], names=('ra', 'dec', 'z', 'id'))
    galcat_noz = GCData([ra_source, dec_source, ...
[ "def constrained_lens_object_test():\n return # TODO", "def verify_function(self, function, name, innames, outnames, structures, store):\n return", "def test_all_ufunc(self) :\n pass", "def c_test_mutate_function(self, function):\r\n return 1", "def test_func7(self):\n pass", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Emit a TiltBrushMesh as a .fbx file
def write_fbx_meshes(meshes, outf_name):
    import FbxCommon
    global n
    (sdk, scene) = FbxCommon.InitializeSdkObjects()
    docInfo = FbxDocumentInfo.Create(sdk, 'DocInfo')
    docInfo.Original_ApplicationVendor.Set('Google')
    docInfo.Original_ApplicationName.Set('Tilt Brush')
    docInfo.LastSaved_ApplicationVendor.Set('...
[ "def setFbxExportOptions_SkeletalMesh():\n\n # 현재 FBX 설정을 저장해놓는다\n mel.eval('FBXPushSettings;')\n\n # 아래와 같이 설정을 바꾸고\n mel.eval('FBXResetExport;'\n 'FBXExportSplitAnimationIntoTakes -c;'\n 'FBXExportInputConnections -v false;'\n 'FBXExportIncludeChildren -v false;'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an instance of layer_class populated with the passed data, or None if the passed data is empty/nonexistent.
fbx_mesh      FbxMesh
data          list of Python data
converter_fn  Function converting data -> FBX data
layer_class   FbxLayerElementXxx class
allow_index   Allow the use of eIndexToDirect mode. Useful if the data has man...
def create_fbx_layer(fbx_mesh, data, converter_fn, layer_class, allow_index=False, allow_allsame=False):
    # No elements, or all missing data.
    if not allow_allsame and (len(data) == 0 or data[0] == None):
        return None
    layer_elt = layer_class.Create(fbx_mesh, "")
    direct = layer_elt.GetDire...
[ "def GradientClippingDataclassField(description: str, default: Dict={}):\n allow_none = True\n\n\n class GradientClippingMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field class for gradient clipping.\n\n Deserializes a dict to a valid instance of `ludwig.modules.optimization_modul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Emit a TiltBrushMesh as a .fbx file
def add_mesh_to_scene(sdk, scene, mesh, contentid):
    global n
    name = contentid + "_" + str(n)
    n += 1
    # Todo: pass scene instead?
    fbx_mesh = FbxMesh.Create(sdk, name)
    fbx_mesh.CreateLayer()
    layer0 = fbx_mesh.GetLayer(0)
    # Verts
    fbx_mesh.InitControlPoints(len(mesh.v))
    if RELOCATE_BRUSHES is True:
        print...
[ "def write_fbx_meshes(meshes, outf_name):\n import FbxCommon\n global n\n (sdk, scene) = FbxCommon.InitializeSdkObjects()\n\n docInfo = FbxDocumentInfo.Create(sdk, 'DocInfo')\n docInfo.Original_ApplicationVendor.Set('Google')\n docInfo.Original_ApplicationName.Set('Tilt Brush')\n docInfo.LastSaved_Applicatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an instance of the appropriate DriverFields class.
def create(node):
    if 'pxe' in node.driver:
        return PXEDriverFields(node)
    else:
        return GenericDriverFields(node)
[ "def create_driver(self):\n pass", "def __new__(cls, **kwargs):\n instance = object.__new__(cls)\n for fname, field in cls.fields().items():\n setattr(instance, fname, field.get_initial_value())\n return instance", "def __init__(self, name, description, field_type_processo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a patch to clean up the fields. Build a jsonpatch to remove the fields used to deploy a node using the PXE driver. Note that the fields added to the Node's instance_info don't need to be removed because they are purged during the Node's tear down.
def get_cleanup_patch(self, instance, network_info):
    patch = []
    driver_info = self.node.driver_info
    fields = ['pxe_deploy_kernel', 'pxe_deploy_ramdisk']
    for field in fields:
        if field in driver_info:
            patch.append({'op': 'remove', '...
[ "def release_schema_patch(self, extension_field=None, language='en'):\n output = {}\n\n # Replaces `null` with sentinel values, to preserve the null'ing of fields by extensions in the final patch.\n for extension in self.extensions():\n try:\n patch = extension.remote(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Announce that a Guardian is present and participating in the decryption. A guardian announces by presenting their id and their shares of the decryption.
def announce(
    self,
    guardian_key: ElectionPublicKey,
    tally_share: DecryptionShare,
    ballot_shares: Dict[BallotId, Optional[DecryptionShare]] = None,
) -> None:
    guardian_id = guardian_key.owner_id
    # Only allow a guardian to announce once
    if guardian_id in self....
[ "def announceVictory(self, secret):\n print\n print 'Congratulations, you won!'\n print 'The secret was', self._patternAsString(secret)", "async def balanceof(self, ctx, verifier_id):\n balances = await self.get_all_balances()\n short_id = verifier_id[:4] + \".\" + verifier_id[-4:]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Announce that a Guardian is missing and not participating in the decryption.
def announce_missing(self, missing_guardian_key: ElectionPublicKey) -> None:
    missing_guardian_id = missing_guardian_key.owner_id
    # If guardian is available, can't be marked missing
    if missing_guardian_id in self._available_guardians:
        log_info(f"guardian {missing_guardian_id} already...
[ "def announceDefeat(self, secret):\n print\n print 'The secret was', self._patternAsString(secret)\n print 'Good luck next time.'", "def announcement_complete(self) -> bool:\n # If a quorum not announced, not ready\n if len(self._available_guardians) < self._context.quorum:\n log...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the guardian's collections of keys and ensure the public keys match for the guardians.
def validate_missing_guardians(
    self, guardian_keys: List[ElectionPublicKey]
) -> bool:
    # Check this guardian's collection of public keys
    # for other guardians that have not announced
    missing_guardians: Dict[GuardianId, ElectionPublicKey] = {
        guardian_key.owner_id: guardi...
[ "def _validate_keys(self):\n if type(self.keys) != dict:\n raise securesystemslib.exceptions.FormatError(\n \"keys dictionary is malformed!\")\n\n securesystemslib.formats.KEYDICT_SCHEMA.check_match(self.keys)\n\n for keyid, key in six.iteritems(self.keys):\n securesystemslib.formats.PUB...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if the announcement phase is complete
def announcement_complete(self) -> bool:
    # If a quorum not announced, not ready
    if len(self._available_guardians) < self._context.quorum:
        log_warning("cannot decrypt with fewer than quorum available guardians")
        return False
    # If guardians missing or available not account...
[ "def is_complete(self):\n return self.status.value and self.status.value.upper() == \"COMPLETED\"", "def is_complete(self):\n return self.status == \"DONE\"", "def is_complete(self):\n pass", "def IsCompleted(self) -> bool:", "def is_complete(self):\n return self.status == \"DONE\"",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all missing guardian keys
def get_missing_guardians(self) -> List[ElectionPublicKey]: return list(self._missing_guardians.values())
[ "def yet_keys(self):\n return [k for k in self.d if not self.d[k]]", "def _get_missing_keys(self):\n REQUIRED_KEYS = [\n 'date_purchased', 'cost', 'supply_type_id'\n ]\n\n return [key for key in REQUIRED_KEYS if not key in self.request.data]", "def build_keys_to_skip(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the plaintext ballots for the election by composing each Guardian's decrypted representation of each selection into a decrypted representation. This is typically used in the spoiled ballot use case.
def get_plaintext_ballots(
    self, ciphertext_ballots: List[SubmittedBallot], manifest: Manifest
) -> Optional[Dict[BallotId, PlaintextTally]]:
    if not self.announcement_complete():
        return None
    ballots = {}
    for ciphertext_ballot in ciphertext_ballots:
        ballot_s...
[ "def encrypt_ballot(request, election):\n answers = utils.from_json(request.POST['answers_json'])\n ev = homomorphic.EncryptedVote.fromElectionAndAnswers(election, answers)\n return ev.ld_object.includeRandomness().toJSONDict()", "def decrypt(text,key):\n # The period is the number of columns the initial gr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save a guardian's tally share.
def _save_tally_share(
    self, guardian_id: GuardianId, guardians_tally_share: DecryptionShare
) -> None:
    self._tally_shares[guardian_id] = guardians_tally_share
[ "def _save_ballot_shares(\n self,\n guardian_id: GuardianId,\n guardians_ballot_shares: Dict[BallotId, Optional[DecryptionShare]],\n ) -> None:\n for ballot_id, guardian_ballot_share in guardians_ballot_shares.items():\n shares = self._ballot_shares.get(ballot_id)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save a guardian's set of ballot shares.
def _save_ballot_shares(
    self,
    guardian_id: GuardianId,
    guardians_ballot_shares: Dict[BallotId, Optional[DecryptionShare]],
) -> None:
    for ballot_id, guardian_ballot_share in guardians_ballot_shares.items():
        shares = self._ballot_shares.get(ballot_id)
        if share...
[ "def _save_tally_share(\n self, guardian_id: GuardianId, guardians_tally_share: DecryptionShare\n ) -> None:\n self._tally_shares[guardian_id] = guardians_tally_share", "def save_spendings(cls, obj_spends):\n saved = []\n for spendable in cls.objects_by_public_ids(obj_spends.keys())...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shares are ready to decrypt.
def _ready_to_decrypt(self, shares: Dict[GuardianId, DecryptionShare]) -> bool:
    # If all guardian shares are represented including if necessary
    # the missing guardians reconstructed shares, the decryption can be made
    return len(shares) == self._context.number_of_guardians
[ "def decrypt_fable():", "def decrypt(self, data):", "def finalize(self):\n return self._decryptor.finalize()", "def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):", "def main(self):\n self.key = self.read_key()\n cipher_bin = self.to_bin(False)\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter a guardian pair and compensated share dictionary by missing guardian.
def _filter_by_missing_guardian(
    missing_guardian_id: GuardianId,
    shares: Dict[GuardianPair, CompensatedDecryptionShare],
) -> Dict[GuardianId, CompensatedDecryptionShare]:
    missing_guardian_shares = {}
    for pair, share in shares.items():
        if pair.designated_id == missing_guardian_id:
            m...
[ "def filter_pairs(pairs):\n\treturn [pair for pair in pairs if filter_pair(pair, )]", "def combine_pair_not(self,pair_stats): \n return pair_stats", "def apply_filters(self):\n hurst_cut = 0\n coint_cut = 0\n half_life_cut = 0\n mean_cross_cut = 0\n\n # Create an ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_next_meeting() should return a python dict containing the following information, so we are going to check typing. {
def test_get_next_meeting():
    result = schedule.get_next_meeting()
    if result:
        assert result['name'], 'Result has no `name` key'
        assert result['date'], 'Result has no `date` key'
    assert isinstance(result['name'], str), 'name is not a string'
    assert isinstance(result['date'], arr...
[ "def test_get_next_ops_meeting():\n result = schedule.get_next_workshop()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_next_workshop() should return a python dict containing the following information, so we are going to check typing. {
def test_get_next_workshop():
    result = schedule.get_next_workshop()
    if result:
        assert result['name'], 'Result has no `name` key'
        assert result['date'], 'Result has no `date` key'
    assert isinstance(result['name'], str), 'name is not a string'
    assert isinstance(result['date'], a...
[ "def test_get_next_ops_meeting():\n result = schedule.get_next_workshop()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_next_ops_meeting() should return a python dict containing the following information, so we are going to check typing. {
def test_get_next_ops_meeting():
    result = schedule.get_next_ops_meeting()
    if result:
        assert result['name'], 'Result has no `name` key'
        assert result['date'], 'Result has no `date` key'
    assert isinstance(result['name'], str), 'name is not a string'
    assert isinstance(result['date']...
[ "def test_get_next_meeting():\n result = schedule.get_next_meeting()\n\n if result:\n assert result['name'], 'Result has no `name` key'\n assert result['date'], 'Result has not `date` key'\n\n assert isinstance(result['name'], str), 'name is not a string'\n assert isinstance(result...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_next_competition() should return a python dict containing the following information, so we are going to check typing. {
def test_get_next_competition():
    result = schedule.get_next_competition()
    if result:
        assert result['name'], 'Result has no `name` key'
        assert result['date'], 'Result has no `date` key'
    assert isinstance(result['name'], str), 'name is not a string'
    assert isinstance(result['dat...
[ "def fetch_next_match() -> Optional[MatchDict]:\n future_matches = Match.objects.filter(start_date_time__gt=timezone.now())\n\n if not any(future_matches):\n return None\n\n next_match = min(future_matches, key=lambda match: match.start_date_time)\n\n return {\n \"round_number\": next_matc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use current prediction scores to build a curriculum mask.
def build_curriculum(scores):
    def do_one_row(row):
        failed_once = False
        for i, _ in enumerate(row):
            if failed_once:
                row[i] = 0
            else:
                if row[i] == 0:
                    row[i] = 1
                    failed_once = True
        return row
    tmp...
[ "def predict_and_update(self, z):", "def _do_calibration(self, scores, y_pred):\n raise NotImplementedError(\"This method should be assigned from configuration\")", "def fuel_prediction(initial_pos, d1, d2, avg_speed):\n\n\n\n return 0", "def update(self, new_true, new_preds):\n new_score = F...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store kwargs with whitelisted prefixes into this object's attributes. Raises an exception if one of the kwargs does not match a whitelisted prefix.
def _store_kwargs(self, kwargs, allowed_prefixes):
    def starts_with_legal_prefix(key):
        for prefix in allowed_prefixes:
            if key.startswith(prefix):
                return True
        return False
    for key in kwargs.keys():
        if not starts_with_legal_prefix(ke...
[ "def set_prefix_kwargs(self, **kwargs):\n self._prefix_kwargs = kwargs", "def set_prefix_arg(self, name, value):\n self._prefix_kwargs[name] = value", "def update(self, **kwargs):\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n continue\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all attributes of this class that start with a given prefix. The prefix is stripped in the result. This can be used to pass on some parameters to subclasses.
def _get_prefix_attributes(self, prefix): return filter_dict_by_prefix(self.__dict__, prefix)
[ "def getAttrPrefix(self, *args):\n return _libsbml.XMLToken_getAttrPrefix(self, *args)", "def IncludePrefixAttrFlags(self):\r\n\t\treturn self._get_attribute('includePrefixAttrFlags')", "def filter_dict_keystartswith(d, prefix):\n if d is None or isinstance(d, Undefined):\n return d\n\n if s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return extra parameters that should be passed to the module. You should take care to update the dictionary from the ``super`` implementation when overriding this function. You usually do not want to just discard the parameters that are specified by the super class.
def _get_extra_module_parameters(self): return {"n_features": self.n_features_}
[ "def extraParameters(self): # real signature unknown; restored from __doc__\n return {}", "def additional_param_for_ns(self) -> Dict[str, str]:\n return self._additional_param_for_ns", "def extra_fields(self):\n return {}", "def extend_param(self):\n return self._extend_param", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fit the estimator to data. This derives the number of object features from the data and then delegates to ``skorch.NeuralNet.fit``. See the documentation of that method for more details.
def fit(self, X, y=None, **fit_params):
    dataset = self.get_dataset(X, y)
    (_n_objects, self.n_features_) = dataset[0][0].shape
    NeuralNet.fit(self, X=dataset, y=None, **fit_params)
[ "def fit(self, data):\n pass", "def fit_training_data(self):\n self.model.fit(self.X_train)", "def fit(self):\n self.model.fit(self.features, self.label)", "def fit_data(self):\n pass", "def fit(self, data):\r\n assert len(data) > self.n_neighbors\r\n assert len(Ima...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter columns in a tiny demo maf file
def test_filter_maf_file_cols(self):
    maf_lines = [
        ['# comment 1'],  # keep the comments
        ['# comment 2'],
        ['Hugo_Symbol', 'foo_value'],  # foo_value column should be removed in output
        ['SUFU', '1'],
        ['GOT1', '2']
    ]
    # run the script in a t...
[ "def test_metadata_filter_general(self):\n metadata = pd.DataFrame({'foo': {'haib18CEM5332_HMGTJCCXY_SL342402': 1}})\n table_factory = DataTableFactory(PACKET_DIR, metadata_tbl=metadata)\n tbl = table_factory.macrobes()\n self.assertEqual(tbl.shape, (1, 37))", "def get_filters(columns)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pickle data into file specified by filename.
def pickle(self, data, filename):
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
[ "def toPickle(filename, data):\r\n f = open(filename, 'w')\r\n cPickle.dump(data,f)\r\n f.close()", "def pickle_and_write_data(data, file_name):\r\n file = open(file_name, \"ab\") # creates a binary file and opens in append mode\r\n pickle.dump(data, file) # pickles and stores the data list in th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method handles the initial setup of the agent to populate useful fields (such as what team we're on). A distanceCalculator instance caches the maze distances
def registerInitialState(self, gameState):
    '''
    Make sure you do not delete the following line. If you would like to
    use Manhattan distances instead of maze distances in order to save on
    initialization time, please take a look at
    CaptureAgent.registerInitialState in captureAgents.py.
    '''
    self...
[ "def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace 0s with 1s at positions of a bag-of-words dataframe to indicate that feature words are present in docs.
def get_bow_dummies(self):
    # Get an np matrix of zeros based on defined dim
    zero_matrix = np.zeros(self.dim, np.int)
    # Create a dataframe containing feature columns and 0's
    zero_df = pd.DataFrame(zero_matrix, columns=self.features)
    # Get a dictionary of index and features per ...
[ "def lexicon_features(tokens, feats):\n# feats['neg_words'] = 0;\n# feats['pos_words'] = 0;\n# tokens = list([token.lower() for token in tokens])\n# feats['neg_words'] , feats['pos_words'] = np.count_nonzero(np.in1d(tokens, list(neg_words))), np.count_nonzero(np.in1d(tokens, list(pos_words)))\n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove agent from all environments and the model.
def delete(self): self.model.remove_agents(self)
[ "def remove_agents(self, agents):\n for agent in list(make_list(agents)): # Soft copy as list is changed\n self._agents.remove(agent)\n agent.envs.remove(self)", "def delete_agents_from_env(env):\n agents = get_agents_from_environment(env)\n for agent in agents:\n udcli(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the agent's position from a spatial environment.
def position(self, env=None):
    # TODO make position explicit 'position' for custom subclasses
    env = self._find_env(env)
    return env._agent_dict[self]
[ "def get_agent_position(agent):\n pos = Vector2()\n pos.x = agent.pose.pose.position.x\n pos.y = agent.pose.pose.position.y\n return pos", "def get_agent_position(self, agent_name):\n return self.agent_positions[agent_name]", "def current_agent_position(self):\n return self.environment...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes the agent's location in the selected environment.
def move_to(self, position, env=None):
    env = self._find_env(env)
    env.move_agent(self, position)
[ "def set_agent_loc(self, agent, r, c):\n assert (0 <= r < self.size[0]) and (0 <= c < self.size[1])\n i = agent.idx\n # If the agent is currently on the board...\n if self._agent_locs[i] is not None:\n curr_r, curr_c = self._agent_locs[i]\n # If the agent isn't actu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the agent's neighbors from its environments.
def neighbors(self, env=None, distance=1, **kwargs):
    if env:
        if isinstance(env, (list, tuple)):
            envs = [self._find_env(en) for en in env]
        else:
            return self._find_env(env).neighbors(
                self, distance=distance, **kwargs)
    elif len(se...
[ "def neighbors(self):\n return self._neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbours_names", "def neighbors(self):\n vertex = self.vertex()\n neighbors = vertex.children()\n if vertex.has_parent():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds agent to chosen environment.
def enter(self, env):
    env = self._find_env(env, new=True)
    env.add_agents(self)
[ "def addAgent(self,agent):\n self.agents.append(agent)", "def addAgent(self, agent):\n loc = agent.location\n self.cellGrid[loc.row,loc.column].addAgent(agent)\n self.agentTrack.append(agent)", "def add(self, agent: Agent) -> None:\n self._agents[agent.unique_id] = agent", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes agent from chosen environment.
def exit(self, env=None):
    env = self._find_env(env)
    env.remove_agents(self)
[ "def remove_from_project(self) -> None:\n self.project.agents.remove(self.agent_name)", "def delete_agent(self, agent):\r\n return self.delete(self.agent_path % (agent))", "def delete_agents_from_env(env):\n agents = get_agents_from_environment(env)\n for agent in agents:\n udcli('del...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes agents from the environment.
def remove_agents(self, agents):
    for agent in list(make_list(agents)):  # Soft copy as list is changed
        self._agents.remove(agent)
        agent.envs.remove(self)
[ "def delete_agents_from_env(env):\n agents = get_agents_from_environment(env)\n for agent in agents:\n udcli('deleteAgent -agent %s' % agent)", "def test_remove_agents():\r\n\r\n model = make_forest()\r\n model.env.remove_agents(model.agents)\r\n\r\n assert len(model.env.agents) == 0\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stops kernel and saves fundamental series to disk.
def kernelStopping(self):
    # Always call parent method to be safe.
    super().kernelStopping()
    self.writeFundamental()
[ "def stop_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT OFF\")", "def handle_stop(self):\n if self.save_fh:\n self.save_fh.close()", "def stop(self):\n self.kc.stop_channels()\n self.km.shutdown_kernel(now=True)\n del self.km", "def stopWrite(self):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves the fundamental value at self.currentTime to self.fundamental_series.
def measureFundamental(self):
    obs_t = self.oracle.observePrice(self.symbol, self.currentTime, sigma_n=0)
    self.fundamental_series.append({'FundamentalTime': self.currentTime, 'FundamentalValue': obs_t})
[ "def writeFundamental(self):\n dfFund = pd.DataFrame(self.fundamental_series)\n dfFund.set_index('FundamentalTime', inplace=True)\n self.writeLog(dfFund, filename='fundamental_{symbol}_freq_{self.log_frequency}_ns'.format(self.symbol))\n\n print(\"Noise-free fundamental archival complete...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logs fundamental series to file.
def writeFundamental(self):
    dfFund = pd.DataFrame(self.fundamental_series)
    dfFund.set_index('FundamentalTime', inplace=True)
    self.writeLog(dfFund, filename=f'fundamental_{self.symbol}_freq_{self.log_frequency}_ns')
    print("Noise-free fundamental archival complete.")
[ "def logging_data(self):\n with open('sensor_data.log','w') as f:\n json.dump(self.read_continuous_data, f)", "def _logger(self):\r\n\r\n # Create filename for log\r\n filenameF = self._vna.getDateFormatted() + \".txt\"\r\n filenameF = \"Logs/\" + filenameF \r\n f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
select(self, str) -> str
select(self, QUrl) -> QUrl
def select(self, *__args):  # real signature unknown; restored from __doc__ with multiple overloads
    return "" or QUrl
[ "def isolateSelect(*args, **kwargs):\n\n pass", "def selectionConnection(*args, **kwargs):\n\n pass", "def select(sparql_query):", "def do_select(menu):\n if menu.matches:\n match = menu.matches[menu.index]\n else:\n match = \"\".join(menu.input)\n menu.emit(\"selected\", match)",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the game has been stuck (likely waiting to snap) for too long. If it has, press buttons to try to remedy this.
def checkStuck(stuckCount, pi):
    if stuckCount > 2000:
        # Execute button presses for high count
        pi.send("Press A")
        time.sleep(1)
        pi.send("Press A")
        time.sleep(2)
        pi.send("Press A")
        time.sleep(2)
        # Reset the count to 0
        return 0
    else:
        ...
[ "def wrong_button(self):\n pyglet.clock.unschedule(self.too_long)\n self.take_rest()\n self.penalty(0)", "def wait_for_correct_position(self):\n start_time = time()\n while not self._correct_position():\n if time() - start_time > 30.0:\n print(\"Somethi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new knowledge entry.
def create(self, request):
    if not hasattr(request, "data"):
        request.data = request.POST
    attrs = self.flatten_dict(request.data)
    if not attrs.get('include_answer_page', None):
        if 'answer_page_title' in attrs:
            del attrs['answer_page_title']
        if 'a...
[ "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Partition of all values of `dimension` within `processes`
def partition(self, dimension, processes=None):
    if processes:
        q = (self._table.source.isin(processes) | self._table.target.isin(processes))
        values = self._table.loc[q, dimension].unique()
    else:
        values = self._table[dimension].unique()
    return P...
[ "def get_partition_multiples(self):\n multiples = [1]\n if self.work_dimension == 1:\n if not self.local_work_size:\n multiples = [1]\n else:\n multiples = [self.local_work_size[0], 1]\n elif self.work_dimension == 2:\n if not self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter flows according to source_query, target_query, and flow_query.
def find_flows(flows, source_query, target_query, flow_query=None, ignore_edges=None):
    if flow_query is not None:
        flows = flows[eval_selection(flows, '', flow_query)]
    if source_query is None and target_query is None:
        raise ValueError('...
[ "def _process_alert_source_filters(query, filters):\n if filters:\n if not is_valid_model_filters(models.AlertSource, filters):\n return\n query = query.filter_by(**filters)\n\n return query", "def apply_filters(self, query, filters):\n assert isinstance(query, peewee.Query)\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the values for each of the limb properties in the arguments list with a non-None value. Returns this instance for method chaining.
def assign(self, *args, **kwargs) -> 'Property':
    for i in range(len(args)):
        value = args[i]
        if value is not None:
            self.set(KEYS[i], value)
    for short_key, long_key in LIMB_KEY_LOOKUP.items():
        if short_key in kwargs and kwargs[short_key] is not None:
            ...
[ "def setattrs(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n return self", "def __init__(self, *args):\n this = _libsbml.new_ListOfParameters(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Values for each limb as a tuple containing the limb-ordered values of the Property.
def values(self) -> tuple: return ( self.left_pes, self.right_pes, self.left_manus, self.right_manus )
[ "def oo_properties(**args):\n props = []\n for key in args:\n prop = PropertyValue()\n prop.Name = key\n prop.Value = args[key]\n props.append(prop)\n\n return tuple(props)", "def get_values(state):\n keys = ['player_ammo', 'player_block', 'player_prev',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a deep copy of the Property instance. The clone attempts to
def clone(self):
    def deep_copy(value):
        try:
            if hasattr(value, 'clone'):
                value.clone()
        except Exception:
            pass
        try:
            json.loads(json.dumps(value))
        except Exception:
            pass
        ...
[ "def clone(self):\n clone = super(Property, self).clone()\n clone.fget = self.fget\n clone.fset = self.fset\n clone.cached = self.cached\n return clone", "def clone(self):\r\n import copy\r\n return self._wrap(copy.copy(self.obj))", "def clone(self):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the organized store with items arranged by aisle; uses a single list and a dictionary. Assuming N items in inventory and G items in grocery_list. Students don't implement it this way; I was having fun seeing how concise I could be (at the expense of understandability and efficiency). A more readable sketch follows this row.
def organize(inventory, grocery_list, exists=set()):
    lst = sorted(inventory, key=lambda x: x.aisle)  # sort by aisle - O(N*logN)
    aisles = [[] for y in lst if not exist_test(y.aisle, exists)]  # create unique aisles only - O(N)
    [aisles[y.aisle].append(y.grocery) for y in lst if y.grocery in grocery_list]  # append groc...
[ "def add_to_inventory(inventory, item_name, count, weight, item_type, gathered_items=[]):\n\n item_dict = collectable_item(item_name, count, weight, item_type)\n if inventory:\n for index in range(len(inventory)):\n for key in inventory[index]:\n if item_name == key:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check spot and futures funds.
def test_008(self):
    j = {
        "accountId": a_id
    }
    result = requests.post(getAsset, json=j, headers=header)
    print('<---------- spot ---------->')
    for i in result.json()['data']['position']['spot']:
        print(i)
    print('<---------- future ---------->')
    ...
[ "def market_info(self, symbol):\n r = requests.get(self.base_url + f'/game/locations/{symbol}/marketplace', headers = self.auth_header)\n return r.text", "def test_info_with_votes(self):\n agnt=TwoPartyAgent()\n self.bill.add_record(agnt,True)\n import StringIO\n buff=Str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a counter for all words in text.
def calculate_word_counts(text: Text) -> Counter:
    return Counter(tokenized_text(text))
[ "def word_frequency(text):\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text)\n\n stop = set(stopwords.words('english'))\n tokens_without_stop = list(filter(lambda word: word.lower() not in stop, tokens))\n\n counts = Counter(tokens_without_stop)\n return counts", "def wo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append a highscore to the list.
def appendScore(self, l):
    score = Highscore(l[0], l[1])
    self.scores.append(score)
[ "def add_high_score(self, score: int) -> bool:\n if len(self.high_scores) < MAXIMUM_NUMBER_OF_HIGH_SCORES or (score > self.high_scores[-1]):\n self.high_scores.append(score)\n self._cleanup_high_scores()\n return True\n else:\n return False", "def add_scor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the next highest highscore. Return it and delete it.
def getNextHighest(self):
    maxScore = -1
    idx = -1
    for i, s in enumerate(self.scores):
        if s.score > maxScore:
            maxScore = s.score
            idx = i
    if idx != -1:
        score = self.scores[idx]
        del self.scores[idx]
        ret...
[ "def getHighScore(self):\n return max(self.scores)", "def del_max(self):\n if self.is_empty():\n raise NoSuchElementException(\"Priority queue underflow\")\n\n _max = self._pq[1]\n self._exch(1, self._n)\n self._n -= 1\n self._sink(1)\n self._pq[self._n + 1] = None\n if self._n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a score is high enough to be a highscore.
def isHighscore(self, score):
    score = float(score)
    if len(self.scores) < 10:
        return True
    lowest = float('inf')
    for s in self.scores:
        if s.score < lowest:
            lowest = s.score
    if score > lowest:
        return True
    else:
        ...
[ "def check_high_score(self):\n if self.stats.score > self.stats.high_score:\n self.stats.high_score = self.stats.score\n self.prep_high_score()", "def check_high_score(self):\r\n if self.stats.score > self.stats.high_score:\r\n self.stats.high_score = self.stats.scor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract subject URLs from material pages.
def get_subjects_urls(self, subjects: Iterable[Subject]) -> List[str]:
    self.logger.debug('Finding subjects urls.')
    all_rows = self.browser.find_elements(*MaterialLocators.SUBJECT_ROW)
    subjects = {(s.name.strip('. '), s.teacher.strip('. ')) for s in subjects}
    subjects_urls = []
    fo...
[ "def get_subjects_IOP_urls(url):\n # f = open(\"test.txt\", 'a+')\n body = getBody(url)\n\n html = soup(body,'html.parser')\n # print(html.original_encoding)\n div_content = html.find(id=\"content\")\n a_elems = div_content.find_all(\"a\", recursive=True, class_=\"entry-image-post-link\".encode('u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the app name. If the appLaunch field does not exist, skip that log; such logs are rare, around 0.0...01%.
def _extract_appname(self, log):
    appname = ""
    if "appLaunch" in log:
        appname = log["appLaunch"]["appName"]
    else:
        self.logger.info("no applaunch field")
        self.logger.info(log["event"])
        pass
    return appname
[ "def get_app_log(self, emulator_id: str, package_name: str):\n pass", "def app_line(line):\r\n if line:\r\n aux_apk, packagename = line.split('=')\r\n apk = aux_apk.split(':')[1]\r\n return [packagename, apk]", "def log_app_start():\n\n LOGGER.info(\"\")\n LOGGER.info(\"####...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract location info. If the location field does not exist, (x, y, z) = (0, 0, 0).
def _extract_location_xyz(self, log):
    if "location" in log:
        x = log["location"]["latitude"]
        y = log["location"]["longitude"]
        z = log["location"]["altitude"]
    else:
        self.logger.debug("NaN case")
        x = "NaN"  # matlab Nan?
        y = "NaN"
        ...
[ "def coordinates(self, encoded_location):\n x = encoded_location % 13 \n y = math.floor(encoded_location/13) \n return x, y", "def get_location(self):\n return (self.__x, self.__y)", "def get_location() -> Tuple[float]:\n\n return (float(__PyLocationHelper__.longitude), float(__Py...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert ${unix time}${millisecond} to periodic information on a weekly basis. Return a string in the format 'x,y'.
def _convert_timestamp_2_periodic_time(self, timestamp):
    l = ""
    # daily periodic
    theta = self.two_pi_by_one_day_second * (int(timestamp[0:-3]) % self.one_day_second)
    # x = 1 + np.cos(theta)
    # y = 1 + np.sin(theta)
    x = np.cos(theta)
    y = np.sin(theta)
    ...
[ "def weekly():", "def hourly(x):\n ann_salary = x * 40.0 * 52.0 #assumes 40 hours a week and 52 weeks in a year\n print(f\"The anual salary is ${ann_salary:,.2f}\")", "def millis_to_human_readable(time_millis):\n weeks = 0\n days = 0\n hours = 0\n minutes = 0\n seconds =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Some tablenames are forbidden because they're reserved words in the database, and can potentially cause issues.
def test_protected_table_names(self):
    with self.assertRaises(ValueError):
        class User(Table):
            pass
    with self.assertRaises(ValueError):
        class MyUser(Table, tablename="user"):
            pass
[ "def _valid_table_name(name):\n if name[0] not in \"_\" + string.ascii_letters or not set(name).issubset(\n \"_\" + string.ascii_letters + string.digits\n ):\n return False\n\n else:\n return True", "def table_name() -> str:\n pass", "def _class_name_to_table_name_transforma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure help_text can be set for the Table.
def test_help_text(self):
    help_text = "The manager of a band."
    class Manager(Table, help_text=help_text):
        pass
    self.assertEqual(Manager._meta.help_text, help_text)
[ "def help_text(self, help_text):\n self._help_text = help_text", "def set_help(self, help_text):\n self.__help_str = help_text", "def test_init_sets_help_text(self):\n self.assertEqual(PartialDateField().help_text, PartialDateField.help_text)\n self.assertEqual(PartialDateField(help_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a SQLAlchemy engine depending on the configuration passed. At this time it only supports mysql.
def __my_create_engine(self, config):
    return {
        'mysql': lambda c: create_engine(
            "mysql://" + c["user"] + ":" + c["password"]
            + "@" + c["host"] + "/" + c["database"],
            encoding="utf-8",
            ...
[ "def create_engine_from_config(config):\n return create_engine(config.get('app', 'db_dsn'),\n echo=config.getboolean('app', 'debug'))", "def new_db_engine(config):\n url = config.get('DATABASE_URL')\n db_config = avalon.models.SessionHandlerConfig()\n db_config.engine = avalon....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a method to do color segmentation of the image called 'fachada1.png'; the segmentation uses six trackbars, two per channel (H, S, V).
def color_segmentation(self):
    cv.namedWindow("Segmentation parameters")
    self.create_trackbar("h-u", "Segmentation parameters")
    self.create_trackbar("h-l", "Segmentation parameters")
    self.create_trackbar("s-u", "Segmentation parameters")
    self.create_trackbar("s-l", "Segmentation para...
[ "def __classifyColors__(self):\r\n wSize = self.windowSize\r\n numOfWindPerDim = np.int(np.sqrt( self.numofClusters ))\r\n for row in range( self.image.shape[0] ):\r\n for col in range( self.image.shape[1] ):\r\n pixelU = self.segmentedImage[row,col...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generate_session_key creates a new user if not found
def test_generate_session_key_creates_user(self, db_mock):
    repo = Repository()
    db_instance = db_mock.return_value
    db_instance.get_user_info_by_google_id.return_value = None
    db_instance.new_session.return_value = "1234"
    self.assertEquals(repo.generate_session_key({"id": 123}), "...
[ "def _set_user_key(session: SessionBase, user_key: str) -> None:\n session[views.SESSION_USER_KEY_NAME] = user_key\n session.save()", "def generate_user(self):\n token = str(uuid.uuid4())\n return self.generate_subid(token=token, return_user=True)", "def generate_session_key(self, lifetime=None):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bool, true if you can push to the branch.
def can_push(self) -> bool: return pulumi.get(self, "can_push")
[ "def developer_can_push(self) -> bool:\n return pulumi.get(self, \"developer_can_push\")", "def deploy_on_push(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"deploy_on_push\")", "def git_branch_check():\n current_branch = str(subprocess.Popen('git branch | grep \"*\" | sed \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bool, true if developer-level access allows merging the branch.
def developer_can_merge(self) -> bool: return pulumi.get(self, "developer_can_merge")
[ "def allows_merge_commit(self):\n\n return self.data[\"mergeCommitAllowed\"]", "def has_merge_perm(self, user, obj):\n return user.has_perm('trackable_object.change_trackableobject')", "def can_merge(self, mr):\n g = self._impl._git.git\n # http://stackoverflow.com/a/6283843\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bool, true if developer level access allows git push.
def developer_can_push(self) -> bool: return pulumi.get(self, "developer_can_push")
[ "def can_push(self) -> bool:\n return pulumi.get(self, \"can_push\")", "def deploy_on_push(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"deploy_on_push\")", "def is_push_enabled(self) -> bool:\n return pulumi.get(self, \"is_push_enabled\")", "def developer_can_merge(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bool, true if the branch has been merged into its parent.
def merged(self) -> bool: return pulumi.get(self, "merged")
[ "def can_merge(self, mr):\n g = self._impl._git.git\n # http://stackoverflow.com/a/6283843\n # fetch source branch\n g.fetch(mr.downstream_repo.full_fs_path, mr.source_branch)\n # find merge base\n merge_base = g.merge_base(mr.downstream.commit_id, mr.target_branch)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import the SDMX MSD into JSON Schema. Overrides parent.
def load_schema(self):
    schema = {
        "type": "object",
        "properties": {}
    }
    msd = self.parse_xml(self.schema_path)
    for concept in msd.findall('.//Concept'):
        concept_id = self.alter_key(concept.attrib['id'])
        self.add_item_to_field_order(concept...
[ "def load_schema(self):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {}\n }\n\n msd = self.parse_xml(self.schema_path)\n for concept in msd.findall('.//Concept'):\n concept_id = concept.attrib['id']\n self.add_item_to_field_order(co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests for method `createRegionDimensions`.
def test_createRegionDimensions(self):
    classList = {}
    classList[RegionType.REGION_TYPE_BOX] = RegionDimensions.RegionDimensionsBox
    classList[RegionType.REGION_TYPE_CYLINDER] = RegionDimensions.RegionDimensionsCylinder
    classList[RegionType.REGION_TYPE_SPHERE] = RegionDimensions.Regi...
[ "def test_createRegion(self):\n region = Region.create(self.apiclient,\n self.services[\"region\"]\n )\n\n list_region = Region.list(self.apiclient,\n id=self.services[\"region\"][\"regionid\"]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests for class `RegionDimensionsCylinder`.
def test_RegionDimensionsCylinder_extractFromLinesWithKey(self):
    regionDimensions = RegionDimensions.RegionDimensionsCylinder()
    numberParameters = 8
    self.assertEquals(numberParameters, len(regionDimensions._keys))
    line = "RegionParameters=-500.000000 -500.000000 300.000000 0.00...
[ "def test_createRegionDimensions(self):\r\n\r\n classList = {}\r\n classList[RegionType.REGION_TYPE_BOX] = RegionDimensions.RegionDimensionsBox\r\n classList[RegionType.REGION_TYPE_CYLINDER] = RegionDimensions.RegionDimensionsCylinder\r\n classList[RegionType.REGION_TYPE_SPHERE] = Region...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize with real y, predicted y, and probabilities. y_proba should be a single column vector.
def __init__(self, clf, y_real, y_pred, y_proba):
    self.clf = clf
    self.y_real = y_real
    self.y_pred = y_pred
    self.y_proba = y_proba
[ "def initialize_probabilities(self):\n self.probabilities = np.ndarray((2, len(self.variables)), dtype=np.float)\n self.update_probabilities()", "def predict_proba(self, X, y=None, **kwargs):\n predicted_data = np.array([])\n for element in self.elements:\n element_transform...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if there is more data left on the pipe
def more_data(pipe_out):
    r, _, _ = select.select([pipe_out], [], [], 0)
    return bool(r)
[ "def has_data(self):\n return len(self._read_buf) > self._read_pos", "def has_unlimited_buffer(self):\n return self._buffer_size < 0", "def hasMoreData(self):\n return self.line != ''", "def eof(self):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tresult = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read data on a pipe. Used to capture stdout data produced by libiperf.
def read_pipe(pipe_out):
    out = b''
    while more_data(pipe_out):
        out += os.read(pipe_out, 1024)
    return out.decode('utf-8')
[ "def read_pipe(self, curr_data, pipe, task_name):\n # TODO: Look for another way to check if theres data to receive?\n # Attempt to get more data\n try:\n data = pipe.channel.recv(1)\n except:\n return curr_data\n \n # If new line, store line as a prin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Redirects stdout and stderr to a pipe
def output_to_pipe(pipe_in):
    os.dup2(pipe_in, 1)  # stdout
    # os.dup2(pipe_in, 2)  # stderr
[ "def redirect_stdio():\n\n oldstdout = -1\n try:\n if _redirect:\n try:\n stdoutno = procutil.stdout.fileno()\n stderrno = procutil.stderr.fileno()\n # temporarily redirect stdout to stderr, if possible\n if stdoutno >= 0 and stderr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialise the iperf shared library
def __init__(self, role, verbose=True, lib_name='libiperf.so.0'):
    # TODO use find_library to find the best library
    try:
        self.lib = cdll.LoadLibrary(lib_name)
    except OSError:
        raise OSError('Could not find shared library {0...
[ "def initialize_library():\n # return the handle to the shared object\n if os.name == \"nt\":\n pass\n # libc = load_windows_dll()\n else:\n libc = load_linux_so()\n return libc", "def __init__(self):\n par_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialise a new iperf test. struct iperf_test *iperf_new_test()
def _new(self): return self.lib.iperf_new_test()
[ "def __init__(self,\n role,\n verbose=True,\n lib_name='libiperf.so.0'):\n # TODO use find_library to find the best library\n try:\n self.lib = cdll.LoadLibrary(lib_name)\n except OSError:\n raise OSError('Could not find shar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set/reset iperf test defaults.
def defaults(self): self.lib.iperf_defaults(self._test)
[ "def testCmdDefaults(self):\n\n self.tcli_obj.color_scheme = 'cstring'\n self.tcli_obj.display = 'dstring'\n self.tcli_obj.mode = 'estring'\n\n # Sanity check defaults.\n assert (\n not tcli.FLAGS.interactive and\n tcli.FLAGS.cmds is None and\n tcli.FLAGS.display == 'raw' and\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The bind address the iperf3 instance will listen on. Use '*' to listen on all available IPs.
def bind_address(self):
    result = c_char_p(self.lib.iperf_get_test_bind_address(self._test)).value
    if result:
        self._bind_address = result.decode('utf-8')
    else:
        self._bind_address = '*'
    return self._bind_address
[ "def _allBindAddresses(self):\n if not config.BindAddresses:\n if getattr(socket, \"has_ipv6\", False):\n if conflictBetweenIPv4AndIPv6():\n # If there's a conflict between v4 and v6, then almost by\n # definition, v4 is mapped into the v6 space...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggles json output of libiperf. Turning this off will output the iperf3 instance results to stdout/stderr.
def json_output(self):
    enabled = self.lib.iperf_get_test_json_output(self._test)
    if enabled:
        self._json_output = True
    else:
        self._json_output = False
    return self._json_output
[ "def suppress_output_before_run(app):\n if not hasattr(app.pargs, 'output_handler_override'):\n return\n elif app.pargs.output_handler_override == 'json':\n app._suppress_output()", "def suppress_output_after_render(app, out_text):\n if not hasattr(app.pargs, 'output_handler_override'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The test duration in seconds.
def duration(self):
    self._duration = self.lib.iperf_get_test_duration(self._test)
    return self._duration
[ "def duration(self) -> int:\n return 0", "def duration(self):\n return 0", "def test_time(self) -> float:\n return self._test_time", "def get_duration(self):\n try:\n if self.is_skipped:\n return \"00:00\"\n assert self.start_time\n a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The number of streams to use.
def num_streams(self):
    self._num_streams = self.lib.iperf_get_test_num_streams(self._test)
    return self._num_streams
[ "def stream_count(self):\n return self._stream_count", "def max_concurrent_streams(self):\n return self.get(SettingsFrame.MAX_CONCURRENT_STREAMS, 2**32+1)", "def stream_count(self, stream_count):\n self._stream_count = stream_count", "def get_total_session_count(self) -> int:\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle zerocopy. Use the sendfile() system call for "Zero Copy" mode. This uses much less CPU. This is not supported on all systems. Note there isn't a hook in the libiperf library for getting the current configured value, so this relies on the zerocopy setter.
def zerocopy(self): return self._zerocopy
[ "def setAllowCopy(self,value):\n self.PDFreactorConfiguration.in1[\"allowCopy\"] = value", "def copy_disabled(self, copy_disabled):\n\n self._copy_disabled = copy_disabled", "def set_do0_off(self):\n self.device.eDigitalOut(channel=0,writeD=1,state=0)\n print('set_do0_off')", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the iperf3 server instance.
def run(self):
    def _run_in_thread(self, data_queue):
        """Runs the iperf_run_server

        :param data_queue: thread-safe queue
        """
        output_to_pipe(self._pipe_in)
        self.lib.iperf_run_server(self._test)
        # TODO json_output_string not available on ea...
[ "def test_client_succesful_run_output_to_screen(self):\n client = iperf3.Client()\n client.server_hostname = '127.0.0.1'\n client.port = 5207\n client.duration = 1\n client.json_output = False\n\n server = subprocess.Popen([\"iperf3\", \"-s\", \"-p\", \"5207\"])\n sl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the mean of the input numbers
def mean(mean_numbers): return sum(mean_numbers) / float(len(mean_numbers))
[ "def arithmetic_mean(numbers):\r\n return numpy.mean(numbers)", "def get_mean(numlist):\n return np.mean(numlist)", "def mean (num_list):\n list_mean=sum(num_list)/len(num_list)\n\n return list_mean", "def mean(vals):", "def mean(data):\n n = len(data)\n return sum(data)/float(n)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the median of the input numbers
def median(median_numbers):
    sorted_numbers = sorted(median_numbers)
    length = len(sorted_numbers)
    if len(median_numbers) % 2:  # uneven number of integers
        return sorted_numbers[length // 2]  # floor division keeps the index an int
    return (sorted_numbers[length // 2] + sorted_numbers[length // 2 - 1]) / 2.0
[ "def _get_median(list_of_numbers):\n if list_of_numbers:\n length = len(list_of_numbers)\n list_of_numbers = sorted(list_of_numbers)\n if length == 2:\n median = sum(list_of_numbers) / 2\n elif length % 2 == 0:\n median = (list_of_numb...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the standard deviation of the input numbers
import math

def stddev(std_numbers):
    mean = sum(std_numbers) / float(len(std_numbers))
    sum_std = 0.0
    for x in std_numbers:
        sum_std += (mean - x) * (mean - x)
    variance = sum_std / float(len(std_numbers))  # population variance
    stddev = math.sqrt(variance)
    return stddev
[ "def stddev(self):\r\n if len(self) < 2:\r\n return float('NaN')\r\n # The stupidest algorithm, but it works fine.\r\n arr = self.samples()\r\n mean = sum(arr) / len(arr)\r\n bigsum = 0.0\r\n for x in arr:\r\n bigsum += (x - mean)**2\r\n return sqrt(bigsum / (len(arr) - 1))", "def s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
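A quick worked check of the three statistics helpers above (note the median shown here uses integer division so it runs under Python 3); the values are arbitrary:

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(mean(data))    # 5.0
print(median(data))  # 4.5  (average of the two middle values)
print(stddev(data))  # 2.0  (population standard deviation)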
GetSelection() -> int. Returns the index of the selected item or wxNOT_FOUND if no item is selected.
def 取选中项索引(self):  # real signature unknown; restored from __doc__
    return self.GetSelection()
[ "def getSelectedIndex(self):\n tup = self.curselection()\n if len(tup) == 0:\n return -1\n else:\n return tup", "def get_selected_index(self) -> int:\n return self._selected_index", "def get_selected_index(self) -> int:\n if self.evt == EVT.ENG.SELECT:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
GetStringSelection() -> String. Returns the label of the selected item or an empty string if no item is selected.
def 取现行选中项文本(self):  # real signature unknown; restored from __doc__
    return self.GetStringSelection()
[ "def get_selected_text(self):\n # get_selected = self.selectedItems()\n get_selected = self.currentItem()\n if get_selected:\n # base_node = get_selected[0]\n # item_name = base_node.text(0)\n item_name = get_selected.text(0)\n\n # When child-item is ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select(n) This is the same as SetSelection() and exists only because it is slightly more natural for controls which support multiple selection.
def 选择项目(self, n):  # real signature unknown; restored from __doc__
    return self.Select(n)
[ "def select_using(self, n): \n self.select(self._selection[n])", "def 置现行选中项(self, n): # real signature unknown; restored from __doc__\n return self.SetSelection(n)", "def selection_to(self, index):\n self.tk.call(self._w, 'selection', 'to', index)", "def setSelection(self, p_int, p_int_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetSelection(n) Sets the selection to the given item n or removes the selection entirely if n == wxNOT_FOUND.
def 置现行选中项(self, n):  # real signature unknown; restored from __doc__
    return self.SetSelection(n)
[ "def select_using(self, n): \n self.select(self._selection[n])", "def SetSelection(self, index):\n self._list.SetSelection(index)", "def setSelection(self, p_int, p_int_1, p_int_2, p_int_3): # real signature unknown; restored from __doc__\r\n pass", "def _set_selection(self, new_sel_index...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SetStringSelection(string) -> bool. Selects the item with the specified string in the control.
def 置现行选中项文本(self, string):  # real signature unknown; restored from __doc__
    return self.SetStringSelection(string)
[ "def SetStringSelection(*args, **kwargs):\n return _core_.ItemContainer_SetStringSelection(*args, **kwargs)", "def SetStringSelection(self, text):\n self._list.SetStringSelection(text)", "def set_selection(self, selection):\n for num in self.cryptomattes:\n if self.cryptomattes[n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
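The five wrappers above are thin aliases over wxWidgets' single-selection API. A minimal round-trip sketch against a plain wx.ListBox, assuming the standard wxPython calls the wrappers delegate to:

import wx

app = wx.App()
frame = wx.Frame(None)
box = wx.ListBox(frame, choices=['alpha', 'beta', 'gamma'])
box.SetSelection(1)              # select item n
print(box.GetSelection())        # -> 1
print(box.GetStringSelection())  # -> 'beta'
box.SetStringSelection('gamma')  # select by label, returns bool
box.SetSelection(wx.NOT_FOUND)   # clears the selection entirely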
Create scatter output table. This function is used by the FL scatter-gather node to reduce a dynamic number of silo outputs into a single input for the user-supplied aggregation step.
def create_scatter_output_table(aggregated_output: Output(type="mltable"), **kwargs):
    # kwargs keys are input names (ex: silo_output_silo_1),
    # values are uri_folder paths
    save_mltable_yaml(aggregated_output, kwargs.values())
[ "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
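A hypothetical invocation of the component above; the silo paths are placeholders, and save_mltable_yaml / Output are the Azure ML helpers the snippet itself relies on, not verified SDK calls:

create_scatter_output_table(
    aggregated_output,                                   # an Output(type="mltable") target
    silo_output_silo_1="azureml://datastores/x/silo_1",  # placeholder uri_folder
    silo_output_silo_2="azureml://datastores/x/silo_2",  # placeholder uri_folder
)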
Poll current weather condition and log it to file
def main(logfile, location_id, units, api_key, interval, mode):
    logging.basicConfig(
        filename=logfile,
        filemode="a",
        format="%(created)f %(message)s",
        level=logging.INFO,
    )
    url = build_url(location_id, api_key, units)
    while True:
        result = get_data(url)
        ...
[ "def check_room_humidity(): \n if room1_humidity() or room2_humidity(): # Low Humidity needs a mist\n if len(initial_time('humidity_timestamp.txt')) == 0:\n time_write_to_file('humidity_timestamp.txt', 'w') \n water_misting(True) # Turn ON water mister\n time.sleep(5) # H...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
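A hedged sketch of the loop body the snippet truncates; get_data, url, and interval are the names assumed from the signature above, and logging one JSON document per poll matches the "%(created)f %(message)s" format it configures:

import json
import time

while True:
    result = get_data(url)            # helper from the same module (assumed)
    logging.info(json.dumps(result))  # one timestamped line per poll
    time.sleep(interval)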
Adds standard command-line arguments for interacting with hither/slurm calling conventions. Included arguments are --verbose (-v|-vv|-vvv...), --test (-t), --outfile (-o), --workercount (-w), --job-cache, --no-job-cache, --use-container, --no-container, --use-slurm, --slurm-partition, --slurm-accept-shared-nodes, --slurm-jobs-per-allocation, --slurm-max-simultaneous...
def add_standard_args(parser: ArgumentParser) -> ArgumentParser:
    parser.add_argument('--verbose', '-v', action='count', default=0,
                        help="Set verbosity level. Add vs for more verbosity.")
    # Note: Whatever 'number of iterations' means for your application
    # should be locally defined.
    parser.add_argument...
[ "def add_cmdline_args(argparser):\n DictionaryAgent.add_cmdline_args(argparser)\n agent = argparser.add_argument_group('Fairseq Arguments')\n agent.add_argument(\n '-tr', '--truncate',\n type=int, default=-1,\n help='truncate input & output lengths to speed up t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
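Usage of the truncated parser builder above is plain argparse; a small sketch, assuming add_standard_args returns the parser as its annotation suggests, showing how action='count' stacks repeated -v flags:

from argparse import ArgumentParser

parser = add_standard_args(ArgumentParser(description='demo'))
args = parser.parse_args(['-vv'])
print(args.verbose)  # 2: each -v increments the count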
Swaps portions of the mother and father's lists to create a child and returns a new genome with the swapped characteristics.
def crossover(mother: Layout, father: Layout):
    # make a copy of the mother
    child = Layout.deepcopy(mother)

    # get the count of 'chromosomes' to swap
    chromosomes_to_swap = len(child.get_guests()) // 2

    # for each of the chromosomes we need to swap, select a random
    # numb...
[ "def child_born(father_gene: str, mother_gene: str):\n len_gene = len(father_gene)\n excerpt_f = np.random.choice(range(3, len_gene - 1))\n excerpt_m = len_gene - excerpt_f\n # print(f\"父亲基因抽取后面的:{excerpt_f},母亲基因抽取前面的:{excerpt_m}\")\n part_gene_f = father_gene[len_gene - excerpt_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
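The Layout-based crossover above is truncated; for context, a generic single-point variant on plain lists (an illustration, not the author's implementation) looks like this:

import random

def single_point_crossover(mother, father):
    # pick one cut point, take the head from one parent and the tail from the other
    point = random.randrange(1, len(mother))
    return mother[:point] + father[point:]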
Perform a single integrator step from a supplied state.
def step(self, state):
    ...
[ "def integrate(self, state, t, dt, grad=None):\n if grad is None:\n grad = state.grad(t)\n next_state = state + dt * grad\n return next_state", "def step(self, dt):\n self.state = integrate.odeint(self.dstate_dt, self.state, [0, dt])[1]\n self.time_elapsed += dt", "def static_progres...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
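The step stub above leaves the integration scheme unspecified; as one assumed possibility (mirroring the state + dt * grad pattern visible in the negatives), an explicit Euler update would be:

def euler_step(state, grad, dt):
    # one explicit Euler step: advance the state along its gradient
    return state + dt * grad(state)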
Activate organizations created before moderation/activation was possible.
def activate_existing_organization(apps, schema_editor):
    Organization = apps.get_model("organization", "Organization")
    Organization.objects.all().update(is_active=True)
[ "def activate(self):\r\n self.update_enrollment(is_active=True)", "def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active", "def test_user_activate_page_view_with_success_for_org_admin(self):\n # Convert our User's ID into an encrypted value.\n user = User.o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
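In a Django project this function would typically be wired into a data migration via RunPython; a sketch with a placeholder dependency, since the real migration module isn't shown:

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('organization', '0001_initial')]  # placeholder
    operations = [migrations.RunPython(activate_existing_organization)]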