Dataset columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict).
Ion-box interaction with the left plane hard wall: make a dummy particle with the same diameter as the ion, touching the left side of the left wall, so that it is closest to the ion.
def _left_wall_lj_force(simul_box, ion_dict):
    with tf.name_scope("left_wall_lj_force"):
        # if (ion[i].posvec.z > 0.5 * box.lz - ion[i].diameter)
        mask = ion_dict[interface.ion_pos_str][:, -1] < ((-0.5 * simul_box.lz) - ion_dict[interface.ion_diameters_str])  # TODO: remove this mask if not cause of sim ...
[ "def mover( x, v, npart, L, mpv, vwall, tau) :\n \n #* Move all particles pretending walls are absent\n x_old = np.copy(x) # Remember original position\n x[:] = x_old[:] + v[:,0]*tau \n\n #* Loop over all particles\n strikes = np.array([0, 0])\n delv = np.array([0., 0.]) \n xwall = np...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ion interacting with the discretized right wall: electrostatic force between the ion and the right wall.
def _electrostatic_right_wall_force(simul_box, ion_dict):
    with tf.name_scope("electrostatic_right_wall_force"):
        return _electrostatic_wall_force(simul_box, ion_dict, simul_box.tf_right_plane)
[ "def walls(self):", "def _change_wall(self,):\n \n pass", "def _left_wall_lj_force(simul_box, ion_dict):\n with tf.name_scope(\"left_wall_lj_force\"):\n # if (ion[i].posvec.z > 0.5 * box.lz - ion[i].diameter)\n mask = ion_dict[interface.ion_pos_str][:, -1] < ((-0.5 * simul_box.lz)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ion interacting with the discretized left wall: electrostatic force between the ion and the left wall.
def _electrostatic_left_wall_force(simul_box, ion_dict):
    with tf.name_scope("electrostatic_left_wall_force"):
        return _electrostatic_wall_force(simul_box, ion_dict, simul_box.tf_left_plane)
[ "def _left_wall_lj_force(simul_box, ion_dict):\n with tf.name_scope(\"left_wall_lj_force\"):\n # if (ion[i].posvec.z > 0.5 * box.lz - ion[i].diameter)\n mask = ion_dict[interface.ion_pos_str][:, -1] < ((-0.5 * simul_box.lz) - ion_dict[interface.ion_diameters_str]) #TODO: remove this mask if not cau...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the forces acting on each ion and returns the updated ion_dict
def for_md_calculate_force(simul_box, ion_dict):
    with tf.name_scope("for_md_calculate_force"):
        pef = _particle_electrostatic_force(simul_box, ion_dict)
        erw = _electrostatic_right_wall_force(simul_box, ion_dict)
        elw = _electrostatic_left_wall_force(simul_box, ion_dict)
        plj = _particle...
[ "def _forces(self):\n # Loop through force in all links (tree and twigs)\n tree_uids = [self.tree_id] + self.twigIds\n force_trees = sum(\n [normal_force_between_bodies(self.source_model.uid, i) for i in tree_uids]\n )\n\n # Loop through force in all links\n forc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The `init` function initializes the `illegal` attribute by checking if each element in `self.diff` is less than `self.lowerbound`.
def init(self, part: Part):
    FMConstrMgr.init(self, part)
    self.illegal = [d < self.lowerbound for d in self.diff]
[ "def __init__(self, p_min, p_max, domains):\n \n if not check(p_min, p_max, domains):\n raise Exception(\"some constraint is violated!\") \n \n self._p_min = p_min\n self._p_max = p_max\n self._domains = domains", "def __init__(self, allowed, prohibit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function `select_togo` returns the index of the minimum value in the `diff` list.
def select_togo(self):
    return min(range(self.num_parts), key=lambda k: self.diff[k])
[ "def __get_min_delta_index(self, search_list, reference ):\n \n delta_list = [ abs(val - reference) for val in search_list ]\n \n min_delta = min ( delta_list )\n return delta_list.index( min_delta )", "def select_left(self, tasks, selection):\n window, task_i = selec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function `check_legal` checks if a move is legal and returns the status of the move.
def check_legal(self, move_info_v):
    status = FMConstrMgr.check_legal(self, move_info_v)
    if status != LegalCheck.AllSatisfied:
        return status
    _, from_part, to_part = move_info_v
    self.illegal[from_part] = self.illegal[to_part] = False
    if any(self.illegal):
        r...
[ "def is_legal_move(self, move):\n return move in self.legalMoves", "def move_is_legal(self,move):\n\t\tassert isinstance(move,Move)\n\n\t\tif move in self.possible_moves():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def test_check_legal_move():\r\n gc = GameController()\r\n board = Board(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_business_manager_for_current_zone() Retrieve a Business Manager for the current Zone.
def get_business_manager_for_current_zone(cls) -> BusinessManager:
    from sims4communitylib.utils.location.common_location_utils import CommonLocationUtils
    return cls.get_business_manager_by_zone_id(CommonLocationUtils.get_current_zone_id())
[ "def get_business_manager_by_zone_id(cls, zone_id: int) -> BusinessManager:\n return cls.get_business_service().get_business_manager_for_zone(zone_id=zone_id)", "def get_manager( self, manager_name ):\n\n try:\n return self.managers[ manager_name ]\n except:\n return Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_business_manager_by_zone_id(zone_id) Retrieve a Business Manager for a Zone.
def get_business_manager_by_zone_id(cls, zone_id: int) -> BusinessManager:
    return cls.get_business_service().get_business_manager_for_zone(zone_id=zone_id)
[ "def get_business_manager_for_current_zone(cls) -> BusinessManager:\n from sims4communitylib.utils.location.common_location_utils import CommonLocationUtils\n return cls.get_business_manager_by_zone_id(CommonLocationUtils.get_current_zone_id())", "def get_business_funds_by_zone_id(cls, zone_id: int)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_business_funds_for_current_zone() Retrieve the Funds object that manages the Simoleons for the Business of the current Zone.
def get_business_funds_for_current_zone(cls) -> Union[BusinessFunds, None]:
    from sims4communitylib.utils.location.common_location_utils import CommonLocationUtils
    return cls.get_business_funds_by_zone_id(CommonLocationUtils.get_current_zone_id())
[ "def get_business_funds_by_zone_id(cls, zone_id: int) -> Union[BusinessFunds, None]:\n business_manager = CommonBusinessUtils.get_business_manager_by_zone_id(zone_id)\n if business_manager is None:\n return\n return business_manager.funds", "def get_business_manager_for_current_zon...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_business_funds_by_zone_id(zone_id) Retrieve the Funds object that manages the Simoleons for the Business of a Zone.
def get_business_funds_by_zone_id(cls, zone_id: int) -> Union[BusinessFunds, None]:
    business_manager = CommonBusinessUtils.get_business_manager_by_zone_id(zone_id)
    if business_manager is None:
        return
    return business_manager.funds
[ "def get_business_funds_for_current_zone(cls) -> Union[BusinessFunds, None]:\n from sims4communitylib.utils.location.common_location_utils import CommonLocationUtils\n return cls.get_business_funds_by_zone_id(CommonLocationUtils.get_current_zone_id())", "def get_business_manager_by_zone_id(cls, zone...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_business_service() Retrieve an instance of the Business Service.
def get_business_service(cls) -> BusinessService:
    import services
    return services.business_service()
[ "def business_service(self) -> BusinessService:\n assert_value(self.token)\n return BusinessService(self.token, prod=self.prod)", "def BusinessProcess(self):\n return self._businessProcess", "def get_service():\n\n service = build(\"customsearch\", \"v1\",\n developerKey=api_k...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure this input won't cause locale.strxfrm to barf
def input_is_ok_with_locale(x: str) -> bool:
    # Bad input can cause an OSError if the OS doesn't support the value
    try:
        get_strxfrm()(x)
    except OSError:
        return False
    else:
        return True
[ "def _format_maybe_minus_and_locale(self, fmt, arg):\n return self.fix_minus(locale.format_string(fmt, (arg,), True)\n if self._useLocale else fmt % arg)", "def testTransliterationExceptions(self):\n self.assertRaises(ValueError, pytils.translit.translify, u'\\u00bfHabla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the enwik8 dataset from the Hutter challenge.
def enwik8(path=None, n_train=int(90e6), n_valid=int(5e6), n_test=int(5e6)):
    if path is None:
        path = here('data/enwik8.gz')
    with gzip.open(path) if path.endswith('.gz') else open(path) as file:
        X = np.fromstring(file.read(n_train + n_valid + n_test), dtype=np.uint8)
        trX, vaX, teX = np.s...
[ "def load_dataset(self):", "def load_dataset(self) -> None:\n raise NotImplementedError", "def load_matt_mahoney_text8_dataset(name='mm_test8', path='raw_data'):\n path = os.path.join(path, name)\n logging.info(\"Load or Download matt_mahoney_text8 Dataset> {}\".format(path))\n\n maybe_download_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the data (a single sequence of tokens) and slices out a batch of subsequences to provide as input to the model. For each input instance, it also slices out the sequence that is shifted one position to the right, to provide as a target for the model.
def sample_batch(data, length, batch_size):
    # Sample the starting indices of the sequences to slice out.
    starts = torch.randint(size=(batch_size,), low=0, high=data.size(0) - length - 1)
    # Slice out the input sequences
    seqs_inputs = [data[start:start + length] for start in starts]
    # -- the start ...
[ "def input_and_target(data):\r\n input_seq=[]\r\n target_seq=[]\r\n for i in range(len(data)):\r\n input_seq.append(data[i][:-1])\r\n target_seq.append(data[i][1:])\r\n return input_seq, target_seq", "def prepare_batch(batch_data, batch_sent_lens):\n cur_max = max(batch_sent_lens) # g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Masks out all values in the given batch of matrices where i <= j holds (i < j if mask_diagonal is false). In-place operation.
def mask_(matrices, maskval=0.0, mask_diagonal=True):
    h, w = matrices.size(-2), matrices.size(-1)
    indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
    matrices[..., indices[0], indices[1]] = maskval
[ "def contour_matrix_mask(contour_matrix: np.ndarray) -> np.ndarray:\n return ndimage.binary_fill_holes(contour_matrix) # ~ to invert", "def custom_mask(mask):\n \n new_mask = np.zeros(mask.shape[0]*mask.shape[1])\n new_mask = new_mask.reshape(mask.shape[0], mask.shape[1])\n for i in range(1):\n for j in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the _compression_ of a dataset under a model. That is, given a model, in how many bits could we represent the dataset. This requires us to turn a given probability distribution into a code for the outcomes.
def compute_compression(model, data, context, batch_size, verbose=False, tbw: SummaryWriter = None, tok=None, skip=0):
    bits, tot = 0.0, 0
    batch = []  # Buffer, every time it fills up, we run it through the model
    # --- For the sake of speed we want to process the data in batches. For...
[ "def computeEncoding(model, imagePath):\n\t# load the input image and convert it from RGB (OpenCV ordering) to dlib ordering (RGB)\n\timage = cv2.imread(imagePath)\n\t# compute the embedding\n\tencoding = model.feed(image)\n\treturn(encoding)", "def encode_dataset(batch_size,downscale_factor,dataset, pooling_func...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all exporters with the given url
def get_all_by_url(url):
    return Exporter.objects.filter(url=url).all()
[ "def get_all_by_url(url):\n return Exporter.get_all_by_url(url)", "def GetAllExporters(self):\n return self.native.get_all_exporters()", "def get_by_url(exporter_url):\n return Exporter.get_by_url(exporter_url)", "def list_report_downloads(_request, course_id):\n\n course_id = SlashSeparatedCo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all default exporters
def get_all_default_exporter():
    return Exporter.objects.filter(enable_by_default=True).all()
[ "def get_all_default_exporter():\n return Exporter.get_all_default_exporter()", "def GetAllExporters(self):\n return self.native.get_all_exporters()", "def get_exports(self):\n exports = []\n\n # pylint: disable=no-member\n for export in self._pe.DIRECTORY_ENTRY_EXPORT.symbols:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all exporters matching the given list of template ids.
def get_all_by_template_list(template_id_list):
    queryset = Exporter.objects.all()
    for pk in template_id_list:
        queryset = queryset.filter(templates=pk)
    return queryset.all()  # TODO: test if works to replace __all
[ "def get_all_by_template_list(template_id_list):\n return Exporter.get_all_by_template_list(template_id_list)", "def _template_matches_ids(session, *files):\n template_matches = itertools.chain(*(file.template_matches or () for file in files))\n return [template_match.id for template_match in tem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string representation of the exporter's template list.
def get_templates_to_string(self):
    version_name_list = []
    for template in self.templates.all():
        version_name_list.append(template.display_name)
    return_value = ", ".join(version_name_list)
    return return_value
[ "def get_template_value(self) -> str:\n\t\ttry:\n\t\t\tresponse = str(self.yaml_data['statementMapping']['template']['value'])\n\t\texcept KeyError:\n\t\t\tresponse = None\n\t\treturn response", "def render_literal_value(self, value, type_):\n if isinstance(value, string_types):\n value ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns all Xsl exporters with the given id list.
def get_all_by_xsl_id_list(xsl_id_list):
    return ExporterXsl.objects.filter(xsl_transformation__in=xsl_id_list).all()
[ "def get_all_by_template_list(template_id_list):\n return Exporter.get_all_by_template_list(template_id_list)", "def get_all_by_template_list(template_id_list):\n queryset = Exporter.objects.all()\n for pk in template_id_list:\n queryset = queryset.filter(templates=pk)\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`database_tables()` returns three pandas DataFrame objects for tables pulled from the iecsa0600_099 database.
def database_tables():
    # SQL Auth
    SERVER = ""
    USER = ""
    PASSWORD = ""
    DATABASE = ""

    def grab_imitmidx():
        return """
        SELECT item_no, item_desc_1, item_desc_2, pur_or_mfg
        FROM imitmidx_sql;"""

    def grab_iminvloc():
        return """
        SELECT item_no, avg_cost, las...
[ "def all_dataframes(self):\n\n with self.Session.begin() as session:\n inspector = inspect(self.engine)\n schemas = inspector.get_schema_names()\n main = [{table_name: inspector.get_columns(table_name, schema=schema) for table_name in inspector.get_table_names(schema=schema)}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`write_local_tables()` accepts four pandas.DataFrame objects.
def write_local_tables(Itmidx, Invloc, Bmprdstr, Sfdtlfil):
    PATH = "099.db"
    try:
        os.remove(PATH)  # Make this line a comment before running in Debian WSL
        # os.remove("/mnt/c/sqlite/099.db")  # Uncomment this line for usage in Debian WSL
    except PermissionError:
        with open(PATH, 'w') as ...
[ "def write_data_to_sql(input_formatted_tables: dict, **kwargs) -> None:\n _LOGGER.info(f\"writing data to sql database\")\n for formatted_table_name, formatted_table in input_formatted_tables.items():\n write_dataframe_to_sql(formatted_table, formatted_table_name, **kwargs)", "def savePivotTables(df)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`read_local_tables()` accepts a "sqlite3.connection" object generated by `write_local_tables()` to minimize the number of SQLite connections. Otherwise, if `load.read_local_tables()` is called in isolation on the interpreter, a new SQLite connection is made. This function returns pandas.DataFrame objects for each of th...
def read_local_tables(Local_Connection=sqlite3.connect("C://sqlite/099.db")):
    # assert(Local_Connection is not None)  # Marked for potential removal
    '''
    try:
        del Imitmidx, Invloc, Bmprdstr, Sfdtlfil, imitmidx_sql, iminvloc_sql, bmprdstr_sql, sfdtlfil_sql
        gc.collect()
        gc.disable()
    ...
[ "def all_dataframes(self):\n\n with self.Session.begin() as session:\n inspector = inspect(self.engine)\n schemas = inspector.get_schema_names()\n main = [{table_name: inspector.get_columns(table_name, schema=schema) for table_name in inspector.get_table_names(schema=schema)}...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove any keys from dictionary beginning with an underscore
def remove_under(dictionary):
    keys = [key for key in dictionary.keys()]
    for key in keys:
        if key.startswith('_'):
            dictionary.pop(key)
[ "def strip_leading_underscores_from_keys(d: Dict) -> Dict:\n newdict = {}\n for k, v in d.items():\n if k.startswith(\"_\"):\n k = k[1:]\n if k in newdict:\n raise ValueError(f\"Attribute conflict: _{k}, {k}\")\n newdict[k] = v\n return newdict", "def cl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove any keys from nested dictionary beginning with the word `contraction`
def remove_contraction(dictionary):
    keys = [key for key in dictionary.keys()]
    for key in keys:
        if isinstance(key, str) and key.startswith('contraction'):
            dictionary.pop(key)
[ "def remove_under(dictionary):\n keys = [key for key in dictionary.keys()]\n for key in keys:\n if key.startswith('_'):\n dictionary.pop(key)", "def test_flatten_ignore_keys(self):\n dic = {\n 'a': {'a': [1, 2, 3]},\n 'b': {'b': 'foo', 'c': 'bar'},\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform JSON serialised data into a networkx Graph object
def deserialize_networkx_graph(data):
    data = python_to_numpy_recursive(data)
    graph = node_link_graph(data)
    return graph
[ "def dict_to_networkx(data):\n data_checker(data)\n G = nx.Graph(data)\n return G", "def convert_json(graph: rdflib.Graph):\n serialized_json = graph.serialize(format='json-ld')\n return json.loads(serialized_json.decode(\"utf-8\"))", "def _parse_graph(self, graph_json):\n json_obj = json....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform a networkx Graph object into a JSON serialised dictionary
def serialize_networkx_graph(graph):
    data = node_link_data(graph)
    data = numpy_to_python_recursive(data)
    return data
[ "def as_dict(self):\n\n return self.graph", "def deserialize_networkx_graph(data):\n\n data = python_to_numpy_recursive(data)\n graph = node_link_graph(data)\n\n return graph", "def dict_to_networkx(data):\n data_checker(data)\n G = nx.Graph(data)\n return G", "def flat_graph(self) ->...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert all numpy values in nested dictionary to pure python values
def numpy_to_python_recursive(dictionary):
    for key, value in dictionary.items():
        if isinstance(value, dict):
            numpy_to_python_recursive(value)
        elif isinstance(value, np.ndarray):
            dictionary[key] = value.tolist()
        elif isinstance(value, (np.int32, np.int64)):
            ...
[ "def python_to_numpy_recursive(dictionary):\n\n for key, value in dictionary.items():\n\n if isinstance(value, dict):\n python_to_numpy_recursive(value)\n\n elif isinstance(value, list):\n if key in ['xy', 'direction']:\n dictionary[key] = np.array(value)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert list values under selected keys in a nested dictionary back to numpy arrays (the inverse of `numpy_to_python_recursive`).
def python_to_numpy_recursive(dictionary):
    for key, value in dictionary.items():
        if isinstance(value, dict):
            python_to_numpy_recursive(value)
        elif isinstance(value, list):
            if key in ['xy', 'direction']:
                dictionary[key] = np.array(value)
            else:
                ...
[ "def numpy_to_python_recursive(dictionary):\n\n for key, value in dictionary.items():\n\n if isinstance(value, dict):\n numpy_to_python_recursive(value)\n\n elif isinstance(value, np.ndarray):\n dictionary[key] = value.tolist()\n\n elif isinstance(value, (np.int32, np.i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks index 'pos' of 'string' separated by 'sep' for substring 'word'. If present, removes 'word' and returns the amended string.
def check_string(string, pos, sep, word):
    if sep in string:
        temp_string = string.split(sep)
        if temp_string[pos] == word:
            temp_string.pop(pos)
            string = sep.join(temp_string)
    return string
[ "def _cut(self, word, ending, pos):\n\n match = ending.search(word, pos)\n if match:\n try:\n ignore = match.group(\"ignore\") or \"\"\n except IndexError:\n # No ignored characters in pattern.\n return True, word[:match.start()]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check_file_name(file_name, file_type="", extension="") Checks file_name for file_type or extension. If present, returns amended file_name without extension or file_type
def check_file_name(file_name, file_type="", extension=""):
    file_name = check_string(file_name, -1, '.', extension)
    file_name = check_string(file_name, -1, '_', file_type)
    return file_name
[ "def file_type(self, file_name):\n string = \"\"\n check = False\n for i in range(len(file_name) - 1, 0, -1):\n if file_name[i] != \".\":\n string = file_name[i] + string\n else:\n check = True\n break\n if check is True ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If an extension exists on file_name, replace it with new extension. Otherwise add new extension
def replace_ext(file_name, extension):
    path, ext = os.path.splitext(file_name)
    if ext != f'.{extension}':
        file_name = path + f'.{extension}'
    return file_name
[ "def change_ext(filename, new_ext):\n return re.sub(r\"\\.\\w+$\", new_ext, filename)", "def _change_ext(self, filename):\n name = utils.get_name(self.name)\n ext = utils.get_ext(filename)\n self.name = name + ext", "def replace_file_extension(file_name, extension):\n prefix, _, _ = f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses and saves DID parts from self.__did_reference; raises ValueError if parsing fails.
def _extractDidParts(self):
    matches = DID_RE.match(self.__did_reference)
    if matches:
        self.__did, self.scheme, self.method, self.idString, self.path, self.query, self.fragment = matches.groups()
    else:
        raise ValueError("Could not parse DID.")
    return self
[ "def validate(cls, did: str):\n if not cls.is_valid(did):\n raise InvalidDIDError('\"{}\" is not a valid DID'.format(did))\n return did", "def parse_deceased_field(deceased_field):\n dob_index = -1\n dob_tokens = [Fields.DOB, '(D.O.B', '(D.O.B.', '(D.O.B:', '(DOB', '(DOB:', 'D.O.B.'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a did string and returns a subclass of Did based on the did method or None if no subclass exists.
def getDIDModel(did_reference):
    scheme, method, idstring = did_reference.split(":", 2)
    method = method.strip()
    package = 'didery.did.methods.' + method
    class_name = method.capitalize()
    try:
        module = importlib.import_module(package)
    except ModuleNotFoundError as er:
        return None
    ...
[ "def select_subclass(self, subclass_str):\n if subclass_str in ['', 'None', 'none', None]:\n return None\n for sc in self.subclasses_available:\n if subclass_str.lower() in sc.name.lower():\n return sc(owner=self.owner)\n return None", "def guess_type(cls,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Derives a session key with the given passphrase and salt. This key is then used during encrypt and decrypt operations. Calling this method with the same passphrase and salt shall always yield the same key.
def derive_key(self, passphrase, salt):
    pass
[ "def makeKey(password, salt):\n return KDF.PBKDF2(\n password, salt, dkLen=32, count=5000, hmac_hash_module=Hash.SHA256\n )", "def derive_key(self, data, salt=None): \r\n\t\tdigest=None\r\n\t\tif salt==None:\r\n\t\t\tsalt=os.urandom(16)\r\n\t\tif self.digest == 'SHA-512':\r\n\t\t\tdigest ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks the magic_no from the client, which is given in bytes; tries to confirm that it is 0x497E and raises an error if it is not 0x497E.
def check_magic_no(header):
    try:
        magic_no = ((header[0] << 8) + header[1]).to_bytes(2, 'big')
        if int.from_bytes(magic_no, 'big') != 0x497E:
            sys.exit(1)
        print('Magic number acceptable.\n')
    except:
        print('Error while checking the magic number\n')
        sys.exit(1)
[ "def _check_magick(self):\n\n self.seek(0,0)\n self.ei_magic = self.read(4)\n classes = {0:'Invalid',1:'32-bit',2:'64-bit'}\n if self.ei_magic != '\\x7fELF':\n raise RuntimeError(\"input {0} doesn't contain supported ELF header\".format(self.name))\n\n self.ei_class = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks the port number and returns it if it is in the range 1024 to 64000; if not, prints the error and exits.
def process_port_number(port):
    try:
        port = int(port)
        if port in range(1024, 64001):
            print('Port number is valid. Your port number is {}\n'.format(port))
            return port
        else:
            sys.exit(1)
    except:
        print('Unacceptable port number: Must be in range b...
[ "def port_num(port):\n print(\"checking port numbers\")\n if port not in PORT_RANGE:\n return False\n else:\n return True", "def validate_port(self):\n\n if self.port == None:\n self.port = \"\"\n else:\n try:\n self.port = int(self.port)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
param length_file1: the size of the new_file given by the file_response len_file from the server. param length_file2: the size of the new_file that the client has built from the file_response file_data. Checks the length of the new_file received from the server against what the client has processed from the data. i...
def check_file_length(length_file1, length_file2):
    try:
        if length_file1 == length_file2:
            print('The file has successfully downloaded.\n')
        else:
            raise OSError
    except:
        print('length of new_file and len data did not match\n')
        print('Expected {} but, got {}.'...
[ "def fileCompare(file1, file2):\n with open(file1, \"r\") as iFile, open(file2, \"r\") as oFile:\n testCaseNumber = 1\n for iLine, oLine in zip(iFile, oFile):\n print(\"For testcase \", testCaseNumber)\n numberArray = iLine.rstrip().split(' ')\n assert findLongestLe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to get an IP address and port number; if it fails, prints the error and exits. I go through this process because ip_address could be a host name instead of the actual dotted decimal notation.
def try_get_address_info(ip_address, port_number):
    try:
        return socket.getaddrinfo(ip_address, port_number)[0][4]
    except:
        print("Error while trying to get a ip_address and port number of server")
        sys.exit(1)
[ "def ip_error(session, url):\n soup = bs(\n session.get('http://www.iplocation.net/find-ip-address').text,\n 'html.parser'\n )\n ip_ = soup.find(style='font-weight: bold; color:green;').text\n raise ConnectionError(\n 'Connection to {} failed using IP address {}'.format(url, ip_)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to send the packet; if sending fails, prints the error and exits.
def try_send(s, packet):
    try:
        s.sendall(packet)
    except:
        print('Problem occurred while sending')
        sys.exit(1)
[ "def send_packet():", "def try_send(self, args): \n if self.debug_stub:\n print \"{0}_stub : ------- : \".format(self.stub_name)\n \n\n \n # send IR wake up \n try :\n if self.debug_stub:\n print \"{0}_stub : {1} --> {2} : IR\".format(self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks the arguments that were given; it should be just server.py and port_number, but you never know! Checks the number of arguments; if not 4, prints the error and exits.
def check_arguments():
    try:
        if len(sys.argv) != 4:
            if len(sys.argv) < 4:
                print('Expected 4 arguments, got only {}'.format(len(sys.argv)))
            else:
                print('Expected 4 arguments, got {}'.format(len(sys.argv)))
            sys.exit(1)
        e...
[ "def process_ports(args):\n text = None\n # Checking the correct number of arguments was passed\n if len(args) != 3:\n text = \"Invalid number of ports entered\"\n else:\n # Checking that the ports passed are all ints\n try:\n a, b, c = int(args[0]), int(args[1]), int(arg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the socket_fd, which holds the ip_address and port number of the server, and the name of the file; tries to contact the server to get the file from the server.
def contact_server(socket_fd, file_name):
    try:
        check_file_exists(file_name)
        file_len_bytes = len(file_name).to_bytes(2, 'big')
        file_request = bytearray() + 0x497E.to_bytes(2, 'big') + 0x01.to_bytes(1, 'big') + file_len_bytes
        file_request += file_name.encode('utf-8')
    except:
        ...
[ "def receive_file_from_socket(self):\n pass", "def recv_file(self, client_sock):\n # Create ephemeral socket\n ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ephem_sock.bind(('', 0)) # 0 = first available port\n ephem_sock.listen(1)\n\n # Send the epheme...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Benchmark performance with a variety of expectations using NYC Taxi data (yellow_trip_data_sample_201901.csv) found in the tests/test_sets/taxi_yellow_trip_data_samples directory, and used extensively in unittest and integration tests for Great Expectations. To simulate a more realistic usage of Great Expectations with...
def test_taxi_trips_benchmark(
    benchmark: BenchmarkFixture,
    tmpdir: py.path.local,
    pytestconfig: _pytest.config.Config,
    number_of_tables: int,
    write_data_docs: bool,
):
    _skip_if_bigquery_performance_tests_not_enabled(pytestconfig)
    checkpoint = taxi_benchmark_util.create_checkpoint(
    ...
[ "def test_extra_runs_per_team_in_2016(self):\n\n \"\"\"First test case.\"\"\"\n expected_output = {'Mumbai Indians': 3, 'Gujarat Lions': 0, 'Rising Pune Supergiants': 1}\n sql_methods.create_table_populate('resources/matches.csv', self.connection)\n sql_methods.create_table_populate('res...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialise the noise handler, if requested.
def _init_noise(self):
    if self._send_noise:
        self._noise = ceof.noise.Server(noise_dir=self._noise_dir,
                                        plain=False,
                                        peer_dir=self._peer_dir,
                                        gpg_config_dir=self._gpg_config_dir)
        self._noise.start()
[ "def init_noise_model(self):\n\n self.noise = galsim.PoissonNoise(self.rng)\n self.logger.info('Poisson noise model created.')\n \n return", "def honk(self):\n self.honkNoise.play()", "def noisePrior(self):\n self.options.useAutoNoise = True\n self.initialized = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load object from the database. Failing that, create a new empty (default) instance of the object and return it (without saving it to the database).
def load(cls):
    try:
        return cls.objects.get()
    except cls.DoesNotExist:
        return cls()
[ "def load(cls):\r\n\r\n try:\r\n return cls.objects.get()\r\n except cls.DoesNotExist: # pragma: no cover\r\n return cls()", "def create_or_get(cls, id):\n obj = cls.query.get(id)\n if obj is not None:\n return obj\n else:\n return cl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get companies from an xlsx file.
def get_companies(self, file):
    # file = 'Zefix-Crawl-Test.xlsx'
    data_frame = pd.read_excel(file, engine='openpyxl')
    first_column = data_frame.columns[0]
    self.companies = data_frame[first_column].tolist()  # noqa
[ "def get_companies_list():\r\n\t\r\n\tcompanies_list = {}\r\n\t\r\n\t#Open excel file.\r\n\twb = openpyxl.load_workbook(r\"C:\\Users\\Vartotojas\\Desktop\\Projects\\UK Companies House API\\Companies List.xlsx\")\r\n\tsheet = wb.active\r\n\r\n\t#Get all data to dictionary.\r\n\tmax_row = sheet.max_row\r\n\t\r\n\tfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if API with given API ID is running or not.
def is_already_running(self, api_id):
    process_info = self.supervisor_xml_rpc_service.get_process_info(api_id)
    if process_info is None:
        return False
    if process_info == RETURNCODE.OS_ERROR:
        logging.error('API is not running or connection to supervisor failed!')
    ...
[ "def is_running(self, sdi_id: str) -> Optional[bool]:\n response = self.get_status(sdi_id)\n if response.ok:\n return str(response.detail[\"state\"]) == \"2\"\n return None", "def supported(self) -> bool:\n return self.api_id.value in self.vapix.api_discovery", "def isAPIO...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes the supervisor config file for the given API.
def delete_api_config(self, api_id):
    logging.info("Deleting supervisor config for API: {}".format(api_id))
    config_file = self.define_supervisor_config_file(api_id=api_id)
    filesystem_service.delete_file(config_file)
[ "def delete_config(cls, username):\n\n file = username + \".json\"\n try:\n os.remove(cls.cwd + \"/user_configs/\" + file)\n except FileNotFoundError:\n logger.info('Found no configurations for ' + username + '.')\n logger.info('Deleted configurations for ' + userna...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to obtain the directory tree of a repository. The ignored directories and files given as input are also excluded.
def extract_directory_tree(input_path, ignore_dirs, ignore_files, visual=0):
    ignore_set = ['.git', '__pycache__', '.idea', '.pytest_cache']
    ignore_set = tuple(list(ignore_dirs) + list(ignore_files) + ignore_set)
    if visual:
        paths = DisplayablePath.make_tree(Path(input_path), criteria=lambda ...
[ "def walk_git_repository(repodir='.'):\n return _walk_repository(repodir, '.gitignore', parse_gitignore,\n match_gitignore)", "def _walk_repository(repodir, ignore_file=None, parse_ignore=None,\n match_ignore=None, path=None, depth=0):\n\n if path is None:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine whether two numbers are close to each other. That is, that the difference between both is less than or equal to APPROXIMATION_CONSTANT.
def is_close(number1, number2):
    return math.isclose(number1, number2, abs_tol=APPROXIMATION_CONSTANT)
[ "def approx_equals(a, b):\n return (a - b) < 1.5e-16", "def closeEnough(val1, val2, percentErrorAllowed=0.1):\n avg = (val1+val2)/2\n diff = abs(val1-val2)\n err = abs(100*diff/avg)\n if err <= percentErrorAllowed:\n log.debug(\"%s == %s (error: %.02f%%)\" % (val1, val2, err))\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether screen.colliderect(actor) is true.
def is_onscreen(self):
    x, y = self.loc
    w, h = get_screen_size()
    screen = Rect(0, 0, w, h)
    actor = Rect(x, y, self.width, self.height)
    if screen.colliderect(actor):
        return True
    else:
        return False
[ "def is_onscreen(self):\n x, y = self.loc\n w, h = get_screen_size()\n\n screen = Rect(0, 0, w, h)\n actor = Rect(x, y, self.width, self.height)\n\n if screen.colliderect(actor):\n return True\n else:\n return False", "def is_actor(x):\n return isi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find videos in a simple XML file for a YT channel
def test_simple(self):
    xmlFile = open(self.simpleFile, 'r')
    channel = YTChannel(xmlFile)
    videos = channel.videos()
    self.assertEqual(videos, self.expected)
[ "def openFolderVideos(self,node):\n listVideos = []\n\n print \"Searching videos in [\" + node.file + \"]\"\n # Search files in folder\n listFic = os.listdir(node.file)\n listFic.sort()\n for fic in listFic:\n suffix=fic[fic.rfind(\".\"):].lower()\n #p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the test name is not in the list or its Python/OS version requirement is met; return False otherwise.
def _check_python_and_os(self, test_name):
    if (test_name in self.MIN_VERSION_OR_OS
            and not self.MIN_VERSION_OR_OS[test_name]):
        return False
    return True
[ "def check_python_version():", "def virus_test(name):\n return ('virus' in name)", "def _is_test_case(name):\n if not os.path.isfile(name) or not os.path.exists(name):\n return False\n if not re.search(_TEST_FILE_PATTERN, name):\n return False\n # check if file is executable\n if no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for all executables generated by the testcase. If the testcase is called e.g. 'test_multipackage1', this searches for each of 'test_multipackage1.exe' and 'multipackage1_?.exe' in both one-file and one-dir mode.
def _find_exepath(self, test):
    assert test.startswith('test_')
    name = test[5:] + '_?'
    parent_dir = self._distdir
    patterns = [
        # one-file deploy pattern
        os.path.join(parent_dir, test + '.exe'),
        # one-dir deploy pattern
        os.path.join(parent_dir,...
[ "def queryPATH(test):\r\n matches = []\r\n def appendIfMatches(exeFile):\r\n if isExecutable(exeFile):\r\n if test(exeFile):\r\n matches.append(exeFile)\r\n\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n path = path.strip('\"')\r\n if os.path.exists(path):\r\n for fileInPat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run building of the test script. Return True if the build succeeded, False otherwise.
def test_building(self):
    OPTS = ['--debug', '--noupx',
            '--specpath', self._specdir,
            '--distpath', self._distdir,
            '--workpath', self._builddir]
    if self.verbose:
        OPTS.extend(['--debug', '--log-level=INFO'])
    else:
        OPTS.append(...
[ "def build_tests():\r\n run(dir(\"Macaroni\", \"Next\", \"Tests\"), \"cavatappi -d -i\")", "def build(configure, output):\n try:\n run(configure, output)\n run(['make', '-j8', 'clean'], output)\n run(['make', '-j8'], output)\n run(['make', '-j8', 'check'], output)\n except Cal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test running of all created executables. Multipackage tests generate more than one exe file and all of them have to be run.
def test_exe(self):
    self._msg('EXECUTING TEST ' + self.test_name)
    found = False
    retcode = 0
    stderr = ''
    for exe in self._find_exepath(self.test_file):
        found = True
        rc, err = self._run_created_exe(exe)
        retcode = retcode or rc
        if rc ...
[ "def build_tests():\r\n run(dir(\"Macaroni\", \"Next\", \"Tests\"), \"cavatappi -d -i\")", "def test_standalone_cpp_output(self):\n\n if os.path.isdir(self.out_dir):\n shutil.rmdir(self.out_dir)\n\n self.do('import model sm')\n self.do('generate e+ e- > e+ e- @2')\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare log files (now used only by the multipackage test_name). Return True if the .toc files match or when .toc patterns are not defined.
def test_logs(self):
    logsfn = glob.glob(self.test_file + '.toc')
    # Other main scripts do not start with 'test_'.
    assert self.test_file.startswith('test_')
    logsfn += glob.glob(self.test_file[5:] + '_?.toc')
    # generate a mapping basename -> pathname
    progs = dict((os.path.sp...
[ "def _compare_files( f1, f2, syncopts ):\n data_ok = True\n meta_ok = True\n # Fast check, if f1.ctime older, nothing to do\n if f2.ctime > f1.ctime:\n return( data_ok, meta_ok )\n # Check for data changes\n if f1.size != f2.size:\n data_ok = False\n elif syncopts[ 'synctimes' ] a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a test suite and add test cases to it. test_types: test classes to create test cases from. Return the test suite with tests.
def create_suite(self, test_types, with_crypto=False, run_known_fails=False):
    suite = unittest.TestSuite()
    for _type in test_types:
        tests = self._detect_tests(_type.test_dir)
        # Create test cases for a specific type.
        for test_name in tests:
    ...
[ "def construct_test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(UserInputOneTests))\n suite.addTest(unittest.makeSuite(UserInputTwoTests))\n suite.addTest(unittest.makeSuite(UserInputThreeTests))\n return suite", "def makeSuite():\n result = unittest.TestSuite()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enforce that we are not older than the minimum version. This is a loose check to avoid creating or updating our service record if we would do so with a version that is older than the current minimum of all services. This could happen if we were started with older code by accident, either due to a rollback or an old and...
def _check_minimum_version(self):
    if not self.obj_attr_is_set('version'):
        return
    if not self.obj_attr_is_set('binary'):
        return
    minver = self.get_minimum_version(self._context, self.binary)
    if minver > self.version:
        raise exception.ServiceTooOld(thisver...
[ "def outofdate(self):\n if self.device_version and self.bundle_version:\n try:\n return VersionInfo.parse(self.device_version) < VersionInfo.parse(\n self.bundle_version\n )\n except ValueError as ex:\n logger.warning(\"Mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the minimum service version, checking all cells. This attempts to calculate the minimum service version for a set of binaries across all the cells in the system. If require_all is False, then any cells that fail to report a version will be ignored (assuming they won't be candidates for scheduling and thus excluding...
def get_minimum_version_all_cells(context, binaries, require_all=False):
    if not all(binary.startswith('nova-') for binary in binaries):
        LOG.warning('get_minimum_version_all_cells called with '
                    'likely-incorrect binaries `%s\'', ','.join(binaries))
        raise exception.ObjectActionErr...
[ "def _check_minimum_version(self):\n if not self.obj_attr_is_set('version'):\n return\n if not self.obj_attr_is_set('binary'):\n return\n minver = self.get_minimum_version(self._context, self.binary)\n if minver > self.version:\n raise exception.ServiceTo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots several PDFs on top of one another. pairlist: iterable of (r, gr) pairs to plot. labels: iterable of names for the pairs. If this is not the same length as the pairlist, a legend will not be shown (default []). offset: offset to place between plots. PDFs will be sequentially shifted in the y-direction by the offset. I...
def plotPDFs(pairlist, labels=None, offset='auto', rmin=None, rmax=None):
    if labels is None:
        labels = []
    if offset == 'auto':
        offset = _findOffset(pairlist)
    gap = len(pairlist) - len(labels)
    labels = list(labels)
    labels.extend([""] * gap)
    for idx, pair in enumerate(pairlis...
[ "def comparePDFs(pairlist, labels=None, rmin = None, rmax = None, show = True,\n maglim = None, mag = 5, rw = None, legend = True):\n if labels is None:\n labels = [2]\n labeldata = None\n labelfit = None\n else:\n labeldata = labels[1]\n labelfit = labels[0]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot two PDFs on top of each other and the difference curve. pairlist: iterable of (r, gr) pairs to plot. labels: iterable of names for the pairs. If this is not the same length as the pairlist, a legend will not be shown (default []). rmin: the minimum r value to plot. If this is None (default), the lower bound of the PDF is n...
def comparePDFs(pairlist, labels=None, rmin=None, rmax=None, show=True,
                maglim=None, mag=5, rw=None, legend=True):
    if labels is None:
        labels = [2]
        labeldata = None
        labelfit = None
    else:
        labeldata = labels[1]
        labelfit = labels[0]
    rfit, grf...
[ "def plotPDFs(pairlist, labels=None, offset ='auto', rmin = None, rmax = None):\n if labels is None:\n labels = []\n if offset == 'auto':\n offset = _findOffset(pairlist)\n\n gap = len(pairlist) - len(labels)\n labels = list(labels)\n labels.extend([\"\"] * gap)\n\n for idx, pair in ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Truncate a PDF to specified bounds. r: r values of the PDF. gr: PDF values. rmin: the minimum r value. If this is None (default), the lower bound of the PDF is not altered. rmax: the maximum r value. If this is None (default), the upper bound of the PDF is not altered. Returns the truncated r, gr.
def truncatePDFs(r, gr, rmin=None, rmax=None):
    if rmin is not None:
        sel = r >= rmin
        gr = gr[sel]
        r = r[sel]
    if rmax is not None:
        sel = r <= rmax
        gr = gr[sel]
        r = r[sel]
    return r, gr
[ "def randtrunc(a=None,b=None,mu=0.0,sd=1.0):\n assert type(a) != str, \"The minimum value needs to be numeric.\"\n assert type(b) != str, \"The maximum value needs to be numeric.\"\n assert type(mu) != str,\"The average value needs to be numeric.\"\n assert type(sd) != str, \"The st. deviation needs t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find an optimal offset between PDFs.
def _findOffset(pairlist):
    maxlist = [max(p[1]) for p in pairlist]
    minlist = [min(p[1]) for p in pairlist]
    difflist = numpy.subtract(maxlist[:-1], minlist[1:])
    offset = 1.1 * max(difflist)
    return offset
[ "def calc_sag_offset_idx(self):\n return self.offset_pnt-1", "def calc_offset(self,path,i,chunk_sz):\n i=int(i)\n chunk_sz=int(chunk_sz)\n if os.path.isfile(path):\n return (path,i*chunk_sz)\n\n self.lock.acquire()\n self.check_key(path) #Don't know if it is TH...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split an interval of the given size into the given number of subintervals. The sizes of the subintervals will be at most the ceil of the exact fractional size, with later subintervals possibly being smaller (or even empty).
def split_almost_equally(size: int, *, num_parts: int) -> Iterable[slice]:
    size_per_part = size // num_parts + (1 if size % num_parts != 0 else 0)
    for i in range(num_parts):
        yield slice(min(i * size_per_part, size),
                    min((i + 1) * size_per_part, size))
[ "def split_into_chunks(alist, sizes):\n\n indices = np.cumsum(sizes)\n return np.split(alist, indices[:-1])", "def splitSlice(s, size):\n i, j, k = s.start, s.stop, s.step\n if k > 0:\n wrap1 = slice(i, size, k)\n wrap2 = slice((i - size) % k, j - size, k)\n else:\n wrap1 = sli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for an AsyncResult while checking the health of the pool. If the processes of an mp.Pool die unexpectedly, the pool will respawn them
def get_async_result(
    future_res: "mp.pool.AsyncResult[T]",
    pool: mp.Pool,
    health_check_interval: float = 1.0,
) -> T:
    processes = list(pool._pool)
    while True:
        try:
            res = future_res.get(health_check_interval)
        except mp.TimeoutError:
            pass
        else:
    ...
[ "async def wait_ready(self):\n if not self.condition():\n # skip\n return None\n if not self.check:\n return None\n\n while True:\n if self.process and self.process.returncode is not None:\n return self.process.returncode\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of partitions on a given side and the entity types Each of the entity types that appear on the given side (LHS or RHS) of a relation type is split into some number of partitions. The ones that are split into one partition are called "unpartitioned" and behave as if all of their entities belonged to al...
def get_partitioned_types(
    config: ConfigSchema, side: Side
) -> Tuple[int, Set[EntityName], Set[EntityName]]:
    entity_names_by_num_parts: Dict[int, Set[EntityName]] = defaultdict(set)
    for relation_config in config.relations:
        entity_name = side.pick(relation_config.lhs, relation_config.rhs)
        e...
[ "def bipartition_width(membership, g):\n return len(get_bipartition_eids(membership, g))", "def num_pieces_per_side(self):\n return self._num_pieces_per_side", "def split_num_slices_per_axis(self):\n return self.__split_num_slices_per_axis", "def extract_partition_count(self):\n return int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input integers n, m; returns n//m, n - m*(n//m).
def division_algorithm(n, m):
    if m > n:
        return 0, n
    q = n // m
    return q, n - m * q
[ "def partition(m, n):\n return [ ( m + i - 1 ) // n for i in range(1, n + 1 )]", "def split_num(m, n):\n avg_sz = m / n\n rem = m - avg_sz * (n - 1)\n result = [avg_sz] * (n - 1)\n remrem = rem - avg_sz\n for i in range(0, remrem):\n result[i] += 1\n remrem -= 1\n return result ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input: integer d. Output: integer representing the length of the cycle of repeating digits in 1/d.
def find_cycle_length(d):
    seen = set()
    k = 1
    has_cycle = False
    cycle_length = 0
    look_for = None
    found_cycle = False
    while not has_cycle and k != 0:
        k *= 10
        q, r = division_algorithm(k, d)
        if r == look_for:
            has_cycle = True
        if found_cycle:
    ...
[ "def cycle_length(number):\n if number % 2 == 0:\n return cycle_length(number / 2)\n if number % 5 == 0:\n return cycle_length(number / 5)\n\n i = 1\n while True:\n if (pow(10, i) - 1) % number == 0:\n return i\n i += 1", "def recurring_length(d, digit=1, rems=No...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input: integer n. Output: a list whose i-th entry is the smallest number d up to i with the largest cycle length.
def reciprocal_cycles(n):
    res = [0] * (n + 1)
    max_length = 0
    d = 0
    for i in range(1, n + 1):
        cycle_length = find_cycle_length(i)
        if cycle_length > max_length:
            max_length = cycle_length
            d = i
        res[i] = d
    return res
[ "def largestCycle(limit):\n\n cycle = 6\n number = 7\n\n for nums in range(2, limit + 1):\n if periodlength(nums) > cycle:\n cycle = periodlength(nums)\n number = nums\n\n return [number, cycle]", "def solution(n: int = 1000) -> int:\n prev_numerator, prev_denominator =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produces a list of the jobs in the queue, ordered by self.priority (RAM or CORES) and ascending or descending
def sorted_jobs(self):
    return sorted(self.jobs.items(),
                  key=lambda item: getattr(item[1], self.priority),
                  reverse=self.descending)
[ "def __schedule_bigjobs(self):\n # no scheduling for now (start bigjob in the user specified order)\n return self.bigjob_list", "def queued_archive_jobs():\n\n jobs = slurm_jobs()\n\n # return [ job for job in jobs if (job['job_state'] == 'Q' and job['queue'] == 'archivelong')]\n return [\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collects jobs from the sorted list that fit together within the specified resource limit. Removes (pops) collected jobs from the queue. May return an empty dictionary if the queue is empty or no jobs fit.
def dequeue(self, resource_limit):
    jobs = {}
    for job, resource in self.sorted_jobs():
        if resource_limit - resource >= Resources.EMPTY:
            jobs[job] = resource
            resource_limit = resource_limit - resource
    for job in jobs:
        self.jobs.pop(job)
    ...
[ "def job_limit(self) -> Dict[str, Any]:\n url = self.get_url('jobs_limit')\n return map_jobs_limit_response(self.session.get(url).json())", "def find_some_jobs(self):\n max_amounts = self.available_job_slots\n nb = 0\n jobs = []\n if not sum(max_amounts.values()):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Callback to run after a job is finished to restore reserved resources and check for exceptions. Expected to be called as part of the Future.add_done_callback(). The callback is invoked on a background thread. If the callback itself raises an Exception, that Exception is logged and ignored, so we instead queue exception...
def job_done_callback(self, rsc, logger, future):
    # Always restore the resources.
    try:
        self.restore(rsc, logger)
    except Exception as ex:
        self.exceptions.put(ex)
    # if the future was cancelled, there is no more work to do. Bail out now because calling result() or ...
[ "def finish_job(self, job_state):\n galaxy_id_tag = job_state.job_wrapper.get_id_tag()\n external_job_id = job_state.job_id\n\n # To ensure that files below are readable, ownership must be reclaimed first\n job_state.job_wrapper.reclaim_ownership()\n\n # wait for the files to appe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise an exception if a job does not fit within total_resources
def raise_if_oversized(self, job):
    rsc = Resources.from_job(job)
    if rsc.exceeds(self.total_resources):
        raise OversizedJobException('Job {} resources {} exceed total resources {}'
                                    .format(job, rsc, self.total_resources))
[ "def _check_capacity_exceeded(ctx, allocs):\n # The SQL generated below looks like this:\n # SELECT\n # rp.id,\n # rp.uuid,\n # rp.generation,\n # inv.resource_class_id,\n # inv.total,\n # inv.reserved,\n # inv.allocation_ratio,\n # allocs.used\n # FROM resource_prov...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pulls jobs off the queue in groups that fit in currently available resources, allocates resources, and submits jobs to the pool_executor as Futures. Attaches a callback to each future to clean up (e.g. check for exceptions, restore allocated resources).
def start_queued_jobs(self, pool_executor, logger, runtime_context):
    runnable_jobs = self.jrq.dequeue(self.available_resources)  # Removes jobs from the queue
    submitted_futures = set()
    for job, rsc in runnable_jobs.items():
        if runtime_context.builder is not None:
            job....
[ "def _queue_compliance_jobs(executor, futures_to_cb, condition_counts, tac_compliance_data, compliance_data,\n monthly_stats, db_config, operators, month, year, condition_tuples,\n per_operator_record_counts, statsd, metrics_run_root, debug_query_performance, run_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cast s to type t. Return `default` (None unless overridden) if a ValueError or TypeError occurs.
def safe_cast(t, s, default=None):
    try:
        return t(s)
    except (ValueError, TypeError):
        return default
[ "def coerceVal( s ):\n if not isinstance( s, (str,) ): return s\n if s == 'True': return True\n if s == 'False': return False\n try: return int(s)\n except ValueError:\n try: return float(s)\n except ValueError: return s", "def type_infer(s):\n def capitalize_ul(s):\n \"\"\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a decorator function. Functions decorated with it will continue to execute for `shutdown_time` seconds when the program is stopped with SIGTERM. After that it will be killed with SIGUSR1 and the program will exit with status code 1. If the function terminates in the given time frame, the program will exit with c...
def graceful_exit(shutdown_time):
    _signal.signal(_signal.SIGUSR1, lambda s, f: exit(1))
    pid = _os.getpid()

    def killer_thread():
        _time.sleep(shutdown_time)
        _os.kill(pid, _signal.SIGUSR1)

    def start_killer_thread(sigTermFlag):
        sigTermFlag.set()
        thread = _threading.Thread(target=killer_thread)
        th...
[ "def functiontimeout(seconds=10, error_message=os.strerror(errno.ETIME)):\n def decorator(func):\n def _handle_timeout(signum, frame):\n raise FunctionTimeoutError(error_message)\n\n def wrapper(*args, **kwargs):\n signal.signal(signal.SIGALRM, _handle_timeout)\n si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the first index `i` of the list `ls` such that cond(ls, i) is true.
def first_index(ls, cond): for i, e in enumerate(ls): if cond(ls, i): break return i
[ "def find_indices(lst, condition):\n return [i for i, elem in enumerate(lst) if condition(elem)]", "def find_first(L, p):\n for i, x in enumerate(L): # Yields (0, L[0]), (1, L[1]),...\n if p(x): return i\n return -1", "def indexOf(list, predicate):\n for i, x in enumerate(list):\n if p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements a modified ear slicing algorithm, optimized by z-order curve hashing and extended to handle holes, twisted polygons, degeneracies and self-intersections in a way that doesn't guarantee correctness of triangulation, but attempts to always produce acceptable results for practical data.
def earcut( exterior: list[T], holes: list[list[T]] ) -> list[Sequence[T]]: # exterior points in counter-clockwise order outer_node: Node = linked_list(exterior, 0, ccw=True) triangles: list[Sequence[T]] = [] if outer_node is None or outer_node.next is outer_node.prev: return triangles ...
[ "def triangulation(e, i):\n vertices = []\n holes = []\n segments = []\n index_point = 0\n\n #-- Slope computation points\n a = [[], [], []]\n b = [[], [], []]\n for ip in range(len(e)-1):\n vertices.append(e[ip])\n if a == [[], [], []] and index_point == 0:\n a = [e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a circular doubly linked list from polygon points in the specified winding order
def linked_list(points: Sequence[T], start: int, ccw: bool) -> Node: last: Node = None # type: ignore if ccw is (signed_area(points) < 0): for point in points: last = insert_node(start, point, last) start += 1 else: end = start + len(points) for point in reve...
[ "def create_linked_list(points, clockwise):\n coord_sum = 0\n length = len(points)\n last = None\n\n # calculate original winding order of a polygon ring\n for i, j in zip(range(length), [length-1] + range(length-1)):\n p1 = points[i]\n p2 = points[j]\n coord_sum += (p2[0] - p1[0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a diagonal between two polygon nodes is valid (lies in polygon interior)
def is_valid_diagonal(a: Node, b: Node): return ( a.next.i != b.i and a.prev.i != b.i and not intersects_polygon(a, b) # doesn't intersect other edges and ( locally_inside(a, b) and locally_inside(b, a) and middle_inside(a, b) and ( ...
[ "def is_valid_diagonal(a, b):\n return not intersects_polygon(a, a.p, b.p) and locally_inside(a, b) and locally_inside(b, a) and middle_inside(a, a.p, b.p)", "def test_diagonalizing_gates_overlapping(self):\n diag_op = ValidOp(qml.S(0), qml.PauliX(0))\n diagonalizing_gates = diag_op.diagonalizing...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a polygon diagonal intersects any polygon segments
def intersects_polygon(a: Node, b: Node) -> bool: p = a while True: if ( p.i != a.i and p.next.i != a.i and p.i != b.i and p.next.i != b.i and intersects(p, p.next, a, b) ): return True p = p.next if p is a: ...
[ "def intersects_polygon(start, a, b):\n node = start\n while True:\n p1 = node.p\n p2 = node.next.p\n\n if p1 != a and p2 != a and p1 != b and p2 != b and intersects(p1, p2, a, b):\n return True\n\n node = node.next\n if node == start:\n break\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether a polygon node forms a valid ear with adjacent nodes
def is_ear(ear: Node) -> bool: a: Node = ear.prev b: Node = ear c: Node = ear.next if area(a, b, c) >= 0: return False # reflex, can't be an ear # now make sure we don't have other points inside the potential ear ax = a.x bx = b.x cx = c.x ay = a.y by = b.y cy = c....
[ "def _is_a_vertex_of_polygon(x, y, polygon):\n return (x, y) in polygon", "def intersects_polygon(a: Node, b: Node) -> bool:\n p = a\n while True:\n if (\n p.i != a.i\n and p.next.i != a.i\n and p.i != b.i\n and p.next.i != b.i\n and intersect...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the leftmost node of a polygon ring
def get_leftmost(start: Node) -> Node: p = start leftmost = start while True: if p.x < leftmost.x or (p.x == leftmost.x and p.y < leftmost.y): leftmost = p p = p.next if p is start: break return leftmost
[ "def leftmost(pts):\n return withmin(xcoord, pts)", "def leftmost(node):\n crnt = node\n while crnt.left:\n crnt = crnt.left\n return crnt", "def get_leftmost(nodes, root):\n if root is not None:\n leftmost_node = root\n while nodes[leftmost_node].left_child is no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a point lies within a convex triangle
def point_in_triangle( ax: float, ay: float, bx: float, by: float, cx: float, cy: float, px: float, py: float, ) -> bool: return ( (cx - px) * (ay - py) >= (ax - px) * (cy - py) and (ax - px) * (by - py) >= (bx - px) * (ay - py) and (bx - px) * (cy - py) >= (c...
[ "def point_in_triangle(self, p, a, b, c):\n\n if self.same_side(p, a, b, c) and self.same_side(p, b, a, c) and self.same_side(p, c, a, b):\n return True\n else:\n return False", "def isConvexQuad(pts):\n for i in range(0, 4):\n if(crossProduct(pts[i], pts[(i+1)%4], pt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the sector at vertex m contains the sector at vertex p, for two vertices at the same coordinates.
def sector_contains_sector(m: Node, p: Node): return area(m.prev, m, p.prev) < 0 and area(p.next, m, m.next) < 0
[ "def inSurface(self,p): \n tot = 0\n \n sx = self.points[0].x-self.points[1].x\n sy = self.points[0].y-self.points[1].y\n sz = self.points[0].z-self.points[1].z\n k = (100 * mp.vector(sx,sy,sz).mag())\n p1 = mp.point(p.x+(sx*k*self.radius),p.y+(sy*k*self.radius),p.z+...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Link two polygon vertices with a bridge. If the vertices belong to the same ring, it splits polygon into two. If one belongs to the outer ring and another to a hole, it merges it into a single ring.
def split_polygon(a: Node, b: Node) -> Node: a2 = Node(a.i, a.point) b2 = Node(b.i, b.point) an = a.next bp = b.prev a.next = b b.prev = a a2.next = an an.prev = a2 b2.next = a2 a2.prev = b2 bp.next = b2 b2.prev = bp return b2
[ "def split_polygon(a, b):\n a2 = Node(a.p)\n b2 = Node(b.p)\n an = a.next\n bp = b.prev\n\n a2.source = a\n b2.source = b\n\n a.next = b\n b.prev = a\n\n a2.next = an\n an.prev = a2\n\n b2.next = a2\n a2.prev = b2\n\n bp.next = b2\n b2.prev = bp\n\n return b2", "def me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate position mask for tensor.
def position_mask(tensor): # Maybe it would be more productive to use a global buffer of positions `(max_batch_size, max_seq_len)` # and get a mask from this buffer using slicing. batch_size, seq_len = tensor.shape mask = torch.arange(1, seq_len + 1, dtype=torch.long, device=tensor.devic...
[ "def pos_mask(self, pos):\n mask = np.zeros(len(self.labels), dtype=np.bool)\n mask[pos] = True\n return mask", "def mask_data(token_tensor, mask_inds):\n for i in range(token_tensor.input_ids.shape[0]):\n token_tensor.input_ids[i, mask_inds[i]] = 103", "def generate_mask(self, x)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fix the activation range by setting running stat
def fix(self): self.running_stat = False
[ "def update_running_avg(self):\n # For SGD...\n # Note: its pow 2 because we later do pow 0.5\n opt_s = self.optimizer.state\n ra = self.running_avg_step\n bg = self.big_gamma\n with torch.no_grad():\n for pg in self.optimizer.param_groups:\n if pg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a difference (a commit where the metric changed significantly).
def AddDifference(self, change, values_a, values_b): if change.patch: kind = 'patch' commit_dict = { 'server': change.patch.server, 'change': change.patch.change, 'revision': change.patch.revision, } else: kind = 'commit' commit_dict = { 'rep...
[ "def test_diffs_regeneration(self):\n with self.integration_setup():\n self.cli.invoke(diffs, ['12', '1000'])\n # change the output\n (self.diff_dir / 'v1' / 'v2').write({'update': 'update'})\n # this *won't* recalculate the output; input trees haven't changed\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return _BugUpdateInfo for the differences.
def BuildUpdate(self, tags, url): if len(self._differences) == 0: raise ValueError("BuildUpdate called with 0 differences") differences = self._OrderedDifferencesByDelta() missing_values = self._DifferencesWithNoValues() owner, cc_list, notify_why_text = self._PeopleToNotify() status = None ...
[ "def getBugInfo(self):\n return self.infoInstance('BugInfo')", "def diff(self):\n posts = self.status()\n updates = filter(lambda x : x['action'] == 'update', posts)\n\n d = difflib.Differ()\n diff_result = {}\n for update in updates:\n post = update['post']\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of differences sorted by absolute change.
def _OrderedDifferencesByDelta(self): if self._cached_ordered_diffs_by_delta is not None: return self._cached_ordered_diffs_by_delta diffs_with_deltas = [(diff.MeanDelta(), diff) for diff in self._differences if diff.values_a and diff.values_b] ordere...
[ "def get_diffs(self):\n return list(self.iter_diffs())", "def sort_clues(clues):\n # This can be accomplished in newer (>= 2.4) Python versions using:\n # clues.sort(key=lambda x: x.diff)\n tmps = [(x.diff, x) for x in clues]\n tmps.sort()\n return [x[1] for x in tmps]", "def deltas(xs):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the list of differences where one side has no values.
def _DifferencesWithNoValues(self): if self._cached_commits_with_no_values is not None: return self._cached_commits_with_no_values self._cached_commits_with_no_values = [ diff for diff in self._differences if not (diff.values_a and diff.values_b) ] return self._cached_commits_with...
[ "def missing_values(self):\n\n all_values = list(range(1, self.size**2+1))\n return [\n val for val in all_values if val not in self.values and val != 0]", "def difference(a, b):\n return [x for x in a if x in a and not x in b]", "def get_missing(list1, list2):\n return list(set(l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the people to notify for these differences.
def _PeopleToNotify(self): ordered_commits = [ diff.commit_info for diff in self._OrderedDifferencesByDelta() ] + [diff.commit_info for diff in self._DifferencesWithNoValues()] # CC the folks in the top N commits. N is scaled by the number of commits # (fewer than 10 means N=1, fewer than 100 ...
[ "def voicemails_old(self):\n return self._voicemails_old", "def OnParticipantsChanged(properties, context):\n added = properties['participantsAdded']\n for p in added:\n if p != settings.ROBOT_NICK+'@appspot.com':\n Notify(context, \"Hi, \" + p)", "def diff_results(new, old):\n\tinteresting = [...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pick a random open position on the board to play. If there are no more open positions, returns None.
def play(self): open_positions = [] for i, j in it.product(range(0, 3), range(0, 3)): if self.board[i, j] == '': open_positions += [(i, j)] return random.choice(open_positions) if len(open_positions) > 0 else None
[ "def pick_empty_position(board, rows, cols):\n pos = None\n while not pos:\n test_pos = (random.choice(rows), random.choice(cols))\n if is_empty(test_pos, board):\n pos = test_pos\n board[test_pos] = 1\n return pos", "def random_move (self, player):\n choices = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gives you some bird pictures
async def bird(self, ctx:utils.Context): await ctx.channel.trigger_typing() headers = {"User-Agent": "Apple.py/0.0.1 - Discord@Caleb#2831"} async with self.bot.session.get("https://some-random-api.ml/img/birb", headers=headers) as r: data = await r.json() with utils.Embed(us...
[ "def show_bird(prediction):\n try: \n img = Image.open('./images/' + prediction + '.jpg')\n st.image(img, use_column_width=True, caption='your lovely ' + FULL_NAMES[prediction])\n except FileNotFoundError:\n st.write('no image available for your lovely ' + FULL_NAMES[prediction])", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download the sysroot for the Faasm toolchain
def download_sysroot(ctx): url = get_sysroot_url() tar_name = get_sysroot_tar_name() tar_path = get_sysroot_tar_path() if not exists(FAASM_LOCAL_DIR): makedirs(FAASM_LOCAL_DIR) if exists(FAASM_SYSROOT): print("Deleting existing sysroot at {}".format(FAASM_SYSROOT)) check_ou...
[ "def rootfs_msrtools(request, record_property):\n fs = request.param\n record_property(\"rootfs\", fs.name())\n fs.download()\n fs.ssh_key().download()\n return fs", "def scp_sysroot(ctx, user, host):\n _scp_dir(user, host, \"llvm-sysroot\")", "def _download(self):\n self._system.downlo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Download the Faasm toolchain
def download_toolchain(ctx, version=None): url = get_toolchain_url() tar_name = get_toolchain_tar_name(version=version) tar_path = get_toolchain_tar_path(version=version) if exists(TOOLCHAIN_INSTALL): print("Deleting existing toolchain at {}".format(TOOLCHAIN_INSTALL)) check_output("rm ...
[ "def FetchAndInstall(self, arch):\n # Fist get the URL for this architecture\n col = terminal.Color()\n print col.Color(col.BLUE, \"Downloading toolchain for arch '%s'\" % arch)\n url = self.LocateArchUrl(arch)\n if not url:\n print (\"Cannot find toolchain for arch '%s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }