query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
Sends an API request to run one's test page on WebPagetest.org.
def WebPagetest(request, key): test = models.user_test.Test.get_mem(key) if not test: msg = 'No test was found with test_key %s.' % key return http.HttpResponseServerError(msg) current_user = users.get_current_user() if (test.user.key().name() != current_user.user_id() and not users.is_current_...
[ "def test_api_use_websurfer_topup_post(self):\n body = Internet()\n response = self.client.open(\n '/api/use/websurfer-topup/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sync all foreign models in an instance to the given data using their class object and manager name.
def _sync_foreign_model(self, instance, data, cls, manager_name): # Remove all foreign instances that are not featured in data data_ids = [item["id"] for item in data if "id" in item] for existing_foreigns in getattr(instance, manager_name).all(): if existing_foreigns.id not in data_...
[ "def auto_sync(instance, **kwargs):\n from .models import AutoSyncRecord\n\n for autosync in AutoSyncRecord.objects.filter(datafile__source=instance).prefetch_related('object'):\n autosync.object.sync(save=True)", "def sync_entities_watching(instance):\n for entity_model, entity_model_getter in en...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether any of `modules` contains a callable object named `method_name` and return a list of such objects.
def methods_importer( method_name: str, modules: List[Union[str, ModuleType]] ) -> List[Callable]: result = [] for module in modules: try: if isinstance(module, ModuleType): mod = module elif isinstance(module, str): mod = importlib.import_modu...
[ "def get_function_list_from_modlist(self):\n function_list = []\n function_name_list = []\n for module in self.module_list:\n for name, obj in inspect.getmembers(module, inspect.isfunction):\n if inspect.getmodule(obj) == module:\n function_list.appe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
On the change event of the survey_id field, if a note is available in the selected survey, display that note in the note field.
def on_change_survey(self, cr, uid, ids, survey_id, context=None): if not survey_id: return {} notes = self.pool.get('survey').read(cr, uid, survey_id, ['note'])['note'] return {'value': {'note': notes}}
[ "def _onNotesChoice(self, ev):\n sel = self.__notesChoice.GetSelection()\n sel = self.__notesChoice.GetClientData(sel)\n self.__fileList.NotesColumn(sel)", "def textCtrlNotes_TextChanged(self, event):\n self.SelectedClass.note = event.GetString()", "def __show_note_by_id():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the given string in infix notation.
def parse_infix(input: str) -> Node: parsed = ParsedString(input).tokenize() ans = parse_e(parsed) return ans
[ "def ParseInfix(s, operators=None):\n operators = operators or [\n '||', '&&', '->', '==', '<=', '>=', '<', '>', '!=',\n ' in ', '++?', '++', '+', '-', '*', '/', '%', '^', '!']\n unary_operators = ['-', '!']\n for op in operators:\n parts = SplitRaw(s, op)\n if len(parts) > 1:\n # Right is t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize time in arbitrary timezone to UTC naive object.
def normalize_time(timestamp): offset = timestamp.utcoffset() if offset is None: return timestamp return timestamp.replace(tzinfo=None) - offset
[ "def normalize_to_utc(date, timezone):\n local_tz = pytz.timezone(timezone)\n new_date = date.replace(tzinfo = local_tz)\n utc_tz = pytz.timezone('UTC')\n new_date = new_date.astimezone(utc_tz)\n return new_date", "def localize(time, zone=''):\n dt = time.datetime\n dt = dt.replace(tzinfo=tz....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines whether a time will occur within the next `window` seconds.
def is_soon(dt, window): soon = (utcnow() + datetime.timedelta(seconds=window)) return normalize_time(dt) <= soon
[ "def is_soon(dt, window):\r\n soon = (utcnow() + datetime.timedelta(seconds=window))\r\n return normalize_time(dt) <= soon", "def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False", "def check_time_went_to_sleep_is_befo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a message to LINE when the BTC price changes.
def line_sent(price): now = datetime.datetime.now() LINE_ACCESS_TOKEN = " " # Line Token url = "https://notify-api.line.me/api/notify" print("[%02i:%02i:%02i] Price Change : Send Message" % (now.hour, now.minute, now.second)) message = "[%02i:%02i:%02i] Now BTC Price : %s" % (now.hour, now.minute, n...
[ "def price_tick_handler(msg):\n print msg", "def m_ts_OrderUpdated(self, sender, e):\r\n print(\"Order was updated with price of {0}.\".format(e.NewOrder.LimitPrice))", "def _on_op_private_ticker(self, msg):\r\n msg = msg[\"ticker\"]\r\n if msg[\"sell\"][\"currency\"] != self.curr_quote:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates vocab tables for src_vocab_file and tgt_vocab_file
def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab): src_vocab_table = lookup_ops.index_table_from_file( src_vocab_file, default_value=UNK_ID) if share_vocab: tgt_vocab_table = src_vocab_table else: tgt_vocab_table = lookup_ops.index_table_from_file( ...
[ "def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):\n src_vocab_table = lookup_ops.index_table_from_file(\n src_vocab_file, default_value=UNK_ID)\n if share_vocab:\n tgt_vocab_table = src_vocab_table\n else:\n tgt_vocab_table = lookup_ops.index_table_from_file(\n tgt_vocab_fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load embedding text into a Python dictionary.
def load_embed_text(embed_file): emb_dict = dict() emb_size = None with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f: for line in f: tokens = line.strip().split(" ") word = tokens[0] vec = list(map(float, tokens[1:])) emb_dict[...
[ "def load_embed_txt(embed_file):\n emb_dict = dict()\n emb_size = None\n\n is_first_line = True\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, \"rb\")) as f:\n for line in f:\n tokens = line.rstrip().split(\" \")\n if is_first_line:\n is_first_l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the Jacobi symbol function. Test data by clux.com, Copyright (c) 2015 Eirik Albrigtsen.
def test_jacobi_symbol(): assert jacobi_symbol.jacobi_symbol(-1, 5) == 1 assert jacobi_symbol.jacobi_symbol(-1, 13) == 1 assert jacobi_symbol.jacobi_symbol(-1, 3) == -1 assert jacobi_symbol.jacobi_symbol(-1, 7) == -1 assert jacobi_symbol.jacobi_symbol(2, 3) == -1 assert jacobi_symbol.jacob...
[ "def jacobi1(a, b):\n a = array(a)\n b = array(b)\n #guess = array([1.0, 1.0, 1.0])\n sol = jacobi(a, b, N=1000)\n\n return sol", "def TtestSparseGrid_Jacobi(self):\n from scipy.special import gamma\n\n vt='single'\n LevelMax=5\n \n alpha=[i for i in range(1,3)]\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set up some polynomials
def setUp(self): self.f1 = uniutil.polynomial(enumerate([3, 6, 81, 1]), Z) self.f2 = uniutil.polynomial(enumerate([1, 81, 6, 3]), Z) self.f3 = uniutil.polynomial(enumerate([37, 6, 18, 1]), Z) self.f4 = uniutil.polynomial(enumerate([91, 7, 14, 1]), Z) # f5 = (x - 6)(x - 5)...x(x +...
[ "def make_polynomial_function(coeffs):\n pass", "def main():\n\n evaluate_polynomial()", "def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix form...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the total size and line count of all files ending with file_format under file_path
def get_files_size_and_line_number(file_path, file_format): logging.info("[get_file_size_and_line_number] file_path: %s, file_format: %s", file_path, file_format) size = 0 lines = 0 for root, dirs, files in os.walk(file_path): for file in files: for one_format in file_format: ...
[ "def fileLineCount():\n pass", "def file_stat(self, file_path):", "def read_file(path_to_file):\n 8", "def file_num(self, path, ext):\n cmd = 'shell ls %s' % path\n res = self.adb.cmd(*cmd.split()).communicate()[0]\n return len([x for x in res.splitlines() if ext in x])", "def trace...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save updates to this user. Updates will be made column by column based on the result of self.what_changed().
def save(self, context=None): updates = self.obj_get_changes() self.dbapi.update_user(context, self.id, updates) self.obj_reset_changes()
[ "def _save_edited_user(self):\n self._user_to_edit.name = self._entry_edit_user_name.get()\n self._build_listboxes_gui()\n self._disable_bttns()\n self._edit_user_window.destroy()", "def save(self):\r\n\t\tif not self._modified:\r\n\t\t\treturn\r\n\r\n\t\tupdated_fields = []\r\n\t\tfor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the name of the appliance
def name(self): return self.appliance_name
[ "def _get_appliance(request):\n appliance_entity = next((e for e in request.entities if e['type'] == 'appliance'), None)\n\n if appliance_entity:\n return appliance_entity['text'].lower()\n elif 'appliance' in request.frame:\n return request.frame['appliance']\n else:\n raise Except...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inject an extend method into obj that will use its append method.
def define_extend_as_seq_of_appends(obj): assert hasattr( obj, 'append' ), f'Your object needs to have an append method! Object was: {obj}' def extend(self, items): for item in items: self.append(item) if isinstance(obj, type): obj = type(obj.__name__, (obj,), {}) ...
[ "def extend(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def extend(class_to_extend):\n def decorator(func):\n# if hasattr(class_to_extend, func.func_name):\n# raise except_osv(_(\"Developper Error\"),\n# _(\"You can extend the cla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make item2kv from an item2key function (the value will be the item itself).
def item_to_key(item2key): def item2kv(item): return item2key(item), item return item2kv
[ "def item_to_key_params_and_val(item_to_key_params_and_val, key_str_format):\n\n def item2kv(item):\n key_params, val = item_to_key_params_and_val(item)\n if isinstance(key_params, dict):\n return key_str_format.format(**key_params), val\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make an item2kv function that uses the current time as the key, and the unchanged item as a value. The offset_s, which is added to the output key, can be used, for example, to align to another system's clock, or to get a more accurate timestamp of an event.
def utc_key(offset_s=0.0): if offset_s == 0.0: # splitting for extra speed (important in real time apps) def item2kv(item): return time.time(), item else: def item2kv(item): return time.time() + offset_s, item return item2kv
[ "def item_to_key(item2key):\n\n def item2kv(item):\n return item2key(item), item\n\n return item2kv", "def determine_timestamp(item):\n # There is no standard for this.\n # The following are common to some APIs.\n for key in ['creationTimestamp', 'timeCreated']:\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make item2kv from a function that produces key_params and val, and a key_str_format that will produce a string key from the key_params
def item_to_key_params_and_val(item_to_key_params_and_val, key_str_format): def item2kv(item): key_params, val = item_to_key_params_and_val(item) if isinstance(key_params, dict): return key_str_format.format(**key_params), val else: return key...
[ "def item_to_key(item2key):\n\n def item2kv(item):\n return item2key(item), item\n\n return item2kv", "def workflow_safe_parameters_key_value(kv_pair):\n key = ''\n value = ''\n for k, v in kv_pair.items():\n key = k\n value = v\n return {'key': key, 'value': val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The function prints data from the dict 'new_dict' according to the number (picked_num) the user chose.
def pickedFromDict(picked_num, new_dict): #1-printing mariah's last name #2-printing mariah's birth date #3-printing mariah's hobbies #4-printing mariah's last hobby #5-adds "cooking" to mariah's hobbies and printing mariah's updated hobbies #6-printing mariah's birth date into tuple of ...
[ "def print_partial_dict(self, sorted_dict: dict, print_number: int = None) -> None:\r\n # creates a list of the items to be printed. print_number limits amount printed\r\n print_list = list(sorted_dict.items())[:print_number]\r\n\r\n # prints: ex: apple - 1\r\n for item in print_list:\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterator for page links
def iter_page_links(self) -> Iterable[str]: base_url = 'https://www.med.navy.mil' r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem') soup = bs4.BeautifulSoup(r.content, features="html.parser") # get target column of list items issuance_list = soup.find('di...
[ "def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])", "def links(self):\r\n for match in self._href_re.finditer(self.content):\r\n url = match.group(1) or match.group(2) or match.group(3)\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse document objects from page of text
def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]: # parse html response url = "https://www.med.navy.mil/directives/Pages/Publications.aspx" base_url = 'https://www.med.navy.mil' parsed_docs = [] doc_name_list = [] if (page_url.find("Publ...
[ "def _parse_pages (doc):\n\trsrcmgr = PDFResourceManager()\n\tlaparams = LAParams()\n\tdevice = PDFPageAggregator(rsrcmgr, laparams=laparams)\n\tinterpreter = PDFPageInterpreter(rsrcmgr, device)\n\n\ttext_content = []\n\t#for i, page in enumerate(doc.get_pages()):\n\tfor i, page in enumerate(PDFPage.create_pages(do...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Withdrawal request for the desired article and quantity.
def retrait(idarticle, qte, idrequest, sortie): datahello = """<WWKS Version="2.0" TimeStamp="2013-04-16T11:14:00Z"> <HelloRequest Id="1001"> <Subscriber Id="100" > <Capability Name="KeepAlive"/> <Capability Name="Status"/> <Capability Name="Input"/> <Capability Name="InitiateInput"/>...
[ "def calculer_complexite_temps(recette):\r\n complexite_temps = 0\r\n\r\n # Extraction des opérations associées à un ingrédient, en tenant compte de la quantité de l'ingrédient\r\n for ingredient in recette.find_all('ingredient'):\r\n if ingredient.has_attr('action'):\r\n if ingredient.ha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provide an API call to Pushover for mobile notifications of events in the script. "message" is a string that will display on the Pushover notification. "app_token" is a string for the app token provided by Pushover.
def pushover(message, app_token): import urllib, httplib conn = httplib.HTTPSConnection("api.pushover.net:443") conn.request("POST", "/1/messages.json", urllib.urlencode({ "token": app_token, "user": "uU95W9hYqeW3b24uyPaT1skT1SG35N", "message": message, }), { "Content-type": "application/x-www...
[ "def send_pushover(user, appkey, message):\n import httplib\n import urllib\n try:\n pushover = httplib.HTTPSConnection(\"api.pushover.net:443\")\n pushover.request(\"POST\", \"/1/messages.json\",\n urllib.urlencode({\n \"token\": appkey,\n \"user\": u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Install our virtual environment, removing the old one if it exists.
def install_environment(root): sys.stdout.write('Installing virtualenv into %s \n' % root) try: import virtualenv except ImportError: sys.stdout.write('Installing virtualenv into global interpreter \n') subprocess.call([VE_GLOBAL_SCRIPT, PROJECT_ROOT]) import virtualenv ...
[ "def reinstall() -> None:\n pipenv.remove()\n install()", "def virtualenv_install():\r\n # Installs the latest virtual environment from the local prod-requirements.txt.\r\n prod_rev = latest_requirements_revision()\r\n assert re.match(r'[0-9a-f]+', prod_rev)\r\n\r\n active_env_rev = active_env()\r\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that validates whether the app parameter is registered in the process_context
def valid_process_name(function): def _wrapper(options, *args, **kwargs): from synergy.conf.process_context import ProcessContext if options.app not in ProcessContext.CONTEXT: msg = 'Aborting: application <%r> defined by --app option is unknown. \n' % options.app sys.stdout....
[ "def context_process(process):\n\n def _context_process(self, instance):\n context = instance.context\n processed_tag = \"_\" + self.__class__.__name__ + \"_processed_\"\n\n if context.data.get(processed_tag):\n self.log.info(\"Operated on context level, skipping.\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an adjacency graph in scipy sparse matrix format into igraph format.
def convert_sparse_to_igraph(indices, matrix): # sources, targets = matrix.nonzero() # weights = matrix[sources, targets] # weights = np.array(weights)[0] # print(dir(louvain)) # ig = igraph.Graph(zip(sources, targets), directed=True, # edge_attrs={'weight': weights}) # return...
[ "def from_nxgraph(G):\n return nx.to_scipy_sparse_matrix(G).astype('float32')", "def create_adjacency_matrix(graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a value K which scales logarithmically to the number of cells in a sample.
def get_k(df): return int(np.log(len(df.columns)))
[ "def k_entropy(s,k):\n t = 0.0\n for kmer in get_kmers(s,k):\n t += count_occurrences(s,kmer)\n \n e = 0.0\n for kmer in get_kmers(s,k):\n e += math.log(count_occurrences(s,kmer) / t, 2)\n return -e", "def MaxHks(N): \n return np.log2(N-1)/2", "def log_marg_k(self, k):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Authorize with Spotify API and fetch bearer token.
def authorize(self): try: auth_url = 'https://accounts.spotify.com/api/token' headers={} data={} data_string = f"{self.client_id}:{self.client_secret}" data_bytes = data_string.encode("ascii") base_bytes = base64.b64encode(data_bytes) base_message = base_bytes.decode("ascii") headers['Autho...
[ "def request_token():\n client_id = os.environ.get(\"SPOTIFY_CLIENT_ID\")\n client_secret = os.environ.get(\"SPOTIFY_CLIENT_SECRET\")\n\n client_credentials = f\"{client_id}:{client_secret}\"\n encoded_credentials = base64.b64encode(client_credentials.encode())\n\n headers = {\n \"Authorizatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert milliseconds to seconds
def millisec_to_sec(self, millisec): return millisec / 1000
[ "def millis_to_seconds(millis) -> float:\n\n return millis / 1000", "def _secs_to_ms(value):\n return int(round(value * 1000.))", "def as_seconds(*, seconds=0, minutes=0, hours=0, days=0, weeks=0, milliseconds=0, as_type=None):\n delta = datetime.timedelta(seconds=seconds, minutes=minutes, hours=hours,\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for the polling interval, then do the real message check.
def __check_for_messages(self): # Wait for at least poll_interval sec polling_interval = self.conf.messaging_server.polling_interval time.sleep(polling_interval) if self.conf.messaging_server.debug: LOG.debug("Topic {}: Checking for new messages".format( self...
[ "def _checkStatus(self):\n \n if self.last_messages == self.messages_received:\n self.channel.stop_consuming()\n else:\n self.last_messages = self.messages_received\n self.timer_id = self.conn.call_later(300, self._checkStatus)", "def longPoll(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gracefully stop working on things
def _gracefully_stop(self): pass
[ "def stopclean(self):\n raise Exception(\"Not implemented\")", "def stop(self, force=False):\n pass", "def _force_stop(self):\n if self.force_stop_func(instance=self.instance):\n self.force_stop_func(instance=self.instance, _set=True)\n raise StoppedException", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare to restart the RPC Server
def _restart(self): pass
[ "def restart_rpc_server(self):\n if self.server:\n self.server.shutdown()\n self.server = None\n if self.server_thread:\n self.server_thread = None\n self.start_rpc_server()", "def restart(event):\n elements.REMOTE_SERVER.restart()", "def _RestartServer( ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets environment variables for a nox session object.
def set_environment_variables(env_dict, session): for key, value in env_dict.items(): session.env[key] = value
[ "def set_env(**kwargs):\n _env.set(**kwargs)", "def setEnvironment():\n\tisAllCaps = lambda s: True if s.upper() == s else False\n\tN = [nuke.root()[i].name() for i in nuke.root().knobs() if isAllCaps(nuke.root()[i].name())]\n\tV = [nuke.root()[i].value() for i in nuke.root().knobs() if isAllCaps(nuke.root()[i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check all files against the defined pre-commit hooks.
def lint(session): session.install("pre-commit") session.run("pre-commit", "run", "--all-files")
[ "def precommit(exit=True):\n tmpdir = tempfile.mkdtemp()\n\n try:\n copy_index(tmpdir)\n\n modified = check_output(['git', 'diff', '--cached', '--name-only',\n '--diff-filter=ACMRT'])\n modified = [name.strip() for name in modified.splitlines()]\n pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the large training and test data set.
def _fetch_large(): # Large training data: resource( target=data_path("eeg", "SMNI_CMI_TRAIN.tar.gz"), url="https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz", ) dependency( target=data_path("eeg", "train"), source=data_path("eeg", "SMNI_CMI_TRAIN.tar.gz"), ...
[ "def get_or_init_training_set(self):\n \n self._download_data_files(TRAINING_SET, TRAIN_URL)\n self._download_data_files(TEST_SET, TEST_URL)", "def fetchTrainingData(self, debug=True):\n\n training_datasets = []\n item_list = self.api.get_relation_data()\n\n for item in item_list:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is used to create the container in Phantom using finding data.
def _create_container(self, finding): container_dict = {} container_dict['name'] = finding['Title'] container_dict['source_data_identifier'] = finding['Id'] container_dict['description'] = finding['Description'] container_creation_status, container_creation_msg, container_id = ...
[ "def _create_container(self, finding):\n\n container_dict = {}\n container_dict['name'] = finding['Title']\n container_dict['source_data_identifier'] = finding['Id']\n container_dict['description'] = finding['Description']\n\n container_creation_status, container_creation_msg, con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is used to create artifacts in given container using finding data.
def _create_artifacts(self, finding, container_id): artifacts = [] for resource in finding.pop('Resources'): resource_artifact = {} resource_artifact['name'] = '{} Resource Artifact'.format(resource['Type']) resource_artifact['container_id'] = container_id ...
[ "def _create_artifacts(self, finding, container_id):\n\n artifact = {}\n artifact['name'] = 'Finding Artifact'\n artifact['container_id'] = container_id\n artifact['source_data_identifier'] = finding['Id']\n artifact['cef'] = finding\n\n create_artifact_status, create_artif...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return collected metadata of a dataproduct.
def dataproduct(self, identity, dataproduct_id): metadata = {} permissions = self.permission.dataproduct_permissions( dataproduct_id, identity ) or {} session = self.config_models.session() # find Group or Data layer object OWSLayer = self.config_models.mod...
[ "def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))", "def GetMetadata(self):\n return self.dict['meta']", "def _get_dsmeta(self, bids):\n # STEP 1: Extract metadata from `dataset_des...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect metadata of a basic DataSet dataproduct.
def basic_dataset_metadata(self, data_set_view, session): metadata = {} contacts = self.basic_dataset_contacts(data_set_view, session) metadata = { 'identifier': data_set_view.name, 'display': data_set_view.data_set.data_set_name, 'type': 'datasetview', ...
[ "def dataproduct(self, identity, dataproduct_id):\n metadata = {}\n\n permissions = self.permission.dataproduct_permissions(\n dataproduct_id, identity\n ) or {}\n\n session = self.config_models.session()\n\n # find Group or Data layer object\n OWSLayer = self.co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return contacts metadata for a basic DataSet dataproduct.
def basic_dataset_contacts(self, data_set_view, session): # collect contacts for basic DataSet and related GDI resources gdi_oids = [ data_set_view.gdi_oid, data_set_view.data_set.gdi_oid_data_source ] return self.contacts(gdi_oids, session)
[ "def basic_dataset_metadata(self, data_set_view, session):\n metadata = {}\n\n contacts = self.basic_dataset_contacts(data_set_view, session)\n\n metadata = {\n 'identifier': data_set_view.name,\n 'display': data_set_view.data_set.data_set_name,\n 'type': 'datas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return primary key, geometry columns, types and srids from a PostGIS table.
def postgis_metadata(self, data_source_id, schema, table_name): metadata = {} try: engine = self.engine_for_data_source(data_source_id) if engine is None: return { 'error': "FEHLER: DataSource nicht gefunden" } # c...
[ "def get_table_info(self):\n epsg = None\n meta = MetaData()\n table_obj = Table(self._table, meta,\n autoload=True, autoload_with=self._engine)\n if not self._columns:\n self._columns = table_obj.columns.keys()\n geo_cols = [(col.name, col.type...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return SQLAlchemy engine for a data_source.
def engine_for_data_source(self, data_source_id): engine = None # find data_source DataSource = self.config_models.model('data_source') session = self.config_models.session() query = session.query(DataSource) \ .filter_by(gdi_oid=data_source_id) data_source =...
[ "def get_engine():\n from zine.application import get_application\n return get_application().database_engine", "def engine(self):\n if self._engine is None:\n with self._engine_lock:\n if self._engine is None:\n logger.debug(\"Creating SQL engine with URL:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively check if layer is a WMS layer.
def layer_in_ows(self, ows_layer, root_layer): if root_layer is None: # no WMS root layer return False in_wms = False # get parent groups parents = [p.group for p in ows_layer.parents] for parent in parents: if parent.gdi_oid == root_layer.gdi...
[ "def is_layer(obj):\n # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).\n return hasattr(obj, \"_is_layer\") and not isinstance(obj, type)", "def _checkLayers(self):\n pass", "def IsByLayer(self) -> bool:", "def is_feature_layer(layer):\n return getattr(layer, '_is_feature_layer', Fal...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ows_metadata for a layer.
def ows_metadata(self, layer): ows_metadata = {} if layer.ows_metadata: try: # load JSON from ows_metadata ows_metadata = json.loads(layer.ows_metadata) except ValueError as e: self.logger.warning( "Invalid JSON...
[ "def get_metadata_for(layer_index):\n try:\n layer = CatalogLayer.objects.get(id=layer_index)\n meta = layer.metadata\n except CatalogLayer.DoesNotExist:\n return {'success': 'false', 'message':\n '{0} is not a valid index for CatalogLayer'.format(layer_index)}\n except ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split comma separated values into list.
def split_values(self, value): if value: return [s.strip() for s in value.split(',')] else: return []
[ "def split_by_comma(s):\n return s.strip().split(\",\")", "def separate_comma(s):\n return s.split(',')", "def comma_separated_list(val):\n\n if not isinstance(val, str):\n VarType.__throw_invalid_type_error(val, str)\n\n val += ' '\n i = 0\n j = 0\n out = []\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update QML with embedded symbols.
def update_qml(self, qml): if qml is None: return qml try: # parse XML root = ElementTree.fromstring(qml) # embed symbols self.embed_qml_symbols(root, 'SvgMarker', 'name') self.embed_qml_symbols(root, 'SVGFill', 'svgFile') ...
[ "def embed_qml_symbols(self, root, layer_class, prop_key):\n for svgprop in root.findall(\".//layer[@class='%s']/prop[@k='%s']\" %\n (layer_class, prop_key)):\n symbol_path = svgprop.get('v')\n path = os.path.abspath(\n os.path.join(QGS_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Embed symbol resources as base64 in QML.
def embed_qml_symbols(self, root, layer_class, prop_key): for svgprop in root.findall(".//layer[@class='%s']/prop[@k='%s']" % (layer_class, prop_key)): symbol_path = svgprop.get('v') path = os.path.abspath( os.path.join(QGS_RESOURCES_DI...
[ "def qt_rcc_generate(res: Dict[str, List[str]]) -> str:\n root = XmlElementTree.Element(\"RCC\")\n\n for tag, items in res.items():\n if not len(items):\n continue\n\n doc = XmlElementTree.SubElement(root, \"qresource\", prefix=tag)\n\n for file in items:\n XmlElemen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a human-readable date variable. Assumes date is in seconds since epoch. time_var is a netCDF Variable object.
def add_utc_date(nc, time_var): # Create Variable utc = nc.createVariable('utc_time', int, ('time')) setattr(utc, 'standard_name', "time") setattr(utc, 'long_name', "UTC date yyyy-mm-dd hh:00:00 as yyyymmddhh") setattr(utc, "units","Gregorian_year month day hour") toUTC = lambda d: int(dt.datet...
[ "def _append_date(self, value, _file):\n _tabs = '\\t' * self._tctr\n _text = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n _labs = '{tabs}<date>{text}</date>\\n'.format(tabs=_tabs, text=_text)\n _file.write(_labs)", "def date_info_day(date_str, infile):\n #date_str = str(sys.argv[1])\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a time bounds variable to a variable. Assumes the time dimension is called 'time'.
def add_time_bounds(nc, varname): THREE_HOURS = 60*60*3 # in seconds bnds_name = 'time_bnds' bounds_dim = 'nv' # Create bounds dimension nc.createDimension(bounds_dim, 2) # Get variable matching varname time_var = nc.variables['time'] time_var.setncattr('bounds', bnds_name) time_d...
[ "def __define_variable_time(self, initial_guess, minimum, maximum):\n i = 0\n for nlp in self.nlp:\n if isinstance(nlp[\"tf\"], self.CX):\n time_bounds = Bounds(minimum[i], maximum[i], interpolation=InterpolationType.CONSTANT)\n time_init = InitialConditions(in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize some callbacks inline. Use this constructor to provide credentials and certificate callbacks inline, instead of defining your own class for these ones. You can e.g. also pass in one of the credential objects as 'credentials' instead of creating a function which returns a hardcoded object.
def __init__(self, credentials=None, certificate=None): if credentials is not None: self.credentials = credentials if certificate is not None: self.certificate = certificate
[ "def __init__(self, authorization_callback=None, credentials=None):\n if not authorization_callback and not credentials:\n raise ValueError(\"Either parameter 'authorization_callback' or parameter 'credentials' must be specified.\")\n\n # super(KeyVaultAuthentication, self).__init__()\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Certificate callback. Override with your own function to determine whether to accept the server's certificate.
def certificate_check(self, certificate, valid, host): raise Passthrough
[ "def verify_callback(self, peer_cert, preverify_status=None):\n if isinstance(preverify_status, Exception):\n raise preverify_status\n self.check_certificate(peer_cert, cert_name='peer certificate')", "def verify_server_certificate(self):\n return self._verify_server_certificate", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transfer progress callback. Override with your own function to report transfer progress.
def transfer_progress(self, stats): pass
[ "def transferProgress(self, p_int, p_int_1): # real signature unknown; restored from __doc__\n pass", "def doProgress(self,progress,message):\n pass", "def progressCallback(self, bytesWritten):\n self.mutex.acquire()\n self.totalBytesWritten += bytesWritten\n self.copyProgress...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update tips callback. Override with your own function to report reference updates.
def update_tips(self, refname, old, new): pass
[ "def __editShowCallTips(self):\n self.activeWindow().callTip()", "def _refresh_hint_text(self):", "def update_action_tooltip(*args):\n return _ida_kernwin.update_action_tooltip(*args)", "def help_update(self):\n print(UPDATE)", "def call_toolTips(self):\n self.choose_fold_button.setToo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push update reference callback. Override with your own function to report the remote's acceptance or rejection of reference updates.
def push_update_reference(self, refname, message): pass
[ "def ref_updated(self, event):\n pass", "def _notify_update(self, cuds_object):", "def update_ref(ref, sha):\n pass", "def update_only_ff(self, local_ref, remote_ref):\n refs = self.show_ref([local_ref, remote_ref])\n local_sha1 = refs[local_ref]\n remote_sha1 = refs[remote_ref...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Name of the remote
def name(self): return maybe_string(C.git_remote_name(self._remote))
[ "def remote_hostname(self):\n return self.m_iface.remote_hostname()", "def remote_hostname(self):\n return pn_connection_remote_hostname(self._impl)", "def get_remote_name(self, file_path: str, remote_name: str):\n if remote_name is None:\n remote_name = os.path.basename(file_pat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
URL of the remote
def url(self): return maybe_string(C.git_remote_url(self._remote))
[ "def remote_url(self):\n return self.config.get('remote-server')", "def getURL(self):\n remote_url = self.data.get(\"remote_url\")\n if remote_url:\n return remote_url\n\n if self.brain:\n return self.brain.getURL()", "def remote_url(self):\n return urlli...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push URL of the remote
def push_url(self): return maybe_string(C.git_remote_pushurl(self._remote))
[ "def push(self, url):\n self.pool.push(url)", "def set_push_url(self, name, url):\n err = C.git_remote_set_pushurl(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)", "def push_remote(self, identifier):\r\n log.info('pushing %s to remote %s' % (identifier,\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save a remote to its repository's configuration.
def save(self): err = C.git_remote_save(self._remote) check_error(err)
[ "def remote_save(self, file_path=None, delete_local=False, remote_target=None):\n if not file_path:\n file_path = self.fname\n\n f = File()\n if not remote_target:\n remote_target = self.remote.get('target', None)\n LOG.info(\"Saving picture %s to %s\" % (file_path,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Total number of refspecs in this remote
def refspec_count(self): return C.git_remote_refspec_count(self._remote)
[ "def n_refs(self):\n return self._n_refs", "def get_total_rehashes(self):\n return self.count_rehashes", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs +=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Refspecs that will be used for pushing
def push_refspecs(self): specs = ffi.new('git_strarray *') err = C.git_remote_get_push_refspecs(specs, self._remote) check_error(err) return strarray_to_strings(specs)
[ "def push_ref(self, commit_id=\"*\"):\n pass", "def list_refs(self):\n pass", "def push_update_reference(self, refname, message):", "def updateScptRefs(self):\n for scpt in self.refs_scpt.keys():\n self.refs_scpt[scpt] = scpt.getRef()\n self.scptRefs = set(self.refs_scpt...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call fn and return the credentials object
def get_credentials(fn, url, username, allowed): url_str = maybe_string(url) username_str = maybe_string(username) creds = fn(url_str, username_str, allowed) credential_type = getattr(creds, 'credential_type', None) credential_tuple = getattr(creds, 'credential_tuple', None) if not credential...
[ "def _project_and_creds(fn):\n\n def wrapper(args: dict):\n project_id = args.get('project_id')\n creds_file = args.get('cloud_key')\n\n creds_data = util.credentials(creds_file)\n creds = creds_data.credentials\n\n if project_id is None:\n project_id = creds_data.project_id\n\n return fn(ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new remote with the given name and url. Returns a Remote object. If 'fetch' is provided, this fetch refspec will be used instead of the default.
def create(self, name, url, fetch=None): cremote = ffi.new('git_remote **') if fetch: err = C.git_remote_create_with_fetchspec(cremote, self._repo._repo, to_bytes(name), to_bytes(url), to_bytes(fetch)) else: err = C.git_remote_create(cremote, self._repo._repo, to_bytes(...
[ "def create_remote(self, name, url, **kwargs):\r\n return Remote.create(self, name, url, **kwargs)", "def create_remote(self, name: str, url: str, **kwargs: Any) -> Remote:\n return Remote.create(self, name, url, **kwargs)", "def create_remote(repo, remote_url):\n\torigin = repo.create_remote('ori...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rename a remote in the configuration. The refspecs in standard format will be renamed. Returns a list of fetch refspecs (list of strings) which were not in the standard format and thus could not be remapped.
def rename(self, name, new_name): if not name: raise ValueError("Current remote name must be a non-empty string") if not new_name: raise ValueError("New remote name must be a non-empty string") problems = ffi.new('git_strarray *') err = C.git_remote_rename(...
[ "def __gitRenameRemote(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the pushURL for a remote
def set_push_url(self, name, url): err = C.git_remote_set_pushurl(self._repo._repo, to_bytes(name), to_bytes(url)) check_error(err)
[ "def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def git_remote_url(self, git_remote_url):\n\n self._git_remote_url = git_remote_url", "async def url(self, ctx: Context,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a fetch refspec (str) to the remote
def add_fetch(self, name, refspec): err = C.git_remote_add_fetch(self._repo._repo, to_bytes(name), to_bytes(refspec)) check_error(err)
[ "def add_push(self, name, refspec):\n\n err = C.git_remote_add_push(self._repo._repo, to_bytes(name), to_bytes(refspec))\n check_error(err)", "def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a push refspec (str) to the remote
def add_push(self, name, refspec): err = C.git_remote_add_push(self._repo._repo, to_bytes(name), to_bytes(refspec)) check_error(err)
[ "def add_fetch(self, name, refspec):\n\n err = C.git_remote_add_fetch(self._repo._repo, to_bytes(name), to_bytes(refspec))\n check_error(err)", "def cmd_push_review(remote):\n return ['git', 'push', remote]", "def push_ref(self, commit_id=\"*\"):\n pass", "def git_push():\n\n # get ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads the 6B GloVe embeddings (approx. 1GB)
def download_glove (): # Get the URL ... print("Downloading https://nlp.stanford.edu/data/glove.6B.zip ...") res = requests.get("https://nlp.stanford.edu/data/glove.6B.zip", stream=True) if res.status_code != 200: print("Could not download the 6B GloVe Dataset! The server responded with code " + res.status_...
[ "def load_glove_embeddings():\n #if you are running on the CSE machines, you can load the glove data from here\n #data = open(\"/home/cs9444/public_html/17s2/hw2/glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n\n word_index_dict = {}\n word_index...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the Stanza module
def run_stanza (arguments): if arguments.download: # Download the full Stanza dataset result = input("ATTENTION! This will download the full English Stanza corpus (approx. 400 MB). Do you wish to continue (y/n)? ") if result == "y" or result == "yes": # For a list of processors, see https://stanford...
[ "def run():\n harvest = Harvest()\n exit(harvest.run())", "def main():\n superrocket = SuperRocket.from_cmd_args()\n superrocket.run()", "def main():\n parser = argparse.ArgumentParser(\n description='Launch a Stream server daemon.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatte...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the GloVe module
def run_glove (arguments): if arguments.download: # Download the 6B GloVe dataset result = input("ATTENTION! This will download approximately 1GB of data. Do you wish to continue (y/n)? ") if result == "y" or result == "yes": download_glove() sys.exit(0) # Afterwards exit normally
[ "def main():\n # load properties\n properties = utils.load_properties()\n logger = utils.config_logger(properties)\n logger.info(\"Configuration file is loaded\")\n if properties[\"setup_folders\"]:\n logger.info(\"Set up folders is true. Glove vectors and datasets will be downloaded\")\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
tests whether symbols are looked up properly
def test_symbol_lookup(self): def check_lookup(symbol, expected): op = BaseWhereOperator.get_operator(symbol) self.assertEqual(op, expected) check_lookup('EQ', EqualsOperator) check_lookup('IN', InOperator) check_lookup('GT', GreaterThanOperator) check_l...
[ "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', Greater...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
matrix_set_diag operator implemented in numpy. Returns a numpy array with the diagonals of input array replaced with the provided diagonal values.
def matrix_set_diag(input_np, diagonal, k=0, align="RIGHT_LEFT"): out = np.array(input_np, copy=True) cols = input_np.shape[-1] rows = input_np.shape[-2] onlyOneDiagonal = True if isinstance(k, (tuple, list)): if len(k) < 2 or k[0] == k[1]: k = k[0] else: on...
[ "def set_diag(L, diag_L):\n d = shape_as_list(L)[2]\n M = L\n M[:, torch.arange(0, d), torch.arange(0, d)] = diag_L\n return M", "def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))", "def replace_diag(mat, newdiag):\n\n if newdiag.ndim>1: \n raise Exception(\"newdi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The stream effect has the colors of the LEDs move like a stream, where the color of an LED is given to its neighbor in the next update step.
def stream_handler(args_dict: dict): color_sequence = args_dict['color_sequence'] color_seq_len = args_dict['color_seq_len'] color_itr = args_dict['color_itr'] n_leds = args_dict['n_leds'] step_sequence = [color_sequence[c % color_seq_len] for c in range(color_itr, n_leds + color_itr)] # Upda...
[ "def led_update(self):\n self.send_startframe()\n self.send_colour(self.led_count)\n self.send_endframe()", "def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve all cursos from the graph
def get_cursos(request): if request.method == 'GET': cursos = Curso.nodes.all() cursos_list = [] for i in range(0, len(cursos)): cursos_list.append(cursos[i].__dict__["nombre"]) return JsonResponse({"cursos": cursos_list})
[ "def graph(self):", "def getCursosGraduacao(self):\n params = {\"NIVEL_CURSO_ITEM\": 3}\n return self.getCursos(params)", "def download_chicago_graph():\n\n\tG = ox.graph_from_place(\"Chicago,IL, United States\", network_type='drive')\n\treturn G", "def ggml_metal_get_concur_list(ctx: ffi.CData)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predict the BDEs of each bond in a list of molecules.
def predict(smiles_list, drop_duplicates=True, batch_size=1, verbose=False): molecule_list = [Molecule(smiles=smiles) for smiles in smiles_list] smiles_list = [mol.smiles for mol in molecule_list] pred_df = pd.concat( ( get_fragments(mol, drop_duplicates=drop_duplicates) fo...
[ "def predict(smiles_list, drop_duplicates=True, verbose=True):\n\n is_valid = pd.Series({smiles: not check_input(smiles)[0] for smiles in smiles_list}, name='is_valid')\n pred_df = pd.concat([predict_bdes(smiles, draw=False) for smiles in smiles_list])\n pred_df = pred_df.merge(is_valid, left_on='molecule'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function makes a list of all the songs in an album
def songs_list(name_of_album): songs = "" data = dbase() data = data[name_of_album][0] for song in data.keys(): songs += song songs += ", " return songs[:-2]
[ "def songs_from_album(album_url):\n resp = requests.get(album_url)\n soup = BeautifulSoup(resp.content, 'html.parser')\n table_classes = {'class': \"tracklist\"}\n tracklist = soup.find('table', table_classes)\n songs = None\n if tracklist:\n songs = []\n for row in tracklist.findAll...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the number of words in one song
def get_len(song, album): length = 0 words = dbase()[album][0][song] words = words[2] words = words.split() for word in words: length += 1 return str(length)
[ "def song_length(ans):\r\n length = 0\r\n flag = 1\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates how many words there are in all of the songs and albums, using the "get_len" function
def song_length(ans): length = 0 flag = 1 albums = simple_album_list() for album in albums: songs = simple_songs_list(album) for song in songs: if ans == song: words = dbase()[album][0][song] words = words[2] words = w...
[ "def get_len(song, album):\r\n length = 0\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n words = words.split()\r\n for word in words:\r\n length += 1\r\n return str(length)", "def len_by_songs(self):\n pass", "def common():\r\n full_song = \"\"\r\n albums = si...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the lyrics of a specific song
def song_lyrics(ans): albums = simple_album_list() for album in albums: songs = simple_songs_list(album) for song in songs: if ans == song: words = dbase()[album][0][song] words = words[2] return words
[ "def getLyrics(query):\n\n if ('hakun' in query.lower()):\n return 'Hakuna Matata! What a wonderful phrase \\n Hakuna Matata! Ain\\'t no passing craze'\n\n json = GENIUS.search_genius(query)\n url = (json.get('hits')[0].get('result').get('url'))\n lyrics = GENIUS._scrape_song_lyrics_from_url(url)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function finds which album the song is in
def song_album(ans): albums = simple_album_list() for album in albums: songs = simple_songs_list(album) for song in songs: if ans == song: return album
[ "def find_album_playlist(data):\n\n return data['album'].lower() + '.m3u'", "def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n res...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function makes a list of the 50 most common words across all songs
def common(): full_song = "" albums = simple_album_list() for album in albums: songs = simple_songs_list(album) for song in songs: full_song += str(song_lyrics(song)) split_lyrics = full_song.lower().split() counter = collections.Counter(split_lyrics) most_wo...
[ "def get_top_k_words(sentences, k):\r\n words = []\r\n for sentence in sentences:\r\n words.extend(sentence.split())\r\n #调用了库文件 Counter\r\n return [word for word, _ in Counter(words).most_common(k)]", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Points: list of tuples, where each tuple has the (x, y) coords of a point. numLines: number of pairs of points to be randomly sampled. numIter: number of iterations for which estimates of Prob should be refined. e_tilde: critical distance for 50% probability of membership in the line. gamma_tilde: critical fraction of valid poin...
def iterative_function(points, numLines, numIter, e_tilde, gamma_tilde, beta_tilde): numPoints = len(points) # Randomly sample pairs and get the corresponding rho and theta parameters for a line fitted to the pair: # Returns a list of tuples - Each tuple has the rho and theta parameters for the l...
[ "def generate_points(self, num_points):\n sections_for_value = ut.get_sections_for_coords(self.section_file, self.include_labels, self.split, self.probs_spacing, self.probs_offset[2])\n self.log.info('Generating {} new points'.format(num_points))\n points = np.random.choice(np.arange(0,self.pro...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calls from a contact.
def call_from_contact(self): log_test_case(self.name, 'call_from_contact') #click_textview_by_text(SC.PRIVATE_CONTACT_NUMBER) click_textview_by_id('primary_action_view') sleep(1) goback() sleep(3) return
[ "def call(self, callee: \"SIPPhoneTemplate\") -> None:", "def call(self, args):\n self.message(\"You call to your friend\")", "def onAfterSendCallSuccess(self, msg, call):\r\n pass", "def test_get_contact(self):\n pass", "def on_contact(self, update, context):\n user = update.effec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign slots for organizations within a program. Gets the slot assignment data as a JSON string from the program and enqueues a task to process the slot assignments.
def assignProgramSlots(request, *args, **kwargs): program = None params = request.REQUEST # Query the program entity try: program = program_logic.getFromKeyName(params["programkey"]) except KeyError: logging.error("programkey not in params") return responses.terminateTask() if not program: ...
[ "def assignSlots(request, *args, **kwargs):\n\n # Setup an artifical request deadline\n timelimit = int(request.REQUEST.get(\"timelimit\", 20000))\n timekeeper = Timekeeper(timelimit)\n\n program_key = request.REQUEST.get(\"programkey\")\n last_key = request.REQUEST.get(\"lastkey\", \"\")\n program = program_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the slots attribute for each organization entity
def assignSlots(request, *args, **kwargs): # Setup an artifical request deadline timelimit = int(request.REQUEST.get("timelimit", 20000)) timekeeper = Timekeeper(timelimit) program_key = request.REQUEST.get("programkey") last_key = request.REQUEST.get("lastkey", "") program = program_logic.getFromKeyName(...
[ "def slots_per_agent(self, slots_per_agent):\n\n self._slots_per_agent = slots_per_agent", "def set_slots(self, slot, value):\n self.slots[slot] = value", "def set_organization(apps, schema_editor):\n BagItProfile = apps.get_model('bag_transfer', 'BagItProfile')\n Organization = apps.get_mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
receive a batch from replay and transfer it from CPU to GPU
def sample_batch(pid, args, batch_queue, port_dict, device, actor_id_to_ip_dataport, local_size, cache_array): def recv_data(k, data_stream, actor_set, real_data_tasks_i): for real_data in data_stream: tmp = [] tmp.append(real_data.state) tmp.append(real_data.action) ...
[ "def _replay(self):\n\n losses = []\n\n train_data = utils.memory_dataset(self._Memory._samples, utils.transform())\n\n params = {\n 'batch_size': self._Model.batch_size,\n 'sampler': torch.utils.data.RandomSampler(train_data),\n # 'shuffle': True,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
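The CPU-to-GPU transfer mentioned in the query above is commonly done as below; a minimal sketch assuming PyTorch tensors, not the dataset's pipeline code.

import torch

def batch_to_device(batch, device):
    # batch: tuple of CPU tensors (state, action, reward, next_state, done);
    # pinning host memory and using non_blocking=True lets the copy overlap compute
    return tuple(t.pin_memory().to(device, non_blocking=True) for t in batch)

# usage sketch:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# states, actions, rewards, next_states, dones = batch_to_device(sampled_batch, device)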
Get raw data from the data file, returned in mV. SPAM raw data is single-precision float with unit Volts. Calling this applies the ts_lsb calculated when the headers are read. This is because when a recording consists of multiple data files, each channel of each data file might have a different scaling. The only way to make...
def getUnscaledSamples(self, **kwargs) -> TimeData: # initialise chans, startSample and endSample with the whole dataset options = self.parseGetDataKeywords(kwargs) # get the files to read and the samples to take from them, in the correct order dataFilesToRead, samplesToRead, scalings =...
[ "def getRawData(self):\n try:\n self.data = np.fromstring(\n self.stream.read(\n self.frames_per_buffer,\n exception_on_overflow = False\n ),\n dtype = np.int16\n )\n self.data = self.data.asty...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
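The per-channel scaling the record above describes (one multiplier per channel of each data file, folding in ts_lsb and the V-to-mV conversion) reduces to an elementwise multiply; the names below are illustrative.

import numpy as np

def scale_to_millivolts(data, scalings):
    # data: dict of channel name -> np.ndarray of raw samples (single-precision Volts)
    # scalings: dict of channel name -> multiplier that already folds in ts_lsb
    # and the Volts-to-mV factor for this particular data file
    return {chan: samples.astype(np.float64) * scalings[chan]
            for chan, samples in data.items()}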
Get the data files that have to be read to cover the sample range
def getDataFilesForSamples( self, startSample: int, endSample: int ) -> Tuple[List[str], List[List[int]], List[float]]: # have the datafiles saved in sample order beginning with the earliest first # go through each datafile and find the range to be read dataFilesToRead = [] s...
[ "def data_files(self):\n tf_record_pattern = os.path.join('/data/ImageNet/output_data/', '%s-*' % self.subset)\n data_files = tf.gfile.Glob(tf_record_pattern)\n if not data_files:\n print('No files found for dataset %s' % (self.subset))\n\n exit(-1)\n return data_fi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
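Selecting the files that cover a sample range is an interval-overlap test against the per-file sample ranges; a self-contained sketch with hypothetical names:

def files_for_range(data_ranges, start_sample, end_sample):
    # data_ranges: list of inclusive (first, last) global sample indices per file
    selected = []
    for idx, (first, last) in enumerate(data_ranges):
        if last < start_sample or first > end_sample:
            continue  # this file lies entirely outside the requested range
        # clip the requested range to this file's coverage
        selected.append((idx, max(first, start_sample), min(last, end_sample)))
    return selected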
Get defaults for channel headers. Returns: Dict[str, Any], a dictionary of headers for channels and their default values.
def chanDefaults(self) -> Dict[str, Any]: chanH = {} chanH["gain_stage1"] = 1 chanH["gain_stage2"] = 1 chanH["hchopper"] = 0 # this depends on sample frequency chanH["echopper"] = 0 # channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, ...
[ "def default_channel_response_data(channel):\n channel_record = Channel.objects.get(name=channel.name)\n return {\n \"title\": channel.title,\n \"name\": channel.name,\n \"description\": channel.description,\n \"public_description\": channel.public_description,\n \"channel_t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read header files. For SPAM data, there may be more than one header file, as data can be split up into smaller files as it is recorded. In that case, the header information has to be merged. All sampling frequencies should be the same.
def readHeader(self) -> None: # read header files self.headersList = [] self.chanHeadersList = [] for headerFile in self.headerF: if "xtrx" in headerFile.lower(): headers, chanHeaders = self.readHeaderXTRX(headerFile) else: headers,...
[ "def readHeaderXTR(self, headerFile: str) -> None:\n with open(headerFile, \"r\") as f:\n lines = f.readlines()\n sectionLines = {}\n # let's get data\n for line in lines:\n line = line.strip()\n line = line.replace(\"'\", \" \")\n # continue i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read an XTR header file. The raw data for SPAM is in single-precision Volts. However, if there are multiple data files for a single recording, each one may have a different gain. Therefore, a scaling has to be calculated for each data file and channel. This scaling will convert all channels to mV. For the most part, this...
def readHeaderXTR(self, headerFile: str) -> None: with open(headerFile, "r") as f: lines = f.readlines() sectionLines = {} # let's get data for line in lines: line = line.strip() line = line.replace("'", " ") # continue if line is empty ...
[ "def readHead(self):\n filesize = self.rhd.tell()\n \n #the order in which all of this is called is critcal\n self.header_identifier = hex(np.uint32(struct.unpack('<I', self.rhd.read(4))))\n v = np.int8(struct.unpack('BBBB', self.rhd.read(4)))\n\n #read each property of the...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
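The per-file, per-channel scaling discussed above boils down to one multiplier. The exact SPAM convention is format-specific, so the formula below only shows the shape of the computation (undo both gain stages, then convert V to mV) and is not the dataset's code.

def channel_scalar(ts_lsb, gain_stage1, gain_stage2):
    # hypothetical: fold both gain stages into one multiplier and convert to mV
    return 1000.0 * ts_lsb / (gain_stage1 * gain_stage2)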
Read an XTRX header file. XTRX are newer header files and will supersede XTR.
def readHeaderXTRX(self, headerFile): raise NotImplementedError("Support for XTRX files has not yet been implemented")
[ "def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge headers from all the header files. Checks all the header files to see if there are any gaps and calculates the sample ranges for each file together with the total number of samples. Sets the start and end time of the recording and the class variables datetimeStart and datetimeStop.
def mergeHeaders(self, headersList: List, chanHeadersList: List) -> None: # take the first header as an example self.headers = headersList[0] self.chanHeaders = chanHeadersList[0] if len(headersList) == 1: # just fill in the data file list and data ranges self.dat...
[ "def readHeaderXTR(self, headerFile: str) -> None:\n with open(headerFile, \"r\") as f:\n lines = f.readlines()\n sectionLines = {}\n # let's get data\n for line in lines:\n line = line.strip()\n line = line.replace(\"'\", \" \")\n # continue i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
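The gap check and sample-range bookkeeping described above can be sketched as follows, assuming per-file start times, per-file sample counts, and one common sampling frequency (all names hypothetical):

from datetime import timedelta

def merge_sample_ranges(file_starts, file_samples, fs):
    # file_starts: datetime of the first sample in each file
    # file_samples: number of samples in each file; fs: sampling frequency in Hz
    order = sorted(range(len(file_starts)), key=lambda i: file_starts[i])
    ranges, total = [], 0
    expected = file_starts[order[0]]
    for i in order:
        if file_starts[i] != expected:
            raise ValueError("gap in recording before file {}".format(i))
        ranges.append((total, total + file_samples[i] - 1))
        total += file_samples[i]
        expected = file_starts[i] + timedelta(seconds=file_samples[i] / fs)
    return ranges, total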
Information about the data files as a list of strings. Returns: List[str], information about the data files.
def printDataFileList(self) -> List[str]: textLst: List[str] = [] textLst.append("Data File\t\tSample Ranges") for dFile, sRanges in zip(self.dataFileList, self.dataRanges): textLst.append("{}\t\t{} - {}".format(dFile, sRanges[0], sRanges[1])) textLst.append("Total samples = ...
[ "def get_data_files(self):\n return self.data_list", "def list_data():\n path=data.__path__[0]\n\n return os.listdir(path)", "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.ap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a set of results, return a list of LDAPSearchResult objects.
def get_search_results(results): if len(results) == 0: return [] if type(results) == tuple and len(results) == 2: (code, arr) = results elif type(results) == list: arr = results res = [] for item in arr: res.append(LDAPSearchResult(item)) return res
[ "def _return_all():\n conn = ldap.initialize(settings.LDAP_SYNC_PROVIDER_URI)\n conn.bind_s(settings.LDAP_ADMIN_DN, settings.LDAP_ADMIN_PASSWORD)\n encoded_q = '@'.encode('utf-8')\n search_filter = filter_format('(|(mail=*%s*)(uid=*%s*))',\n (encoded_q, encoded_q,))\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look for the transaction receipt; only raise a not-found error if it has been missing for longer than two minutes.
async def _check_transaction_receipt(self, tx_hash: str, timestamp: int): async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance() try: return await async_scheduler.call_async(self._w3.eth.getTransactionReceipt, tx_hash) except TransactionNotFound as e: ...
[ "def wait_for_receipt(tx_hash, duration=C.EVM_TIMEOUT):\n slept = 0\n tx_rcpt = None\n\n while slept < duration:\n # because web3 throws if not present vs returning None (like the docs say)\n try:\n tx_rcpt = g.w3.eth.getTransactionReceipt(tx_hash)\n except TransactionNotFou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
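The tolerate-then-raise pattern in the record above (swallow TransactionNotFound until a grace period elapses) looks roughly like this synchronous sketch. Note the document's snapshot uses the older getTransactionReceipt spelling; newer web3.py uses get_transaction_receipt.

import time
from web3.exceptions import TransactionNotFound

def receipt_with_grace(w3, tx_hash, first_seen, grace_seconds=120):
    # first_seen: unix timestamp when the receipt was first looked for
    try:
        return w3.eth.get_transaction_receipt(tx_hash)
    except TransactionNotFound:
        if time.time() - first_seen > grace_seconds:
            raise  # missing for over the grace period: surface the error
        return None  # still within the grace period; caller retries later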
Test case for add_asset_share_feed
def test_add_asset_share_feed(self): pass
[ "def test_article_has_share_links(self):\n slug = self.create_article()['slug']\n res = self.get_single_article(slug)\n self.assertIn('share_article', res.data)\n self.assertIn('Facebook', res.data['share_article'])\n self.assertIn('Twitter', res.data['share_article'])\n se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the redirect URI with the redirect_state parameter.
def get_redirect_uri(self, state=None): regex = re.compile(r"\:(80|443)\/") uri = regex.sub("/", self.redirect_uri) if self.REDIRECT_STATE and state: uri = url_add_parameters(uri, {'redirect_state': state}) return uri
[ "def get_redirect_uri(self, state=None):\n if state is not None:\n uri = self.blank_redirect_uri\n if self.REDIRECT_STATE and state:\n uri = url_add_parameters(uri, {'redirect_state': state})\n else:\n uri = self.redirect_uri\n return uri", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
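url_add_parameters in the record above appears to be a helper from the social-auth utilities; a standard-library stand-in with the behaviour the code relies on (merge new query parameters, keep existing ones) would be:

from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def url_add_parameters(url, params):
    # merge params into the URL's query string, preserving existing parameters
    parts = list(urlparse(url))
    query = dict(parse_qsl(parts[4]))  # parts[4] is the query component
    query.update(params)
    parts[4] = urlencode(query)
    return urlunparse(parts)

# url_add_parameters("https://example.com/cb", {"redirect_state": "abc"})
# -> "https://example.com/cb?redirect_state=abc"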
Loads a surface mesh using meshio. Not meant for mixed-shape meshes.
def load_mesh(fname): fname = abs_fname_(fname) m = meshio.read(fname) mesh = Mesh() mesh.vertices = m.points for i, c in enumerate(m.cells): if i == 0: faces = c.data else: faces = np.vstack((faces, c.data)) mesh.faces = faces return mesh
[ "def load_mesh(self):\n return fn.Mesh(self.mesh_folder_path)", "def load_mesh(filename):\n mesh = _Trimesh(filename)\n loader_available = mesh.load()\n\n if not loader_available: # pragma: no cover\n mesh = _Open3DMesh(filename)\n loader_available = mesh.load()\n\n if not loader...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
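A quick meshio round trip shows the cell-block stacking that load_mesh above performs; square.vtk is a throwaway filename used only for this example.

import meshio
import numpy as np

# write a two-triangle surface mesh, read it back, and stack the cell blocks
points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                   [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
cells = [("triangle", np.array([[0, 1, 2], [0, 2, 3]]))]
meshio.write("square.vtk", meshio.Mesh(points, cells))

m = meshio.read("square.vtk")
faces = np.vstack([block.data for block in m.cells])  # as load_mesh above does
print(faces.shape)  # (2, 3)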
Loads a volume mesh using meshio. Not meant for mixed-shape meshes.
def load_volume_mesh(fname): fname = abs_fname_(fname) m = meshio.read(fname) mesh = Mesh() mesh.vertices = m.points for i, c in enumerate(m.cells): if i == 0: elements = c.data else: elements = np.vstack((elements, c.data)) mesh.elements = elements ...
[ "def load_mesh(self):\n return fn.Mesh(self.mesh_folder_path)", "def read(self, mesh_path: str) -> None:\n\n reader = VtuReader(mesh_path)\n self.set_mesh_data(mesh=reader.mesh, bc=reader.bc, mpc=reader.mpc)", "def load_volume_mixd(dim, fname=None, mxyz=None, mien=None, hexa=False):\n ve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }