Dataset columns:
  query: string, lengths 9 to 9.05k
  document: string, lengths 10 to 222k
  negatives: list, lengths 19 to 20
  metadata: dict
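For reference only, a minimal sketch of how rows with this schema might be loaded and iterated in Python. It assumes the rows are published as a Hugging Face dataset; the dataset path below is a hypothetical placeholder, not the actual name.

    # Minimal sketch (assumption: rows are available via the `datasets` library).
    # "user/code-search-triplets" is a hypothetical placeholder path.
    from datasets import load_dataset

    ds = load_dataset("user/code-search-triplets", split="train")

    for row in ds.select(range(3)):
        query = row["query"]          # natural-language description of the code
        document = row["document"]    # the matching code snippet
        negatives = row["negatives"]  # 19-20 non-matching snippets
        objective = row["metadata"]["objective"]
        print(query[:60], len(negatives))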
Asserts that a and b are equivalent entities or lists of entities. ...specifically, that they have the same property values, and if they both have populated keys, that their keys are equal too.
def assert_entities_equal(self, a, b, ignore=frozenset(), keys_only=False, in_order=False): if not isinstance(a, (list, tuple, db.Query, ndb.Query)): a = [a] if not isinstance(b, (list, tuple, db.Query, ndb.Query)): b = [b] key_fn = lambda e: e.key if isinstance(e, n...
[ "def assert_equal_entities(self, expected_entities, actual_entities):\n for expected_entity, actual_entity in zip(expected_entities, actual_entities, strict=True):\n expected_entity[\"first_seen\"] = actual_entity[\"first_seen\"] = \"ignore\"\n self.assertDictEqual(expected_entity, actu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of keys for a list of entities.
def entity_keys(self, entities): return [e.key() for e in entities]
[ "def get_all_keys(self) -> List:\r\n key_list = []\r\n for i in self.hash_table:\r\n if i is not None:\r\n key_list.append(i[0])\r\n return key_list", "def listkeys(d):\n return list(iterkeys(d))", "def keys(self):\n for item in self.table:\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to extract the callback identifier from the given text
def get_callback_id(text):
    # type: (str) -> str
    idx = re.search(r"__doPostBack\('(.*)',", text)
    return idx.group(1) if idx else ""
[ "def get_id(text):\n # I also want an optional type\n return text[2] if \"Guard\" in text else None", "def _get_callback_id(self, callback):\n for event_id, entry in self.callbacks.iteritems():\n cb, once = entry\n if cb == callback:\n return event_id", "def getcallback(self, txt):\n\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to extract the mp4 url from the given text, using VIDEO_DEFINITION as the preference; i.e. if VIDEO_DEFINITION is High it will try High, then fall back to Low if not found; if VIDEO_DEFINITION is Low it will try Low, then fall back to High if not found
def get_mp4_url(text): # type: (str) -> Optional[str] mp4 = re.search(r"(http.*{}\.mp4)".format(VIDEO_DEFINITION), text) if not mp4: logger.debug("get_mp4_url no mp4: {}".format(VIDEO_DEFINITION)) swap = "Low" if VIDEO_DEFINITION == "High" else "High" mp4 = re.search(r"(http.*{}\.mp...
[ "def __extract_video_url(self):\n\n self.__logger.info('wait for %s seconds', self.__seconds)\n\n time.sleep(self.__seconds)\n\n self.__logger.info('Extract video url from %s', self.__args.url)\n\n try:\n req = requests.post(self.__args.url, data=self.__params)\n ht...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the full catalogue url for a given catalogue number
def get_catalogue_url(number):
    # type: (int) -> str
    return "{}catalogue/{}".format(EAFA_URI, number)
[ "def get_url(self, index):\n\n\t\treturn 'https://www.cardmarket.com/en/YuGiOh/Products/Singles/' + self.db.loc[index, 'set_url_name'] + '/' + self.db.loc[index, 'url_name']", "def getProductUrl(productId):\r\n return baseUrl + productId", "def get_url(course_code):\n\n ans = DatabaseConnector.get_values(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to extract the year from text, default 0
def get_year(text):
    # type: (str) -> int
    year = re.search(r"\d{4}", text)
    return int(year.group()) if year else 0
[ "def parse_year(html_text: str) -> int:\n # parse HTML for year\n m = re.search(YEAR_CCLI_REGEX, html_text, re.M)\n if m is not None:\n match_year = re.search(GET_YEAR_REGEX, m.group(0), re.M)\n if match_year is not None: # year found\n return int(match_yea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to get a list of genres from the soup
def get_genres(soup):
    # type: (BeautifulSoup) -> list
    genre = soup.find("h4", string="Genre:")
    if not genre:
        return []
    genres = genre.find_next("p").find_all("a")
    if len(genres):
        return [genre.text for genre in genres]
    return []
[ "def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n if genres:\r\n for genre in genres:\r\n try:\r\n g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to extract the image url from the given text. The url is tested and cached if working (quite a few return 404). If the response is not 200 and a catalogue_number is supplied, a low-res thumbnail is used and cached as the response
def get_image_url(text, catalogue_number): # type: (str, int) -> Optional[str] image = re.search(r"image: \"(http.*)\"", text) url = image.group(1) if image else None thumb = EAFA_LOW_RES_THUMB_TEMPLATE.format(catalogue_number) if not url and catalogue_number: return thumb with Cache() a...
[ "def get_image_url(text):\n # type: (str) -> str\n img = re.search(r\"https[^)]*\", text)\n return clean_uri(img.group())", "def get_pic_content(url, header, black_type, logger):\n session = requests.session()\n max_cnt = 5\n while max_cnt:\n max_cnt -= 1\n m_ip, _, m_proxy = get_p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to extract the form state and validation data
def get_form_data(data): # type: (BeautifulSoup) -> Optional[dict] validation = data.find("input", {"id": "__EVENTVALIDATION"}) if not validation: logger.debug("get_form_data error no __EVENTVALIDATION") return return { "state": data.find("input", {"id": "__VIEWSTATE"}).get("valu...
[ "def handle(self):\n validations = {}\n\n for name in self.inputs:\n input_tag = self.soup.find(\n 'input', {'name': name}\n )\n\n if input_tag:\n parent = input_tag.find_parent(\n 'div', 'freebirdFormviewerViewNumberedI...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets cached or live HTML from the url with POST data
def post_html(url, data): # type: (str, dict) -> Optional[BeautifulSoup] headers = { "Accept": "text/html", "Accept-encoding": "gzip", "User-agent": "Mozilla/1.0 (X 0; rv:0.1) Gecko" } with Cache() as c: cached = c.get(url) if cached: if cached["fresh"...
[ "def get_html_from_url(url, params_d, expire_in_days=365): #Added params_d\n # check in cache\n html = get_from_cache(url, params_d)\n # print(html)\n if html is not None:\n if DEBUG:\n print('Loading from cache: {0}'.format(url))\n else:\n if DEBUG:\n print('Fetchi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract a substring from full_str. Extract the substring that starts with start_str and ends with end_str. If end_str is None, or if it is not found, then extract until the end of the string. The extracted string will include start_str, but will not include end_str. If start_str is not found, return default (which defau...
def extract_from_string(full_str, start_str, end_str=None, default=''): idx = full_str.find(start_str) if idx < 0: return default if end_str is not None: length = full_str[idx + len(start_str):].find(end_str) if length >= 0: return full_str[idx:idx + len(start_str) + leng...
[ "def substring(s, start, end):\n startless = start is None\n endless = end is None\n if startless and endless:\n return s\n if endless:\n return s[start:]\n if startless:\n return s[:end]\n return s[start:end]", "def extract(text, start, end, end_is_optional=True, inclusive=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Partition a string into pieces.
>>> list(partition_string("abcdef", []))
['abcdef']
>>> list(partition_string("abcdef", ["g"]))
['abcdef', '']
>>> list(partition_string("abcdef", ["c"]))
['ab', 'cdef']
>>> list(partition_string("abcdef", ["c", "e"]))
['ab', 'cd', 'ef']
>>> list(partition_string("abcdef", ["bc", "e"]))
[...
def partition_string(full_str, markers): idx = 0 last_marker = '' rest = full_str for marker in markers: next_idx = rest.find(marker) if next_idx < 0: next_idx = len(rest) marker = '' yield last_marker + rest[idx:next_idx] last_marker = marker ...
[ "def partition(stringstring, removeSet=\"string\", render=bool, name=\"string\", addSet=\"string\"):\n pass", "def split_input(string, chunk_size):\n num_chunks = len(string)/chunk_size\n if (len(string) % chunk_size != 0):\n num_chunks += 1\n output = []\n for i in range(0, num_chunks):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows a histogram plot of loaded features
def histogram(self):
    self.X.hist()
    plt.show()
[ "def plot_histogram_features(data, feature):\n print \"Plotting histogram for: '{}'.\".format(feature)\n if len(data) > 0 and feature in ['hashtags_most_frequent_jaccard',\n 'hashtags_most_frequent_similarity',\n 'hashtags_most_popular_ja...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The flat_signal_sequence function computes the median counts per amp as a function of frame sequence number.
def flat_signal_sequence(flat_files, bias_frame=None, dark_frame=None, mask_files=(), mondiode_func=mondiode_value, verbose=True): flux_dict = dict() mjd_dict = dict() adus = None for item in flat_files: if verbose: print(item) ...
[ "def median(signals, win_length):\r\n return nanfilter(signals, win_length, nanmedian)", "def hamming_windowed_ramp_filter(ffts):\n ramp = np.abs(np.fft.fftfreq(int(ffts.shape[1])))\n ramp[1:] = ramp[1:] * (0.54 + 0.46 * np.cos(ramp[1:]))\n return ffts * ramp", "def median_stack(frames):\n\treturn n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get handshake protocol enum for label.
def get(cls, label: Union[str, "HSProto"]) -> "HSProto": if isinstance(label, str): for hsp in HSProto: if ( DIDCommPrefix.unqualify(label) == hsp.name or sub("[^a-zA-Z0-9]+", "", label.lower()) in hsp.aka ): ...
[ "def _read_protos(self, size: int) -> 'Enum_LinkType':\n _byte = self._read_unpack(4, lilendian=True)\n _prot = Enum_LinkType.get(_byte)\n return _prot", "def getProtocol(self):\n # type: () -> str", "def loadbalancer_protocol(self) -> str:\n return pulumi.get(self, \"loadbala...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an aries message to an attachment decorator.
def wrap_message(cls, message: dict) -> AttachDecorator: return AttachDecorator.data_json(mapping=message, ident="request-0")
[ "def _transform_attachments(self):\n # Do nothing if message has no attachment header\n if 'attachment' not in self._message:\n return\n\n # Make sure the message is multipart. We need a multipart message in\n # order to add an attachment.\n self._make_message_multipar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a URLencoded invitation into an `InvitationMessage` instance.
def from_url(cls, url: str) -> "InvitationMessage":
    parts = urlparse(url)
    query = parse_qs(parts.query)
    if "oob" in query:
        oob = b64_to_bytes(query["oob"][0], urlsafe=True)
        return cls.from_json(oob)
    return None
[ "def parse_email(message):\n\n pass", "def parse_email(msg):\n # Process Art-Battle messages:\n if msg.subject.find(u'У вас новое письмо') != -1:\n m = re.match(u'.*Вам пришло новое письмо от пользователя <a href=\"http://tabun\\\\.everypony\\\\.ru/profile/(?P<user>.+?)/\".*'+\n u'Тема...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
I'm a lover, not a warrior
def become_warrior(self):
    self.isalover = False
    self.hungry += 110
    self.wanderlust = 0
[ "def mood():", "def victim_clue():\n if search_type == Murder.WEAPON:\n _ = \"\" if victim_mc else \"nt\"\n print(f\"\\t{guess_weapon} was{_} used on {guess_victim}\")\n else: # elif search_type == Murder.ROOM\n _ = \"died\" if victim_mc else \"d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Chooses the closest plant
def pickplant(self):
    for tree in self.plants:
        if (self.gimme_distance((tree[0], tree[1])) < self.gimme_distance(self.myplant[:2])):
            self.myplant = (tree[0], tree[1], tree[2])
[ "def choose_food(self, food):\n # find the closest food item\n dmin = 10**15\n for f in food:\n d = distance(self.position, f.position)\n if d < dmin:\n self.target = f\n dmin = d", "def get_closest_food(self):\n food_dict = self.get_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retry a specific job (reset its status to todo).
def do_retry(self, arg: str) -> None:
    job_id = int(arg)
    self.job_manager.retry_job_by_id(  # type: ignore
        job_id=job_id, retry_at=utils.utcnow().replace(microsecond=0)
    )
    (job,) = self.job_manager.list_jobs(id=job_id)  # type: ignore
    print_job(job)
[ "def retry(job, force=False, only_failed=True):\n\n if job.attempt >= max_attempts() and not force:\n log.info('Permanently failed job %s (after %d attempts)', job.id_, job.attempt)\n return\n\n if job.state in ['cancelled', 'complete']:\n if only_failed:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancel a specific job (set its status to failed).
def do_cancel(self, arg: str) -> None:
    job_id = int(arg)
    self.job_manager.finish_job_by_id(  # type: ignore
        job_id=job_id, status=jobs.Status.FAILED, delete_job=False
    )
    (job,) = self.job_manager.list_jobs(id=job_id)  # type: ignore
    print_job(job)
[ "def cancel(self):\n if self._jobid == -1:\n return\n\n os_ext.run_command('scancel %s' % self._jobid,\n check=True, timeout=settings.job_submit_timeout)\n self._is_cancelling = True\n self.wait()", "def cancel_job(job: Job) -> Job:\n if not JobS...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a person from a person_id.
def get_person_from_id(self, person_id):
    assert person_id in self.ids_persons, "Unknown person id: {}".format(person_id)
    return self.ids_persons[person_id]
[ "def get_person_info(self, id):\n info = json.loads(self.rest.GET('person/%s' % id))\n return Person(info)", "def get_person_bio_with_id(person_id):\r\n\r\n sort_by = \"popularity.desc\"\r\n\r\n url = f\"https://api.themoviedb.org/3/person/{person_id}?language=en-US&api_key={TMDB_API_KEY}\"\r\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a person ID from a person.
def get_id_from_person(self, person): if person == "Radu Goldiş": person = person.replace(S_CEDILLA, S_COMMA).replace(S_CEDILLA.upper(), S_COMMA.upper()) # hardcoded fixing assert person in self.persons_ids, "Unknown person: {}".format(person) # NO need to save and restore original ...
[ "def get_person_from_id(self, person_id):\n assert person_id in self.ids_persons, \"Unknown person id: {}\".format(person_id)\n return self.ids_persons[person_id]", "def return_personid(self):\n return self.person.userid", "def id(self):\n return self.__person_id", "def id(self) ->...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a profession from a profession ID.
def get_prof_from_id(self, prof_id):
    assert prof_id in self.ids_professions, "Unknown profession id: {}".format(prof_id)
    return self.ids_professions[prof_id]
[ "def get_id_from_prof(self, prof):\n assert prof in self.professions_ids, \"Unknown profession: {}\".format(prof)\n return self.professions_ids[prof]", "def get_person_from_id(self, person_id):\n assert person_id in self.ids_persons, \"Unknown person id: {}\".format(person_id)\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a profession ID from a profession.
def get_id_from_prof(self, prof):
    assert prof in self.professions_ids, "Unknown profession: {}".format(prof)
    return self.professions_ids[prof]
[ "def get_prof_from_id(self, prof_id):\n assert prof_id in self.ids_professions, \"Unknown profession id: {}\".format(prof_id)\n return self.ids_professions[prof_id]", "def generate_person_id(self):\n if self.occupation is 'Fellow':\n Person.person_id += 1\n fellow_id = '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a nationality from a nationality ID.
def get_nation_from_id(self, nation_id):
    assert nation_id in self.ids_nationalities, "Unknown nationality id: {}".format(nation_id)
    return self.ids_nationalities[nation_id]
[ "def get_nationality(code, locale=\"en\"):\n global data\n if locale not in data:\n load(locale)\n return data[locale][\"nationalities\"][code.upper()]", "def get_nation_from_country(self, country):\n assert country in self.countries_nationalities, \"Unknown country: {}\".format(country)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a nationality ID from a nationality.
def get_id_from_nation(self, nation):
    assert nation in self.nationalities_ids, "Unknown nationality: {}".format(nation)
    return self.nationalities_ids[nation]
[ "def get_nation_from_id(self, nation_id):\n assert nation_id in self.ids_nationalities, \"Unknown nationality id: {}\".format(nation_id)\n return self.ids_nationalities[nation_id]", "def get_nationality(code, locale=\"en\"):\n global data\n if locale not in data:\n load(locale)\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a nationality from a country.
def get_nation_from_country(self, country):
    assert country in self.countries_nationalities, "Unknown country: {}".format(country)
    return self.countries_nationalities[country]
[ "def get_country_from_nation(self, nation):\n assert nation in self.nationalities_countries, \"Unknown nationality: {}\".format(nation)\n return self.nationalities_countries[nation]", "def get_nationality(code, locale=\"en\"):\n global data\n if locale not in data:\n load(locale)\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a country from a nationality.
def get_country_from_nation(self, nation):
    assert nation in self.nationalities_countries, "Unknown nationality: {}".format(nation)
    return self.nationalities_countries[nation]
[ "def get_nation_from_country(self, country):\n assert country in self.countries_nationalities, \"Unknown country: {}\".format(country)\n return self.countries_nationalities[country]", "def countryname(cc):\n for corporation in corporations:\n if cc in corporation:\n return corpo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given a list of parsers (some of which may be None), apply the appropriate one to each element of the input_row
def parse_row(input_row, parsers):
    # apply each parser (a callable) to its value; pass the value through when the parser is None
    return [parser(value) if parser is not None else value
            for value, parser in zip(input_row, parsers)]
[ "def multiple(single_parser: Callable, separator: str=',', at_least_one=False) -> Callable:\n def handler(inp: Optional[str]) -> Optional[List]: # returns an entity\n if inp is None:\n if at_least_one:\n raise ValueError('Expected at least one element for multiple parsing')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
wrap a reader to apply the parsers to each of its rows
def parse_rows_with(reader, parsers):
    for row in reader:
        yield parse_row(row, parsers)
[ "def run(cls, row, reader):\n\n cls._parse_keys(row, reader.line_num)\n cls._parse_relationships(row, reader.line_num)", "def rows(self):\n def parse_result_row(row):\n return row.split(\"\\t\")\n\n for row in self.results.data:\n yield parse_result_row(row)", "def update_from_read...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
wraps f to return None if f raises an exception; assumes f takes only one input
def try_or_none(f):
    def f_or_none(x):
        try:
            return f(x)
        except:
            return None
    return f_or_none
[ "def try_f(f, data, default=None):\r\n try:\r\n return f(data)\r\n except Exception, e:\r\n return default", "def catch_none(n_return=1):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n if args[0] is not None:\n return fu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get or create author record, update if changed
def get_or_update_author(author_id, name, description):
    item, created = author.get_or_create(id=author_id)
    item.name = name
    item.description = description
    item.save()
    return item
[ "def get_or_create_author(self):\n if not self.validate():\n return None\n\n return Author.get_or_create(\n name = self.name.data,\n author_type = AuthorType.query.get(self.author_type_id.data),\n gender = Gender.query.get(self.person...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a corresponding Dockerfile for each version of specified runtime
def generate_runtime_dockerfile(self): for version in self.versions: with open(self.dockerfiles+'/{}_{}.d'.format(self.runtime, version), 'w') as dockerfile: # Here replace by flavour image or version "example php:version" if version == 'generic': ...
[ "def generate_runtime_container(self):\n for version in self.versions:\n self.display('docker build -f {}/dockerfiles/{}_{}.d -t {} {}'.format(\n self.tmp, self.runtime, version, 'continuous:{}_{}'.format(self.runtime, version), self.tmp), \"yellow\")\n self.exec('docker ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a container for each version of specified runtime
def generate_runtime_container(self): for version in self.versions: self.display('docker build -f {}/dockerfiles/{}_{}.d -t {} {}'.format( self.tmp, self.runtime, version, 'continuous:{}_{}'.format(self.runtime, version), self.tmp), "yellow") self.exec('docker build -f {}...
[ "def generate_runtime_dockerfile(self):\n for version in self.versions:\n with open(self.dockerfiles+'/{}_{}.d'.format(self.runtime, version), 'w') as dockerfile:\n # Here replace by flavour image or version \"example php:version\"\n if version == 'generic':\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create bats file to run bats tests in the container related to each version of specified runtime
def generate_bats_file(self): for version in self.versions: with open(self.dockerfiles+'/{}_{}.bats'.format(self.runtime, version), 'w') as batsfile: batsfile.write('#!/usr/bin/env bats\n') for component in self.template['components']: bats_path = ...
[ "def generate_and_run_bats_container(self):\n for version in self.versions:\n self.display(\n 'Preparing bats container for version : '+version, \"yellow\")\n self.exec(\n 'docker build -f {}/dockerfiles/{}_{}.bats.d -t bats_tests {}'.format(self.tmp, self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the superior layer container to run bats tests in the container related to each version of specified runtime
def generate_and_run_bats_container(self): for version in self.versions: self.display( 'Preparing bats container for version : '+version, "yellow") self.exec( 'docker build -f {}/dockerfiles/{}_{}.bats.d -t bats_tests {}'.format(self.tmp, self.runtime, ver...
[ "def generate_runtime_dockerfile(self):\n for version in self.versions:\n with open(self.dockerfiles+'/{}_{}.d'.format(self.runtime, version), 'w') as dockerfile:\n # Here replace by flavour image or version \"example php:version\"\n if version == 'generic':\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert string column into datetime column
def convert_str_to_datetime(df, *, column=None, format=None):
    df[column] = pd.to_datetime(df[column], format=format)
    return df
[ "def col_to_datetime(df: DataFrame, column: str) -> DataFrame:\n df = df.withColumn(\n column, to_timestamp(col(column), \"EEE MMM dd HH:mm:ss zzz yyyy\")\n )\n return df", "def normalize_time_to_datetime(self, format=\"%Y-%m-%d %H:%M:%S\", col=None):\n if col is None:\n col = se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert datetime column into string column
def convert_datetime_to_str(df, *, column=None, format=None):
    df[column] = df[column].dt.strftime(format)
    return df
[ "def _format_datetime_col_to_str(df, col):\n df[col] = df[col].dt.strftime('%Y-%m-%d')\n return df", "def _convert_timestamp_to_string(s: pd.Series) -> pd.Series:\n if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(s):\n return s.dt.strftime(\"%Y-%m-%d %H:%M:%S.%f %z\")\n else:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Metropolis algorithm for the Ising model
def IsingMetropolis(N,J,B,kT,M,printinterval=1): random.seed(1) # start from "hot" state S=np.ones((N),'int') E=energy(S,J,B) sumS=0. sumE=0 expEkT=[exp((-4*J-2*B)/kT), 1., exp((-4*J+2*B)/kT), exp(( -2*B)/kT), 1., exp(( +2*B)...
[ "def model_SA():\n m = 7 # The parameter within the distribution of E\n n = 7 # The parameter within the distribution of I\n\n mean_exposed_days = 6\n mean_infectious_days = 20\n # The result is that the median time in I is ~15 days while the mean time is 20\n # Source: https://ourworldindata.or...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sample functions from a GP prior
def sample_gpp(model, x, n_samples):
    model.eval()
    likelihood.eval()
    # sample functions
    model.train_inputs = None
    preds = model(x)
    y = torch.stack([preds.sample() for i in range(n_samples)])
    return y
[ "def sample_function(params: dict):\n # print(\"Calling sampler with state\\n{}\".format(params['state']))\n # if params['perturbation']:\n # print(\"Calling sample_function with perturbation; variables \\\n # are sampled by random variables of the form: {}\".format([a.__name__ for a in para...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
samples functions from a bnn
def sample_bnn(model, x, n_samples, train=True):
    if train == False:
        model.eval()
        with torch.no_grad():
            # sample functions
            y = torch.stack([model(x) for i in range(n_samples)])
        return y
    else:
        return torch.stack([model(x) for i in range(n_samples)])
[ "def sample_gpp(model ,x, n_samples):\n model.eval()\n likelihood.eval()\n # sample functions\n model.train_inputs = None\n preds = model(x)\n y = torch.stack([preds.sample() for i in range(n_samples)])\n return y", "def gibbs_sample(self, k): # iterates for k step...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method to recursively verify that the tree has the Binary Search Tree property (the key of any node in the left subtree of n is less than n's key, and the key of any node in the right subtree of n is greater than or equal to n's key)
def recursive_bst(self, g, n): if n.left != g.nil: max_left = g.Maximum(n.left).key self.assertLess(max_left, n.key, f"BST Property Violated : A node with key {max_left} is in the left subtree of a node with key {n.key}") self.recursive_bst(g, n.left) if n.right != g.nil: min_right...
[ "def is_bst_valid_recursive(root,lower_bound=-float('inf'),upper_bound=float('inf')): \n\tif not root:\n\t\treturn True\n\n\tif root.data < lower_bound or root.data > upper_bound:\n\t\treturn False\n\n\treturn is_bst_valid_recursive(root.left_child,lower_bound,root.data) \\\n\t\t\tand is_bst_valid_recursive(root.ri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a Delete on an empty tree does nothing
def test_delete_empty(self): g = Red_Black_Tree() self.assertTrue(g.isEmpty(), "Calling isEmpty on empty tree returns False") # In an empty tree, the root node is the leaf node self.assertEqual(g.root, g.nil, "Root node in empty tree is not equal to leaf") # In an empty tree, the...
[ "def test_delete_from_empty_tree(bst_empty):\n assert bst_empty.delete(1) is None", "def test_delete_when_not_in_tree(five_balanced):\n assert five_balanced.delete(100) is None", "def test_delete_root_only_node(bst_empty):\n bst_empty.insert(1)\n bst_empty.delete(1)\n assert bst_empty.root is Non...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that inserting a node, then deleting it leaves the tree empty
def test_insert_delete(self): g = Red_Black_Tree() self.assertTrue(g.isEmpty(), "Calling isEmpty on empty tree returns False") g.Insert(5) # arbitrary number self.assertFalse(g.isEmpty(), "isEmpty returns True after inserting a node") g.Delete(5) self.assertTrue(g.i...
[ "def test_insert_delete2(self):\n \n g = Red_Black_Tree()\n \n self.assertTrue(g.isEmpty(), \"Calling isEmpty on empty tree returns False\")\n \n g.Insert(5) # arbitrary number\n \n self.assertFalse(g.isEmpty(), \"isEmpty returns True after inserting a node\")\n \n g.Delete(4)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that inserting a node, then deleting a different node leaves the node inserted
def test_insert_delete2(self): g = Red_Black_Tree() self.assertTrue(g.isEmpty(), "Calling isEmpty on empty tree returns False") g.Insert(5) # arbitrary number self.assertFalse(g.isEmpty(), "isEmpty returns True after inserting a node") g.Delete(4) self.assertFalse(g...
[ "def test_insert_remove_insert_remove(tree):\n for i in range(5):\n tree.insert('hi')\n assert tree.contains('hi')\n tree.remove('hi')\n assert not tree.contains('hi')", "def test_insert_delete(self):\n \n g = Red_Black_Tree()\n \n self.assertTrue(g.isEmpty(), \"Calling ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the predecessor function
def test_predecessor(self): g = Red_Black_Tree() max_key = 10 min_key = 1 for i in range(min_key, max_key+1): g.Insert(i) for i in range(min_key+1, max_key+1): search_result = g.Search(g.root, i) predecessor = g.Predecessor(search_result).key self.assertEqual(p...
[ "def test_prev(dll):\n assert dll.head.next.prev is dll.head", "def get_predecessor():\n return myglobal.node.get_predecessor(), 200", "def predecessor(self, root):\r\n root = root.left\r\n while root.right:\r\n root = root.right\r\n return root.val", "def test_pre_order_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that Delete_Fixup resolves Case 2 of CLRS when x is its parent's right child and w is BLACK with two BLACK children
def test_case_2_2(self): g = Red_Black_Tree() n2 = Red_Black_Node(2) n1 = Red_Black_Node(1) n4 = Red_Black_Node(4) n3 = Red_Black_Node(3) n2.color = BLACK n1.color = BLACK n4.color = RED n3.color = BLACK n2.p = g.nil n2.left = n1 n1.p = n2 n2.right...
[ "def delete_fixup(self, deleted_node: RedBlackTree.Node):\r\n while self.root != deleted_node and deleted_node.color == \"BLACK\":\r\n if deleted_node == deleted_node.parent.left:\r\n if deleted_node.parent.right.color == \"RED\":\r\n '''case 1: brother node is RE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts and deletes 100 nodes, verifying that all 5 properties of Red Black Trees are maintained.
def test_100(self): g = Red_Black_Tree() for i in range(100): self.test_bst(g) self.test_properties(g) g.Insert(i) # Verify that all nodes were inserted self.assertEqual(g.Size(), 100) for i in range(100): self.test_bst(g) self.test_properties...
[ "def test_100_random(self):\n \n g = Red_Black_Tree()\n \n insert_order = np.random.permutation(100)\n delete_order = np.random.permutation(100)\n \n for i in insert_order:\n self.test_bst(g)\n self.test_properties(g)\n \n g.Insert(i)\n \n # Verify that all nodes were ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts and deletes 100 nodes in random order, verifying that all 5 properties of Red Black Trees are maintained
def test_100_random(self): g = Red_Black_Tree() insert_order = np.random.permutation(100) delete_order = np.random.permutation(100) for i in insert_order: self.test_bst(g) self.test_properties(g) g.Insert(i) # Verify that all nodes were inserted self....
[ "def test_100(self):\n \n g = Red_Black_Tree()\n \n for i in range(100):\n self.test_bst(g)\n self.test_properties(g)\n \n g.Insert(i)\n \n # Verify that all nodes were inserted\n self.assertEqual(g.Size(), 100)\n \n for i in range(100):\n self.test_bst(g)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the output type is a single datatype.
def validate_output_type(output_type: Any) -> None:
    if isinstance(output_type, list):
        raise com.IbisTypeError("The output type of a UDF must be a single datatype.")
[ "def IsValueType(self) -> bool:", "def one_type_check(self):\n if self.type2 == '':\n print('The pokemon only has one type')\n else:\n print('The pokemon has type ' + self.type1 + ' and ' + self.type2)", "def validate_for_onnx_model(\n input_dtype, output0_dtype, output1_d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Define a function to fill the queue with the orders of the day.
def fill_queue(orders_of_the_day, queue_of_the_day):
    for order in orders_of_the_day:
        queue_of_the_day.enqueue(order)
    return queue_of_the_day
[ "def fill_queue(queue_fill, any_list):\n for elem in any_list:\n queue_fill.put(elem)", "def create_queue(self, queue):", "def calender_queue(self, month, year):\n day = ['S', ' M', ' T', ' W', ' Th', 'F', ' S']\n\n days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n value...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a grade of a discipline and student
def readGrade():
    try:
        dID = int(input("\n \t Discipline ID: "))
        sID = int(input("\n \t Student ID: "))
        grade = int(input("\n \t Grade: "))
        return Grade(dID, sID, grade)
    except ValueError:
        return Grade(0, 0, 0)
[ "def readgra(self) -> None:\n path :str = os.path.join(self.directory_path,\"grades.txt\")\n for stucwid, coursename, grade, instcwid in file_reader(path, 4, sep='\\t',header=True): \n if stucwid not in self.studict.keys():\n print(f\" There is no Student with CWID: {stucwid...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal decorator to cache the accounts keystore and prevent concurrent accesses with locks.
def _cache_and_lock_accounts_keystore(fn): cached_accounts = {} last_mod = None def wrap(*args): nonlocal last_mod _keystore_cache_lock.acquire() files_in_dir = str(os.listdir(_account_keystore_path)) dir_mod_time = str(os.path.getmtime(_account_keystore_path)) curr_...
[ "def gen_credentials_cache(keytab, principal):\n cachefile=tempfile.mkstemp()\n subprocess.check_call(['kinit', '-k', '-t', keytab, '-c', cachefile[1], principal])\n return cachefile", "def _lock_and_transform(func):\n\n @wraps(func)\n def wrapper(self, key):\n with self._lock:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal function to set the account keystore path according to the binary.
def _set_account_keystore_path():
    global _account_keystore_path
    response = single_call("hmy keys location").strip()
    if not os.path.exists(response):
        os.mkdir(response)
    _account_keystore_path = response
[ "def set_key_root(self, path):\n self._config['DEFAULT']['key_dir'] = os.path.expanduser(path)", "def set_auth_store_path(self, value:str):\n self._prefs[PREFERENCE_KEY.AUTH_STORE.value] = value\n self._write_prefs()", "def keystore(self, conf: dict):\n # check if same keystore file ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform the crawl and set appropriate data in redis for future use.
def crawl_wrapper(self, *args, **kwargs):
    results = self.crawl(*args, **kwargs)
    for redis_key, price in results.items():
        self.redis.set(redis_key, price)
[ "def crawl(self):\n\n #Iteration tracker for checking when to regenerate driver\n iter_ = 0 \n\n #Set DB scan start\n now = datetime.now()\n self.db.set_start(now)\n failures = []\n status = {}\n with open(os.getcwd() + '/scan-status.txt', 'r') as f:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform the enter-phone-number action
def input_the_number(driver): country_select_button = Select( driver.find_element( By.CLASS_NAME, "react-phone-number-input__country-select")) country_select_button.select_by_visible_text('Rwanda') input_phone_element = driver.find_element( By.CLASS_NAME, "react-phone...
[ "def validate_phone_number(driver):\n code = input(\"Enter The code you got on your phone : \")\n code_input_element = driver.find_element(By.NAME, \"OTP\")\n code_validate_button = driver.find_element(\n By.CLASS_NAME, \"ms-Button-label\")\n code_input_element.send_keys(str(code))\n code_vali...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
validate the OTP code sent to the phone number
def validate_phone_number(driver): code = input("Enter The code you got on your phone : ") code_input_element = driver.find_element(By.NAME, "OTP") code_validate_button = driver.find_element( By.CLASS_NAME, "ms-Button-label") code_input_element.send_keys(str(code)) code_validate_button.click...
[ "def validate(self, data):\n phone_number = phonenumbers.parse(\n str(data.get('phone_number')), None)\n authy_api = AuthyApiClient(settings.ACCOUNT_SECURITY_API_KEY)\n authy_phone = authy_api.phones.verification_start(\n phone_number.national_number,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
used to parametrize test cases on dest port type
def dest_port_type(self, request): return request.param
[ "def test_create_port(self):\n port = create_ofport({'device': 'a'})\n port_dict = {'some-port-attributes-go-here': 42,\n 'firewall_group': 1}\n self.map.create_port(port, port_dict)\n self._check_port('a', 1)\n self._check_fwg(1, ['a'])", "def test_validatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
setup the acl table
def setup_acl_table(self, duthost, setup_info, setup_mirror_session): pass
[ "def setup_acl_table(self, setup_info, setup_mirror_session, config_method):\n\n duthost_set = BaseEverflowTest.get_duthost_set(setup_info)\n if not setup_info[self.acl_stage()][self.mirror_type()]:\n pytest.skip(\"{} ACL w/ {} Mirroring not supported, skipping\"\n .f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
used to parametrize test cases on mirror type
def mirror_type(self): pass
[ "def test_rank_type_detection(rank, actual_type, scalar, test_type):\n expected = scalar if test_type == \"scalar\" else (actual_type == test_type)\n function = getattr(qutip.dimensions, \"is_\" + test_type)\n assert function(rank) == expected", "def test_match_type_function(value, expected_type, does_ma...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the acl stage
def acl_stage(self): pass
[ "def get_stage(cls, name):\n return cls.pipeline_stages[name][0]", "def _find_stage(self):\n try:\n return self.client.get_stage(\n restApiId=self.module.params.get('rest_api_id'),\n stageName=self.module.params.get('name')\n )\n except ClientError as e:\n if 'NotFoun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If a run exists in the new df, take that, else take the data in the old df
def merge_dataset(old_df, new_df): old_df = old_df.drop(columns = 'Unnamed: 0') new_df = new_df.drop(columns = ['Unnamed: 0', 'Unnamed: 0.1']) new_df.rename(columns={'BioSample': 'biosample'}, inplace=True) old_runs = old_df['run'] new_runs = new_df['run'] # remove any rows from the old df if...
[ "def filter_by_run_number(joined_df, run_number):\r\n\r\n joined_df = joined_df[joined_df['Run No.'] == str(run_number)]\r\n return joined_df", "def update_jobs_dataframe(job_df):\n\n status_df = jobs_status()\n for i in range(len(job_df)):\n if len(status_df.loc[status_df['job_id'] == job_df.i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in the old df and removes any MICs found in conflict
def remove_conflicting(old_df, diff_df):
    diffs = zip(list(diff_df['id']), list(diff_df['antimicrobial']))
    # for every difference, set that cell to blank
    for run, mic in diffs:
        old_df.loc[old_df['run'] == run, 'MIC_' + mic] = ''
    return old_df
[ "def remove_duplicates(self, df: pd.DataFrame) -> pd.DataFrame:\n ##### YOUR CODE GOES HERE #####\n pass", "def clean(self, df):\n df = df.drop(self.__preprocessor.get_non_redundant_entity_attributes(), axis=1)\n df = df.drop(self.__preprocessor.get_redundant_entity_attributes(), axis=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recreate the (compressed) image from the code book & labels
def recreate_image(codebook, labels, w, h):
    d = codebook.shape[1]
    image = np.zeros((w, h, d))
    label_idx = 0
    for i in range(w):
        for j in range(h):
            image[i][j] = codebook[labels[label_idx]]
            label_idx += 1
    return image
[ "def decode(imprefix_color,imprefix,start,threshold_color,threshold):\n nbits = 10\n \n imgs = list()\n imgs_inv = list()\n print('loading',end='')\n for i in range(start,start+2*nbits,2):\n fname0 = '%s%2.2d.png' % (imprefix,i)\n fname1 = '%s%2.2d.png' % (imprefix,i+1)\n prin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if the provided Emp ID exists
def exists(id): return (id in emp_map)
[ "def is_this_record_exist(table, id_):\n if id_[0] not in [record[0] for record in table]:\n\n ui.print_error_message(\"Record with this ID not found\")\n return False\n return True", "def check_emp():\n emp_dict = request.get_json()\n db = get_db()\n cur = db.execute(\"select * from ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the specified waveform as a CSV text file, each line a `t,V` entry.
def writeWaveformTextFile(t, V, path):
    with open(path, 'w') as f:
        for a, b in zip(t, V):
            # write one "t,V" entry per line
            f.write('{t:g},{V:g}\n'.format(t=a, V=b))
        # for
    # with
[ "def write_csv(data, filepath):\n pass #TODO implement", "def to_csv(self, statename, controlname,\r\n filename=\"Optimisation_output.csv\", delimiter=\",\"):\r\n\r\n header = \"time, \"\r\n result = np.zeros(0)\r\n result = np.hstack((result, self.time_update()))\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads waveform data from a binary file with specified version.
def readWaveformBinaryFile(path, version = None): with open(path, 'rb') as inputFile: fileVersion = ord(inputFile.read(1)) if version is None: version = fileVersion elif version != fileVersion: raise RuntimeError( "File '{}' is version {} (attempted read as version {})".format( ...
[ "def parse_binary(raw_data):\n\n ins = 4 # Size of int stored in the raw binary string\n cur = 0 # Cursor walking in the string and getting data\n cur += 12 # Skipping the global raw binary string header\n whs = unpack(\"i\", raw_data[cur:cur + ins])[0] # Storing size of the waveform header\n c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes the specified data into a binary file with specified version.
def writeWaveformBinaryFile(t, V, path, version = None): # here we keep it very simple... if version is None: version = DefaultBinaryVersion with open(path, 'wb') as outputFile: outputFile.write(chr(version)) if version == 1: timeStruct = BinaryFileVersion1.TimeDataStruct outputFile.writ...
[ "def write_to_file(self, data):", "def write_file(self, data) -> None:\n pass", "def write_binary_file(output_path, data):\n with open(output_path, \"wb\") as f:\n f.write(data)", "def write(self, version):\n with self._path.open(mode='w') as fh:\n fh.write(str(version))", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes waveform data into a file. The output format of the file can be specified, or the default binary format (`DefaultBinaryVersion`) will be used.
def writeWaveformFile(t, V, path, version = None):
    if version == 0:
        return writeWaveformTextFile(t, V, path)
    else:
        return writeWaveformBinaryFile(t, V, path, version=version)
[ "def writeWaveformBinaryFile(t, V, path, version = None):\n \n # here we keep it very simple...\n \n if version is None: version = DefaultBinaryVersion\n with open(path, 'wb') as outputFile:\n outputFile.write(chr(version))\n if version == 1:\n timeStruct = BinaryFileVersion1.TimeDataStruct\n o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
E.g. for "EW06", chimney='EW06', col='EW', cryostat='E', TPC='W', row=6.
def __init__(self, chimney = None, col = None, cryostat = None, TPC = None, row = None ): if chimney is not None: assert col is None and cryostat is None and TPC is None and row is None col, row \ = ChimneyInfo.convertToStyleAndSplit(ChimneyInfo.GeographicStyle, chimney) chimney = No...
[ "def calc_c_o(row):\n yh = row['H'] / 1.00794\n yc = row['C12'] / 12. + row['C13'] / 13.\n yo = row['O16'] / 16. + row['O17'] / 17. + row['O18'] / 18.\n\n if row['CO'] > 1:\n excess = np.log10((yc / yh) - (yo / yh)) + 12.\n else:\n excess = np.log10((yo / yh) - (yc / yh)) + 12.\n\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the list of N expected waveform files at the specified channel index.
def allChannelSources(self, channelIndex = None, channel = None, N = 10): values = self.sourceInfo.copy() if channelIndex is not None: values.setChannelIndex(channelIndex) assert channel is None elif channel is not None: values.setChannel(channel) assert channelIndex is None valu...
[ "def get_samples(self,n):\n samples = []\n stride = self.n_total_frames//n\n for k in range(0,self.n_total_frames,stride):\n frame = self.get_frame(k)\n samples.append(frame)\n return samples", "def get_samples(self,n):\n samples = []\n stride = self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses `path` and returns a filled `WaveformSourceFilePath`.
def parseWaveformSource(path): sourceDir, triggerFileName = os.path.split(path) name, ext = os.path.splitext(triggerFileName) if ext.lower() != '.csv': print >>sys.stderr, "Warning: the file '%s' has not the name of a comma-separated values file (CSV)." % path tokens = name.split("_") sourceInfo = W...
[ "def infer_from_path(self, path: str) -> str:\n loaded, sr = librosa.load(path, sr=self.processor.sample_rate)\n spect = self.processor.parse_audio(sound=loaded, sample_rate=sr)\n spect = spect.view(1, 1, spect.size(0), spect.size(1))\n spect = spect.to(DEVICE)\n input_sizes = tor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the format converter and the parsed channel information in a tuple.
def formatMatcher(spec): for class_ in ChannelConversions.ValidFormats: if class_ is ChannelConversions.InvalidChannelConverter: continue try: channelInfo = class_.parse(spec) except ChannelConversions.ChannelFormatError: continue return class_, channelInfo else: return ChannelConversion...
[ "def get_chan_format(self, chan):\n ret_val = self._get_chan_format(chan)\n return ret_val", "def get_info(self):\r\n value = pointer(BASS_CHANNELINFO())\r\n bass_call(BASS_ChannelGetInfo, self.handle, value)\r\n return value[0]", "def _extract(self):\n def get_framerat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Peak finder with running window average. The peaks are found as extrema of the averages of `l` elements in a running window. If specified, a baseline is subtracted from all samples. V is required to have at least one element. Samples are assumed to be periodic.
def extractPeaks(t, V, baseline = 0.0, l = 1): assert l >= 1 assert len(V) >= l minSum = MinAccumulator() minPos = None maxSum = MaxAccumulator() maxPos = None iterV = iter(V) s = 0.0 for i, v in enumerate(iterV): if i >= l: break s += v last = 0 for i, x in enumerate(iterV, start=l...
[ "def peak_to_peak(alist):\n return max(alist) - min(alist)", "def peakdetect(x,minpeakh = 18,\n minpeakw = 6):\n signdx = sign(diff(x))\n posidx = where(diff(signdx)>0)[0]+1\n negidx = where(diff(signdx)<0)[0]+1\n np = len(posidx)\n # preallocation\n peakinfo = []\n \n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert boarding pass character sequence into a BoardingPass.
def scan_boarding_pass(boarding_pass: str) -> BoardingPass:
    row = convert_partition(boarding_pass[:7], lower="F")
    column = convert_partition(boarding_pass[7:], lower="L")
    seat_id = row * 8 + column
    return BoardingPass(row, column, seat_id)
[ "def convert_id(boarding_pass: str) -> int:\n mapper = {70: \"0\", 66: \"1\", 76: \"0\", 82: \"1\"} # maps ord to string\n bin_id = boarding_pass.translate(mapper)\n row = int(bin_id[:7], 2)\n seat = int(bin_id[7:], 2)\n return row * 8 + seat", "def passcode_generate():\n #pascode is mutate...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
expand a win32 error code into a human readable message
def _win32_strerror(err): # FormatMessage will allocate memory and assign it here buf = ctypes.c_char_p() FormatMessage( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_IGNORE_INSERTS, None, err, 0, buf, 0, ...
[ "def moveit_error_string(val):\n return MOVE_IT_ERROR_TO_STRING.get(val, 'UNKNOWN_ERROR_CODE')", "def _build_error_str(err_num):\n return \"[{estr} {enum}] {edesc}: {{arg}}\".format(\n estr=os.errno.errorcode[err_num],\n enum=err_num,\n edesc=os.strerror(err_num))", "def _display_stat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
read a line. Maintains its own buffer; callers of the transport should not mix calls to readBytes and readLine.
def readLine(self): if self.buf is None: self.buf = [] # Buffer may already have a line if we've received unilateral # response(s) from the server if len(self.buf) == 1 and b"\n" in self.buf[0]: (line, b) = self.buf[0].split(b"\n", 1) self.buf = [b] ...
[ "def readline(self):\n\n pos = self.readbuf.find('\\n')\n if pos >=0:\n line = self.readbuf[0: pos]\n # logging.info('stdout: {0}'.format(line))\n # self.parse_loss(line)\n self.readbuf = self.readbuf[(pos + 1):]\n return line\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Windows 7 and earlier does not support GetOverlappedResultEx. The alternative is to use GetOverlappedResult and wait for read or write operation to complete. This is done be using CreateEvent and WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx and GetOverlappedResult are all part of Windows API since WindowsX...
def _get_overlapped_result_ex_impl(pipe, olap, nbytes, millis, alertable): log("Preparing to wait for maximum %dms", millis) if millis != 0: waitReturnCode = WaitForSingleObjectEx(olap.hEvent, millis, alertable) if waitReturnCode == WAIT_OBJECT_0: # Event is signaled, overlapped IO o...
[ "def mqrt_MQGetOverlappedResult(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpOverlapped\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def advapi32_EventAccessQuery(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"Guid\", \"Buffer\", \"B...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve buffered log data. If remove is true, the data will be removed from the buffer. Otherwise it will be left in the buffer
def getLog(self, remove=True):
    res = self.logs
    if remove:
        self.logs = []
    return res
[ "def get_buffer(self, truncate=False):\n self.log_buffer.seek(0)\n output = self.log_buffer.read()\n if truncate:\n self.log_buffer.seek(0)\n self.log_buffer.truncate()\n return output", "def _Buffer(self):\n if self.read_buf:\n return self.read_buf\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve the data associated with a named subscription. If remove is True (the default), the subscription data is removed from the buffer. Otherwise the data is returned but left in the buffer. Returns None if there is no data associated with `name`. If root is not None, then only return the subscription data that matche...
def getSubscription(self, name, remove=True, root=None): if root is not None: root = os.path.normpath(os.path.normcase(root)) if root not in self.sub_by_root: return None if name not in self.sub_by_root[root]: return None sub = self...
[ "def findSubscription(name):\n subscriptions = opencue.wrappers.subscription.Subscription()\n return subscriptions.find(name)", "def query_subscription_by_name(subscription_name):\n logger.info(f'Attempting to fetch subscription by name: {subscription_name}')\n subscription_model = db.session....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a query to the watchman service and return the response. This call will block until the response is returned. If any unilateral responses are sent by the service in between the request/response, they will be buffered up in the client object and NOT returned via this method.
def query(self, *args): log("calling client.query") self._connect() try: self.sendConn.send(args) res = self.receive() while self.isUnilateralResponse(res): res = self.receive() return res except EnvironmentError as ee: ...
[ "def send(self):\n if self._validate_params_exist():\n payload = self._format_query()\n\n r = requests.post(Query.BASE_URL,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n r = r.text\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform a server capability check
def capabilityCheck(self, optional=None, required=None): res = self.query( "version", {"optional": optional or [], "required": required or []} ) if not self._hasprop(res, "capabilities"): # Server doesn't support capabilities, so we need to # synthesize the r...
[ "def _checkManageCapabilities(self, irc, msg, channel):\n if channel != 'global':\n capability = ircdb.makeChannelCapability(channel, 'op')\n else:\n capability = 'admin'\n if not ircdb.checkCapability(msg.prefix, capability):\n irc.errorNoCapability(capability,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts pynamodb model to its dict representation
def model2dict(self): def to_primitive(obj_list): data = [] for item in obj_list: if isinstance(item, str) or isinstance(item, int): data.append(item) else: data.append(item.as_dict()) return data ...
[ "def serialize(model):\r\n # first we get the names of all the columns on model\r\n columns = [c.key for c in class_mapper(model.__class__).columns]\r\n # then we return their values in a dict\r\n return dict((c, getattr(model, c)) for c in columns)", "def to_dict(instance):\n return model_to_dict(instance...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add_one is a function that prints out the sum of the two numbers a and d
def add_one(a, d): return " The sum of {} and {} is:".format(a, d)
[ "def add(a, b):\n print(str(a) + \" + \" + str(b) + \" = \" + str(a + b))", "def adding_one(integer_one):\n return integer_one + 1", "def adding_one(num):\n return num + 1", "def sum(a, b):\n return a + b", "def add(n1, n2):\n return n1 + n2", "def sum1(num1, num2):\n return num1+num...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function checks the types of the values of a and b; if they are int, float, str, or list, it does their summation. If the type is a dictionary, then it is handled in the elif branch
def funky(a, b): if isinstance(a, (int, float, str, list)) and isinstance(b, (int, float, str, list)): return a + b elif isinstance(a, dict) and isinstance(b, dict): sum_dict = dict(a.items() + b.items()) return sum_dict
[ "def add_up(first, second):\n if not first:\n return second\n elif not second:\n return first\n else:\n if isinstance(first, mydict) and isinstance(second, mydict):\n return first + second\n elif isinstance(first, mylist) and isinstance(second, mylist):\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given `origin`, the method returns the chunk list, generating it on first access.
def provide_chunks(self, origin, **kw): if isinstance(origin, TypeReference): key = origin.type.definer else: key = origin try: chunks = self.chunk_cache[key] except KeyError: # Notify user about cycle dependency and continue i...
[ "def get_buffers_from_origin(self, origin):\n g = origin.provider.paginate(origin.uri)\n iterations = max(1, origin.iterations)\n\n # Generator can raise StopIteration before iterations is reached.\n # We use a for loop instead of a comprehension expression to catch\n # gracefully...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
During coarse chunk sorting, header inclusions are moved to the top of "local". Header inclusions themselves are ordered by path.
def __lt__(self, other): if isinstance(other, HeaderInclusion): shdr = self.origin ohdr = other.origin sg = shdr.is_global og = ohdr.is_global if sg == og: return shdr.path < ohdr.path else: # If self `is_gl...
[ "def test_file_external_sort(self):\n self.file_sort_common(itemslimit=True)", "def collect_headers(self):\n headers = set()\n for obj in self.objects.itervalues():\n if 'coord' in obj:\n filename = obj['coord']['file']\n # check if it should be ignore...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the balence_log array for player one over the simulated number of hands.
def simulate_trial(num_hands): balence_log = [] # init cards used_cards = [] cards_showing = [] deck = [] for _ in range(8): for val in VALS: for suit in SUITS: deck.append(Card(suit, val)) random.shuffle(deck) blank_card = Card('Plastic', 'Blank') ...
[ "def simulate_trial(num_hands):\n balence_log = []\n used_cards = []\n cards_showing = []\n deck = build_deck()\n\n dealer = Dealer(deck, used_cards)\n\n player1 = SimplePlayer(deck, used_cards)\n # player1 = BasicStratPlayer(0, deck, used_cards, dealer)\n # player1 = HLPlayer(0, deck, used_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create an alarm policy.
def CreateAlarmPolicy(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreateAlarmPolicy", params, headers=headers) response = json.loads(body) model = models.CreateAlarmPolicyResponse() model._...
[ "def handle_create(self):\r\n asclient = self.stack.clients.auto_scale()\r\n args = self._get_args(self.properties)\r\n policy = asclient.add_policy(**args)\r\n resource_id = '%s:%s' % (self.properties[self.GROUP], policy.id)\r\n self.resource_id_set(resource_id)", "def Create(i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create a Prometheus alerting rule.
def CreateAlertRule(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreateAlertRule", params, headers=headers) response = json.loads(body) model = models.CreateAlertRuleResponse() model._deseri...
[ "def test_create_data_for_alert_rule():\n args = {\n 'kind': 'fusion',\n 'rule_name': 'test_fusion_rule',\n 'template_name': 'test_template',\n 'enabled': True,\n 'description': None\n }\n expected_data = {\n 'kind': 'Fusion',\n 'etag': None,\n 'prope...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create an exporter integration.
def CreateExporterIntegration(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreateExporterIntegration", params, headers=headers) response = json.loads(body) model = models.CreateExporterIntegrationRespon...
[ "def add_exporter(self, exptype, name = None, **kwargs):\n self.logger.debug('Adding exporter for {}'.format(exptype))\n if not name:\n name = exptype\n\n if name in self.exporters:\n raise ValueError('Exporter with name {!r} already exists!'.format(name))\n\n if no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create a Grafana integration configuration.
def CreateGrafanaIntegration(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreateGrafanaIntegration", params, headers=headers) response = json.loads(body) model = models.CreateGrafanaIntegrationResponse(...
[ "def createConfigurationFolder(portal):\n print '*** Criando pasta de configuracao...'\n configuration_folder = getOrCreateType(portal, portal, 'configuration', 'CmedConfiguration')\n configuration_folder.manage_permission('View', [MANAGER_ROLE, UEMRADMIN_ROLE, DOCTOR_ROLE, SECRETARY_ROLE], acquire = False...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create a Grafana notification channel.
def CreateGrafanaNotificationChannel(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreateGrafanaNotificationChannel", params, headers=headers) response = json.loads(body) model = models.CreateGrafanaNoti...
[ "def create_notification_channel(\n self,\n name,\n notification_channel,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None,\n ):\n if metadata is None:\n metadata = []\n metadata ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to add a policy group.
def CreatePolicyGroup(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreatePolicyGroup", params, headers=headers) response = json.loads(body) model = models.CreatePolicyGroupResponse() model._...
[ "def add_policy_group(self,group_name,policy_list=\"*\",limit_bps=\"4000000000\",limit_pps=\"2700000\"):\n # menu\n self.left_menu(u\"ポリシーグループ管理\")\n\n self._selenium.click_button(u\"//input[@value='ポリシーグループの追加']\")\n self._selenium.input_text(\"policy_group_name\",group_name)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create a Prometheus CVM agent.
def CreatePrometheusAgent(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreatePrometheusAgent", params, headers=headers) response = json.loads(body) model = models.CreatePrometheusAgentResponse() ...
[ "def create_agent(self):\n\n agent = Agent()\n self.agents.append(agent)\n return agent", "def health_monitor_create(self, **kwargs):\n return self._clients.octavia().health_monitor_create(**kwargs)", "def __init__(\n self,\n time_step_spec,\n action_spec,\n # Spe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create Prometheus configurations.
def CreatePrometheusConfig(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreatePrometheusConfig", params, headers=headers) response = json.loads(body) model = models.CreatePrometheusConfigResponse() ...
[ "def generate(cls, **kwargs) -> \"PrometheusConfiguration\":\n return cls(\n **{**dict(\n description=\"Update the base_url and metrics to match your Prometheus configuration\",\n metrics=[\n PrometheusMetric(\n \"throughput\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to create a Prometheus recording rule.
def CreateRecordingRule(self, request): try: params = request._serialize() headers = request.headers body = self.call("CreateRecordingRule", params, headers=headers) response = json.loads(body) model = models.CreateRecordingRuleResponse() m...
[ "def create_sampling_rule(self, SamplingRule: Dict) -> Dict:\n pass", "def create(session: Session, rule_name: str, rule_type: str, data: str, frequency: int) -> Rule:\n if not rule_name or not rule_type or not data:\n raise ValueError(\"A rule name, a type, an argument and a frequency is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to delete an alarm policy.
def DeleteAlarmPolicy(self, request): try: params = request._serialize() headers = request.headers body = self.call("DeleteAlarmPolicy", params, headers=headers) response = json.loads(body) model = models.DeleteAlarmPolicyResponse() model._...
[ "def delete_policy(self, policy):\r\n return self.manager.delete_policy(scaling_group=self, policy=policy)", "def delete_policy(self):\n response=self.client.list_policy_versions(PolicyArn=self.PolicyArn)\n for Version in response[\"Versions\"]:\n if not(Version[\"IsDefaultVersion\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This API is used to batch delete Prometheus alerting rules.
def DeleteAlertRules(self, request): try: params = request._serialize() headers = request.headers body = self.call("DeleteAlertRules", params, headers=headers) response = json.loads(body) model = models.DeleteAlertRulesResponse() model._des...
[ "def realms_bulk_del_jobs():\n try:\n body = json.loads(request.body.read())\n jobs = body['jobs']\n for realm_id, job_id in jobs:\n _del_job(realm_id, job_id)\n except ValueError:\n raise JSONError(client.BAD_REQUEST,\n exception='ValueError',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }