Columns: query (string, lengths 9 to 9.05k), document (string, lengths 10 to 222k), negatives (list, lengths 19 to 20), metadata (dict)
Plot histogram of population fitness values
def plt_hist(pop, bin_limit=fit_range): plt.hist(pop, bins=range(0,bin_limit+1)) plt.grid(True) plt.title('Distribution of Population') plt.show()
[ "def income_per_person(year) :\n incomes.hist(str(year))\n plt.title('Histogram of per capita GDP for each country in '+str(year))\n plt.xlabel('Per capita GDP in Dollars')\n plt.ylabel('# of Countries')\n plt.show()", "def plot_data(self,feature_idx):\r\n\t\tfig,ax = plt.subplots()\r\n\t\tax.set(t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that an instance can't be instantiated without an implementation of 'apply_connectivity_changes'
def test_apply_connectivity_changes(self): with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class TestedClass with " "abstract methods apply_connectivity_changes"): self.tested_class()
[ "def check_connection(self):\n pass", "def test_cannot_be_instantiated(self):\n with self.assertRaises(NotImplementedError):\n Channel(0)", "def __instancecheck__(cls, inst, live=True):\n return cls.implements(inst)", "def _check_connection(self):\n if \"_connection\" no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the requests collection for orders to change the validity of an upload.
def check_for_requests(self): while True: doc = self.cc.requests_coll.find_one_and_delete( {'receiver': 'validator'}, sort=[('_id', pymongo.ASCENDING)] ) if doc is None: break if doc['action'] == 'validate_upload': ...
[ "def test_upload_amended_orders(self):\n pass", "def validate_order_item(self, request):\n raise NotImplementedError()", "def Check(self, **kwargs):\n from datetime import datetime, timedelta\n problems = super(ZFSBackupS3, self).Check(**kwargs)\n\n # Now we check for multipar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a new action_id to every recently completed upload
def check_for_uploads(self): def set_action_id_ops() -> Sequence[Tuple[UpdateOne, InsertOne]]: find_query = { 'complete': True, self.action_id_name: {'$exists': False}, 'meta.format': {'$exists': True}, 'meta.start_time': {'$exists': T...
[ "def add_action(self, action):\n with self._mutex:\n _id = max(self._used_ids) if self._used_ids else 0\n while _id in self._used_ids:\n _id += 1\n self._actions[_id] = action\n self._used_ids.add(_id)\n self._workers[_id] = Thread(target=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scans the analyzers collection for analyzer modules that were executed and performs validation on their generated observations. If they are ok, the observations are committed to the observatory and a new action is inserted into the action_log.
def check_for_analyzers(self): executed = self.analyzer_state.executed_analyzers() for analyzer in executed: # check for wish if self.analyzer_state.check_wish(analyzer, 'cancel'): print("validator: cancelled {} upon request".format(analyzer['_id'])) ...
[ "def start_analyzers(self):\n self.commPool.logFromCore(Messages.system_analyzers_start + self.CONFIG['URL_BASE'], LogTypes.INFO, self.__class__.__name__)\n loa = LoaderOfAnalyzer(self.CONFIG)\n self.LoaderOfAnalyzersThread = Process(target=loa.start, args=())\n self.LoaderOfAnalyzersThr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleanup the oslo.messaging layer.
def cleanup(): global TRANSPORT, NOTIFIER assert TRANSPORT is not None assert NOTIFIER is not None TRANSPORT.cleanup() TRANSPORT = NOTIFIER = None
[ "def cleanup(self):\n self.msgmap.clear()\n self.droppedmsgs.clear()\n self.chan.stop_receiving_messages()\n\n # TODO: enable\n #self.cmdMap.clear()\n #self.cmdCliSubmitQueue.clear()\n #self.cmdSvrComputeQueue.clear()\n #self.droppedCommands.clear()\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a SOAP envelope and convert the structure to XML
def create_soap_envelope(self, rpc_name, cwmp_version="cwmp-1-0", rpc_args="", cwmp_id=""): log.debug_info("create_soap_envelope") try: dict_envelope_attrib = {'xmlns:SOAP-ENV':'http://schemas.xmlsoap.org/soap/envelope/', 'xmlns:SOAP-ENC':'http://schemas....
[ "def _create_envelope_element(self):\n all_elements = xsd.Sequence([])\n\n assert self.header\n if self.header.type._element:\n all_elements.append(\n xsd.Element(\"{%s}header\" % self.nsmap[\"soap-env\"], self.header.type)\n )\n\n all_elements.append...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up the official Spore achievement names and text. Call before getting any achievements.
def loadAchievementList(): global achievements achievements = {} doc = minidom.parse(urllib.urlopen(serverString + "/data/achievements.xml")) for element in doc.getElementsByTagName("achievement"): key = element.getElementsByTagName("id")[0].firstChild.data name = element.getElementsByTa...
[ "def get_achievement(name):\r\n c = tibiaDatabase.cursor()\r\n try:\r\n # Search query\r\n c.execute(\"SELECT * FROM Achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15\", (\"%\" + name + \"%\",))\r\n result = c.fetchall()\r\n if len(result) == 0:\r\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store name and description (must have loadAchievementList called first)
def getInfo(self): self.name, self.description = achievements[self.id]
[ "def loadAchievementList():\n global achievements\n achievements = {}\n doc = minidom.parse(urllib.urlopen(serverString + \"/data/achievements.xml\"))\n for element in doc.getElementsByTagName(\"achievement\"):\n key = element.getElementsByTagName(\"id\")[0].firstChild.data\n name = elemen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the URL for the official Spore achievement icon
def getIconUrl(self): return "%s/static/war/images/achievements/%s.png" % (serverString, self.id)
[ "def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''", "def badge_image_url(self) -> str:\n color_string = self.color()\n compliance_string = self.urlencode()\n return f\"https://img.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get profile pic, tagline, user id and creation date
def getProfileInfo(self): doc = minidom.parse(urllib.urlopen(serverString + "/rest/user/" + self.name)) for element in doc.getElementsByTagName("user")[0].childNodes: if element.nodeType != minidom.Node.ELEMENT_NODE: continue elif element.tagName == "status" and i...
[ "def getUserProfilePic(user):", "def profile_pic(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n pic = profile_obj.avatar\n return {'picture': pic}\n return {}", "def getprofile(self, *args, **kwargs):\n return _...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the number of achievements for the user and a list of achievement ids and unlock dates
def getAchievements(self, start=None, length=5): if start == None: start = len(self.achievements) doc = minidom.parse(urllib.urlopen("%s/rest/achievements/%s/%i/%i" % (serverString, self.name, start, length))) if int(doc.getElementsByTagName("status")[0].firstChild.data) != 1: ...
[ "async def achievements(self, ctx: commands.Context):\r\n # The milestones for each achievement type\r\n milestones_dict_of_achievements = {\r\n 'times_entertained': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_fed': [5, 25, 25, 50, 100, 500, 1000...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get various stats like height, diet, abilities etc. for a creature
def getStats(self): if self.type != "CREATURE" and self.type != None: return self.stats = _xmlUrlToDict(serverString + "/rest/creature/" + self.id, float)
[ "def get_character_health(character: dict):\r\n print(\"Your health is: %d\" % character['HP'])", "def get_damage():\n\n return character['Damage']", "def get_hp():\n\n return character['HP']", "def hero_characteristics(hero):\n print(f\"{hero.name}'s true identy is {hero.identity}\")\n print(f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get asset id, and name for assets in a sporecast.
def sporecastAssets(sporecastId, start=0, length=20): url = "%s/rest/assets/sporecast/%s/%i/%i" % (serverString, sporecastId, start, length) doc = minidom.parseString(urllib.urlopen(url).read().decode("utf-8", "ignore").encode("ascii", "xmlcharrefreplace")) if int(doc.getElementsByTagName("status")[0].first...
[ "def getAssetId(self):\n node = self.__node\n \n assetParam = node.getParameter('castingInfo.asset')\n if assetParam != None:\n assetId = assetParam.getValue(ZERO)\n return assetId\n \n return None", "def _get_asset_name(self, action_result):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The interest will be 10 percent per annum, compounded monthly, using the 30/360 US day count convention.
def interest(self, from_date, to_date): yearfrac = findates.daycount.yearfrac(from_date, to_date, "30/360 US") months = yearfrac * 12 return Decimal((1.0 + \ self.annual_interest_rate / 12....
[ "def calcMonthlyInterest(self):\n imensual=self.ianual/12.\n self.deposit(imensual*self.balance)", "def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12", "def calcInterest(principal, apr, numOfDays):\n return principal * ((apr/100.00) / 365.00 ) * numOfDays;"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Any principal amounts in this loan will be paid in Hong Kong dollars. Any accrued interest shall be paid in the form of Bitcoin, with the interest rate calculated in Hong Kong dollars.
def payments(self, loan): self.currency_interest = "XBT" """The lender agrees to provide the borrower half of the loan amount on the initial loan on the initial date""" loan.fund(on=self.initial_loan_date, amount=self.total_loan_amount * \ ...
[ "def _loan_principal(self):\n return self.asking_price - self.down_payment", "def computeInterest(self):\r\n interest = self.balance * SavingsAccount.RATE\r\n self.deposit(interest)\r\n return interest", "def compound_interest(principal, interest, compound_times, years_to_grow):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method returns QuerySet of orders of matched shift.
def get_orders_with_shift(shift_id): return Order.objects.filter(shift_id=shift_id)
[ "def get_orders(self):\n return self.order_lst", "def get_history_orders(self):\n return self.engine.get_history_orders()", "def get_orders():\n\treturn Order.query", "def getOrderList(self):\r\n\t\treturn self.pair.orders", "def getOrderList(self):\r\n\t\treturn self.orders", "def ordering...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return sales per category
def get_context_data(self, **kwargs): start, end = self.get_start_end_dates(self.request) if start or end is not None: category_list = [] misc_items = 0 discount = 0 category_sales = {} count_test_items = 0 out = {} tot...
[ "def get_sales_and_profit_abc_summary(client_id:str): \n categories = ['A', 'B', 'C']\n df = pd.DataFrame()\n df['Categories'] = categories\n sales, profit = [], []\n sales_abc = get_saleswise_abc_data(client_id)\n profit_abc = get_profitwise_abc_data(client_id)\n \n for cat in categorie...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply filters to Datacontainer object
def apply_filters(self, filters): self._data = self.model.objects.filter(**filters)
[ "def filter(self, filters):", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def filter(self, data):\n pass", "def _data_filtering(self):\n self._filter_nan_user_or_item()\n self._remove_duplication()\n self._filter_by_field_value()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply annotation to datacontainer object
def annotate(self, annotation): self._data = self._data.annotate(**annotation)
[ "def Annotate(self, annotations):\n self._annotations.update(annotations)", "def Annotate(self, annotations):\n self._results.Annotate(annotations)", "def _add_annotation(annotation_dict, annotation_name):\n self.annotation[annotation_name] = annotation_dict[self.uuid]", "def _convert_annotations...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply aggregation to datacontainer object
def aggregate(self, aggregation): self._data = self._data.aggregate(**aggregation)
[ "def _agg(self, data, op):\n if op == \"avg\":\n dist.all_reduce(data, op=dist.ReduceOp.SUM)\n data /= self.world_size\n else:\n raise NotImplementedError\n return data", "def _agg(self, data, op):\n raise NotImplementedError", "def aggregate(self, in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop all timers in a canvas.
def _stop_timers(canvas): for attr in dir(canvas): try: attr_obj = getattr(canvas, attr) except NotImplementedError: # This try/except is needed because canvas.position raises # an error (it is not implemented in this backend). attr_obj = None ...
[ "def cancel_all(self):\n for timer in self._timers:\n timer.Stop()", "def reset_timers(context):\n _cancel_all_timers(context)", "def killtimers(self):\n for timer in self._timers: timer.cancel()\n self._timers = []", "def _stop_timers(self):\n if self.ping_timer:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evolves a wave with energy [E] and standard deviation [deltaz] from the initial position [zi] to the final position [zf]. The evolution takes place in a one-dimensional space of length [L] divided into [N] points. This evolution is pseudo-analytical, since it assumes that the numerical integration can be performed with high precision. Parameters
def evolucao_analitica(zi=-20.0, zf=20, E=150.0, deltaz=5.0, L=100.0, N=1024, tempo=2.1739442773545673e-14, simples=True): result = locals().copy() # mudando para AU L_au = L / au2ang E_au = E / au2ev deltaz_au = deltaz / au2ang ...
[ "def evolucao_numerica(zi=-20.0, zf=20, E=150.0, deltaz=5.0,\n L=100.0, N=1024, dt=1e-20, method='pe',\n tempo=2.1739442773545673e-14,\n simples=False):\n result = locals().copy()\n # mudando para AU\n L_au = L / au2ang\n dt_au = dt / au_t\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evolves a wave with energy [E] and standard deviation [deltaz] from the initial position [zi] to the final position [zf]. The evolution takes place in a one-dimensional space of length [L] divided into [N] points. The method to be used must be chosen among PseudoEspectral, CrankNicolson and RungeKutta. Parameters
def evolucao_numerica(zi=-20.0, zf=20, E=150.0, deltaz=5.0, L=100.0, N=1024, dt=1e-20, method='pe', tempo=2.1739442773545673e-14, simples=False): result = locals().copy() # mudando para AU L_au = L / au2ang dt_au = dt / au_t E_au = E ...
[ "def evolucao_analitica(zi=-20.0, zf=20, E=150.0, deltaz=5.0,\n L=100.0, N=1024,\n tempo=2.1739442773545673e-14,\n simples=True):\n result = locals().copy()\n # mudando para AU\n L_au = L / au2ang\n E_au = E / au2ev\n deltaz_au = delta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logit cross-entropy loss with masking.
def masked_logit_cross_entropy(preds, labels, mask): loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels) loss = tf.reduce_sum(loss, axis=1) mask = tf.cast(mask, dtype=tf.float32) mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.])) loss *= mask return tf.reduce_mean...
[ "def masked_softmax_cross_entropy(preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)", "def masked_softmax_cross_entropy(preds, labels, mask):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
L2 loss with masking.
def masked_l2(preds, actuals, mask): loss = tf.nn.l2(preds, actuals) mask = tf.cast(mask, dtype=tf.float32) mask /= tf.reduce_mean(mask) loss *= mask return tf.reduce_mean(loss)
[ "def get_l2_loss(image1, image2):\n assert image1.shape == image2.shape\n return np.mean((image1 - image2)**2)", "def loss_hole(self, mask, y_true, y_pred):\n return self.l1((1-mask) * y_true, (1-mask) * y_pred)", "def make_weak_loss(z1, z2, labels, loss_fn=gin.REQUIRED):\n\n return loss_fn(z1, z2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Indicator function for zero elements z.
def fun(self, x): if np.any(x[self._z] != 0): return np.inf else: return 0
[ "def step(z):\n result = np.zeros_like(z)\n result[z > 0] = 1.0\n return result", "def zero_crosser(indicator: pd.Series) -> pd.Series:\n indicator = indicator.fillna(0)\n return (((indicator.shift() * indicator) <= 0) * np.sign(indicator)).astype(int)", "def _iszero(x):\n return x.is_zero", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the table name, the columns to display, and the query (such as a WHERE or an INNER JOIN); this part is optional. Prints the result of the query.
def make_query(table_name, cols, query): str_query = None if query == None: str_query = "SELECT {} FROM {};".format(cols, table_name) else: str_query = "SELECT {} FROM {} {};".format(cols, table_name, query) print(">>>ejecutando: ", str_query) sistema.cursor.execute(str_query) for...
[ "def buscarAlumnosCurso(self, curso):", "def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):", "def buscarAlumnosPrueba(self, prueba):", "def show_data(self, table_name):\n sql_command = f'SELECT * FROM {table_name}'\n print(sql_command) if self.debug == 1...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses through the 'inventory' parameter in target_objs to fetch the remote inventories and stores them in save_dir before recursively copying them to the output_path (relative to the inventory path). Overwrites existing inventory items if force-fetched.
def fetch_inventories(inventory_path, target_objs, save_dir, force, pool): git_inventories = defaultdict(list) http_inventories = defaultdict(list) # To ensure no duplicate output path inv_output_path = defaultdict(set) for target_obj in target_objs: try: inventories = target_ob...
[ "def generate_inventory(self):\n hosts = self.cluster.remotes.keys()\n hostnames = [remote.hostname for remote in hosts]\n hostnames.sort()\n inventory = []\n if self.inventory_group:\n inventory.append('[{0}]'.format(self.inventory_group))\n inventory.extend(hos...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load probe names from file
def load_probes(probe_file): probes = common.read_file(probe_file) probe_list = list(filter(None, probes)) return probe_list
[ "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def load_BL62_text_files(file_name_filter):\n files = os.listdir('.')\n\n spectra = {}\n\n for spec_file in files:\n m = re.search('('+file_name_filter+'.*)\\.dat',spec_file)\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the virtual addr of probes.
def get_probe_address(elf_path, probes, section='.text'): assert len(probes) <= 26, 'Too many probes' text_data = objdump_section(elf_path, '.text') name_to_addr = parse_func_names(text_data) probe_names = list(string.ascii_uppercase) name_idx = 0 ret = [] for probe in probes: ...
[ "def getVirtualAddress(self) -> int:\n ...", "def getVirtualAddress(self) -> long:\n ...", "def getOffset(self, virtualAddress: long) -> long:\n ...", "def get_profibus_address(self):\n return int(self.query(\"ADDR?\"))", "def _get_virtual_oper_VipV6_address(self):\n return se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the COMPAS recidivism dataset. The purpose of this dataset is to predict whether a criminal will recidivate within two years of release.
def load_recidivism(return_X_y=False): return _load_dataset( 'compas/two-year-recidivism.csv', target='two_year_recid', return_X_y=return_X_y )
[ "def fetch_ricci_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"ricci\", \"classification\", astype=\"pandas\", preprocess=preprocess\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = pd.concat([train_y, test_y]).sort_index()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the GDP growth dataset (from FRED data). The purpose of this dataset is to forecast GDP growth based on macroeconomic variables.
def load_gdp(return_X_y=False): return _load_dataset( 'gdp/GDP-growth.csv', target='GDP_g', return_X_y=return_X_y )
[ "def freddie_performance_load():\n freddie_performance_url = 's3a://onemortgage/freddie/historical_data1_time_Q*.txt'\n freddie_performance_colnames = [\"loan_seq_no\",\n \"report_period\",\n \"cur_actual_upb\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the nickname of this GetSesameResponse.
def nickname(self, nickname): if nickname is None: raise ValueError("Invalid value for `nickname`, must not be `None`") # noqa: E501 self._nickname = nickname
[ "def nickname(self, value):\n self._nickname = value", "def nickname(self, nickname):\n\n self._nickname = nickname", "def set_nickname(self, nickname):\n \n if len(nickname) > globals.MAX_NICKNAME_LENGTH:\n nick = nickname[0:globals.MAX_NICKNAME_LENGTH-3]+\"...\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the password is 10-30 characters long and consists of letters and numbers
def validate_password(password): if re.match(r"^[a-zA-Z0-9]{10,30}$", password): return True return False
[ "def valid_password(password: str) -> bool: \n return len(password) >= 6", "def password_validates(password):\n if any(char.isdigit() for char in password) \\\n and any(char.isalpha() for char in password) \\\n and len(password) > 5:\n return True\n els...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over the pages of an API operation results. Paginators act as an abstraction over the process of iterating over an entire result set of a truncated API operation. Yields an iterable with the response obtained from applying `method`.
def paginate( client: client, method: str, **kwargs: Dict[str, Any] ) -> Generator[Dict[str, Any], None, None]: paginator = client.get_paginator(operation_name=method) for page in paginator.paginate(**kwargs): yield page
[ "def paginate(self, *args, **kwargs):\n has_next = False\n method_to_call = getattr(self.cymon, self.method)\n result = method_to_call(limit=100, *args, **kwargs)\n if result['next'] is not None:\n print result['next']\n has_next = True\n yield result['result...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new paragraph to two files (main + answers)
def input_file_docx(str_write, str_answer): paragraph = dti.add_paragraph(str_write) paragraph_format = paragraph.paragraph_format paragraph_format.space_after = Pt(1.0) paragraph = dti1.add_paragraph(str_answer) paragraph_format = paragraph.paragraph_format paragraph_format.space_after ...
[ "def add_to_file(question_text):\n\t#Strip newlines for the scraped question \n\tQa = question_text.replace(\"\\n\\n\\n\", \"\")\n\tQa = Qa.replace(\"\\n\\n\", \"\\n\")\n\n\t#Write data to file\n\tquestion_list.write(\"\\n\\n\\n\" + Qa)", "def __add(files):\n # Open master.tex and read in lines\n master = o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Authorizes the outbrain object. If a token exists in the yaml, the outbrain object will get the attribute "token" with the token string. If the token was generated more than 28 days ago, it gets another token and adds it to the outbrain object as the new token attribute. If the token is still valid, no new token is requested.
def authorize(outb, creds): try: outb.token = creds["token"] token_gen_date = datetime.datetime.strptime(creds["token_generated_on"], "%Y-%m-%d__%H_%M_%S") # Convert token generated date to datetime object for comparison if (datetime.datetime.now() - datetime.timedelta(days=28)) > t...
[ "def UserToken(self) -> object:", "def authorise():\n\ttokenfile = open(\"secrettoken.txt\", \"r\")\n\ttoken = tokenfile.read()\n\ttoken = token.strip()\n\tpnutpy.api.add_authorization_token(token)", "def __init__(self, token):\n self.token = token\n self.time_of_blacklisting = datetime.now()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of dicts with all the campaign ids and names for the marketer_id
def get_camp_ids_names_containing_str(marketer_id, string): all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0]) return [{"id": x.get("id"), "name": x.get("name")} for x in all_campaigns if string in x["name"]]
[ "def get_campaigns(self):\n\n self.get_settlements()\n game_list = set()\n\n if self.settlements is not None:\n for s in self.settlements:\n if s is not None:\n game_list.add(s[\"_id\"])\n\n for s in self.get_survivors():\n game_lis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of campaign IDs which contain a given string
def get_camp_ids_containing_str(marketer_id, string): all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0]) return [x.get("id") for x in all_campaigns if string in x["name"]]
[ "def get_camp_ids_names_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [{\"id\": x.get(\"id\"), \"name\": x.get(\"name\")} for x in all_campaigns if string in x[\"name\"]]", "def doFindAllMatching(self, str):\n matches ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transforms the result of get_campaign_performance_per_period() function, and only includes campaign IDs that are in the list of camp_ids_to_filter
def transform_and_filter_result(result,camp_ids_to_filter): final_result = list() for x in result[0][0]: if x["campaignId"] in camp_ids_to_filter: result_per_id = list() for result in x["results"]: result_per_id_per_day = dict() # The resulting dic...
[ "def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns the SQL that extracts a value from the given date field field_name.
def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. # Note: WEEKDAY() returns 0-6, Monday=0. return "DAYOFWEEK(%s)" % field_name elif lookup_type == 'week': return "WEEK(%s)" %...
[ "def date_extract_sql(self, lookup_type, field_name):\n raise NotImplementedError()", "def date_extract_sql(self, lookup_type, field_name):\n return \"EXTRACT(%s FROM %s)\" % (lookup_type, field_name)", "def date_trunc_sql(self, lookup_type, field_name):\n raise NotImplementedError()", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary to cast it before using it in a WHERE statement. Note that the resulting string should contain a '%s' placeholder for the column being searched against.
def field_cast_sql(self, db_type, internal_type=None): if db_type and db_type.lower() == 'blob': return 'CAST(%s as nvarchar)' return '%s'
[ "def field_cast_sql(self, db_type):\n return '%s'", "def column_datatype_to_string(\n cls, sqla_column_type: TypeEngine, dialect: Dialect\n ) -> str:\n sqla_column_type = sqla_column_type.copy()\n if hasattr(sqla_column_type, \"collation\"):\n sqla_column_type.collation =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a cursor object that has just performed an INSERT/OUTPUT statement into a table that has an autoincrementing ID, returns the newly created ID.
def fetch_returned_insert_id(self, cursor): return cursor.fetchone()[0]
[ "def last_insert_id(self, cursor, table_name, pk_name):\n # TODO: Check how the `last_insert_id` is being used in the upper layers\n # in context of multithreaded access, compare with other backends\n\n # IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx\n # SC...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a quoted version of the given table, index or column name. Does not quote the given name if it's already been quoted.
def quote_name(self, name): if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote): return name # Quoting once is enough. return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote)
[ "def escape_column_name(column):\n col_name = (\n column[\"name\"] if isinstance(column, collections.abc.Mapping) else str(column)\n )\n escaped_name = col_name.replace('\"', '\"\"')\n return f'\"{escaped_name}\"'", "def quote_dotted(\n name: Union[\"quoted_name\", str], quote: functools.par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a SQL expression that returns a random value.
def random_function_sql(self): return "RAND()"
[ "def random_function_sql(self):\n return 'RANDOM()'", "def get_random_query() -> str:\n\n # Generate a list of random columns.\n columns = get_random_columns()\n arithmetic_exprs = [get_random_arithmetic_expr()\n for _ in range(random.randint(0, 2))]\n arithmetic_exprs = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders, and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according...
def last_executed_query(self, cursor, sql, params): return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
[ "def last_executed_query(self, cursor, sql, params):\n from django.utils.encoding import smart_unicode, force_unicode\n\n # Convert params to contain Unicode values.\n to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')\n if isinstance(params, (list, tuple)):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the SQL for committing the given savepoint.
def savepoint_commit_sql(self, sid): return "REMOVE SAVEPOINT %s" % self.quote_name(sid)
[ "def savepoint_commit_sql(self, sid):\n return \"COMMIT TRANSACTION %s\" % sid", "def savepoint_commit_sql(self, sid):\n raise NotImplementedError", "def savepoint_create_sql(self, sid):\n return \"SAVE TRANSACTION %s\" % sid", "def savepoint_rollback_sql(self, sid):\n return \"ROL...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the SQL statement required to start a transaction.
def start_transaction_sql(self): return "BEGIN TRANSACTION"
[ "def start_transaction_sql(self):\n return \"BEGIN;\"", "def start_transaction_sql(self):\n return 'START TRANSACTION;'", "def startTxn ( self ):\n\n self.txncursor = self.conn.cursor()\n sql = \"START TRANSACTION\"\n self.txncursor.execute ( sql )", "def wrap_transaction(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the SQL that will be appended to tables or rows to define a tablespace. Returns '' if the backend doesn't use tablespaces.
def sql_for_tablespace(self, tablespace, inline=False): return "ON %s" % self.quote_name(tablespace)
[ "def tablespace_sql(self, tablespace, inline=False):\n return ''", "def tablespace_sql(self, tablespace, inline=False):\n return \"ON %s\" % self.quote_name(tablespace)", "def get_create_sql(self):\n return '\\n'.join([tbl.get_create_sql() for tbl in self.tables.itervalues()])", "def get_tabl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares a value for use in a LIKE query.
def prep_for_like_query(self, x): # http://msdn2.microsoft.com/en-us/library/ms179859.aspx return smart_text(x).replace('%', '\%').replace('_', '\_')
[ "def search_value(self, value):\n self._search_value = value.strip()", "def prepare_value(self, prop, value):\n return value", "def _prepare_cache(self, value):\n\n return value", "def prep_for_iexact_query(self, x):\n return x", "def get_db_prep_value(self, value):\n pass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as prep_for_like_query(), but called for "iexact" matches, which need not necessarily be implemented using "LIKE" in the backend.
def prep_for_iexact_query(self, x): return x
[ "def postfix(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"LIKE\", __key, __and, [(k, f\"%{_escape_like(v)}\") for k, v in kwargs.items()])", "def prep_for_like_query(self, x):\n # http://msdn2.microsoft.com/en-us/library/ms179859.aspx\n return smart_text(x).replace('%', '\\%'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns.
def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value if settings.USE_TZ and timezone.is_aware(value): # pyodbc donesn'...
[ "def _convert_datetime(self, value):\n return self._convert_default(value)", "def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform a time value to an object compatible with what is expected by the backend driver for time columns.
def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQL Server doesn't support microseconds if isinstance(value, string_type...
[ "def convert_timefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Convert DatetimeWithNanoseconds to time.\n return time(value.hour, value.minute, value.second, value.microsecond)", "def _convert_time(self, value):\n return self._conve...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a two-element list with the lower and upper bound to be used with a BETWEEN operator to query a field value using a year lookup. `value` is an int containing the looked-up year.
def year_lookup_bounds(self, value): first = '%s-01-01 00:00:00' # SQL Server doesn't support microseconds last = '%s-12-31 23:59:59' return [first % value, last % value]
[ "def year_lookup_bounds(self, value):\n first = '%s-01-01 00:00:00'\n second = '%s-12-31 23:59:59.999999'\n return [first % value, second % value]", "def year_lookup_bounds_for_date_field(self, value):\n return self.year_lookup_bounds(value)", "def year_range(cls, year):\n ret...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Coerce the value returned by the database backend into a consistent type that is compatible with the field type. In our case, cater for the fact that SQL Server < 2008 has no separate Date and Time data types.
def convert_values(self, value, field): if value is None: return None if field and field.get_internal_type() == 'DateTimeField': if isinstance(value, string_types) and value: value = parse_datetime(value) return value elif field and field.get_i...
[ "def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value", "def normalise_field_value(value):\n if isinstance(value, datetim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return all the sites that are in FILENAME.
def get_sites(): sites_file = open(FILENAME) sites = [] for site in sites_file: sites.append("http://" + site.strip()) return sites
[ "def _get_sitenames():\n sites = []\n with open('/etc/hosts') as _in:\n for line in _in:\n if line.startswith('#'):\n continue\n for name in ('lemoncurry', 'magiokis'):\n if name in line:\n sites.append(line.strip().split()[1])\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempt to read the given site. Return the text of the site if successful, otherwise returns False.
def read_site(site): try: connection = urlopen(site) html = connection.read() connection.close() except: return False parser = HtmlTextParser() parser.parse(html) return parser.get_text()
[ "def read_text(link, from_file = False):\n\tif from_file:\n\t\ttext = read_from_file(link)\n\telse:\n\t\t_, text = read_from_web(link)\n\n\treturn text", "def scrape(self, site):\n site_resp = requests.get(site)\n return site_resp.text", "def get_content(url):\n\tresponse = requests.get(url)\n\tif...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Index the given site with the given text.
def index_site(site, text): # YOUR CODE HERE # pass # delete this when you write your code
[ "def index_page(self, url, html_text):\n \n # TODO - Write the indexing logic \n # self.urls.append(url)\n self.indexer.index_html_page(url, html_text)", "def add_page_to_index(index, url, content):\n words = content.split()\n for word in words:\n add_to_index(index, word,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build the index by reading and indexing each site.
def build_index(): for site in get_sites(): text = read_site(site) while text == False: text = read_site(site) # keep attempting to read until successful index_site(site, text)
[ "def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()", "def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method tries to read notes from a sound wave file, returning a list of dicts of start_time, pitch and duration
def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): print("====> reading notes from sound file") win_s = 512 // DOWN_SAMPLE # fft size hop_s = 256 // DOWN_SAMPLE # hop size # adjust sample rate s = source(filename, samplerate, hop_s) samplerate = s.samplerate ...
[ "def get_data_from_midi(dir):\n note_to_int = get_note_dic(False)\n sequences = []\n levels = []\n for glob_file in glob.glob(\"{}/*.mid\".format(dir)):\n midi = converter.parse(glob_file)\n notes = []\n tempo_time = 0\n file_name = os.path.split(glob_file)[1].split(\".\")[0]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method tries to read pitches from a sound wave file, returning a list of dicts of pitch and confidence
def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE): if os.path.isfile(filename) is False: raise Exception('File not found with filename = %s' % filename) print("====> reading pitch from sound file") win_s = 4096 // DOWN_SAMPLE # fft size hop_s = 512 // DOWN_SA...
[ "def read_pitchs(midi_file):\n song = midi.read_midifile(midi_file)\n song.make_ticks_abs()\n tracks = []\n for track in song:\n notes = [note for note in track if note.name == 'Note On']\n pitch = [note.pitch for note in notes]\n tick = [note.tick for note in notes]\n tracks...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a defined bpm, generates one bar of heartbeat sound. Since each beat in the heartbeat track has a fixed length, the bpm cannot be too high; the exact limit is not yet determined.
def get_one_bar_heart_beat(filename: str, bpm: int): heart_beat_track = AudioSegment.from_file(file=filename, format='mp3') heart_beat_1 = heart_beat_track[70:180] heart_beat_2 = heart_beat_track[380:490] # AudioSegment.export(part, 'single_heartbeat1.mp3') tick_per_sec = 60 * 1000 / bpm # mak...
[ "def bpm(self):\n pulse_gen = Pulse(1, duty_cycle=0.1, sample_rate=self.audio.frame_rate)\n bpms = np.arrange(60, 181, 2)\n\n max = 0\n bpm = 0\n\n for i in bpms:\n pulse_gen.freq = i/60\n pulse_gen.duty_cycle = pusle_gen.freq * 0.1\n\n if len(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Command line entry point. '$ python runsim.py' shows the animation on screen; '$ python runsim.py file=video.mp4' saves the animation to video; '$ python runsim.py plot' shows the plot on screen; '$ python runsim.py plot file=plot.pdf' saves the plot to a pdf.
def main(*args): # # Use argparse to handle parsing the command line arguments. # https://docs.python.org/3/library/argparse.html # parser = argparse.ArgumentParser(description='Animate an epidemic') parser.add_argument('--size', metavar='N', type=int, default=50, help=...
[ "def main():\n\tfor f in os.listdir():\n\t\tif f.endswith(\".mp4\"):\n\t\t\tmake_images(f)\n\t\t\t#plot_changes(f) # Uncomment if you want to create plots of changes in-between frames", "def play(out_dir, subprocesses, simulation_viz):\r\n simulator = Simulator()\r\n if simulation_viz is SimulationV...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a triplet with a 1-bit sign, an 11-bit unsigned integer exponent and a 53-bit unsigned integer mantissa. The returned triplet can be used to fully reconstruct the fp64 value passed as an argument.
def to_sign_exponent_mantissa(value, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits): float_mantissa, float_exponent = math.frexp(value) if (float_mantissa >= 0): sign = 0 else: sign = 1 exponent = int(float_exponent + 2**(exponent_bits - 1)) mantissa = int(abs(float_mantis...
[ "def bits2target(bits):\n exponent = ((bits >> 24) & 0xff)\n assert 3 <= exponent, \"[exponent>=3] but {}.\".format(exponent)\n mantissa = bits & 0x7fffff\n if (bits & 0x800000) > 0:\n mantissa *= -1\n return mantissa * pow(256, exponent - 3)", "def DequantizeFP(scale, mantissa, nScaleBits=3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an fp64 from a 1-bit sign, an 11-bit unsigned integer exponent and a 53-bit unsigned integer mantissa.
def from_sign_exponent_mantissa(sign, exponent, mantissa, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits): if (sign): signed_mantissa = - mantissa else: signed_mantissa = mantissa signed_exponent = exponent - 2**(exponent_bits - 1) norm_signed_mantissa = float(signed_mantissa) ...
[ "def to_sign_exponent_mantissa(value, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits):\n float_mantissa, float_exponent = math.frexp(value)\n if (float_mantissa >= 0):\n sign = 0\n else:\n sign = 1\n exponent = int(float_exponent + 2**(exponent_bits - 1))\n mantissa = int(abs(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that storing a float as sign, exponent and mantissa, and converting back, produces exactly the original number.
def test_float_storage(): values = [2.3434, 124012.2323209999, -12.39212445433389] for value in values: sign, exp, mantissa = to_sign_exponent_mantissa(value) restored_value = from_sign_exponent_mantissa(sign, exp, mantissa) print(restored_value) assert(value == restored_value)
[ "def check_for_float(check):", "def testSpecialFloat():\n\n dataIn = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n dataOut16 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n dataOut24 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n error = 0.0\n\n value = -.123456789\n Integer = float32ToInt(value)\n\n\n for i in range(3):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inverse projection of the projected map to a healpix spherical map.
def inv_projmap(self, img, nside=None): pass
[ "def inv_projmap(self, img, nside=None):\n pass\n\n ysize, xsize = img.shape\n\n if nside is None:\n lonra = self.arrayinfo['lonra']\n latra = self.arrayinfo['latra']\n npix = np.int((360.0 * xsize / (lonra[1] - lonra[0])) * (180.0 * ysize / (latra[1] - latra[0]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
flipconv is either 'astro' or 'geo'. None will be default. With 'astro', east is toward left and west toward right. It is the opposite for 'geo'
def set_flip(self, flipconv): if flipconv is None: flipconv = 'astro' # default if flipconv == 'astro': self._flip = -1 elif flipconv == 'geo': self._flip = 1 else: raise ValueError("flipconv must be 'astro', 'geo' or None for default.")
[ "def convert_flip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n\n for i, ax in enumerate(axis):\n if i == 0:\n out = _op.reverse(x, ax)\n else:\n out = _op.reverse(out, ax)\n\n g.add_node(op.output(\"Out\")[0], out)", "def _apply...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the field of view, in degrees, of the plane of projection
def get_fov(self): return 2.*pi
[ "def get_field_of_view(self):\n assert self.width is not None and self.height is not None\n angle = (\n math.atan(\n max(self.width, self.height) / (self.get_focal_length() * 2.0)\n )\n * 2.0\n )\n return angle", "def fov(self) -> float:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inverse projection of the projected map to a healpix spherical map.
def inv_projmap(self, img, nside=None): pass ysize, xsize = img.shape if nside is None: lonra = self.arrayinfo['lonra'] latra = self.arrayinfo['latra'] npix = np.int((360.0 * xsize / (lonra[1] - lonra[0])) * (180.0 * ysize / (latra[1] - latra[0]))) # the tot...
[ "def inv_projmap(self, img, nside=None):\n pass", "def reproject_map(nside, phi, healpix_array=None):\n\n vec = hp.pix2vec(nside, np.arange(hp.nside2npix(nside)))\n eu_mat = euler(-phi, 0, 0, deg=True)\n rot_map = hp.rotator.rotateVector(eu_mat, vec)\n new_hp_inds = hp.vec2pix(nside, rot_map[0]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes sag using voltage trace.
def sag(V): Vmin = np.amin(V) Vend = V[-1] return Vmin - Vend
[ "def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag ratio being negative indicates there is no sag\")\n return sr", "def vf(gravedad, tiempo):\r\n #se realiza un multiplicacion y el valor se le asigna a la variable vf\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes sag (absolute, not sag ratio) using abf object and epoch index. See `sag_ratio` to calculate the sag ratio.
def sag_abf(abf, epoch_ind): p0 = abf.sweepEpochs.p1s[epoch_ind] p1 = abf.sweepEpochs.p1s[epoch_ind+1] V = abf.sweepY[p0:p1] return sag(V)
[ "def sag_ratio_abf(abf, epoch_ind):\n p0 = abf.sweepEpochs.p1s[epoch_ind]\n p1 = abf.sweepEpochs.p1s[epoch_ind+1]\n V = abf.sweepY[p0:p1]\n return sag_ratio(V)", "def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes sag ratio using voltage trace. Sag ratio is computed as $$ SR = \frac{V_{min} - V_{end}}{V_{min}} $$
def sag_ratio(V): Vmin = np.amin(V) Vend = V[-1] sr = (Vmin - Vend) / Vmin if sr < 0: print("Warning: sag ratio being negative indicates there is no sag") return sr
[ "def sag(V):\n Vmin = np.amin(V)\n Vend = V[-1]\n return Vmin - Vend", "def calc_V(A):\n return 1. / calc_rV(A)", "def estimate_ringVRratio(sl, sv, dp=160., do=25.6, norfices=8, lowerGamma=2.2, setting='medium', method='master_curve',\n return_err=False, return_V_R=False):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes sag ratio using abf object and epoch index.
def sag_ratio_abf(abf, epoch_ind): p0 = abf.sweepEpochs.p1s[epoch_ind] p1 = abf.sweepEpochs.p1s[epoch_ind+1] V = abf.sweepY[p0:p1] return sag_ratio(V)
[ "def sag_abf(abf, epoch_ind):\n p0 = abf.sweepEpochs.p1s[epoch_ind]\n p1 = abf.sweepEpochs.p1s[epoch_ind+1]\n V = abf.sweepY[p0:p1]\n return sag(V)", "def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag ratio being n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes minimum of voltage.
def Vmin(V): return np.min(V)
[ "def get_min_cell_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? (.*?) .*? . .*? .*? . . . .*?'\n minv = float(re.findall(pattern,summary).pop())\n return minv", "def get_min_voltage(self, dynamixel_id):\n byte_seq = self.re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes minimum Voltage using abf object and epoch index.
def Vmin_abf(abf, epoch_start): p0 = abf.sweepEpochs.p1s[epoch_start] p1 = abf.sweepEpochs.p1s[epoch_start + 1] V = abf.sweepY[p0:p1] return Vmin(V)
[ "def get_min_cell_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? (.*?) .*? . .*? .*? . . . .*?'\n minv = float(re.findall(pattern,summary).pop())\n return minv", "def voltage_drop_abf(abf, epoch_start):\n vmin = Vmin_abf(abf, ep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes voltage drop using voltage trace.
def voltage_drop(V): vmin = Vmin(V) resting = Vrest(V) return vmin - resting
[ "def voltage_drop_abf(abf, epoch_start):\n vmin = Vmin_abf(abf, epoch_start)\n resting = Vrest_abf(abf, epoch_start)\n return vmin - resting", "def _drop(self, u, v):\n return u - v * u.dot(v) / v.dot(v)", "def measure_voltage(self):\n return 0.0", "def detention_vol(\n tmnt_ddt: flo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes voltage drop using abf object and epoch index.
def voltage_drop_abf(abf, epoch_start): vmin = Vmin_abf(abf, epoch_start) resting = Vrest_abf(abf, epoch_start) return vmin - resting
[ "def voltage_drop(V):\n vmin = Vmin(V)\n resting = Vrest(V)\n return vmin - resting", "def test_unscaled_voltage_observation(self):\n arr = np.ones(self.env.num_buses, dtype=self.env.dtype)\n arr[0] = MIN_V\n arr[-1] = MAX_V\n df = pd.DataFrame({'BusPUVolt': arr})\n wit...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes capacitance from the time constant tau and membrane resistance Rm.
def capacitance(tau, Rm): return tau/Rm
[ "def _compute_time_scales(self):\n self.tau_l = self.volume_m3 / (self.mb_model.prcp_clim * self.area_m2)\n self.tau_a = self.tau_l * self.area_m2 / self.length_m ** 2", "def determine_capacity(self) -> float:\n return sum(self.observations[idx].obs_time.mins() for _, idx in self.schedule) / ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the time constant (usually called tau) using a fit to an exponential function.
def time_constant(t, V): a_init = 1 b_init = -100 c_init = V[-1] popt, pcov = curve_fit(func_exp, t, V, p0=[a_init, b_init, c_init], bounds=(-np.inf, np.inf)) Vpred = np.zeros(len(t)) for i in range(len(t)): Vpred[i] = func_exp(t[i], popt[0], popt[1], popt[2]...
[ "def exp_fit(xx):\r\n x = np.arange(1,len(xx)+1)*dt #time shift axis\r\n xx = xx/max(xx)\r\n popt, pcov = sp.optimize.curve_fit(exp_decay, x, xx)#, p0=(1, 1e6, 1)) \r\n return popt[1] #return tau estimate\r", "def exp(t,tau):\n return np.exp(-t/tau)", "def exp_decay(x, a, tau, b):\r\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes time constant using abf object and epoch index.
def time_constant_abf(abf, epoch_start): p0 = abf.sweepEpochs.p1s[epoch_start] p1 = abf.sweepEpochs.p1s[epoch_start + 1] t = abf.sweepX[p0:p1] - abf.sweepX[p0] V = abf.sweepY[p0:p1] return time_constant(t, V)
[ "def epoch():\n\treturn time.time()", "def to_timestamp(index, base_dt, fps=4):\n ts = time.mktime(base_dt.timetuple())\n return ts + index * (1.0 / fps)", "def epoch_time_now():\n return int(time.time())", "def index_in_epoch(self):\n return self._index_in_epoch", "def __float__(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes input membrane resistance Rm using current trace I and voltage trace V.
def input_membrane_resistance(I, V): V1 = V[0] V2 = V[-1] I1 = I[0] I2 = I[-1] dV = V2 - V1 dI = I2 - I1 return dV / dI
[ "def Mdyn_virial(R_kpc, v_kms):\n M = 2.8e5 * (v_kms)**2 * R\n M10 = M / 1e10\n return M10", "def scoreCirc_VoltageReference(circuit, gen, indi, makeRedundancyInMatrix):\n #----------#\n VREF = 1.5\n #----------#\n \n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n rowsR,columnsR,co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes input membrane resistance Rm using abf object and epoch index.
def input_membrane_resistance_abf(abf, epoch_start): p0 = abf.sweepEpochs.p1s[epoch_start] p1 = abf.sweepEpochs.p1s[epoch_start + 1] V = abf.sweepY[p0:p1] I = abf.sweepC[p0-1:p1] return input_membrane_resistance(I, V)
[ "def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes spike amplitude from voltage trace V and spike index t_spike.
def spike_amplitude(V, t_spike): # handle no spike found if t_spike is None: return None Vmax = V[t_spike] Vmin = np.min(V[t_spike+1:t_spike+500]) return Vmax - Vmin
[ "def spike_amplitude_abf(abf, t_spike, epoch_start=3):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n\n return spike_amplitude(V, t_spike)", "def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes", "def avg_spike_frequency(t, V):\n intervals = int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes spike amplitude from abf object with epoch index and the index of the spike time. Note that t_spike should be found within the same epoch, otherwise there will be an index mismatch.
def spike_amplitude_abf(abf, t_spike, epoch_start=3): p0 = abf.sweepEpochs.p1s[epoch_start] V = abf.sweepY[p0:-1] return spike_amplitude(V, t_spike)
[ "def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)", "def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n retur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds index in an array `arr` closest to value `val`.
def find_nearest_idx(arr, val): arr = np.asarray(arr) idx = (np.abs(arr - val)).argmin() return idx
[ "def nearest_ind_to_val(arr, val):\n arr = np.array(arr)\n return (np.abs(arr - val)).argmin()", "def find_nearest(val, arr):\n\n ix = np.argmin(np.abs(np.asarray(arr) - val))\n return arr[ix], ix", "def closest(xarr, val):\n idx_closest = np.argmin(np.abs(xarr - val))\n return idx_closest", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes spike width for time t, voltage trace V, spike index t_spike, and spike amplitude `spike_amp`.
def spike_width(t, V, t_spike, spike_amp): # handle no spike found if t_spike is None: return None Vmin = np.min(V[t_spike+1:t_spike+500]) minval = np.max([t_spike - 100, 0]) if len(V) > t_spike+500: maxval = -1 else: maxval = t_spike+500 id1 = find_nearest_idx(V[min...
[ "def spike_width_abf(abf, t_spike, spike_amp, epoch_start=3):\n # handle no spike found\n if t_spike is None:\n return None\n p0 = abf.sweepEpochs.p1s[epoch_start]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n return spike_width(t, V, t_spike, spike_amp)", "def spike_amplitude(V, t_sp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes spike width for abf object, t_spike index, and spike amplitude `spike_amp`. Note that t_spike should be found within the same epoch, otherwise there will be an index mismatch.
def spike_width_abf(abf, t_spike, spike_amp, epoch_start=3): # handle no spike found if t_spike is None: return None p0 = abf.sweepEpochs.p1s[epoch_start] t = abf.sweepX[p0:-1] V = abf.sweepY[p0:-1] return spike_width(t, V, t_spike, spike_amp)
[ "def spike_width(t, V, t_spike, spike_amp):\n # handle no spike found\n if t_spike is None:\n return None\n\n Vmin = np.min(V[t_spike+1:t_spike+500])\n minval = np.max([t_spike - 100, 0])\n if len(V) > t_spike+500:\n maxval = -1\n else:\n maxval = t_spike+500\n id1 = find_n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the index of the first spike. The value of startind can be used as an offset in case V is a slice of a larger array but you want the index relative to that larger array.
def first_spike_tind(V, startind=0): spikes, _ = find_peaks(V, [1, 1000]) if len(spikes) == 0: found_spike = False else: found_spike = True if found_spike is False: raise NoSpikeFoundException else: return spikes[0] + startind
[ "def find_time_index(t, t_0):\n\n t_gte = np.flatnonzero(t >= t_0)\n if not t_gte.size:\n raise FeatureError(\"Could not find given time in time vector\")\n\n return t_gte[0]", "def _get_start_index(self, tstart):\n i = bisect_right(self.data['epoch'], tstart)\n if i and not (i-1 < 0...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes spike latency. Makes sure that current is +100 pA.
def spike_latency(t, I, V): # make sure that current is +100 pA if abs(I[5] - 0.1) > 1e-7: sign = "" if I[5] > 0: sign = "+" print(f"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \ pA current") spike_tind = first_spike_tind(V) return ...
[ "def _latency(self):\n\n return\n time.sleep(0.005 + random.random() / 30.)", "def spike_latency_abf(abf, epochstart):\n p0 = abf.sweepEpochs.p1s[epochstart]\n t = abf.sweepX[p0:-1]\n V = abf.sweepY[p0:-1]\n I = abf.sweepC[p0:-1]\n return spike_latency(t, I, V)", "def get_latency(a,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes spike latency using abf object and epoch index.
def spike_latency_abf(abf, epochstart): p0 = abf.sweepEpochs.p1s[epochstart] t = abf.sweepX[p0:-1] V = abf.sweepY[p0:-1] I = abf.sweepC[p0:-1] return spike_latency(t, I, V)
[ "def spike_latency(t, I, V):\n # make sure that current is +100 pA\n if abs(I[5] - 0.1) > 1e-7:\n sign = \"\"\n if I[5] > 0:\n sign = \"+\"\n print(f\"Warning! Expected +100pA current, got {sign}{round(I[5]*1000)} \\\n pA current\")\n\n spike_tind = first_spik...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets all spike indices from time t and voltage trace V.
def all_spike_ind(t, V): spikes, _ = find_peaks(V, [1, 1000]) return spikes
[ "def interspike_intervals(t, V):\n # first pass -- get number of spikes and locations\n spike_inds = all_spike_ind(t, V)\n n_spikes = len(spike_inds)\n\n if n_spikes == 0:\n return []\n\n # generate array to hold time intervals\n intervals = np.zeros((n_spikes-1), dtype=float)\n for ti i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes interspike intervals for time t and voltage trace V. If there are N spikes, then there will be N-1 intervals.
def interspike_intervals(t, V): # first pass -- get number of spikes and locations spike_inds = all_spike_ind(t, V) n_spikes = len(spike_inds) if n_spikes == 0: return [] # generate array to hold time intervals intervals = np.zeros((n_spikes-1), dtype=float) for ti in range(1, n_sp...
[ "def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes", "def analyzeIV(t, V, I, tw, thr):\n ntraces = numpy.shape(V)[0]\n vss = []\n vmin = []\n vm = []\n ic = []\n nspikes = []\n ispikes = []\n tmin = []\n fsl = []\n fisi = []\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the interspike intervals, averages them, and returns the reciprocal of that average as the average spike frequency in Hz.
def avg_spike_frequency(t, V): intervals = interspike_intervals(t, V) try: raise_if_not_multiple_spikes(intervals) except NoMultipleSpikesException: return None avg_int = np.average(intervals) return 1/avg_int
[ "def mean_spike_frequency(t):\n interspike_times = np.diff(t)\n mean_interspike_time = np.mean(interspike_times)\n mean_frequency = 1000.0 / (\n mean_interspike_time\n ) # factor of 1000 to give frequency in Hz\n\n if math.isnan(mean_frequency):\n mean_frequency = 0\n return mean_fr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes average spike frequency for abf object and epoch index.
def avg_spike_frequency_abf(abf, epoch): p0 = abf.sweepEpochs.p1s[epoch] p1 = abf.sweepEpochs.p1s[epoch+1] t = abf.sweepX[p0:p1] V = abf.sweepY[p0:p1] return avg_spike_frequency(t, V)
[ "def spike_amplitude_abf(abf, t_spike, epoch_start=3):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n V = abf.sweepY[p0:-1]\n\n return spike_amplitude(V, t_spike)", "def get_aa_frequency(self):\n\t\treturn self.aa_count / float(self.get_num_calls()) if self.get_num_calls() > 0 else nan", "def mean_spike_fre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes maximum interspike frequency (equivalent to minimum interspike interval).
def max_spike_frequency(t, V): intervals = interspike_intervals(t, V) raise_if_not_multiple_spikes(intervals) min_int = np.amin(intervals) return 1/min_int
[ "def max_frequency(sig, FS):\n\n f, fs = plotfft(sig, FS)\n t = np.cumsum(fs)\n\n try:\n ind_mag = np.where(t > t[-1]*0.95)[0][0]\n except:\n ind_mag = np.argmax(t)\n f_max = f[ind_mag]\n\n return f_max", "def findMaximal(freqSet):", "def peak_frequency(self):\r\n try:\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes minimum interspike frequency (equivalent to maximum interspike interval).
def min_spike_frequency_tV(t, V): intervals = interspike_intervals(t, V) raise_if_not_multiple_spikes(intervals) max_int = np.amax(intervals) return 1/max_int
[ "def max_spike_frequency(t, V):\n intervals = interspike_intervals(t, V)\n raise_if_not_multiple_spikes(intervals)\n min_int = np.amin(intervals)\n return 1/min_int", "def mean_spike_frequency(t):\n interspike_times = np.diff(t)\n mean_interspike_time = np.mean(interspike_times)\n mean_freque...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether there are multiple spikes; otherwise raises an exception.
def raise_if_not_multiple_spikes(intervals): if len(intervals) < 1: raise NoMultipleSpikesException
[ "def checkForSpike(self):\n if self.Vtrace[-1]>=self.Vthreshold:\n self.__fire()", "def spikes(arr):\n arr = np.array(arr)\n if (arr.size == 0) or flat(arr) or monotonic(arr):\n return False\n arr = normalize(arr)\n spikes = np.where(arr > arr.mean())[0]\n rest = np.ones_li...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads a CSV and filters for division end types, an r value of at least 0.95, and at least 2 cells.
def load_and_filer(pwd,rval=0.95): df = pd.read_csv(pwd) df = rl.give_good_structure(df) df = df.loc[(df['end_type']=='DIVISION')|(df['end_type']=='DIV')|(df['end_type']=='div')] if 'length_box' in df.columns: #guillaume data df['time_sec'] = df['frame']*60*3 df['length_box_um'] = df['le...
[ "def filter_parameters(\n time_bin=60,\n LV_param_set_Index=1,\n LV_params=[\"u10\"],\n META_FILE=\"../data/ASAID_DATA_OVERVIEW - Sheet1.csv\",\n INTERPOLATE_limit=0,\n FILTER_LOD_OUTLIERS=True,\n):\n META_FILE = Path(META_FILE)\n\n META = pd.read_csv(META_FILE, sep=\",\")\n if LV_param_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }