content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def get_global_attributes(hdr):
    """Rudimentary parser for ENVI header file. Improve.

    Reads the header file at *hdr* and returns a dict of CF-style global
    attributes, plus a fixed ``Conventions`` entry of "CF-1.6".
    """
    with open(hdr,'r') as f:
        header = [ln.strip() for ln in f.readlines()]
    # NOTE(review): values are pulled from hard-coded line positions, so any
    # ENVI header with reordered or extra fields will silently misparse —
    # confirm the expected header layout.
    global_atts = dict(
        description = header[2][:-1],  # drops the trailing character (presumably a brace) — confirm
        samples = header[3].split("=")[1].strip(),
        lines = header[4].split("=")[1].strip(),
        bands = header[5].split("=")[1].strip(),
        data_type = header[8].split("=")[1].strip(),
        source_type = header[7].split("=")[1].strip(),
        interleave = header[9].split("=")[1].strip(),
        byte_order = header[10].split("=")[1].strip(),
        map_info = header[11].split("=")[1].strip(),
        wavelength_units = header[13].split("=")[1].strip(),
        missing_value = header[24].split("=")[1].strip())
    global_atts["Conventions"] = "CF-1.6"
    return(global_atts)
b236c58be6f2efde8f03ccd46cdc5f22e7830187
37,166
import time


def mwToEpoch(timestamp):
    """Parse a MediaWiki signature timestamp into a ``time.struct_time``.

    Tries the usual "HH:MM, DD Month YYYY" layout first, then a variant with
    seconds (some users, e.g. Pathoschild, include seconds in their
    signature). Returns None when neither layout matches.
    """
    for layout in ('%H:%M, %d %B %Y', '%H:%M:%S, %d %B %Y'):
        try:
            return time.strptime(timestamp, layout)
        except ValueError:
            continue
    return None
450d4c7c6dcdd5853a4a0df9b4def815aa405290
37,167
def passthrough_prop(prop_name, doc=None, obj_name="_cim_obj"):
    """Factory for a read-only property forwarding to an underlying
    ArcGIS Pro SDK object.

    :param prop_name: name of the attribute to read on the wrapped object
    :param doc: docstring for the generated property
    :param obj_name: attribute on ``self`` that holds the wrapped object
    :return: a getter-only ``property`` (setting/deleting raises
        ``AttributeError`` as for any read-only property)
    """
    def _get(self):
        try:
            obj = getattr(self, obj_name)
            return getattr(obj, prop_name)
        except AttributeError as ae:
            # chain the original error so the real cause is not hidden
            raise AttributeError("Unable to get the {} property on this instance of {}.".format(
                prop_name, self.__class__.__name__)) from ae

    # NOTE: the original also defined _set/_del helpers but never passed them
    # to property(), so the property was already read-only; that contract is
    # kept and the dead closures are dropped.
    return property(_get, None, None, doc)
8dca03c0e40296bb97c1b3442a97b22edfb098d5
37,168
def GetMetricsFromProcess(process):
    """Returns a list of dicts for our metrics"""
    # one labelset dict is shared by every metric entry
    labelset = {
        'processname': process['name'],
        'user': process['username']
    }

    def entry(metric, value, mtype=None):
        # build a single metric record; counters carry an extra 'type' key
        record = {'metric': metric, 'value': value, 'labelset': labelset}
        if mtype is not None:
            record['type'] = mtype
        return record

    return [
        entry('cpu_percent', process['cpu_percent']),
        entry('cpu_user', process['cpu_times_user'], 'counter'),
        entry('cpu_system', process['cpu_times_system'], 'counter'),
        entry('open_files', len(process['open_files'])),
        entry('fds', process['num_fds']),
        entry('context_switch_voluntary', process['num_ctx_switches_voluntary']),
        entry('threads', process['num_threads']),
        entry('memory_percent', process['memory_percent']),
        entry('memory_rss', process['memory_rss']),
        entry('memory_vms', process['memory_vms']),
        entry('memory_shared', process['memory_shared']),
    ]
370e85e919ae42371db3b99cc53fde5537c7ecfe
37,169
from pathlib import Path


def load_groups(input_path: Path) -> list:
    """Load customs-questionnaire responses grouped per travel group.

    Responses of one group sit on contiguous lines; groups are separated by a
    blank line. Each line is stripped of surrounding whitespace, empty lines
    are dropped, and groups that end up empty are discarded.

    Args:
        input_path [pathlib.Path]: path to input data

    Returns:
        list of lists: [ [group-responses] ]
    """
    with open(input_path, "r") as infile:
        raw_groups = infile.read().split("\n\n")

    groups = []
    for raw in raw_groups:
        cleaned = [line.strip() for line in raw.split("\n") if line != ""]
        responses = [entry for entry in cleaned if entry]
        if responses:
            groups.append(responses)
    return groups
f23eb0398757e749786a7ba1fdcb8dede18736fd
37,170
def currency_filter(value):
    """Outputs comma separated rounded off figure"""
    as_float = float(value)
    # round() on a float already yields an int in Python 3
    return "{:,}".format(int(round(as_float)))
129cf81cce1021ad0f2e78320abf893c8ffd9f71
37,171
def invertSelection():
    """invertSelection() -> None

    Selects all unselected nodes, and deselects all selected ones.

    @return: None.
    """
    # stub body: the visible implementation only returns None
    return None
f7fb48792a696f33492e44430878c037ce33b5b1
37,172
import numpy


def computek(x, L):
    """Return the actual k-vector given a reduced vector (x)."""
    scale = 2.0 * numpy.pi / L
    return numpy.array([scale * x[i] for i in range(3)])
b013f4e9da69e76d0601a3e854fdfb54e047be56
37,174
def get_trip_distance(distances_matrix, path):
    """Total length of a trip visiting cities in order.

    :param distances_matrix: Matrix of distances between cities, indexable as
        ``distances_matrix[i, j]`` (e.g. a numpy array)
    :param path: List of city indices
    :return: Trip distance (0 for an empty or single-city path)
    """
    # pairwise consecutive legs; replaces the `range(len(path))[1:]` anti-idiom
    return sum(distances_matrix[a, b] for a, b in zip(path, path[1:]))
aabb74cb8b876911e384eceb86c33f4114327fb2
37,175
def hello():
    """Hello World!

    :return: greeting page markup
    """
    heading = "<h1> Tinker Tank V: Python</h1>"
    body = "<p>Hola Amigos</p>"
    return heading + body
0154fdb833c990cc4ed85081fdd38e4cecd20cac
37,176
def _get_effect_size(effect_size,features,PD_indices,control_indices):
    """Takes in effect_size dataframe to populate

    Fills the rows 'PD mean/stdev/n' and 'control mean/stdev/n' with
    per-column statistics of *features* for the two index groups, then writes
    Hedges' g ((PD mean - control mean) / pooled stdev) into row 'Hedges g'.
    Mutates *effect_size* in place and also returns it.
    """
    effect_size.at['PD mean'] = features.loc[PD_indices].mean(axis=0)
    effect_size.at['PD stdev'] = features.loc[PD_indices].std(axis=0) #sample stdev
    effect_size.at['PD n'] = features.loc[PD_indices].count(axis=0)
    effect_size.at['control mean'] = features.loc[control_indices].mean(axis=0)
    effect_size.at['control stdev'] = features.loc[control_indices].std(axis=0) #sample stdev
    effect_size.at['control n'] = features.loc[control_indices].count(axis=0)
    # pooled standard deviation: sqrt of the n-1 weighted average of variances
    sd_pooled_numerator = (effect_size.loc['PD n']-1)*effect_size.loc['PD stdev']**2 + (effect_size.loc['control n']-1)*effect_size.loc['control stdev']**2
    sd_pooled_denominator = effect_size.loc['PD n'] + effect_size.loc['control n'] - 2
    sd_pooled = (sd_pooled_numerator/sd_pooled_denominator).pow(0.5)
    effect_size.at['Hedges g'] = (effect_size.loc['PD mean'] - effect_size.loc['control mean'])/sd_pooled
    return effect_size
4bf9e04b8afaa5509343397f24796edaf1b69fba
37,178
def gift_list(number):
    """Generates the list of gifts for a given verse

    Parameters
    ----------
    number: Integer
        The number of the verse we want the list for

    Returns
    -------
    string
        The list of gifts
    """
    gifts = {
        1: 'a Partridge in a Pear Tree',
        2: 'two Turtle Doves',
        3: 'three French Hens',
        4: 'four Calling Birds',
        5: 'five Gold Rings',
        6: 'six Geese-a-Laying',
        7: 'seven Swans-a-Swimming',
        8: 'eight Maids-a-Milking',
        9: 'nine Ladies Dancing',
        10: 'ten Lords-a-Leaping',
        11: 'eleven Pipers Piping',
        12: 'twelve Drummers Drumming'
    }
    if number == 1:
        return gifts[1]
    # count down from the verse's gift to gift 2, then append gift 1
    later_gifts = [gifts[n] for n in range(number, 1, -1)]
    return ', '.join(later_gifts) + f', and {gifts[1]}'
20c9cf10f80e8ee0650721253b31554ed0cda6a4
37,179
def get_adjusted_unsuccessful(row, outcome_col, num_unsuccess_col):
    """Returns adjusted number of unsuccesful contacts given column of
    outcome and num_unsuccess
    """
    num_unsuccess = row[num_unsuccess_col]
    if row[outcome_col] != 0:
        return num_unsuccess
    # outcome 0: discount the first three unsuccessful contacts, floor at zero
    return num_unsuccess - 3 if num_unsuccess >= 3 else 0
77f0b0d46269bfe299d8791c04a152a1d2df27ed
37,180
def triangular_force(t_start, t_max, t_end, f_max):
    """Build a triangular pulse force function.

    The returned callable rises linearly from 0 at ``t_start`` to ``f_max``
    at ``t_max``, falls linearly back to 0 at ``t_end``, and is 0 elsewhere.

    Parameters
    ----------
    t_start: float
        time where triangular force starts to raise
    t_max: float
        time where force reaches maximum
    t_end: float
        time where force is gone to zero again
    f_max: float
        maximum value of force

    Returns
    -------
    f: callable
        function f(t)
    """
    rise_span = t_max - t_start
    fall_span = t_end - t_max

    def f(t):
        if t_start < t <= t_max:
            return f_max * (t - t_start) / rise_span
        if t_max < t < t_end:
            return f_max * (1.0 - (t - t_max) / fall_span)
        return 0.0

    return f
91a69292b9ff641a9b47c280438f64c13ba89bc9
37,181
import argparse


def parse_arguments():
    """ Parse command line arguments

    Returns a 6-tuple:
        (err_meths, remove_outliers: bool, loose: bool,
         estimates: list of bool, diff: bool, draw: bool)
    """
    parser = argparse.ArgumentParser(description='Parameters for the experiment')
    parser.add_argument('-e', '--err-meths', default=["mean_err"], nargs="+", help="Functions to computer errors.")
    parser.add_argument('-o', '--outlier-removal', default="true", choices=["true", "false"], help="Whether to remove outliers or not.")
    parser.add_argument('-l', '--loose', action='store_true')
    parser.add_argument('-d', '--diff', action='store_true', help="Store the diffs")
    parser.add_argument('-s', '--estimate', default=["True"], nargs="+")
    parser.add_argument('-w', '--draw', action='store_true', help="Whether to generate diagrams")
    args = parser.parse_args()
    # parser.print_help()
    # raise Exception("")
    # string flags arrive as text; fold "true"/"True" (any case) to real bools
    estimates = [e.lower() == "true" for e in args.estimate]
    return args.err_meths, args.outlier_removal == "true", args.loose, estimates, args.diff, args.draw
c9dc5f7a1f3306368870b779a698aeec4a1a1569
37,183
def open_text_file(path: str) -> str:
    """Read a text file and return its entire contents as a string.

    :param path: path of the file to read
    :return: the file contents
    """
    # context manager guarantees the handle is closed (the original leaked it)
    with open(path, "r") as f:
        return f.read()
01d7c1a8637432d5fbc8372f2773e7ee2f0dcfce
37,184
from datetime import datetime


def FormattedTime(DateTimeObject=None) -> str:
    """Return a formatted "HH:MM:SS" time string.

    :param DateTimeObject: datetime to format; defaults to ``datetime.now()``
    :return: zero-padded 24-hour time string
    """
    # idiom fix: `is None` instead of `== None` for the sentinel check
    if DateTimeObject is None:
        DateTimeObject = datetime.now()
    return DateTimeObject.strftime("%H:%M:%S")
51d431e40691371579530419aebd19de4655455c
37,185
import re
import click


def parse_port(port: str) -> str:
    """Check if port is valid.

    Parameters
    ----------
    port : str
        Possible port name.

    Returns
    -------
    port : str
        Valid port name

    Raises
    ------
    click.BadParameter
        If ``port`` is not a 1-4 digit number not starting with zero.
    """
    if re.fullmatch(r'^[1-9][0-9]{,3}', port) is None:
        raise click.BadParameter(
            'ERROR: Use up to four digits not starting with zero.')
    return port
e6acb439117f0d30ec161875371aa7e7b9f10c76
37,187
import toml
from typing import MutableMapping
import os


def get_meta() -> MutableMapping:
    """Get project metadata from pyproject.toml file.

    Reads ``../pyproject.toml`` relative to the directory of this module.

    Returns:
        MutableMapping
    """
    # pyproject.toml is expected one directory above this module
    toml_path = os.path.join(os.path.dirname(__file__), "..", "pyproject.toml")
    with open(toml_path) as fopen:
        pyproject = toml.load(fopen)
    return pyproject
2ecdee3fe55539177f50504b671833c8f837ef9b
37,189
def tuple_keys_to_str(dictionary):
    """Converts tuple keys to str keys

    Each tuple key becomes the '_'-joined string of its items; int items are
    stringified first, everything else is passed through unchanged.
    """
    converted = {}
    for key, value in dictionary.items():
        # only exact ints are converted (bool deliberately not included,
        # matching the original type check)
        parts = [str(item) if type(item) == int else item for item in key]
        converted['_'.join(parts)] = value
    return converted
13939b3fdedf64c198739247defb79d655ad48a8
37,191
import struct


def hexstr_to_bytearray(a):
    """Return hex string packed into a binary struct.

    Consumes complete two-character pairs; a trailing odd nibble is ignored,
    matching the original pair-count loop.
    """
    pair_starts = range(0, 2 * (len(a) // 2), 2)
    return b"".join(struct.pack("B", int(a[i:i + 2], 16)) for i in pair_starts)
bab157787ea4973f79049b07e6308e42ac291723
37,193
import sys


def split_contigs(chrom_segments, gaps_file):
    """ Split the assembly up into contigs defined by the gaps.

    Args:
      chrom_segments: dict mapping chromosome names to lists of (start,end)
      gaps_file: file specifying assembly gaps

    Returns:
      chrom_segments: same, with segments broken by the assembly gaps.
    """
    chrom_events = {}

    # add known segments; each chromosome must carry exactly one segment here
    for chrom in chrom_segments:
        if len(chrom_segments[chrom]) > 1:
            print("I've made a terrible mistake...regarding the length of chrom_segments[%s]" % chrom, file=sys.stderr)
            exit(1)
        cstart, cend = chrom_segments[chrom][0]
        # NOTE(review): the mixed capitalization ('Cstart', 'Gend' vs 'cend',
        # 'gstart') makes uppercase tags sort before lowercase ones at the
        # same coordinate — presumably a deliberate tie-break; confirm.
        chrom_events.setdefault(chrom, []).append((cstart, 'Cstart'))
        chrom_events[chrom].append((cend, 'cend'))

    # add gaps (BED-like: chrom, start, end in the first three columns)
    # NOTE(review): the gaps file handle is never closed.
    for line in open(gaps_file):
        a = line.split()
        chrom = a[0]
        gstart = int(a[1])
        gend = int(a[2])

        # consider only if its in our genome
        if chrom in chrom_events:
            chrom_events[chrom].append((gstart, 'gstart'))
            chrom_events[chrom].append((gend, 'Gend'))

    for chrom in chrom_events:
        # sort
        chrom_events[chrom].sort()

        # read out segments: a contig spans from a chromosome/gap-end event to
        # the next gap-start/chromosome-end event
        chrom_segments[chrom] = []
        for i in range(len(chrom_events[chrom]) - 1):
            pos1, event1 = chrom_events[chrom][i]
            pos2, event2 = chrom_events[chrom][i + 1]
            event1 = event1.lower()
            event2 = event2.lower()
            shipit = False
            if event1 == 'cstart' and event2 == 'cend':
                shipit = True
            elif event1 == 'cstart' and event2 == 'gstart':
                shipit = True
            elif event1 == 'gend' and event2 == 'gstart':
                shipit = True
            elif event1 == 'gend' and event2 == 'cend':
                shipit = True
            elif event1 == 'gstart' and event2 == 'gend':
                # inside a gap: nothing to emit
                pass
            else:
                print(str(pos1)+' '+str(pos2)+ "I'm confused by this event ordering: %s - %s" % (event1, event2), file=sys.stderr)
                exit(1)
            if shipit and pos1 < pos2:
                chrom_segments[chrom].append((pos1, pos2))

    return chrom_segments
90032b07a8f85796cd81827315af256724bbb2ff
37,195
def get_nlp_bib_neotoma(nlp_sentences, bibliography, neotoma_summary):
    """Merge the three main datasets into preprocessed frames.

    Parameters
    ----------
    nlp_sentences : pd.DataFrame
        NLP Sentences database information
    bibliography : pd.DataFrame
        Bibliography database information
    neotoma_summary : pd.DataFrame
        neotoma_summary database information

    Returns
    -------
    nlp_bib : pd.DataFrame
        NLP sentences joined with bibliography on 'gddid'
    preprocessed_df : pd.DataFrame
        nlp_bib further joined with neotoma_summary on 'doi'
    """
    joined_on_gddid = nlp_sentences.merge(bibliography, on='gddid')
    joined_on_doi = joined_on_gddid.merge(neotoma_summary, on='doi')
    return joined_on_gddid, joined_on_doi
0c706502b79de5e10736192dfd6e6aa05593e61d
37,196
def distance(x_1, x_2):
    """Calculate distance between sets.

    Args:
        x_1, x_2: sets.

    Returns:
        distance (size of the symmetric difference:
        |x_1| + |x_2| - 2*|x_1 ∩ x_2|).
    """
    overlap = x_1.intersection(x_2)
    return len(x_1) + len(x_2) - 2 * len(overlap)
d97bea2a322c1749ec85f28e3712e2117c16dd76
37,197
import sys


def get_new_id(iterator):
    """Returns a new id that is not in the given iterator

    Ids are drawn from a fixed 66-character alphabet; when every candidate is
    already taken the program reports the overflow and exits.
    """
    letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789ñÑçÇ'
    candidate = next((ch for ch in letters if ch not in iterator), None)
    if candidate is not None:
        return candidate
    sys.stderr.write("Too many different chains given. The program can only handle modeling"
                     " a maximum of 66 different sequences")
    exit(1)
19b09342ed25482971c6bfdebd3a02e99b6561c4
37,198
def _validate_fixed_params(fixed_params, spec_param_names): """ Check that keys in fixed_params are a subset of spec.param_names except "sigma2" Parameters ---------- fixed_params : dict spec_param_names : list of string SARIMAXSpecification.param_names """ if fixed_params is None: fixed_params = {} assert isinstance(fixed_params, dict) fixed_param_names = set(fixed_params.keys()) valid_param_names = set(spec_param_names) - {"sigma2"} invalid_param_names = fixed_param_names - valid_param_names if len(invalid_param_names) > 0: raise ValueError( f"Invalid fixed parameter(s): {sorted(list(invalid_param_names))}." f" Please select among {sorted(list(valid_param_names))}." ) return fixed_params
6dc784f1593b03dc5862a554071d2000f37c1350
37,200
def dims():
    """Number of dimensions"""
    dimension_count = 4
    return dimension_count
8311bde5b356372c22a2644f228697f20fed2e53
37,201
import string


def num2alpha(num, maxNumberOfOptions=8):
    """Turn an integer into a letter: 1 -> A, 2 -> B, etc.

    Values that cannot be coerced to an int, or that fall outside
    1..maxNumberOfOptions, are returned unchanged.

    :param num: input, presumably an integer but can be anything
    :param maxNumberOfOptions: the max integer to convert
    :return: A letter, or the num itself
    """
    if not isinstance(num, int):
        try:
            num = int(num)
        except ValueError:
            return num
    mapping = dict(zip(range(1, maxNumberOfOptions + 1), string.ascii_uppercase))
    if num in mapping:
        return mapping[num]
    return num
140d14a2510d0b1ceb0a7a36e0da30ebb1c1d060
37,202
def find_link_joints(model, link_name):
    """Find the joints attached to a given link

    Parameters
    ----------
    model : <ModelSDF>
        SDF model
    link_name : <str>
        Name of the link in the sdf

    Returns
    -------
    out : <tuple>
        Tuple of joint names attached to the link
    """
    attached = []
    for joint in model.joints:
        if joint.parent == link_name:
            attached.append(joint.name)
    return tuple(attached)
1d09cdf09889e19b7d8b911686ce90765d772a1c
37,203
def polyfill_filename(api):
    """Gets the filename associated with an API polyfill.

    Args:
        api: String name of API.

    Returns:
        Filename of API polyfill.
    """
    return f"{api}.polyfill.js"
efb54fddafb846985c77e44837f7a24c57596581
37,204
def standardize_survey_list(row):
    """Lower-case and strip each survey response, then de-duplicate.

    Args:
        row (list): A list of survey results

    Returns:
        list: standardized survey results (duplicate entries collapse; order
        follows set iteration, as in the original)
    """
    normalized = {entry.lower().strip() for entry in row}
    return list(normalized)
cf1e627e25a1dc98d8748a8bc1e0b78e420eb043
37,205
def subtitle():
    """Return a sample subtitle string."""
    sample = "this is a subtitle"
    return sample
85c9d4f87628f59f0a7a37dc896c7aad2704aa01
37,206
def make_ucsc_chr(interval):
    """Converts interval from ENSEMBL chroms to UCSC chroms (appends str to
    each chrom)

    Mutates ``interval.chrom`` in place by prefixing "chr" and returns the
    same interval object.
    """
    interval.chrom = "chr{}".format(interval.chrom)
    return interval
d5878670494dc51e29922d43a605634f26cd8e00
37,207
def round_filters(filters, width_coefficient, depth_divisor):
    """Scale a channel count and round it to a multiple of *depth_divisor*.

    The count is multiplied by *width_coefficient*, rounded to the nearest
    multiple of *depth_divisor* (at least one divisor), and bumped up one
    divisor if rounding dropped below 90% of the scaled value.
    """
    scaled = filters * width_coefficient
    nearest_multiple = int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    nearest_multiple = max(depth_divisor, nearest_multiple)
    # never round down by more than 10% of the scaled channel count
    if nearest_multiple < 0.9 * scaled:
        nearest_multiple += depth_divisor
    return int(nearest_multiple)
406c0bb9adf52bea220176f10e3a459f351f8aaf
37,208
def percent_bias(observed, modeled):
    """Calculates percent bias

    Arguments:
        observed (numpy.ndarray): historic observational data
        modeled (numpy.ndarray): model output with matching time index
    """
    residual_total = sum(modeled - observed)
    return 100 * (residual_total / sum(observed))
cca7c1b8c9c699330957fcc4e5dfa8aea3d23ab5
37,209
from typing import Any


def verbatim(text: Any) -> str:
    """Return the main string argument as-is.

    Raises TypeError if the input is not a string.
    """
    if isinstance(text, str):
        return text
    raise TypeError("argument to verbatim must be string")
0f735833f1514f0f5bd45efc07af783ffc72cbd3
37,210
def format_bool(value):
    """Converts boolean to yes/no string."""
    if value is None:
        return 'bez dat'
    if value:
        return 'Ano'
    return 'Ne'
04aa58c8d280bc90c52169bf563650075963d6a2
37,211
import torch


def dummy_forward_pass(model, inp_shape):
    """Run one gradient-free forward pass on random input of *inp_shape*.

    Puts the model in eval mode first; returns the model output.
    """
    model.eval()
    with torch.no_grad():
        return model(torch.randn(inp_shape))
b316bba7a2d94cd3d4413878263a306f9d20beca
37,212
def get_mappings(all_tweets_itr):
    """Returns a tuple with two dictionaries. The first is a mapping
    username -> userid, where username is the one @foo, with @. The second is
    a mapping tweetid -> userid of the creator"""
    username_to_id = {}
    tweet_to_author = {}
    print(" [*] Extracting user mappings")
    for tweet in all_tweets_itr:
        author = tweet["user"]
        username_to_id["@" + author["screen_name"]] = author["id"]
        tweet_to_author[tweet["id"]] = author["id"]
    return username_to_id, tweet_to_author
7794676f45a9610b48e569c2c115faa04dc48904
37,214
def get_message_source_from_event(event):
    """Helper function to get the message source from an EventHub message"""
    annotation_key = "iothub-message-source".encode()
    return event.message.annotations[annotation_key].decode()
37d8c0aa2304f930c8e1507bf5a2355e3ccb66e9
37,215
def port(unused_tcp_port):
    """ returns an unused tcp port """
    # NOTE(review): looks like a pytest fixture that simply re-exposes the
    # `unused_tcp_port` fixture under the name `port` — confirm it is
    # decorated with @pytest.fixture at the call site.
    return unused_tcp_port
1b29d11fa0723e5a4dbab63613de1feb644fc8dd
37,218
def recover_bps(delt, bps, bps_star):
    """
    delt, bps, bps_star - seqlen+1 x bsz x K
    returns:
    bsz-length list of lists with (start_idx, end_idx, label) entries

    Backtrace of a segmental decoding: `delt` holds per-position scores,
    `bps` the best segment length ending at each position/label, and
    `bps_star` the best previous label at each boundary.
    """
    seqlenp1, bsz, K = delt.size()
    seqlen = seqlenp1 - 1
    seqs = []
    for b in range(bsz):
        seq = []
        # start from the best-scoring label at the final position
        _, last_lab = delt[seqlen][b].max(0)
        last_lab = last_lab.item()
        curr_idx = seqlen # 1-indexed
        while True:
            # walk back one segment of length `last_len`
            last_len = bps[curr_idx][b][last_lab]
            last_len = int(last_len)
            seq.append((curr_idx-last_len, curr_idx, last_lab)) # start_idx, end_idx, label, 0-idxd
            # print((curr_idx-last_len, curr_idx, last_lab))
            curr_idx -= last_len
            if curr_idx == 0:
                break
            last_lab = bps_star[curr_idx][b][last_lab]
        # segments were collected right-to-left; reverse into forward order
        seqs.append(seq[::-1])
    return seqs
1f0adca5aa25ae818b8738ef0bfefb0a6f44fe6c
37,220
def _ops_equal(op1, op2): """Checks if two operators are equal up to class, data, hyperparameters, and wires""" return ( op1.__class__ is op2.__class__ and (op1.data == op2.data) and (op1.hyperparameters == op2.hyperparameters) and (op1.wires == op2.wires) )
dd73cc9f89ec0fc8540043b7982a6c5b6fc8609b
37,222
def _offset_rounding_error(figure): """ """ fig = [] pre_part = [] for line in figure: if not pre_part == []: fig.append((pre_part[3], line[1], line[2], line[3])) else: fig.append(line) pre_part = line return fig
0b3f8fa628ae1b0d51c5c0a06c381eee5769c5aa
37,223
from bs4 import BeautifulSoup


def fancy_html(s, apply_p_blocks=True):
    """ Accepts a string; returns fancy-ass HTML with all of our cool stuff.

    Optionally wraps each non-empty line in <p> blocks, turns bare https://
    tokens into anchors, and tags every anchor with the 'fancy' class and
    target="top".
    """
    s = str(s)
    if apply_p_blocks:
        raw = "".join(["<p>%s</p>" % p for p in s.split('\n') if p != ''])
    else:
        raw = str(s)
    # expand links -- do this before we get to HTML junk
    raw_list = raw.split(" ")
    for word in raw_list:
        if word[:8] == 'https://':
            # NOTE(review): list.index returns the FIRST occurrence, so a
            # duplicated URL token resolves to an earlier position — confirm
            # whether duplicate links can occur.
            index = raw_list.index(word)
            url = word
            # drop a trailing comma/period from the href, keep it in the text
            if url[-1] in [',','.']:
                url = url[:-1]
            raw_list[index] = '<a href="%s">%s</a>' % (url,word)
    raw = " ".join(raw_list)
    soup = BeautifulSoup(raw, 'html.parser')
    # fix anchors
    for anchor in soup.find_all('a'):
        if 'fancy' not in anchor.get('class', []):
            anchor['class'] = anchor.get('class', []) + ['fancy']
            anchor['target'] = 'top'
    return str(soup)
2f077eeb45760feec9306737510e620b6214720b
37,224
import requests


def extract_wiki_text(wikipedia_title, language='en'):
    """Get the text of the given wikipedia article.

    Queries the MediaWiki "extracts" API of the given language edition and
    concatenates the plain-text extract of every returned page. Returns an
    empty string when the HTTP request does not return 200.
    """
    base_url = ('https://' + language + '.wikipedia.org/w/api.php?action=query' +
                '&prop=extracts&format=json&explaintext=' +
                '&exsectionformat=plain&titles=')
    text = ''
    r = requests.get(base_url + wikipedia_title)
    if r.status_code == 200:
        pages = r.json()['query']['pages']
        # NOTE(review): a missing page has no 'extract' key and would raise
        # KeyError here — confirm titles are always valid.
        for (id, page) in pages.items():
            text += page['extract']
    return text
231bfec347c0a92b19081bb852feb3b6c7674413
37,225
from typing import Dict
from typing import Any
import torch


def load_to_cpu(path: str) -> Dict[str, Any]:
    """
    This is just fairseq's utils.load_checkpoint_to_cpu(), except we don't try
    to upgrade the state dict for backward compatibility - to make cases
    where we only care about loading the model params easier to unit test.
    """
    # map_location forces every tensor onto CPU regardless of the device it
    # was saved from
    state = torch.load(
        path,
        map_location=(
            lambda s, _: torch.serialization.default_restore_location(s, "cpu")
        ),
    )
    return state
aa30621b7230a3f22afbfd3db50f74d8f385493a
37,226
def enfold(dt, fold=1):
    """Return *dt* with its ``fold`` attribute set to *fold*.

    Provides a unified interface for assigning the ``fold`` attribute; this
    implementation targets Python 3.6+, where ``datetime`` supports ``fold``
    natively (PEP 495), so it is a plain ``replace``.

    :param fold: The value for the ``fold`` attribute in the returned
        datetime. This should be either 0 or 1.
    :return: a datetime equal to *dt* with ``fold`` set.

    .. versionadded:: 2.6.0
    """
    return dt.replace(fold=fold)
4121810bf2e351963976ceca161cab941c2b7f7c
37,228
import os
import json


def read_import_json(link2kernel_path):
    """Collect already-imported article records, keyed by their "pid".

    Scans *link2kernel_path* for ``imported.*.json`` files; each file holds
    one JSON object per line, e.g.:

    {"pid_v3": "MXyDP7F69jfTyZRKHgYKq4g", "eissn": "1678-992X",
     "issn": "1678-992X", "acron": "sa", "pid": "S0103-90161999000100009",
     "year": "1999", "volume": "56", "number": "1", "order": "00009"}
    """
    imported = {}
    for filename in sorted(os.listdir(link2kernel_path)):
        name, ext = os.path.splitext(filename)
        # only files named like "imported.<something>.json"
        if ext == ".json" and name.startswith("imported."):
            file_path = os.path.join(link2kernel_path, filename)
            with open(file_path, "r") as fp:
                items = fp.read().splitlines()
            for item in items:
                data = json.loads(item)
                # later files overwrite earlier entries with the same pid
                imported[data["pid"]] = data
    return imported
0db3998bf595d3bfc2e4c65810be1db2efa6f0d5
37,229
def name_body(data_path, marker_list):
    """Prefix each marker name with the subject id taken from *data_path*.

    The subject id is the 4th dash-separated token of the file name (the part
    after the last backslash); each marker is rewritten in place to
    "SUBJ:marker" and the same list is returned. (Needed for loading specific
    markers from a file.)
    """
    filename = data_path.split('\\')[-1]
    prefix = filename.split('-')[3] + ":"
    for idx, marker in enumerate(marker_list):
        marker_list[idx] = prefix + marker
    return marker_list
51f1a7ebf9878c2da712b2aa10f312ce0dc5e1f5
37,230
def parse_xml_metadata(xml_dic, xml_node):
    """Return the XML Metadata.

    Copies recognized child entries of *xml_node* into *xml_dic*: each child
    is matched on its ``Name`` attribute and the stored value comes from the
    child's ``Version`` or ``Value`` attribute depending on the entry.
    Mutates and returns *xml_dic*.
    """
    # (output key, source attribute) per recognized Name
    known = {
        'cl.exe': ('compiler_version', 'Version'),
        'VisualStudio': ('visual_studio_version', 'Version'),
        'VisualStudioEdition': ('visual_studio_edition', 'Value'),
        'OperatingSystem': ('target_os', 'Version'),
        'Microsoft.Build.AppxPackage.dll': ('appx_dll_version', 'Version'),
        'ProjectGUID': ('proj_guid', 'Value'),
        'OptimizingToolset': ('opti_tool', 'Value'),
        'TargetRuntime': ('target_run', 'Value'),
    }
    # iterate the node directly: Element.getchildren() was deprecated and
    # removed from xml.etree in Python 3.9
    for child in xml_node:
        entry = known.get(child.get('Name'))
        if entry is not None:
            key, attr = entry
            xml_dic[key] = child.get(attr)
    return xml_dic
b1bd0140d478a5b701a17be7f5906b2b6a7db45d
37,231
def bootstrap_predictions(train_x, train_y, test_x, model, boot_idx=None, permute_idx=None): """ fits a model to training data and predicts output using test data. features are selected with boot_idx. Optional permutation of rows for significant testing. train_x, test_x: dataframes containing train and test data train_y: target value for model fitting model: sklearn regressor object, or Pipeline boot_idx: features to use for model permute_idx: permutation index to shuffle training data returns: pred: bootstrapped predictions of test data """ # fit model with bootstrapped genes if boot_idx is None: if permute_idx is None: model.fit(train_x, train_y) else: model.fit(train_x.iloc[permute_idx,:], train_y) pred = model.predict(test_x) else: if permute_idx is None: model.fit(train_x.iloc[:,boot_idx], train_y) pred = model.predict(test_x.iloc[:,boot_idx]) else: model.fit(train_x.iloc[permute_idx,boot_idx], train_y) pred = model.predict(test_x.iloc[:,boot_idx]) return pred
dbe328094a2eda06a048f0d3cdcee182e8558db2
37,232
def bag_of_words(text):
    """Returns bag-of-words representation of the input text.

    Args:
        text: A string containing the text.

    Returns:
        A dictionary of strings to integers.
    """
    counts = {}
    for token in text.lower().split():
        if token in counts:
            counts[token] += 1
        else:
            counts[token] = 1
    return counts
75993f3bd5fb20e3015afee499a14bf1574902ca
37,234
def find_list_string(name, str_list, case_sensitive=True, as_prefix=False, first_matched=False):
    """
    Find `name` in the string list.

    Comparison parameters are defined in :func:`string_equal`. If
    ``first_matched==True``, stop at the first match; otherwise if multiple
    occurrences happen, raise :exc:`ValueError`.

    Raises :exc:`KeyError` when no entry matches.

    Returns:
        tuple ``(index, value)``.
    """
    if not case_sensitive:
        lookup_name=name.lower()
    else:
        lookup_name=name
    found_name=None
    for i,s in enumerate(str_list):
        # normalize the candidate the same way as the lookup name
        if not case_sensitive:
            lookup_s=s.lower()
        else:
            lookup_s=s
        if as_prefix:
            sat=lookup_s.startswith(lookup_name)
        else:
            sat=(lookup_s==lookup_name)
        if sat:
            if found_name is None:
                found_name=(i,s)
                if first_matched:
                    break
            else:
                # second hit without first_matched: ambiguous lookup
                raise ValueError("{0} and {1} both satisfy name {2}".format(found_name[1],s,name))
    if found_name is None:
        raise KeyError("can't find name in the container: {0}".format(name))
    return found_name
f485ae3cdebcee9e4cfe85378c38f7f27d737b96
37,236
def rmRTs(tokens):
    """Remove RTs"""
    return list(filter(lambda tok: tok != "rt", tokens))
00df94bcfba29784bae736bb41c964cea7589657
37,237
def mapRange(value, inMin, inMax, outMin, outMax):
    """Linearly remap *value* from [inMin, inMax] onto [outMin, outMax]
    (used to interpret the steam-jack float values)."""
    fraction = (value - inMin) / (inMax - inMin)
    return outMin + (fraction * (outMax - outMin))
071d7de4fa9c790c0bbdcfa2cc69392779c3aa79
37,238
import requests


def get(url, cb, cert=None, errorCb=None):
    """ Get json from url and return the processed data through cb or errorCb

    NOTE(review): `cb` and `errorCb` are accepted but never invoked here —
    the parsed JSON is returned directly; confirm whether callers rely on
    callback-style delivery.
    """
    # only use client certificates when the cert config enables ssl
    if not cert or not cert['ssl']:
        r = (requests.get(url)).json()
    else :
        r = (requests.get(url,cert=(cert['cert'],cert['key']))).json()
    return r
f4786ca062af596152db06a3e4456740c5e6b9cf
37,239
def create_timeseries(list_ens, units, start_date, end_date, monthly=False, save_out=None, cov=None):
    """Analyse the given data by computing a timeseries (assumes grid/sample
    point).

    :param list_ens: the list of ensembles (dicts) containing the data of the
        climate variables
    :param units: the units matching to each variable
    :param start_date: extract data from this date on
    :param end_date: extract data up to this date
    :param monthly: data is stored in monthly increments (time = 12) else
        assumed (time = 365)
    :param save_out: if set, then save output of histogram/timeseries
    :param cov: if set, then perform covariance analysis
    :return: None
    """
    # placeholder body: the visible implementation only returns None
    return None
a0461284148bf024550e21d3bdde39300412d57b
37,240
def test_attr(attr, typ, name): """Tests attribute. This function tests if the attribute ``attr`` belongs to the type ``typ``, if not it gives an error message informing the name of the variable. """ try: return typ(attr) except: raise TypeError('"{}" keyword must be a {} object'.format(name, typ))
28e0bdad00117cca25a67057db2f1525387cf39d
37,241
def get_phylogenetic_weight(node):
    """Calculate the weight at a phylogenetic node.

    Args:
        node (`Node`): The phylogenetic node.

    Returns:
        float: (weight of the parent) / (number of the parent's children);
        the root has weight 1.0.
    """
    # iterative walk to the root instead of recursion
    weight = 1.0
    current = node
    while current.parent is not None:
        weight /= len(current.parent.children)
        current = current.parent
    return weight
2a57eb9cac2f5939d13552ea1e99e8dc99313db9
37,242
def unify(comparisons, weights='actual', threshold=0.5):
    """Unify all comparisons in one way and with appropriate weights.

    Each comparison (a, b, rel, w) is rewritten so that 0 means "=" and
    2 means "<":
        rel 0 (a = b)  -> (a, b, 0, .) and (b, a, 0, .)
        rel 1 (a < b)  -> (b, a, 2, .)
        rel 2 (a > b)  -> (a, b, 2, .)

    The weight is either:
        - 'actual': the given weight of the human judgement, or
        - 'thresholded': 1 for comparisons whose weight exceeds *threshold*
          (all others are dropped).
    """
    def one_way(c, weight):
        # translate a single comparison into its unified tuple(s)
        if c[2] == 0:
            return [(c[0], c[1], 0, weight), (c[1], c[0], 0, weight)]
        if c[2] == 1:
            return [(c[1], c[0], 2, weight)]
        if c[2] == 2:
            return [(c[0], c[1], 2, weight)]
        raise Exception('Expecting 0,1,2 as comparison, got', c[2])

    unified = []
    if weights == 'actual':
        for c in comparisons:
            unified.extend(one_way(c, c[3]))
    elif weights == 'thresholded':
        print("Using threshold", threshold)
        for c in comparisons:
            if c[3] > threshold:
                unified.extend(one_way(c, 1))
    else:
        raise Exception("Method", weights, "to apply for the weights "
                        "is not known.")
    return unified
e2a4b8c85d6d50676beef8ce4b0ff5eb35dd7496
37,243
def clean_queries(df):
    """Returns the input DataFrame of queries cleaned"""
    # keep only queries of at least two characters
    long_enough = df['querystring'].str.len() > 1
    return df[long_enough]
1b120bacc0c9982bafa60e2e3d8e451ce622b1b4
37,244
from typing import Tuple


def split_arr(
    arr: list, arr_len: int, ind: int, arr1: list, arr2: list
) -> Tuple[bool, list, list]:
    """
    Backtrack: assign each element of *arr* to either *arr1* or *arr2*,
    searching for an assignment where both halves have equal sums.
    Returns (found, arr1, arr2).
    """
    if ind == arr_len:
        # all elements placed: check the equal-sum condition
        return (sum(arr1) == sum(arr2), arr1, arr2)
    placed_left = split_arr(arr, arr_len, ind + 1, arr1 + [arr[ind]], arr2)
    if placed_left[0]:
        return placed_left
    return split_arr(arr, arr_len, ind + 1, arr1, arr2 + [arr[ind]])
d604d0bb1ce91e7aca435eb06f971503adffaa7d
37,245
import json
def create_button(action, payload, label, display_mode='popup', disabled=False):
    """Build the HTML for an interactive CloudWatch custom-widget button.

    Parameters:
        action (string): ARN of the Lambda function invoked on click
        payload: object serialized to JSON and passed to the Lambda
        label (string): button caption
        display_mode (string): 'popup' or 'widget'
        disabled (boolean): render a disabled button when True

    Returns:
        string: HTML for the button plus its <cwdb-action> element
    """
    # Pick the CSS class / disabled attribute pair for the requested state.
    if disabled:
        css_class, disabled_attr = 'awsui-button-disabled', 'disabled=""'
    else:
        css_class, disabled_attr = 'awsui-button awsui-button-variant-primary', ""

    anchor = f'<a class="btn {css_class}" {disabled_attr}>{label}</a>\n'
    cwdb = (
        '<cwdb-action '
        f'action="call" '
        f'endpoint="{action}" '
        f'display="{display_mode}">'
        f'{json.dumps(payload)}'
        '</cwdb-action>\n'
    )
    return anchor + cwdb
33d7255888b956275e9783ee91c297b81fdc6ae7
37,246
def order_validation(order):
    """Return ``order`` unchanged, raising ValueError when it is below 2."""
    if order < 2:
        raise ValueError("An order lower than two is not allowed.")
    return order
ee9f9c64c30a9625246d5e139c94a53d144270db
37,247
def isBalanceTree2(root) -> bool:
    """Level-order (BFS) balance check that can stop early.

    Walks the tree level by level, recording the depth of the first leaf
    seen (``minDepth``).  Once a second leaf is found at a later level the
    traversal is cut short, and the tree is reported balanced iff the
    current depth is within 1 of the first leaf's depth.

    NOTE(review): assumes nodes expose ``left``/``right`` attributes —
    confirm against the tree node class used by callers.
    NOTE(review): the ``stack = []`` before ``break`` is immediately
    overwritten by ``stack = tmp[:]`` below, so the intended early
    termination may not trigger when ``tmp`` is non-empty — verify.
    """
    if not root:
        return True  # an empty tree is balanced by convention
    stack = [root]
    minDepth = 0  # depth of the first (shallowest) leaf found; 0 = not yet seen
    curDepth = 0
    while stack:
        curSize = len(stack)
        tmp = []  # children collected for the next level
        curDepth += 1
        while curSize != 0:
            curRoot = stack.pop()
            if not curRoot.left and not curRoot.right:
                # curRoot is a leaf.
                if minDepth == 0:
                    minDepth = curDepth
                else:
                    # A later leaf: try to abort the traversal.
                    stack = []
                    break
            if curRoot.left:
                tmp.append(curRoot.left)
            if curRoot.right:
                tmp.append(curRoot.right)
            curSize -= 1
        stack = tmp[:]
        # print(stack, curDepth)
    # print(curDepth, minDepth)
    # Balanced when the deepest level reached is within 1 of the first leaf.
    return [False, True][curDepth-minDepth <= 1]
c65dda0db6f38742e494c1539215dfe7203c03c7
37,248
def max_ones(coral):
    """
    Return the percentage of 1's in ``coral``.

    Input:
        - coral: sequence of 0/1 values (a list; a numpy array would
          also work)
    Output:
        - fitness: percentage in [0, 100]
    """
    ones = sum(coral)
    return 100 * (ones / len(coral))
5d5b61786b76e5f1c520ab47032c9935ab18cf09
37,250
import requests
import io
import zipfile
def download_zip(url, to_path):
    """Download a zip archive from ``url`` and extract it into ``to_path``.

    Raises ``requests.HTTPError`` on a failed download.
    Returns ``to_path``, the extraction directory.
    """
    response = requests.get(url)
    response.raise_for_status()
    archive = io.BytesIO(response.content)
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(to_path)
    return to_path
904a4a0082040ec40e09f8cac8275c36943a3acc
37,251
def make_subsets(data, size: int) -> list:
    """
    Creates subsets out of ``data``, each subset having at most ``size``
    elements.  Used in ``graphs/multiple_results.html`` and
    ``graphs/single_result.html``.

    Fixes over the previous version:
    * empty input no longer raises (pop/popitem on an empty container);
    * no spurious empty subset is appended when ``len(data)`` is an exact
      multiple of ``size``.

    Notes:
    * ``data`` is consumed (emptied) by this function, as before.
    * Lists are consumed from the end (pop order), dicts in ``popitem``
      (LIFO) order, matching the original behaviour.
    """
    subset_list = list()
    if type(data) is list:
        subset = list()
        while data:
            subset.append(data.pop())
            if len(subset) == size:
                subset_list.append(subset)
                subset = list()
        if subset:  # trailing partial subset only when non-empty
            subset_list.append(subset)
    elif type(data) is dict:
        subset = dict()
        while data:
            key, value = data.popitem()
            subset[key] = value
            if len(subset) == size:
                subset_list.append(subset)
                subset = dict()
        if subset:
            subset_list.append(subset)
    return subset_list
eb0b477d85c15a1012325751c771aa3b316c0312
37,252
import re
import html
def prettify(item: str) -> str:
    """
    Remove HTML tags from ``item`` and unescape any HTML entities.

    :param item: A string to be prettified
    """
    without_tags = re.sub("<.+?>", "", item)
    return html.unescape(without_tags)
69f60a9138e3ada4971011e14d9cc3b10918cba1
37,253
def check_environment_status(celery_worker, environment_id):
    """Check the status of the environment that is being requested.

    :param celery_worker: The worker holding the task results.
    :type celery_worker: :obj:`celery.Celery`
    :param environment_id: The environment ID to check status on.
    :type environment_id: str
    :return: A dictionary with ``status`` and ``result`` keys.
    :rtype: dict
    """
    async_result = celery_worker.AsyncResult(environment_id)
    outcome = async_result.result
    state = async_result.status
    if outcome:
        # A result carrying an "error" key counts as a failure regardless
        # of the reported task state.
        if outcome.get("error") is not None:
            state = "FAILURE"
        async_result.get()
    return {"status": state, "result": outcome}
5a35056c2a130a7148ad5c66a4dd4478c9b39fab
37,254
def to_gib(bytes, factor=2**30, suffix="GiB"):
    """
    Convert a byte count into gibibytes, formatted to two decimals.

    Ex: 1073741824 bytes = 1073741824 / 2**30 = 1.00GiB
    """
    value = bytes / factor
    return "%0.2f%s" % (value, suffix)
1006d69275c212d978b06ab248f02ea966f1490e
37,255
def _bregman_map(M, lambda_k, diff_d_k):
    """
    Bregman map, according to [1], Eq. 2.9, with f(x) := -d(lambda),
    and M*psi(x,y) := M/2*||lambda-lambda_k||_2.

    A previous version projected the result on the dual feasible set;
    now it is returned directly.
    """
    step = float(1.0) / M
    return lambda_k + step * diff_d_k
128bf46eee3b0c6957dee5edf496f6bbc97d81e4
37,257
def fn_middleware(get_response):
    """Function-factory middleware: wraps ``get_response`` unchanged."""
    def middleware(request):
        # Pass-through; hook point for pre/post-processing.
        return get_response(request)
    return middleware
e4261f99c2acd939dfb6198f1e253a680c2d9184
37,258
def include_key_in_value_list(dictio):
    """
    Return the cases of the log as a list, starting from the dictionary
    returned by PM4Py.  Each case dict is annotated in place with a
    ``caseId`` key holding its dictionary key.

    Parameters
    ---------------
    dictio
        Dictionary of cases

    Returns
    ---------------
    list_cases
        List of cases
    """
    cases = []
    for case_id, case in dictio.items():
        case["caseId"] = case_id
        cases.append(case)
    return cases
917cf7a280daad5372ff9ac312d96dc46fdcc5e6
37,259
import os
def get_root_path():
    """Return the absolute path of the project root folder.

    The root is taken to be two directories above this file.
    """
    here = os.path.realpath(__file__)
    return os.path.dirname(os.path.dirname(here))
5587e7a2a67f8089de3724c55e52008a6595064a
37,260
def var_number(i, j, d, k):
    """
    Map a (row, column, digit) triple to a unique propositional variable.

    :param i: row number 1 - k**2
    :param j: col number 1 - k**2
    :param d: digit 1 - k**2
    :param k: size of the sudoku (k**2 x k**2 grid)
    :return: variable number, 1-based
    """
    cells = k ** 2
    return cells * cells * (i - 1) + cells * (j - 1) + d
676bd8c4e67466926b21542bad96c9864ca6da2c
37,261
def forward_step(model, batch):
    """Single forward pass shared by training and validation; returns the
    model's output (a dict)."""
    return model(batch)
874bd012671733399b126789a2443953ef19e5b0
37,262
import functools
import subprocess
import sys
def called_process_error2exit_decorator(func):
    """Decorator converting a CalledProcessError into a clean exit message.

    Avoids showing end-users a raw stack trace: the error and the
    subprocess output are printed and the program exits with status 1.
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except subprocess.CalledProcessError as e:
            message = "{err}:\n{msg}".format(err=str(e), msg=e.output)
            print(message)
            sys.exit(1)
    return func_wrapper
94d49cc1666698cdf6cceeb16a2386b1d6886c58
37,264
def infer_id_from_external_id(playground, asset_id: str, external_id: str, project_id: str):
    """
    Infer an asset id from an external id.

    Parameters
    ----------
    - asset_id: asset id (returned directly when provided)
    - external_id: external id
    - project_id: project id
    """
    if asset_id is None and external_id is None:
        raise Exception(
            'Either provide asset_id or external_id and project_id')
    if asset_id is not None:
        return asset_id
    # Look the asset up by its external id.
    matches = playground.assets(
        external_id_contains=[external_id],
        project_id=project_id,
        fields=['id'],
        disable_tqdm=True)
    if len(matches) == 0:
        raise Exception(
            f'No asset found with external ID "{external_id}"')
    if len(matches) > 1:
        raise Exception(
            f'Several assets found containing external ID "{external_id}":' \
            f' {matches}. Please, use asset ID instead.')
    return matches[0]['id']
e889399b21077e6d40cedba2116d859eeaf464ce
37,265
import pickle
def load(input_filename):
    """Unpickle and return the object stored in ``input_filename``."""
    with open(input_filename, "rb") as fh:
        return pickle.load(fh)
7401114353e671fa52a035159dd564882b771bc3
37,266
import sys
def new_type(name, *args, **kwargs):  # pragma: no cover
    """
    Legacy-Python-compatible wrapper around ``type(...)``:
    on Python 2 the class name must be a byte string.
    """
    if sys.version_info[0] == 2:
        name = name.encode()
    return type(name, *args, **kwargs)
65edfda84cf66fcb3c9da3852875f38ac3655498
37,267
def doWeNeedToFilter(options):
    """
    Return True unless every filter option is at its pass-through
    default, in which case the filter step can be skipped entirely.
    """
    # Short-circuits in the same order the original checks did.
    return bool(
        options.top_pct >= 0
        or options.bits > 0
        or options.evalue is not None
        or options.pctid > 0
        or options.length > 0
        or options.hits_per_read > 0
        or options.hsps_per_hit > 0
        or options.nonoverlapping >= 0
        or options.bad_refs
    )
6dbf7785d16701dbe692074fdfe1fa65c9f3090b
37,269
import os
def addPrefix(path, prefix):
    """Prefix the baseName part of a path and return the result.

    @param path: The path to prefix.
    @type path: string

    @param prefix: The prefix to prepend to the baseName part.
    @type prefix: string

    @return: The path with its baseName part prefixed with 'prefix';
        unchanged when 'prefix' is empty.
    @rtype: string
    """
    if not prefix:
        return path
    base = os.path.basename(path)
    head_len = len(path) - len(base)
    return path[:head_len] + prefix + base
4a22f4c489d86f743029c982165a5b4b12ce7246
37,270
def s3_object_exists(s3):
    """Return a checker verifying that an object exists in a bucket."""
    def check(bucket_name, key):
        # No buckets at all: nothing can exist.
        names = [b["Name"] for b in s3.list_buckets()["Buckets"]]
        if not names:
            return False
        contents = s3.list_objects(Bucket=bucket_name)["Contents"]
        keys = [entry["Key"] for entry in contents]
        return key in keys
    return check
a0f549de4b2acc3fbbc4077d31f4feca9eec5cae
37,272
def format_model_name(model_name, specific_params):
    """
    Given the model name and input parameters, return a display-ready
    name string for simulated graphs.
    """
    batch_size = specific_params['batch_size']
    # Digits embedded in the name encode the layer count.
    digits = ''.join(ch for ch in model_name if ch.isdigit())
    if 'resnet' in model_name:
        return f'ResNet-{digits} ({batch_size})'
    if 'densenet' in model_name:
        return f'DenseNet-{digits} ({batch_size})'
    if 'inception' in model_name:
        return f'Inception V{model_name[-1]} ({batch_size})'
    if 'treelstm' in model_name:
        return 'TreeLSTM'
    if model_name == 'unroll_gan':
        return 'Unrolled GAN'
    if model_name == 'lstm':
        return f'LSTM ({batch_size})'
    # Unknown models pass through unchanged.
    return model_name
2f2d5c39eb5fcb21a10069a2ff27995e4727f83d
37,273
def get_kmgraph_meta(mapper_summary):
    """Build the info string displayed below the Kepler-Mapper graph plot."""
    custom = mapper_summary["custom_meta"]
    first_line = (
        f'<b>N_cubes:</b> {custom["n_cubes"]}'
        f' <b>Perc_overlap:</b> {custom["perc_overlap"]}'
    )
    second_line = (
        f'<br><b>Nodes:</b> {mapper_summary["n_nodes"]}'
        f' <b>Edges:</b> {mapper_summary["n_edges"]}'
        f' <b>Total samples:</b> {mapper_summary["n_total"]}'
        f' <b>Unique_samples:</b> {mapper_summary["n_unique"]}'
    )
    return first_line + second_line
89afbf3b1e0b51acfed5f6f957b67516fa0aed04
37,274
def vol_to_mat(data):
    """
    Collapse the first three (spatial) axes of an nD array into one,
    keeping any trailing axes, using Fortran (column-major) order.
    """
    flat_shape = (-1,) + data.shape[3:]
    return data.reshape(flat_shape, order='F')
340f55843a4a3b17b1aefe4eb0dd63da44ea0c29
37,275
def put_in_the_middle(target_tensor, data_tensor, pad_better=False, is_padded=None):
    """
    Center ``data_tensor`` inside ``target_tensor`` (arbitrary number of
    dimensions), writing in place.

    If data is bigger along an axis, the data is cropped; if the target is
    bigger, the surrounding target values (probably zeros) are kept.

    :param target_tensor: array written to in place (numpy-style tuple
        slicing is assumed -- TODO confirm callers only pass ndarrays)
    :param data_tensor: array whose central region is copied in
    :param pad_better: if True, replicate the first/last data slice into
        the leading/trailing padding along axis 0
    :param is_padded: optional boolean array, set True wherever the target
        was NOT overwritten by data
    :return: None (mutates ``target_tensor`` / ``is_padded``)
    """
    target_shape = target_tensor.shape
    data_shape = data_tensor.shape

    def get_indices(target_width, data_width):
        # Build matching slices that center the smaller extent in the larger.
        # FIX: was ``diff/2`` -- true division yields a float on Python 3,
        # which makes slice() raise TypeError; use floor division.
        if target_width > data_width:
            diff = target_width - data_width
            target_slice = slice(diff // 2, target_width - (diff - diff // 2))
            data_slice = slice(None, None)
        else:
            diff = data_width - target_width
            data_slice = slice(diff // 2, data_width - (diff - diff // 2))
            target_slice = slice(None, None)
        return target_slice, data_slice

    t_sh = [get_indices(l1, l2) for l1, l2 in zip(target_shape, data_shape)]
    target_indices, data_indices = list(zip(*t_sh))
    target_tensor[target_indices] = data_tensor[data_indices]
    if is_padded is not None:
        # Mark everything as padding, then clear the region data covers.
        is_padded[:] = True
        is_padded[target_indices] = False
    if pad_better:
        # Replicate the edge slices of the data into the axis-0 padding.
        if target_indices[0].start:
            for i in range(0, target_indices[0].start):
                target_tensor[i] = data_tensor[0]
        if target_indices[0].stop:
            for i in range(target_indices[0].stop, len(target_tensor)):
                target_tensor[i] = data_tensor[-1]
48c869dd45d79fcc3a5be326e200a83955229059
37,276
def binary(i, width):
    """Return the last ``width`` bits of ``i`` as a list of 0/1 ints.

    >>> binary(0, 5)
    [0, 0, 0, 0, 0]
    >>> binary(15, 4)
    [1, 1, 1, 1]
    >>> binary(14, 4)
    [1, 1, 1, 0]
    """
    # Pad on the left with zeros, then keep only the low ``width`` bits.
    bits = format(i, 'b').zfill(width)[-width:]
    return [int(ch) for ch in bits]
0a9ee440d14cc0fccc8b3d7c83c36582a4583749
37,280
def price_index(price_of_product_x, price_of_product_y):
    """Return the price index of product X over product Y.

    Args:
        price_of_product_x (float): Price of product X.
        price_of_product_y (float): Price of product Y.

    Returns:
        price_index (float): (Price of X / Price of Y) * 100
    """
    ratio = price_of_product_x / price_of_product_y
    return ratio * 100
e3f6eeec3395cf039e037eca97bca5e1b7eb55ca
37,281
def get_no_blank_row_sheet(sheet):
    """
    Return a new sheet with every empty row (e.g. ``[]``) removed.
    """
    return [row for row in sheet if row]
41200cf8718cb6ea59d4407747dc24e2f91ec587
37,283
def filename_parse(filename):
    """
    Parse a result filename into method information.

    It is assumed that u-pbe/avtz on hcn is named hcn_pbe_avtz_u.xxx,
    where xxx is an arbitrary extension.

    Returns (mol, func, basis, unrestricted, name).
    """
    # SV-P and 6-31+G-d,p appear misspelled with a trailing underscore.
    cleaned = filename.replace("SV-P_", "SV-P").replace("6-31+G-d,p_", "6-31+G-d,p")
    parts = cleaned.split("/")[-1].split("_")
    mol = parts[0]
    func = parts[1]
    basis = parts[2].split(".")[0]
    if func in ('rDCSD', 'df-lrmp2'):
        unrestricted = False
    #elif func in ('luCCSD', 'uCCSD', 'uDCSD'):
    elif func in ('uCCSD', 'uDCSD'):
        unrestricted = True
    else:
        # A fourth token (the trailing "u") marks an unrestricted run.
        unrestricted = (len(parts) == 4)
    name = ("u-" if unrestricted else "") + func + "/" + basis
    if func in ('rDCSD', 'uDCSD'):
        func = 'DCSD'
    return mol, func, basis, unrestricted, name
4d78ff5ac6edf1942118e5183d603d2db072e3f7
37,284
def kelvin_to_level(kelvin):
    """Convert a kelvin temperature to a USAI level in [0, 100].

    Values below 2200 K clamp to 0, above 6000 K to 100.
    """
    low, high = 2200, 6000
    if kelvin < low:
        return 0
    if kelvin > high:
        return 100.0
    return (kelvin - low) / (high - low) * 100
993cce9c7b85dea718a0eb21fd1e20a088a403f9
37,286
import json
def load_data(filename: str):
    """
    Try to read JSON data from ``filename``.

    Returns the parsed data, or an empty list when the file cannot be
    opened or does not contain valid JSON.
    """
    try:
        with open(filename) as f:
            data = json.load(f)
    # FIX: was a bare ``except:``, which also swallowed KeyboardInterrupt
    # and SystemExit.  json.JSONDecodeError is a ValueError subclass.
    except (OSError, ValueError):
        print("Could not open file")
        data = []
    return data
cbefbbfcb87bff92004b535b15f940a1ea527211
37,287
import os
def get_undo_path(undofile):
    """
    Extract a filesystem path from a Vim undo filename, where '%'
    stands in for the path separator.

    Example: '%home%near%bin%000.cpp' -> '/home/near/bin/000.cpp'
    """
    return os.sep.join(undofile.split('%'))
8f432e9fb700b81ee9e60b7005d47090e4105689
37,288
def hexinv(hexstring):
    """
    Return the inverse color (opposite on the color wheel).

    e.g.: hexinv('#FF0000') = '#00FFFF'

    Parameters
    ----------
    hexstring : str
        Hexadecimal string such as '#FF0000' (leading '#' optional)

    Returns
    -------
    str
        Hexadecimal string such as '#00FFFF'
    """
    if hexstring[0] == '#':
        hexstring = hexstring.lstrip('#')
    # XOR against white flips every channel.
    complement = 0xFFFFFF ^ int(hexstring, 16)
    return "#%06X" % complement
a38f1bc031ddc7f15fa462c32de4af93fa42a6c3
37,290
import inspect
import importlib
def form_to_model(api_version, stack=1):
    """Convert a form name to a model name.

    Looks ``stack`` frames up the call stack, takes the module name of the
    caller's source file, imports the module of the same name under
    ``frontend.models.v<api_version>``, and returns the attribute named
    after the calling function.

    :param api_version: integer API version selecting the models package
    :param stack: how many frames up to inspect (1 = direct caller)
    :return: the model object resolved from the imported module

    NOTE(review): relies on the caller's file/function naming matching the
    models package layout exactly -- verify against the forms modules.
    """
    # Frame record of the caller (or further up, per ``stack``).
    frame_record = inspect.getouterframes(inspect.currentframe())[stack]
    form_module = inspect.getmodulename(frame_record.filename)
    model_module = importlib.import_module(
        ".%s" % (form_module), "frontend.models.v%d" % (api_version)
    )
    # The caller's function name doubles as the model attribute name.
    model_func = getattr(model_module, frame_record.function)
    return model_func
fa1e9909bae43014b14d74caf485dce6c2b128b1
37,291
def _getData(node):  # {{{
    """Return the stripped text content of a DOM text node, else ""."""
    if node.nodeType != node.TEXT_NODE:
        return ""
    return node.data.strip()
0819a318204f95e6c9d2517959f00b0999e2c6cc
37,292
import random
def random_deletion(words, p):
    """
    Randomly delete words from the sentence with probability ``p``.

    Args:
        words: list of words
        p: per-word deletion probability

    Returns:
        list: the surviving words; never empty (a single random word is
        returned if everything was deleted), and a one-word input is
        returned unchanged.
    """
    # A single word is never deleted.
    if len(words) == 1:
        return words
    # Keep each word independently with probability (1 - p).
    kept = [word for word in words if random.uniform(0, 1) > p]
    if not kept:
        # Everything was deleted: fall back to one random word.
        pick = random.randint(0, len(words) - 1)
        return [words[pick]]
    return kept
6224a942c4e5c667b662f28d9b519e56ab533e4a
37,293