content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def extract(mod, models):
    """Return *models* with every entry equal to *mod* removed.

    An empty-dict *mod* means "nothing to delete", in which case the
    original list object is handed back unchanged (no copy is made).
    """
    if mod == {}:
        # Nothing to remove; return the list as-is.
        return models
    return [candidate for candidate in models if candidate != mod]
5aa24ccaa238fe85f4f037c8af318e95a77b7b55
12,965
import os def _replace_desdata(pth, desdata): """Replace the NERSC DESDATA path if needed. Parameters ---------- pth : str The path string on which to do replacement. desdata : str The desired DESDATA. If None, then the path is simply returned as is. Returns ------- pth : str The path, possible with DESDATA in the path replaced with the desired one. """ if desdata is None: return pth nersc_desdata = '/global/project/projectdirs/des/y3-image-sims' if (nersc_desdata in pth and os.path.normpath(desdata) != os.path.normpath(nersc_desdata)): return pth.replace(nersc_desdata, desdata) else: return pth
082681d1366b7409d11fba13c7d41fa43e4d9d9e
12,966
def _is_no_cache(request): """Returns True if the request should skip the cache.""" cache_control = request.headers.get('Cache-Control') or '' return 'no-cache' in cache_control or 'max-age=0' in cache_control
bf8421a3f9a654a877cdf518aa9a4de532098f89
12,967
def get_missing(array, first=True):
    """Return the ids of similar-looking images not yet collected.

    array is | ID | row | col | filename | lat | lon | X_1 | ... | X_n |
    where
        ID is the image id
        row is the row pixel index of top left corner of subimage
        col is the col pixel index of top left corner of subimage
        filename is the filename of the larger image
        lat is approximate subimage latitude
        lon is approximate subimage longitude
        X_1 ... X_n are IDs of n most similar subimages
    first is a boolean flag for the first iteration of this
    returns a list of the similar subimage IDs that are not in the ID column
    """
    if first:
        # Seed run: only row 0 has been collected; the candidate pool is
        # everything from column 5 onward of that row.
        # NOTE(review): per the documented layout the similarity IDs start at
        # column 6, so [5:] also picks up `lon` — confirm whether this
        # off-by-one is intentional.
        existing = set([array[0][0]])
        potential = set(array[0][5:])
    else:
        existing = [int(x[0]) for x in array]
        existing = set(existing)
        # NOTE(review): x[:5] keeps the *metadata* columns (ID..lat) rather
        # than the X_1..X_n similarity columns, and int() would fail on the
        # `filename` column — this looks like it was meant to be x[6:];
        # verify against the callers' actual row layout.
        tmp = [x[:5] for x in array]
        potential = []
        for row in tmp:
            for x in row:
                potential.append(int(x))
        potential = set(potential)
    # Candidates we have not already collected.
    missing = potential.difference(existing)
    return list(missing)
323a2b88a02fafe6f06f2e65be1be625a0ace29b
12,968
def get_or_else(data, key, default_value=None):
    """Look up *key* in *data*, returning *default_value* when absent.

    :param data: a subscriptable container (dict, list, ...); falsy values
        (None, empty container) short-circuit to the default.
    :param key: key or index to look up
    :param default_value: value returned when the lookup fails
    :return: ``data[key]`` or *default_value*
    """
    if not data:
        return default_value
    try:
        return data[key]
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare `except:` so that real errors (e.g.
        # KeyboardInterrupt, programming bugs) are no longer swallowed;
        # these three cover failed dict/list/unsubscriptable lookups.
        return default_value
53bc769a3331684c46127b08976fd62ca1ab47df
12,969
def update_rl_veh(env, rl_queue, rl_veh, removed_veh, control_range, num_rl, rl_ids):
    """Update the RL lists of controllable, entering, and exiting vehicles.

    Used by the open environments.

    Parameters
    ----------
    env : flow.Env
        the environment class
    rl_queue : collections.deque
        the queue of vehicles that are not controllable yet
    rl_veh : list of str
        the list of current controllable vehicles, sorted by their positions
    removed_veh : list of str
        the list of RL vehicles that passed the control range
    control_range : (float, float)
        the control range (min_pos, max_pos)
    num_rl : int
        the maximum number of vehicles to control at any given time
    rl_ids : list of str or iterator
        the RL IDs to add to the different attributes

    Returns
    -------
    collections.deque
        the updated rl_queue term
    list of str
        the updated rl_veh term
    list of str
        the updated removed_veh term
    """
    # Add rl vehicles that just entered the network into the rl queue.
    for veh_id in rl_ids:
        if veh_id not in list(rl_queue) + rl_veh + removed_veh:
            rl_queue.append(veh_id)

    # Remove rl vehicles that exited the controllable range of the network.
    # Bug fix: iterate over a snapshot — removing from `rl_veh` while
    # iterating it skipped the element after each removal, so consecutive
    # exited vehicles were left uncollected.
    for veh_id in list(rl_veh):
        if env.k.vehicle.get_x_by_id(veh_id) > control_range[1] \
                or veh_id not in env.k.vehicle.get_rl_ids():
            removed_veh.append(veh_id)
            rl_veh.remove(veh_id)

    # Fill up rl_veh until there are enough controlled vehicles.
    while len(rl_queue) > 0 and len(rl_veh) < num_rl:
        # Ignore vehicles that are in the ghost edges (before the range).
        if env.k.vehicle.get_x_by_id(rl_queue[0]) < control_range[0]:
            break
        rl_id = rl_queue.popleft()
        veh_pos = env.k.vehicle.get_x_by_id(rl_id)
        # Add the vehicle if it is within the control range.
        if veh_pos < control_range[1]:
            rl_veh.append(rl_id)

    return rl_queue, rl_veh, removed_veh
616be730420795755d5b628284517572b0c2248f
12,970
def get_frame(df, t, fps=20):
    """Select the player-tracking rows belonging to the frame at time *t*.

    Arguments:
        df: pd.DataFrame with player tracking data, indexed by frame number
        t: timestamp of the play (seconds)
        fps: frames per second of the recording

    Returns:
        pd.DataFrame of the rows at frame ``int(t * fps)``, indexed by
        "player", with missing ``player_num`` values replaced by "".
    """
    frame_idx = int(t * fps)
    frame = df.loc[frame_idx].set_index("player")
    frame.player_num = frame.player_num.fillna("")
    return frame
51bad21aa17978bcfee37fe31edb79ddb45e9191
12,972
import math


def pformat_bar(
        value, width=40, prefix="", vmin=0.,
        vmax=1., border=True, fill=' ',
        reverse=False):
    """Return a progressbar-like str representation of value.

    Parameters
    ==========
    value : float
        Value to be represented.
    width: int
        Bar width (in character).
    prefix: string
        Text to be prepend to the bar.
    vmin : float
        Minimum value.
    vmax : float
        Maximum value.
    """
    # This code is based on
    # https://gist.github.com/rougier/c0d31f5cbdaac27b876c  # noqa: E501
    # The original license:
    # -------------------------------------------------------------------------
    # Copyright (c) 2016, Nicolas P. Rougier
    # Distributed under the (new) BSD License.
    # -------------------------------------------------------------------------

    # Block progression is 1/8.
    # NOTE(review): the reverse branch uses *vertical* eighth blocks while the
    # forward branch uses horizontal ones, and the reverse table starts with
    # ' ' rather than '' — presumably intentional for the mirrored rendering,
    # but confirm: it makes the two directions one character different in
    # content width at the zero-fraction boundary.
    if reverse:
        # blocks = ["", "▐", "█"]
        blocks = ' ▁▂▃▄▅▆▇█'
    else:
        blocks = ["", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█"]
    # `or` fallback: a vmin/vmax of 0 (falsy) is replaced by the defaults,
    # which is harmless for vmin but means vmax can never be 0.
    vmin = vmin or 0.0
    vmax = vmax or 1.0
    if border:
        lsep, rsep = "▏", "▕"
    else:
        lsep, rsep = " ", " "
    # Normalize value into [0, 1].
    value = min(max(value, vmin), vmax)
    value = (value - vmin) / (vmax - vmin)
    v = value * width
    x = math.floor(v)  # integer part
    y = v - x  # fractional part
    # Index of the partial block representing the fractional eighth.
    i = int(round(y * (len(blocks) - 1)))
    bar = "█" * x
    barfrac = blocks[i]
    # NOTE(review): at value == vmax, x == width so n == -1 and the fill
    # collapses to '' — the full bar renders one cell narrower than partially
    # filled bars; confirm this edge case is acceptable.
    n = width - x - 1
    nobar = fill * n
    if reverse:
        bar = f'{lsep}{nobar}{barfrac}{bar}{rsep}'
    else:
        bar = f'{lsep}{bar}{barfrac}{nobar}{rsep}'
    return bar
6b12a5bb2bd4d051224b2f5960a0378b0593b2f7
12,973
def test_generator(func_graph, stack_roots): """Return the function that will start the fault-tolerance testing for a graph""" def test(self): """Test fault-tolerance of the topology""" self.set_up(func_graph, stack_roots) self.network_function() return test
1a607086ef68833894f8d031589c61dd8557f090
12,974
import csv


def process_csv(csv_file):
    """Read *csv_file* and return its contents as a list of row lists."""
    print("Processing {}".format(csv_file))
    with open(csv_file, "r") as datafile:
        rows = [row for row in csv.reader(datafile)]
    return rows
87603c21d09332cc1e1d25a3de6074f984f95426
12,975
import re


def new_exposures(out):
    """Scan rsync output for exposures to be transferred.

    Parameters
    ----------
    out : :class:`str`
        Output from :command:`rsync`.

    Returns
    -------
    :class:`set`
        The unique 8-digit exposure numbers detected at the start of lines
        in `out`.
    """
    exposure_re = re.compile(r'([0-9]{8})/?')
    exposures = set()
    for line in out.split('\n'):
        hit = exposure_re.match(line)
        if hit is not None:
            exposures.add(hit.groups()[0])
    return exposures
8fc8817fe0ad79c177473ec676853569d733ec65
12,976
from datetime import datetime, timezone


def now():
    """Current UTC datetime string suitable for use as a Solr dt field.

    Returns e.g. ``'2024-01-02T03:04:05Z'``. Uses the aware UTC clock
    (``datetime.now(timezone.utc)``) instead of the deprecated
    ``datetime.utcnow()``, then drops the tzinfo so the ISO string keeps
    the bare 'Z' suffix rather than '+00:00'.
    """
    current = datetime.now(timezone.utc).replace(tzinfo=None)
    return current.isoformat(timespec='seconds') + 'Z'
390877fc1b7592df54300cb70b9b62590f1bd4c9
12,978
def clean_data(df):
    """Cleans the passed dataframe.

    Actions done while cleaning:
    - dropping all rows with missing (out-of-range) location ids
    - dropping all rows where dropoff time is before pickup time
    - consider all location ids that map to the same zone as equivalent
      and replace them with a single value (104/105 -> 103)

    Keyword Arguments:
    df -- the dataframe to clean

    Returns: the cleaned dataframe
    """
    any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)
    df = df.drop(df.index[any_location_id_missing])
    df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]
    # Bug fix: Series.replace returns a new Series — the result was
    # previously discarded, so the documented zone merge never happened.
    df = df.assign(PULocationID=df.PULocationID.replace([104, 105], 103))
    return df
f29a1d3784914c2dfcae1611c8860dbaa7f2a93c
12,979
def midpoint(pair1, pair2):
    """Return the midpoint (x, y) between the two given points."""
    mid_x = (pair1[0] + pair2[0]) / 2
    mid_y = (pair1[1] + pair2[1]) / 2
    return mid_x, mid_y
760aca99b1dad002fb8b6f483515f222dee77160
12,982
def separate_answers(albert_text, cls='[CLS]', sep='[SEP]'):
    """Split an ALBERT token sequence into its two answer segments.

    :param albert_text: list of ALBERT word tokens
    :param cls: string of the cls token
    :param sep: string of the sep token
    :return: tuple of the two separated token lists
    """
    # The +4 offsets skip the SPIECE-underline tokens that trail the
    # special markers in the tokenized sequence.
    cls_pos = albert_text.index(cls) + 4
    first_sep = albert_text.index(sep) + 4
    second_sep = albert_text.index(sep, first_sep + 1)
    first_answer = albert_text[cls_pos + 1:first_sep - 4]
    second_answer = albert_text[first_sep + 1:second_sep]
    return first_answer, second_answer
66a9b1e5bcd2c096187db12f31b6e66336552f22
12,984
def mail_addr_test(str):
    """Tests for a bracketed mailing address.

    Returns True iff *str* contains a '<...>' pair whose contents hold no
    nested brackets and no spaces.
    """
    start = str.find('<')
    end = str.find('>')
    # Bug fix: also reject the case where '>' precedes '<' (e.g. "a> <b"),
    # which previously produced an empty slice and a false positive.
    if start == -1 or end == -1 or end < start:
        return False
    inner = str[start + 1:end]
    # A nested '<' inside the brackets (e.g. "<<a>") is not a valid address.
    if inner.find('<') != -1 or inner.find('>') != -1:
        return False
    if inner.find(' ') != -1:
        return False
    return True
2a814da46f7596ba6931d109270327a36cb56aed
12,985
import random


def random_with_bias(messages: list, word: str):
    """Pick a message, biased away from those ending with *word*.

    Messages whose final word equals *word* are chosen only about one time
    in six; otherwise a message not ending with *word* is chosen. When only
    one of the two pools is non-empty, a message is picked from it directly.
    """
    ending_with = []
    not_ending_with = []
    for message in messages:
        tokens = message.split()
        bucket = ending_with if tokens[-1].lower() == word else not_ending_with
        bucket.append(message)
    if not ending_with:
        return random.choice(not_ending_with)
    if not not_ending_with:
        return random.choice(ending_with)
    pool = ending_with if random.randint(0, 5) == 0 else not_ending_with
    return random.choice(pool)
74579e4216af2a2c1023aa9a2130301c4ec04e6b
12,986
def group_table(df, valuefield):
    """Aggregate *df* by *valuefield*, summing the remaining columns.

    Rows belonging to the "0-19" age group are excluded before grouping.

    Args:
        df (dataframe): dataframe with an "Agegroup" column
        valuefield: column name to group by

    Returns:
        dataframe: grouped sums with the group key as a regular column
    """
    filtered = df[df["Agegroup"] != "0-19"]
    grouped = filtered.groupby([filtered[valuefield]], sort=True).sum()
    return grouped.reset_index()
a55f601cda65d032dce90828d2a293c0976ef3a7
12,987
def find_allowed_size(nx_size):
    """Finds the next largest "allowed size" for the Fried Phase Screen method.

    Allowed sizes have the form 2**n + 1.

    Parameters:
        nx_size (int): Requested size

    Returns:
        int: Next allowed size (smallest 2**n + 1 >= nx_size)
    """
    exponent = 0
    while (2 ** exponent + 1) < nx_size:
        exponent += 1
    return 2 ** exponent + 1
b7d53ba805ed3c4f543bbb3ff0a94056b76f9738
12,988
def remove_field(meta, field_id):
    """Delete *field_id* (and any descendants) from *meta*.

    Plot entries that referenced any removed field are pruned as well;
    ``meta.plot`` is rebuilt either way. Returns the list of removed
    field ids (empty when the field is absent).
    """
    removed = meta.remove_field(field_id) if meta.has_field(field_id) else []
    meta.plot = {key: value for key, value in meta.plot.items()
                 if value not in removed}
    return removed
c8bdc810448bf5d722587776377228391a820e60
12,989
def compute_likelihood(da_logical, dim='ensemble'):
    """Return the likelihood computed along *dim* from logical event data.

    :param da_logical: array of boolean event outcomes
    :param dim: dimension to average over; None returns the input unchanged
    :return: the input when dim is None, otherwise the mean over *dim*
        renamed to 'likelihood'

    Notes
    -----
    See http://www.cawcr.gov.au/projects/verification/
    """
    # Idiom fix: identity comparison with None (`is`), not equality (`==`),
    # which can be hijacked by array-like __eq__ implementations.
    if dim is None:
        return da_logical
    return da_logical.mean(dim=dim).rename('likelihood')
7387b7720aa4619c5bee27fedcd4e0e8401eb4ce
12,990
def isCompliant(quote):
    """Check a few compliance parameters to filter out bad quotes.

    A quote is compliant when it is non-empty and at least 30 chars long.
    """
    return quote != '' and len(quote) >= 30
db7bf215b28b8add0c01118c2ad7e043d5b34048
12,996
def expandRectangle(rect, xfactor=3, yfactor=3):
    """Expand an (x, y, w, h) rectangle about its own center.

    The returned rectangle shares the original's center but has its width
    and height scaled by *xfactor* / *yfactor*. All coordinates are rounded
    to integers.
    """
    x, y, w, h = rect
    # Shift the origin so the scaled box stays centered.
    new_x = x - ((xfactor - 1) / 2) * w
    new_y = y - ((yfactor - 1) / 2) * h
    new_w = w * xfactor
    new_h = h * yfactor
    return (int(round(new_x)), int(round(new_y)),
            int(round(new_w)), int(round(new_h)))
dbc37c87f7fe69c846a3089ab0543e4810ee3c21
12,997
import re


def baby_names_table_rows_from_babyfile_string(babyfile_string):
    """Parse ranked boy/girl name rows out of an HTML baby-names table.

    babyfile_string sample excerpt with lines of html:
        <tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>

    Returns a list of dictionaries of the form
        {'rank': rank, 'name_boy': name_boy, 'name_girl': name_girl}
    """
    print('baby_names_table_rows_from_babyfile_string')
    # findall with () groups returns one (rank, boy, girl) tuple per row.
    pattern = r'<tr align="right"><td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>'
    return [
        {'rank': rank, 'name_boy': boy, 'name_girl': girl}
        for rank, boy, girl in re.findall(pattern, babyfile_string)
    ]
a0bb831ee908e9fae0c125b2452977afc23058ce
12,998
import re


def strong_to_b(value):
    """Replace a paragraph's enclosing <strong>...</strong> with <b>...</b>.

    Only matches when the <strong> pair wraps the entire <p> contents.
    """
    pattern = r'^<p><strong>(.+?)</strong></p>$'
    return re.sub(pattern, u'<p><b>\\1</b></p>', value)
06e0f531160e4e5bf23feba3723aaf85971e606b
12,999
def safe_execution(func):
    """Decorator that runs *func* in best-effort ("safe") mode.

    Any exception raised by the wrapped function is printed and swallowed,
    and None is returned instead — the wrapped call never raises.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def inner_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Deliberately broad: this decorator's contract is "never raise".
            # Typo fix in the message: "occcurred" -> "occurred".
            print("An exception occurred, details:{}".format(e))
    return inner_func
2423332a5a71e2858eb82496176c110a0be738cf
13,000
def product(var_x, var_y):
    """Return the product of *var_x* and *var_y*. (Test helper.)"""
    result = var_x * var_y
    return result
bd71b40841a39f9df1b796975782c238c20ec3a3
13,001
def SingletonDecorator(cls):
    """ Decorator to setup any class as Singleton.

    Args:
        cls (class): decorated class

    Returns:
        cls instance

    Warning:
        Some subclass or decorated inside methods may lose their docstring.
        Not recommended to use this decorator with a documented class.

    Example:
        .. code-block:: python

            @SingletonDecorator
            class SingletonClass:
                pass
    """
    class WrapperClass(cls):
        _instance = None  # class instance

        def __new__(cls, *args, **kwargs):
            # First construction creates and caches the single instance;
            # every later call returns the cached one.
            if WrapperClass._instance is None:
                WrapperClass._instance = super(WrapperClass, cls).__new__(cls, *args, **kwargs)
                # make sure class was initialized once only
                WrapperClass._instance._initialized = False
            return WrapperClass._instance

        def __init__(self, *args, **kwargs):
            # __init__ runs on every construction attempt even though
            # __new__ returns the cached instance, so gate it with the
            # _initialized flag set in __new__.
            if self._initialized:
                return
            super(WrapperClass, self).__init__(*args, **kwargs)
            self._initialized = True

    # Make the wrapper impersonate the decorated class for introspection.
    WrapperClass.__name__ = cls.__name__
    WrapperClass.__doc__ = cls.__doc__
    WrapperClass.__module__ = cls.__module__
    return WrapperClass
37c330af4c12b12c338fe56c6882d0a9c937e105
13,002
def obscure(item, start=3, end=3):
    """Replace the middle of a string with '*' characters.

    :param item: value to be obscured; None passes through unchanged and
        non-string values are stringified first (matching the length check)
    :param start: int - how many characters to keep from the start
    :param end: int - how many characters to keep from the end
    :return: obscured string, the original item when it is too short to
        mask, or None
    """
    # ignore None
    if item is None:
        return None
    # Bug fix: the length was computed on str(item) but the slicing was done
    # on the raw value, which crashed for non-string inputs (e.g. ints).
    text = str(item)
    total_length = len(text)
    removed_length = start + end
    if removed_length > total_length:
        return item
    # Bug fix: guard end == 0 — `text[-0:]` is the whole string, not the
    # empty tail.
    tail = text[total_length - end:] if end > 0 else ''
    return text[:start] + "*" * (total_length - removed_length) + tail
c5f181e83806d4027bb3e8dd19e94701dda1f4f4
13,003
import json


def error_response(title, status='500'):
    """Generates an error response to return to API Gateway.

    Parameters
    ----------
    title: string
        The title description of the error.
    status: string
        The http status code.

    Returns
    -------
    dict: A response back to the api gateway with the error.
    """
    return {
        # Bug fix: statusCode was hard-coded to '500', silently ignoring the
        # `status` argument that the JSON body already used.
        'statusCode': status,
        'body': json.dumps({
            'errors': [{
                'status': status,
                'title': title,
            }]
        }),
    }
16a62ec46396bee5fe3b240febe4bd0ac2cf9de9
13,005
def create_plotly_blob(data_list, xlabel, ylabel, title):
    """Create a plotly line-plot blob (dict) for jupyter or html plots.

    E.G.
      import plotly.graph_objects as go
      blob = create_plotly_blob([(x1, y1, label1, True)], 'x', 'y', 'title')
      fig = go.Figure(blob)
      fig.show()

    Each 4-element tuple in data_list is (xdata, ydata, label, visible):
      xdata: 1d array of x-axis data
      ydata: 1d array of y-axis data
      label: str trace label
      visible: bool; if False the trace only appears in the legend

    :param data_list: list of 4-element tuples (xdata, ydata, label, visible)
    :param xlabel: str x-axis label
    :param ylabel: str y-axis label
    :param title: str plot title (newlines become <br>)
    :return: dict
    """
    blob = {
        'data': [],
        'layout': {
            'font': {'family': 'Courier New, monospace', 'size': 18},
            'legend': {'title': {'text': 'Scannables'}},
            'title': {'text': title.replace('\n', '<br>')},
            'xaxis': {'title': {'text': xlabel}},
            'yaxis': {'title': {'text': ylabel}},
        },
    }
    for xdata, ydata, label, visible in data_list:
        blob['data'].append({
            'mode': 'markers+lines',
            'name': label,
            'type': 'scatter',
            # Hidden traces stay listed in the legend but start switched off.
            'visible': True if visible else 'legendonly',
            'x': list(xdata),
            'y': list(ydata),
        })
    return blob
a19ece2ba0a1fc6a1db6a167b266d1b3099927c1
13,006
def getDrowAnnotationLine(ann):
    """Build a drowsiness annotation line from a raw annotation list.

    levels:
      0: eye state (right)
      1: eye state (left)

    The body annotation id (ann[0]) maps straight through for ids 0-4
    (open/close/opening/closing/undefined); any other id falls back to 0.
    The result is [body_id, eye_state, ann[2], ann[3], ...] — note that
    ann[1] is intentionally skipped, matching the original layout.
    """
    bodyAnnId = ann[0]
    a1Ann = bodyAnnId if bodyAnnId in (0, 1, 2, 3, 4) else 0
    return [bodyAnnId, a1Ann] + list(ann[2:])
3403bd63f3fa16aad4f713edb16954fbd1d7e20a
13,008
import os def _IsBuildGnInputFile(input_file): """Returns True iff this is a BUILD.gn file.""" return os.path.basename(input_file) == 'BUILD.gn'
3977dca316c731bbb9cbc484b877cb58cdcb552f
13,009
def get_indices_of_A_in_B(A, B):
    """Return the indices into A of the elements of A that occur in B.

    Parameters
    ----------
    A : list
        The "needles"
    B : list
        The "haystack"

    Returns
    -------
    list
        Indices into A of elements that are present in B.
    """
    haystack = set(B)  # O(1) membership tests
    indices = []
    for idx, needle in enumerate(A):
        if needle in haystack:
            indices.append(idx)
    return indices
88948acd14d7979f7e5ce067776c847e1d0b2a24
13,010
def size_str(size):
    """Return a human-readable string for a byte count.

    size: number of bytes (or None, which passes through as None).
    """
    if size is None:
        return None
    for unit in ('', 'k', 'm', 'g', 't'):
        has_fraction = size != int(size)
        if size < 100 and has_fraction:
            return '%.1f%s' % (size, unit)
        if size < 1024:
            return '%d%s' % (size, unit)
        size /= 1024.0
    # NOTE(review): falls off the loop (returning None) for sizes beyond the
    # 't' unit — presumably out of the intended range; confirm with callers.
f90e5196c21a8ce0f179043cf1cc16171da62bd0
13,012
def get_instructions(content_tree, namespaces):
    """Find all text:a links whose target uses the py3o:// scheme."""
    query = "//text:a[starts-with(@xlink:href, 'py3o://')]"
    return content_tree.xpath(query, namespaces=namespaces)
c527cf9bb4b97ac3207ee2cf5ed9ffb3febdf7ab
13,013
def get_ce_alexnet():
    """Return Corruption Error values for AlexNet, keyed by corruption name."""
    return {
        'Gaussian Noise': 0.886428,
        'Shot Noise': 0.894468,
        'Impulse Noise': 0.922640,
        'Defocus Blur': 0.819880,
        'Glass Blur': 0.826268,
        'Motion Blur': 0.785948,
        'Zoom Blur': 0.798360,
        'Snow': 0.866816,
        'Frost': 0.826572,
        'Fog': 0.819324,
        'Brightness': 0.564592,
        'Contrast': 0.853204,
        'Elastic Transform': 0.646056,
        'Pixelate': 0.717840,
        'JPEG Compression': 0.606500,
    }
e23f902cafb45eeeca9009971c143d00095617c8
13,014
def get_usrid_from_league(league):
    """Collect the unique user ids from a league's entries.

    :param league: LeagueDto: an object containing league information
    :return: list of distinct playerOrTeamId values
    """
    unique_ids = {entry['playerOrTeamId'] for entry in league['entries']}
    return list(unique_ids)
ce91f7f1f0afcc064b8f59c90721b60be47ab4b9
13,016
import math


def polar_2_car_1(phi_list):
    """Convert polar/spherical angles (degrees) to a unit direction vector.

    Refer: paper0 Eq 1, paper1 Eq 1, paper2 Eq 1.3, and
    https://www.cnblogs.com/hans_gis/archive/2012/11/21/2755126.html

    NOTE(review): for phi_list = [60, 45] this returns
    [sin60*sin45, cos45*sin45, cos45] = [0.6124, 0.5, 0.7071], while the
    cited example lists [0.5, 0.6123, 0.707] — the component ordering
    differs; confirm against the papers before relying on the order.

    :param phi_list: list[angle(float, 0 ~ 180 degree)]
    :return: list of direction components (length len(phi_list) + 1)
    """
    components = []
    for i in range(len(phi_list)):
        comp = 1.0
        # Cosine factor for every component after the first.
        if i > 0:
            comp *= math.cos(math.radians(phi_list[i]))
        # Product of sines over the remaining angles.
        for s in range(i, len(phi_list)):
            comp *= math.sin(math.radians(phi_list[s]))
        components.append(comp)
    # Final component is just cos of the last angle.
    components.append(math.cos(math.radians(phi_list[-1])))
    return components
e3166c62bebf99ba8fdde1f61de6be48750332dc
13,018
import string


def apply_object_attributes_to_template(template, value_object):
    """Generate a string from the template by applying values from the given object.

    If the template has no placeholders it is returned unchanged.

    Arguments:
        template -- A string that may or may not be templated. Placeholders
            are populated from attributes of *value_object*.
        value_object -- Any object; attributes are looked up explicitly with
            getattr so @property attributes work as well as normal ones.

    Returns:
        string -- The template with values from value_object applied.
    """
    # Extract field names from the template so each one can be resolved via
    # getattr (which honors properties, unlike vars()/__dict__).
    formatter = string.Formatter()
    field_names = [name for _, name, _, _ in formatter.parse(template)
                   if name is not None]

    values = {}
    for name in field_names:
        try:
            values[name] = getattr(value_object, name)
        except AttributeError as e:
            raise AttributeError(('Unable to apply object to template. Could not look-up attribute \'{}\' in the '
                                  'object \'{}\'. Error: {}').format(name, str(value_object), str(e)))

    return template.format(**values)
266085845a4e4283d9ffa897b4b0b7d2f8669001
13,019
def get_html_mouseover():
    """html content for mouseover mode.

    Returns:
        mouseover html
    """
    # The [[...]] placeholders (width, collapsedHeight, clicktag_url,
    # expandMS, expandedURL, expandedImage, backgroundColor, expandedHeight,
    # clicktag_layer, collapsedContent) are substituted by the ad templating
    # pipeline before the creative is served.
    # NOTE(review): the literal below is served verbatim as markup — do not
    # reformat or re-wrap it; any whitespace change alters the ad payload.
    return """<!DOCTYPE HTML><html lang="en-us"> <head> <meta name="ad.size" content="width=[[width]],height=[[collapsedHeight]]"> <script> var clickTag = "[[clicktag_url]]"; var ads = { }; ads.receiveMessage = function(e) { e.data = e.data || {}; if(e.data.msg && e.data.action) { if(e.data.msg == "adcase" && e.data.action == "click") { window.open(clickTag); } } } window.addEventListener ? window.addEventListener("message", ads.receiveMessage, !1) : (window.attachEvent && window.attachEvent("message", ads.receiveMessage)); var tpl = { expanded: false }; var url = document.location.href.substring(0,document.location.href.length-10); var expandedURL = false; var expandedImage = false; if("[[expandedURL]]" != "") { expandedURL = url + "[[expandedURL]]"; } else { expandedImage = url + "[[expandedImage]]"; } window.top.postMessage({ msg: "adcase", format:"footerFixed", params: { expandOn:'mouseover', expandMS:[[expandMS]], expandTo:"layer", expandedURL: expandedURL, expandedImage: expandedImage, backgroundColor: '[[backgroundColor]]', collapse:true, expandedHeight: [[expandedHeight]], collapsedHeight: [[collapsedHeight]] } } , "*"); </script> </head> <body style='margin:0;cursor:pointer'> [[clicktag_layer]] <div id='ad_collapsed' style=';width:[[width]]px;height:[[collapsedHeight]]px;overflow:hidden;'> [[collapsedContent]] </div> </body></html>"""
2bbb4d69f944b5556a472ab16cdf1bed32a22018
13,021
def urlconf(patterns, do_not_autogenerate=True):
    r"""
    A custom url configuration for this action, just like in Django's urls.py.
    The custom urlconf applies after the urlconf for the controller, unless
    erase is true.

    Example: `["/user/(?P<user_id>\d+)/"]`

    :param patterns: a url pattern or a list of url patterns
    :param do_not_autogenerate: erase the urlconf that was automatically
        generated
    """
    if type(patterns) not in (list, tuple):
        # Bug fix: tuple("abc") iterates a single string pattern into
        # characters; a lone pattern must be *wrapped*, not iterated.
        patterns = (patterns,)

    def decorator(action_function):
        action_function.urlconf = patterns
        action_function.urlconf_erase = do_not_autogenerate
        return action_function
    return decorator
1d6f8c9e840979cbaab54112b3203eebebed5f82
13,022
def note2ratio(note, cents=0):
    """Convert a semitone offset (plus optional cents) to a frequency ratio."""
    semitones = note + cents / 100
    return 2 ** (semitones / 12)
1c78697d3978d122d8be39e406bc8be8b7684f9d
13,023
import os


def absolute_path(dir_path):
    """Expand env vars and '~' in *dir_path* and return the absolute path."""
    expanded = os.path.expandvars(dir_path)
    expanded = os.path.expanduser(expanded)
    return os.path.abspath(expanded)
b2f1f0f8acb1e0854a95542564aceac1d0ea2e0b
13,026
def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, default_value=None, options_to_print=None):
    """
    Prompt the user a message with optionally some options.

    :param message: the message to show to the user
    :param validator: a function that predicates if the input is correct
    :param input_to_option: a function that given the input transforms it in something else
    :param default_value: the value to return as the default if the user doesn't insert anything
    :param options_to_print: the options to print if necessary
    :return: the value inserted by the user validated
    """
    if options_to_print:
        print("Allowed values for {0}:".format(message))
        for item in options_to_print:
            print(item)
    user_prompt = "{0} [{1}]: ".format(message, default_value if default_value is not None else "")

    result = default_value
    # Keep asking until the (converted) input passes validation.
    while True:
        user_input = input(user_prompt).strip() or default_value
        result = input_to_option(user_input)
        if validator(result):
            break
        print("ERROR: {0} is not an acceptable value for {1}".format(user_input, message))
    return result
3dcb9ab47330dfb0ea52b8c9704d22b02b245174
13,027
import re


def get_pull_no(ref):
    """Get the pull request number from a given git ref.

    >>> get_pull_no('refs/pull/12345/head')
    12345
    >>> get_pull_no('refs/pull/6789/merge')
    6789
    """
    found = re.search('refs/pull/([0-9]+)/', ref)
    if not found:
        raise ValueError("Unable to get pull request number from ref {}"
                         .format(ref))
    return int(found[1])
dbd4ca5f241c735976f992765c1e8b507dab53a0
13,030
import os


def section_name(name, n, prefix='py-{pid}'.format(pid=os.getpid())):
    """Join arguments to get a Travis section name, e.g. 'py-123.section.0'.

    Empty components are dropped from the dotted name.
    """
    parts = [prefix, name, str(n)]
    return '.'.join(part for part in parts if part)
413b812bc746c0b5621ee9d2f94ebaad9ce64179
13,033
import logging def _calculate_plot_variables(cp_profile, selected_variables): """ Helper function to calculate valid subset of variables to be plotted """ if not selected_variables: return cp_profile.selected_variables if not set(selected_variables).issubset(set(cp_profile.selected_variables)): logging.warning("Selected variables are not subset of all variables. Parameter is ignored.") return cp_profile.selected_variables else: return list(selected_variables)
73e6a499173fb5c8bd5c957c60eac66a176dd566
13,034
import math


def total_sample_splits_categorical(no_of_values):
    """
    Compute the total number of sample splits that can be generated by
    categoricals.

    Parameters
    ----------
    no_of_values : Int.

    Returns
    -------
    no_of_splits: Float (sum of binomial coefficients halved).
    """
    total = 0
    for subset_size in range(1, no_of_values):
        # Binomial coefficient C(no_of_values, subset_size).
        numerator = math.factorial(no_of_values)
        denominator = (math.factorial(no_of_values - subset_size)
                       * math.factorial(subset_size))
        total += numerator / denominator
    # Each split is counted twice (a subset and its complement).
    return total / 2
705a565998c5cde4a37370e6787aa0f07d973987
13,035
def normalize_commit(commit, **kwargs):
    """
    This takes commits either in the JSON format provided by a GitHub
    webhook, or the object format provided by github3.py, and returns a
    normalized Python dict.
    """
    if isinstance(commit, dict):
        # If GitHub webhook payload:
        sender = kwargs.get("sender", {})
        avatar_url = ""
        # Bug fix: use .get() — `sender` defaults to {} and indexing it with
        # ["avatar_url"] raised KeyError whenever no sender was supplied.
        if sender.get("avatar_url") and sender.get("login") == commit["author"]["username"]:
            avatar_url = sender["avatar_url"]
        return {
            "id": commit["id"],
            "timestamp": commit["timestamp"],
            "author": {
                "name": commit["author"]["name"],
                "email": commit["author"]["email"],
                "username": commit["author"]["username"],
                "avatar_url": avatar_url,
            },
            "message": commit["message"],
            "url": commit["url"],
        }
    else:
        # If github3.py object:
        return {
            "id": commit.sha,
            "timestamp": commit.commit.author.get("date", ""),
            "author": {
                "name": commit.commit.author.get("name", ""),
                "email": commit.commit.author.get("email", ""),
                "username": commit.author.login if commit.author else "",
                "avatar_url": commit.author.avatar_url if commit.author else "",
            },
            "message": commit.message,
            "url": commit.html_url,
        }
7728c9d9f42929fce8193580ef374d1e55e4df7f
13,037
def get_polygon(annotation):
    """Extract the normalized polygon vertices from a Kili annotation.

    Parameters
    ----------
    - annotation : Kili annotation

    Returns None when the expected keys are absent.
    """
    try:
        vertices = annotation['boundingPoly'][0]['normalizedVertices']
    except KeyError:
        return None
    return vertices
4dc22e186db271469ff5070df2b7238f684766ac
13,038
def arrays_to_strings(measure_def):
    """Join list-valued fields of a measure definition into strings.

    To ease readability via newlines, some JSON strings are expressed as
    arrays but stored as strings; this converts such fields in place and
    returns the (mutated) definition.
    """
    fields_to_convert = (
        "title",
        "description",
        "why_it_matters",
        "numerator_columns",
        "numerator_where",
        "denominator_columns",
        "denominator_where",
        "numerator_bnf_codes_query",
        "denominator_bnf_codes_query",
    )

    for field in fields_to_convert:
        value = measure_def.get(field)
        if isinstance(value, list):
            measure_def[field] = " ".join(value)

    return measure_def
212a6211d55ed3745d8772180a5e285771145a05
13,039
def policy(scores, lives=3):
    """Early-stopping policy: no improvement in the last *lives* runs.

    Walks *scores* in order; each score that fails to improve on the
    previous one costs a life, and any improvement restores all lives.
    Returns the index at which training should stop, or -1 to continue.
    """
    remaining = lives
    previous = scores[0]
    for idx, score in enumerate(scores[1:], start=1):
        # Stop on the step *after* the last life was spent.
        if remaining == 0:
            return idx
        if score >= previous:
            remaining -= 1
        else:
            remaining = lives
        previous = score
    return -1
9520b26dfd83f16a331906185edaec1ab9100dc8
13,040
def concat_drive_path(dest_path: str, end_point: str, default_folder: str = 'mydrive') -> str:
    """
    Generate a drive file path.

    :param dest_path: parent path
    :param end_point: file_name or folder_name
    :param default_folder: used in place of dest_path when it is None
    :return: display path string
    """
    if dest_path is None:
        return f"/{default_folder}/{end_point}"
    if dest_path.startswith('id'):
        # id-style destinations keep a literal placeholder folder name.
        return f"{dest_path}/folder_name"
    # Normalize to a leading and trailing slash before appending.
    prefixed = dest_path if dest_path.startswith('/') else f"/{dest_path}"
    suffixed = prefixed if prefixed.endswith('/') else f"{prefixed}/"
    return f"{suffixed}{end_point}"
09dd261e825379604c0c7546b3a6a68d1b35505f
13,042
def temp_gradient(bottom_hole_temperature, surface_temperature, bottom_hole_depth):
    """
    Temperature gradient calculation.

    Parameters
    ----------
    bottom_hole_temperature : float
        Bottom hole temperature (deg F or deg C)
    surface_temperature : float
        Surface temperature (deg F or deg C)
    bottom_hole_depth : float
        Bottom hole depth (ft or m)

    Returns
    -------
    float
        Temperature gradient in deg per depth unit (degF/ft or degC/m)
    """
    delta_temp = bottom_hole_temperature - surface_temperature
    return delta_temp / bottom_hole_depth
f99a9215fd63a8ef564a1fc91de75ea146091712
13,043
def read_ovaldefgroup_file(testfile):
    """Read an oval definitions file and return its full contents."""
    with open(testfile, 'r') as handle:
        return handle.read()
a472777f700e5e768c0995af6a1db4c476fc029f
13,044
def cross2D(v1, v2):
    """Return the scalar (z-component) cross product of 2D vectors v1 and v2."""
    ax, ay = v1[0], v1[1]
    bx, by = v2[0], v2[1]
    return ax * by - ay * bx
6bbe95ac483b349cda500a0c5045181035b46314
13,045
def qualified_name_to_object(qualified_name: str, default_module_name='builtins'):
    """Resolve a fully qualified name into the Python object it names.

    It is true that
    ``qualified_name_to_object(object_to_qualified_name(obj)) is obj``.

    >>> qualified_name_to_object('unittest.TestCase')
    <class 'unittest.case.TestCase'>

    :param qualified_name: name of the form [<module>'.'{<name>'.'}]<name>
    :param default_module_name: module assumed when the name has no dots
    :return: the resolved Python object
    :raise ImportError: if the module could not be imported
    :raise AttributeError: if an attribute along the path is missing
    """
    pieces = qualified_name.split('.')
    if len(pieces) == 1:
        # Bare name: look it up on the default module.
        module_name = default_module_name
        attr_path = pieces
    else:
        module_name = pieces[0]
        attr_path = pieces[1:]
    obj = __import__(module_name)
    for attr in attr_path:
        obj = getattr(obj, attr)
    return obj
f8758a80aaa4196aa559b18a5850840ba4bbd69b
13,046
def aggregated_data_from_experiments(experiments, contains_err=False):
    """Aggregate experiment scores into the structure the plotters expect.

    experiments: dict of score dicts; each key is an experiment label whose
        value maps protocol label -> {metric_label: score, ...}
    contains_err: if True, each metric score has an error stored under the
        key metric_label + '_std'

    Returns a list of four elements:
        [0] list of protocol labels
        [1] list of experiment labels
        [2] list of metric labels (keys containing 'mean' but not 'std')
        [3] dict: metric label -> {experiment label -> (scores-per-protocol,
            stds-per-protocol)}
    """
    experiment_labels = list(experiments)
    # Protocols and metrics are taken from the first experiment; all
    # experiments are assumed to share the same structure.
    first_experiment = experiments[experiment_labels[0]]
    protocol_labels = list(first_experiment)
    first_scores = first_experiment[protocol_labels[0]]
    metric_labels = [m for m in first_scores
                     if 'mean' in m and 'std' not in m]

    scores = {}
    for metric in metric_labels:
        per_experiment = {}
        for exp in experiment_labels:
            values = []
            stds = []
            for protocol in protocol_labels:
                cell = experiments[exp][protocol]
                values.append(cell[metric])
                # Without error info the std defaults to zero.
                stds.append(cell[metric + '_std'] if contains_err else 0.0)
            per_experiment[exp] = (values, stds)
        scores[metric] = per_experiment

    return [protocol_labels, experiment_labels, metric_labels, scores]
561f967e3777230484c2b355a3af109d7172350a
13,047
def CheckForDoubles(String):
    """Split a speaker annotation into the individual speaker tags when
    several people speak at once, in left-to-right order.

    Example: "#Anna #Berta" -> ["#Anna", "#Berta"]
    """
    # Bug fixes vs. the original right-to-left scan: results now come back
    # in document order (as the docstring promises), and no character of a
    # name is dropped when two '#' tags are not separated by whitespace
    # (the old `y = x - 1` always removed one character before each '#').
    speakers = []
    for chunk in String.split("#"):
        name = chunk.strip()
        if name:
            speakers.append("#" + name)
    return speakers
5555d50e2bb1d6fedcf6e7d74c3c1c5c866cea3b
13,048
def monitoredsystems():
    """Return the current list of monitored systems.

    Placeholder implementation — replace with an SQL query later.
    """
    return [
        'Arque',
        'Zaonce',
        'Teorge',
        'Tianve',
        'Tionisla',
        'Neganhot',
        'Orrere',
        'Oguninksmii',
    ]
0590360d7a97e93ab6f00e5139dc94a7bb7c8c7c
13,050
import torch


def unnormalize_img(img, mean, std, in_gpu=True):
    """Undo per-channel normalization and rescale the result to [0, 1].

    img: [3, h, w] tensor; the input is not mutated (a CPU clone is used).
    Note: `in_gpu` is accepted for interface compatibility but unused.
    """
    out = img.detach().cpu().clone()
    out *= torch.tensor(std).view(3, 1, 1)
    out += torch.tensor(mean).view(3, 1, 1)
    low = torch.min(out)
    return (out - low) / (torch.max(out) - low)
52698e7644e2d20e18e3ed44108af5b901d4f01b
13,052
def build_url(hub_base_url: str, org: str, workspace: str) -> str:
    """Compose the base URL of the Chaos Hub workspace where the experiment
    and the journal are stored and made visible.
    """
    return f"{hub_base_url}/api/{org}/{workspace}"
116b9b7f6211ca50b48550689b7e161cee0b4d9d
13,053
def SelectDBSeveralFittest(n, db_list):
    """Select the n fittest individuals.

    @param n: number of fittest individuals to keep
    @param db_list: fitness-ordered list of (fitness, id) entries from the
        database, best first
    @return: the first n entries
    """
    fittest = db_list[:n]
    return fittest
b43709032f21e4854dd0cc7b00a1e7cae983d762
13,055
def longest_peak(relative_angles):
    """Locate the longest stretch of negative angles.

    :param relative_angles: 1-D array of angles
    :return: (start, end) index pair of the longest such area; the start
        index is the last non-negative position before the run
    """
    count = relative_angles.shape[0]
    best = (0, 0)
    anchor = 0
    for idx in range(count):
        # Any non-negative angle resets the anchor of the current run.
        if relative_angles[idx] >= 0:
            anchor = idx
        if idx - anchor > best[1] - best[0]:
            best = (anchor, idx)
    return best
742359ed60f4b6cebc9da8510cb801a08a40df99
13,056
from typing import Callable
from typing import Any
import functools


def agent_must_be_ready(f: Callable[..., Any]) -> Callable[..., Any]:
    """Guard decorator: any Agent method wrapped with this raises if the
    agent has not been prepared yet."""

    @functools.wraps(f)
    def guarded(self, *args: Any, **kwargs: Any) -> Any:
        if self.is_ready():
            return f(self, *args, **kwargs)
        raise Exception(
            "Agent needs to be prepared before usage. You need to set a "
            "processor and a tracker store."
        )

    return guarded
bec74702355d6aaa7796527a05d9510048513910
13,057
def calc_exec_time(block):
    """Total time to execute all observing blocks by exposure time.

    Excludes any read-out time and overheads.

    :param block: the observing Block document
    :type block: Dict
    :rtype: int
    """
    if "parameters" not in block:
        return 0
    params = block["parameters"]

    det1_total = 0
    det2_total = 0
    # Detector 1: needs both exptime and nexp present and truthy.
    if {"det1_exptime", "det1_nexp"} <= params.keys():
        if params["det1_exptime"] and params["det1_nexp"]:
            det1_total = params["det1_exptime"] * params["det1_nexp"]
    # Detector 2: the original also requires the det1 keys to be present.
    if {"det1_exptime", "det2_exptime", "det1_nexp", "det2_nexp"} <= params.keys():
        if params["det2_exptime"] and params["det2_nexp"]:
            det2_total = params["det2_exptime"] * params["det2_nexp"]
    # The detectors expose in parallel, so the longer one dominates.
    return max(det1_total, det2_total)
deaf6649173de784a77601da4ef4df1b7c10cb93
13,058
def _parse_instructors(details):
    """Extract instructor names from the course detail page.

    Args:
        details(Tag): BeautifulSoup Tag for course details

    Returns:
        list of list: [first, last] name parts for each instructor; empty
        list when the expected markup is missing
    """
    try:
        link_fields = details.findAll(
            "div", {"class": "field--name-field-learn-more-links"}
        )
        instructor_divs = link_fields[-1].findAll("div", {"class": "field__item"})
        names = []
        for div in instructor_divs:
            text = div.get_text().strip()
            # Keep only the part before any comma, then split into
            # first name and the remainder.
            before_comma = text.split(",", 1)[0]
            names.append(before_comma.split(" ", 1))
        return names
    except (AttributeError, IndexError):
        return []
3dc4f4ddf62dc9dae894d218e249a279941eca5f
13,060
import random


def convert_labels(read_dataset, write_dataset, percent, no_sentences_to_enable):
    """
    Takes a file and randomly enables no_sentences_to_enable of them.
    :param read_dataset: file to read from
    :param write_dataset: file to write to
    :param percent: percent to enable (used to calculate the probability of enabling)
    :param no_sentences_to_enable: how many sentences should be enabled
    :return: sentences enabled
    """
    sentences_enabled = 0
    write_file_str = ""  # output is accumulated in memory, then written once
    with open(read_dataset) as read_file:
        lines = read_file.read().split("\n")
        line_index = 0
        while line_index < len(lines):
            line = lines[line_index].strip()
            # Blank lines separate sentences; copy them through unchanged.
            if len(line) == 0:
                write_file_str += "\n"
                line_index += 1
                continue
            line_tok = line.split()
            assert len(line_tok) > 2, "Line tok shouldn't be empty!"
            prob = random.random()
            # Enabling probability derived from `percent` (+3 offset).
            conf = (percent + 3) / 100.0
            if conf < 1 and (prob > conf or line_tok[1] == "on" or sentences_enabled >= no_sentences_to_enable):
                # Keep this sentence as-is: copy every line until the
                # blank separator line.
                # NOTE(review): this inner loop indexes lines[line_index]
                # without a bounds check — a file not ending in a blank
                # line could raise IndexError; confirm input format.
                while len(line) != 0:
                    write_file_str += line + "\n"
                    line_index += 1
                    line = lines[line_index].strip()
            else:
                # Enable this sentence: rewrite every token line with the
                # second column forced to "on".
                sentences_enabled += 1
                while len(line) != 0:
                    line_tok = line.split()
                    write_file_str += (line_tok[0] + "\t" + "on" + "\t" +
                                       line_tok[-1] + "\t" + "\n")
                    line_index += 1
                    line = lines[line_index].strip()
                # Emit the sentence separator and skip past the blank line.
                write_file_str += "\n"
                line_index += 1
    with open(write_dataset, 'w') as write_file:
        write_file.write(write_file_str)
    return sentences_enabled
31acbb1a4146bcf4f09a15ed3d89794f90654a44
13,061
def unindent(line, from_row, to_row, count=1):
    """
    Unindent text of the `Line` object.

    Removes up to `count` leading spaces from each row in
    [from_row, to_row) and afterwards places the cursor at the first
    non-whitespace character of the row it started on. The `line` buffer
    is mutated in place.
    """
    # Remember the row the cursor is on so it can be restored afterwards.
    current_row = line.document.cursor_position_row
    line_range = range(from_row, to_row)

    def transform(text):
        # Strip exactly `count` spaces when the line has them; otherwise
        # the line is indented less than `count`, so strip what is there.
        remove = ' ' * count
        if text.startswith(remove):
            return text[len(remove):]
        else:
            return text.lstrip()

    line.transform_lines(line_range, transform)

    # Re-derive the cursor position: jump to column 0 of the original row,
    # then advance past the (now reduced) leading whitespace.
    line.cursor_position = line.document.translate_row_col_to_index(current_row, 0)
    line.cursor_position += line.document.get_start_of_line_position(after_whitespace=True)
3befe045bd7b1a78ee2f675a66410a745046c346
13,064
from typing import Callable
import functools


def thread_exception_decorator(callback: Callable, *callback_args, **callback_kwargs):
    """Decorator factory that reports exceptions raised in a thread to a
    callback.

    SystemExit is re-raised; any other exception is swallowed and, when a
    callback is given, reported to it with the failing call's arguments
    and the exception under the keyword argument ``origin``.
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except BaseException as exc:
                if isinstance(exc, SystemExit):
                    raise
                if callback:
                    origin = {"args": args, "kwargs": kwargs, "e": exc}
                    callback(*callback_args, **callback_kwargs, origin=origin)

        return wrapper

    return decorator
2afb4e0fbb8487fcffd9fcf8788426b3ca75b77a
13,066
import itertools


def get_words(docs, selections):
    """
    Gets the selected words in the provided documents.

    :param docs: the documents to analyze (doc -> list of words)
    :param selections: per-document token labels, aligned with `docs`
        iteration order; 1 opens a selected span, 2 continues the current
        span, any other value ends it
    :return: a dictionary with the documents and for each a list of the
        selected words
    """
    i = 0  # index into `selections`, advanced in lockstep with docs
    obtained_words = {}
    for doc, words in docs.items():
        k = 0  # position of the current word within this document
        obtained_words_doc = []
        in_word = False  # True while inside an open selection span
        for token in selections[i]:
            if token == 1 and k < len(words):
                # Label 1 starts a new selection span.
                obtained_words_doc.append([words[k]])
                in_word = True
            elif token == 2 and k < len(words) and in_word:
                # Label 2 extends the most recently opened span; a 2 with
                # no open span falls through to the else and is ignored.
                obtained_words_doc[len(obtained_words_doc) - 1].append(words[k])
            else:
                in_word = False
            k += 1
        # remove duplicate selections (groupby requires sorted input)
        obtained_words_doc.sort()
        obtained_words_doc = list(w for w, _ in itertools.groupby(obtained_words_doc))
        obtained_words[doc] = obtained_words_doc
        i += 1
    return obtained_words
2c0396ac8de4eae19113e46c51a7e07ff9d4d85f
13,067
def useful_person_for_subjects(
        person_main_topics: list,
        scopus_topics: list,
        k_relevant: int = 1) -> bool:
    """Decide whether a person can be used for the given subjects.

    Args:
        person_main_topics: the person's main topic names
        scopus_topics: topics list in ScopusScienceTopicSearch class format
            (comma-separated strings, topic name first)
        k_relevant: how many leading topics to review

    Returns:
        True when any of the first k_relevant scopus topics matches one of
        the person's main topics.
    """
    if not scopus_topics or not person_main_topics:
        return False
    return any(
        topic.split(',')[0] in person_main_topics
        for topic in scopus_topics[:k_relevant]
    )
df567a755dc6a5f4df57ed3654f407fb27655cd5
13,069
import re


def get_ver():
    """Extract the semantic version string from ./fitgrid/__init__.py.

    Raises ValueError when no X.Y.Z `__version__` assignment is found.
    """
    with open("./fitgrid/__init__.py", "r") as stream:
        match = re.search(
            r".*__version__.*=.*[\"\'](?P<ver_str>\d+\.\d+\.\d+\S*)[\'\"].*",
            stream.read(),
        )
    if match is None:
        msg = """
        fitgrid.__init__.py must have an X.Y.Z semantic version, e.g.,

        __version__ = '0.0.0'
        __version__ = '0.0.0-dev.0.0'

        """
        raise ValueError(msg)
    return match['ver_str']
70d6e7c1938751cf260d666c7bfab10fe1356cd0
13,070
import unicodedata
import string


def normalize(word):
    """Lowercase *word* keeping only ASCII letters, with accents removed
    via Unicode NFKD decomposition."""
    decomposed = unicodedata.normalize('NFKD', word)
    kept = [ch for ch in decomposed if ch in string.ascii_letters]
    return "".join(kept).lower()
a9e9df9163a1e1869b24f46dd781851e522f8313
13,071
def get_hpa(gasprice, hashpower):
    """Hash power accepting the gas price over the last 200 blocks.

    :param gasprice: gas price to evaluate
    :param hashpower: DataFrame indexed by gas price with a 'hashp_pct'
        column of cumulative hash-power percentages
    :return: int percentage in [0, 100]
    """
    # Prices outside the observed range saturate at 0 or 100.
    if gasprice > hashpower.index.max():
        return 100
    if gasprice < hashpower.index.min():
        return 0
    accepting = hashpower.loc[gasprice >= hashpower.index, 'hashp_pct']
    return int(accepting.max())
aef3195b11ea411af8871f0925dc115aa52b7d4e
13,072
def format_line_obj(line_obj, ordered_attributes, delimiter):
    """Render an object's attributes as a delimited string.

    Attributes are read from the instance __dict__ in the given order;
    a missing attribute renders as ':('.
    """
    values = [str(line_obj.__dict__.get(attr, ':(')) for attr in ordered_attributes]
    return str(delimiter).join(values)
be7da1fa6bd080047bdd235d332b72e770dcfa48
13,073
def pyramid(n):
    """Given a number >= 0, return a list of ascending-length sublists
    filled with 1s.

    :param n: number of rows (None or 0 yields an empty list)
    :return: [[1], [1, 1], ..., [1] * n]
    """
    if not n:
        # Covers both None and 0.
        return []
    return [[1] * size for size in range(1, n + 1)]
0d63f8fdfe5c4263ebe4e6730e67219a353e5a0f
13,075
import struct


def parse_images(filename):
    """Read an image file (MNIST-style layout, judging by the 16-byte
    header and 784-byte records — confirm against the data source) and
    return the pixel data as lists of ints.

    :param filename: path to the binary image file
    :return: list of images, each a list of 784 pixel values (0-255)
    """
    images = []
    # Bug fix: the original opened the file without ever closing it;
    # `with` guarantees the handle is released.
    with open(filename, 'rb') as imgs_file:
        # 16-byte big-endian header; only the second field (image count)
        # is used.
        size = struct.unpack(">IIII", imgs_file.read(16))[1]
        for _ in range(size):
            # Each image is 784 unsigned bytes.
            barray = imgs_file.read(784)
            images.append(list(struct.unpack("<784B", barray)))
    return images
bf17dded918051d52cc807f01aed350d94897077
13,076
from pathlib import Path


def read_psw_file(psw_file: Path) -> bytes:
    """Read the password bytes stored in `psw_file`.

    Raises:
        `FileNotFoundError`: if the path is not an existing regular file
        `OSError`: if some IO related error occurred during reading
    """
    if psw_file.is_file():
        return psw_file.read_bytes()
    raise FileNotFoundError("'{}' does not exist".format(psw_file))
64b91077ff4e728f1d6ef26b770bf222e3c8f34a
13,077
from typing import Match
import re


def _convert_all_hexes_to_ints(vbf_string: str) -> str:
    """Replace every 0x/0X hexadecimal literal in *vbf_string* with its
    decimal value.

    >>> _convert_all_hexes_to_ints('abc 0xff def 0XF')
    'abc 255 def 15'
    """

    def hex_match_to_int_str(match: Match) -> str:
        return str(int(match.group(0), base=16))

    # Bug fix: the original character class was [0-9-a-f], which also
    # matches a literal '-' (so e.g. '0x1-2' matched as one token and then
    # int(..., 16) raised ValueError); [0-9a-f] matches hex digits only.
    return re.sub(r"(?i)0x[0-9a-f]+", hex_match_to_int_str, vbf_string)
4b671facb107c8d8ac0d93ad859dcb1b22ddf151
13,078
def matmul_cupydense(left, right, scale=1, out=None):
    """
    Perform the operation
        ``out := scale * (left @ right) + out``
    where `left`, `right` and `out` are matrices and `scale` is a complex
    scalar (default 1). Returns the product when `out` is None, otherwise
    accumulates into `out` in place and returns it.
    """
    product = (left @ right) * scale
    if out is None:
        return product
    out += product
    return out
244a956221054b7fa330100badf8ec5e15e20f8a
13,079
def success_response(data: dict, web):
    """Build a successful JSON response carrying the given payload.

    Keys in `data` override the default 'success'/'message' fields.
    """
    payload = {'success': True, 'message': ''}
    payload.update(data)
    return web.json_response(payload)
000939a3cb832cb0a104788b91db5140c25462b0
13,081
def either(*funcs):
    """Return a predicate that is True when any of `funcs` evaluates true."""

    def combined(value):
        for fn in funcs:
            if fn(value):
                return True
        return False

    return combined
32f037748a1fb4cf06e5fdce7c1bad20e8200d61
13,082
import re


def findalliter(patter, string):
    """Repeatedly apply ``re.findall`` and collect every round's results.

    Each round re-matches `patter` against the second capture group of the
    previous round's first match, until no match is found; the final empty
    result is included.

    :param patter: regex with at least two capture groups; group 2 feeds
        the next round
    :param string: the text to scan
    :return: list of findall results, one per round
    """
    # Bug fix: the original ignored the `patter` parameter and always used
    # the hard-coded pattern r'(\d+)_(.+)'.
    matches = re.findall(patter, string)
    rounds = [matches]
    while matches:
        matches = re.findall(patter, matches[0][1])
        rounds.append(matches)
    return rounds
eddeb7393a7e883a313b9612a78c0e3d0be0e23f
13,083
def split(matrix):
    """
    Splits a given matrix into quarters.
    Input: nxn matrix
    Output: tuple containing 4 n/2 x n/2 matrices corresponding to
    a (top-left), b (top-right), c (bottom-left), d (bottom-right)
    """
    rows, cols = matrix.shape
    half_r, half_c = rows // 2, cols // 2
    top, bottom = matrix[:half_r], matrix[half_r:]
    return (top[:, :half_c], top[:, half_c:],
            bottom[:, :half_c], bottom[:, half_c:])
6284c4a8ff6747b005971a7a37f0327aa5815a7c
13,084
def my_fitness_functions(l):
    """
    Utility functions of M. Sefrioui and J. Perlaux, "Nash genetic
    algorithms: examples and applications," Proceedings of the 2000
    Congress on Evolutionary Computation. CEC00 (Cat. No.00TH8512),
    La Jolla, CA, USA, 2000, pp. 509-516 vol.1,
    doi: 10.1109/CEC.2000.870339.
    """
    x, y = l[0], l[1]
    shared = (x - y) ** 2.0  # coupling term common to both players
    f1 = -(x - 1) ** 2.0 - shared
    f2 = -(y - 3) ** 2.0 - shared
    return [f1, f2]
1da2cf3e1298228f77eb7ed4ae4b6ccbb7401d3a
13,085
import re


def normalize_tokenize(string):
    """
    Normalize a string (lowercase, punctuation stripped) and split it into
    a list of words.

    Plain Python throughout — no NLTK, although NLTK offers more
    sophisticated tokenizers that could have been used instead.
    """
    lowered = string.lower()
    # Drop everything that is neither a word character nor whitespace.
    cleaned = re.sub(r'(?u)[^\w\s]', '', lowered)
    return cleaned.split()
800be8deabbc109f7d9459e5cf2da9fb134557ae
13,086
def _IsDisallowedPropertiesError(error):
    """Checks if an error is due to properties that were not in the schema.

    Args:
      error: A ValidationError

    Returns:
      Whether the error was due to disallowed properties
    """
    if error.validator != 'additionalProperties':
        return False
    return 'Additional properties are not allowed' in error.message
2042806486e861cf97d0e2ec168a167610749ebd
13,087
def my_simple_str(x):
    """Plain str() wrapper.

    Needed because "no signature found for builtin type <class 'str'>"
    is raised when the builtin is used directly.
    """
    return str(x)
bbe332fa413a093d634b1ea9010f6b17212594a2
13,089
def convect(sigma, interps):
    """Convect interps based on density (sigma). Ignores variations in cell
    depths and convects vertically

    :arg interps: dictionary of 3D numpy arrays. Key represents the
                  variable name.
    :type interps: dictionary

    :arg sigma: sigma-t, density, 3D array
    :type sigma: numpy array

    :returns sigma, interps stabilized
    """
    small = 0.01  # NOTE(review): unused — presumably a leftover tolerance
    var_names = interps.keys()
    kmax, imax, jmax = sigma.shape
    good = False
    # Repeat full sweeps over every column until no vertically unstable
    # pair (denser water above lighter water) remains; both sigma and the
    # interps arrays are mutated in place.
    while not good:
        good = True
        for k in range(kmax - 1):
            for i in range(imax):
                for j in range(jmax):
                    if sigma[k, i, j] > sigma[k + 1, i, j]:
                        good = False
                        # Swap each tracked variable together with density
                        # so all fields stay consistent.
                        for var in var_names:
                            interps[var][k, i, j], interps[var][
                                k + 1, i, j
                            ] = interps[var][k + 1, i, j], interps[var][k, i, j
                                                                        ]
                        sigma[k, i, j], sigma[k + 1, i, j] = sigma[
                            k + 1, i, j
                        ], sigma[k, i, j]
    return sigma, interps
0538cd97dd09a808071fee28f5097ea1a4298eb2
13,090
def notqdm(iterable, *args, **kwargs):
    """Silent drop-in replacement for `tqdm`: ignores all options and
    returns the iterable unchanged."""
    return iterable
f37a5f84cf02987e4ada0ba8c0ee324c7a676903
13,091
import os


def file_is_empty(file_path):
    """Return True when the file at *file_path* contains zero bytes."""
    return os.path.getsize(file_path) == 0
e6124a27ebf394cb79b9dfbcdb118a9fde9264cd
13,092
def convertFrom1D(list):
    """Convert a flat list into a column matrix: one single-element row
    per item. (Parameter name kept for interface compatibility even though
    it shadows the builtin.)
    """
    column = []
    for element in list:
        column.append([element])
    return column
c3bd41be26a20c6895c6ecc30d7e28005a17a44e
13,093
import asyncio


async def maybe_async(func, *args, **kwargs):
    """Call *func*, which may be synchronous or asynchronous, awaiting the
    result when it turns out to be a coroutine."""
    outcome = func(*args, **kwargs)
    if asyncio.iscoroutine(outcome):
        return await outcome
    return outcome
ac1d507c8f83b573ef5c3416e2285f3df752d4aa
13,094
import sys
import mpmath


def tables(f, r, v, xi, symmetric, is64, verbose=False):
    """Calculate the ziggurat tables k[i], w[i], and f[i].

    :param f: density function being tabulated
    :param r: rightmost (tail) x coordinate
    :param v: common area of each ziggurat layer
    :param xi: layer x coordinates
    :param symmetric: whether the distribution is symmetric about 0
    :param is64: whether the tables target a 64-bit generator
    :param verbose: print progress to stderr
    """
    ki = [None] * len(xi)
    wi = ki[:]
    fi = ki[:]
    if is64:
        # The ziggurat user masks off bits to select the bin and symmetry.
        im = 2 ** (63 - (len(xi)-1).bit_length())
    else:
        if symmetric:
            # presumably one bit is reserved for the sign — confirm
            im = 2**31
        else:
            im = 2**32
    for i, x in enumerate(xi):
        # Progress output every 8th layer.
        if verbose and i & 7 == 0:
            print('\r{0}/{1}'.format(i, len(xi)), end='', file=sys.stderr)
        if i == 0:
            # Base layer uses r (the tail coordinate) instead of xi[-1].
            ki[0] = mpmath.floor(im * r*f(r)/v)
            wi[0] = v / f(r) / im
        else:
            ki[i] = mpmath.floor(im * xi[i-1]/x)
            wi[i] = x / im
        fi[i] = f(x)
    if verbose:
        print('\r{0}/{0}'.format(len(xi)), file=sys.stderr)
    # NOTE(review): the generator variable `v` below shadows the parameter
    # `v`; harmless only because the function returns immediately after.
    assert all(v is not None for v in ki)
    assert all(v is not None for v in wi)
    assert all(v is not None for v in fi)
    return ki, wi, fi
a33ca631153d183d01a45f75a72f474396a2df05
13,095
def slice2limits(slices):
    """
    Create a tuple of min,max limits from a set of slices

    Parameters:
    * slices: list of slices

    Returns: Tuple of minimum and maximum indices
    """
    mins = []
    maxs = []
    for s in slices:
        mins.append(s.start)
        maxs.append(s.stop - 1)  # stop is exclusive, so max index is stop-1
    return mins, maxs
1196e67fea135fb3c363def9d5fe53a0ada9b0af
13,097
def solution(array, n_rotations):
    """
    Returns the Cyclic Rotation of array with n_rotations positions to
    the right.
    """
    size = len(array)
    # Guard the modulo against an empty array.
    shift = n_rotations % size if size else 0
    pivot = size - shift
    return array[pivot:] + array[:pivot]
6f5edc0c8ec3d0e58830466e61577e80a725a624
13,098
import re def check_question(msg): """ 提问 :param msg: :return: """ return re.search('\?', msg) or re.search('?', msg) or re.search('吗?', msg)
3ac884d153a32586d5dfe197b597f6e125bed8eb
13,099
import yaml
import json


def load_data(file_path, use_yaml=True):
    """Load data from a YAML or JSON file into a Python object.

    :param file_path: path to the file to read.
    :param use_yaml: parse as YAML when True, as JSON otherwise.
    :return: contents of the file as a Python object.
    """
    with open(file_path, "r") as handle:
        if use_yaml:
            return yaml.safe_load(handle)
        return json.load(handle)
b136e16cd614cfdb3ca791a6d64d513cf52dbf7c
13,100