content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def quintil_rent(x, p, d):
    """Assign the rentability value *x* to a quintile bucket.

    Cutoffs are read from d[p], a mapping of cumulative fractions
    (0.2, 0.4, 0.6, 0.8) to threshold values; values above the 0.8
    cutoff fall in 'Q5'.
    """
    cutoffs = d[p]
    for fraction, bucket in ((0.20, 'Q1'), (0.4, 'Q2'), (0.6, 'Q3'), (0.8, 'Q4')):
        if x <= cutoffs[fraction]:
            return bucket
    return 'Q5'
c510d469a23a8fbb4f87d2ea52ad835e57f023fc
33,582
import json


def from_json(text):
    """Deserialize *text* (a str, or UTF-8 encoded bytes) as JSON."""
    decoded = text.decode('utf-8') if isinstance(text, bytes) else text
    return json.loads(decoded)
3374c29a5e36dd096f4cb1c3d6692bb9af242386
33,583
def visual_bounds(EBC, std_lon=False):
    """Return plotting bounds covering a decently large swatch of an EBC.

    Parameters
    ----------
    EBC : str
        Identifier for the Eastern Boundary Current:
        'CalCS' (California), 'HumCS' (Humboldt),
        'CanCS' (Canary), 'BenCS' (Benguela).
    std_lon : boolean (optional)
        Set to True if you desire -180 to 180 longitude.

    Returns
    -------
    lon1, lon2, lat1, lat2 : int
        Min/max longitude and min/max latitude boundaries.

    Examples
    --------
    import esmtools.ebus as ebus
    x1,x2,y1,y2 = ebus.visual_bounds('CalCS')
    """
    # (lon1, lon2, lat1, lat2) in -180..180 longitude convention.
    bounds = {
        'CalCS': (-135, -115, 32, 45),
        'HumCS': (-85, -70, -20, 0),
        'CanCS': (-25, -5, 15, 35),
        'BenCS': (5, 20, -30, -15),
    }
    if EBC not in bounds:
        raise ValueError('\nMust select from the following EBUS strings:'
                         '\nCalCS\nCanCS\nBenCS\nHumCS')
    lon1, lon2, lat1, lat2 = bounds[EBC]
    # Shift to 0..360 unless the caller asked for standard longitudes;
    # BenCS is already east of Greenwich so it never needs the shift.
    if not std_lon and EBC != 'BenCS':
        lon1 += 360
        lon2 += 360
    return lon1, lon2, lat1, lat2
a1f787ad911b16cdad6486ef94bf1595bc8fb833
33,584
def update(event_handler):
    """Decorator for event handlers: after the handler runs, restore the
    axes limits to the background image extent and redraw the canvas."""
    def _refresh_after(self, *args, **kwargs):
        event_handler(self, *args, **kwargs)
        # self.ax.imshow(self.bgimg)
        self.ax.set_xlim(0, self.bgimg.shape[1])
        self.ax.set_ylim(0, self.bgimg.shape[0])
        self.ax.invert_yaxis()
        self.fig.canvas.draw()
    return _refresh_after
cdc65e1f937b0cfc6423b9a4142460202e5565bb
33,585
import os
import random


def random_text(random_file=None):
    """Return a randomly chosen line (newline included) from *random_file*.

    Defaults to the bundled data/random.txt two levels above this module.
    """
    if random_file is None:
        package_root = os.path.dirname(os.path.dirname(__file__))
        random_file = os.path.join(package_root, "data", "random.txt")
    with open(random_file) as handle:
        return random.choice(handle.readlines())
541c21924704d1d9f8843c29ed7b66598b3a1f4e
33,587
import json


def load(path):
    """Read the UTF-8 encoded JSON file at *path* and return the parsed
    object (a dict or a list).

    :param path: path of the saved file
    """
    # json.loads is the inverse of dumps: text -> Python object.
    with open(path, 'r', encoding='utf-8') as f:
        return json.loads(f.read())
2e95625d505b9d983f9c2094c2fe0300a0f19456
33,588
def is_success(code):
    """Return True when *code* is an HTTP status considered successful
    (200 OK or 202 Accepted)."""
    return code in (200, 202)
442ac0b94a14afe31be26cee454f7a4e69e46d7c
33,589
def exponential_smoothing(series, alpha):
    """Exponentially smooth *series*.

    Exponential smoothing weights all observations, with weights decaying
    exponentially as we move back in time; the recursion is
    y_hat[n] = alpha * y[n] + (1 - alpha) * y_hat[n - 1].

    :param series: sequence of observations (e.g. dataframe column values)
    :param alpha: float in [0.0, 1.0]; smaller alpha gives previous
        observations more influence and a smoother series
    :return: list of smoothed values, one per observation
    """
    smoothed = [series[0]]  # seed with the first raw observation
    for observation in series[1:]:
        smoothed.append(alpha * observation + (1 - alpha) * smoothed[-1])
    return smoothed
0d62b329daea56355ba81e0bff5cac9b65858877
33,590
def RGB(image):
    """Convert a YCbCr image to RGB.

    input: array with YCbCr values between 16 and 235 (Y) or 240 (Cb, Cr)
    output: array with RGB values between 0 and 255

    The per-channel expressions implement the ITU-R BT.601 studio-swing
    conversion; they are kept verbatim so float results are bit-identical.
    """
    out = image.copy()
    Y = image[:, :, 0]
    Cb = image[:, :, 1]
    Cr = image[:, :, 2]
    # Red channel: no Cb contribution.
    out[:, :, 0] = (255.0/219.0)*(Y - 16.0) + (0.0/112.0) *(Cb - 128.0)+ (255.0*0.701/112.0)*(Cr - 128.0)
    # Green channel: negative contributions from both chroma planes.
    out[:, :, 1] = (255.0/219.0)*(Y - 16.0) - (0.886*0.114*255.0/(112.0*0.587)) *(Cb - 128.0) - (255.0*0.701*0.299/(112.0*0.587))*(Cr - 128.0)
    # Blue channel: no Cr contribution.
    out[:, :, 2] = (255.0/219.0)*(Y - 16.0) + (0.886*255.0/(112.0)) *(Cb - 128.0) + (0.0/112.0)*(Cr - 128.0)
    return out
351614248bc6c3ea3b99b777f327c52f8c775944
33,591
import os


def get_abs_path_url(path):
    """Return the file:// URL for a local *path* (made absolute first)."""
    return "file://{}".format(os.path.abspath(path))
36680eb29fd3e69334f76f7704b65e3c938ccbf0
33,592
def _iterate_over_nested_obj(obj, key): """It iterates over the nested dict object. It iterates over two types of data * list * dict for the rest data type it returns the value Args: obj (any type): object to process key (str, int): key to find in object """ if isinstance(obj, dict): if obj.get(key): return obj[key] elif isinstance(obj, list): for item in obj: value = _iterate_over_nested_obj(item, key) if value: return value return None else: return None
7b6352d6f24a8753b700c2c6491455460d5a4274
33,593
import os


def cwd_relative(path, args):
    """Translate a prefix-relative *path* to one relative to the initial
    working directory (args.root and args.initial_working_dir)."""
    absolute = os.path.join(args.root, path)
    return os.path.relpath(absolute, args.initial_working_dir)
8de4cf30740222064eab2aacbf7f078e04ff7d85
33,594
def target_distribution(targets):
    """Get the target distributions.

    Arguments:
        targets (pd.Series): Target data

    Returns:
        pd.Series: Frequency of each target value as a formatted
        percentage string, with the index axis named 'Targets'.
    """
    fractions = targets.value_counts().div(len(targets))
    percentages = fractions.mul(100).apply('{:.2f}%'.format)
    return percentages.rename_axis('Targets')
93148f804130c33d18a9e33a3bf8d6fb12f6e3df
33,595
import numpy


def threshold_op(base_array, threshold_val, base_nodata, target_nodata):
    """Threshold base to 1 where val >= threshold_val.

    Pixels approximately equal to *base_nodata* or to 0 are left at
    *target_nodata*; all other pixels become 0/1 per the threshold.
    """
    result = numpy.full(base_array.shape, target_nodata, dtype=numpy.uint8)
    valid_mask = (~numpy.isclose(base_array, base_nodata)
                  & ~numpy.isclose(base_array, 0))
    result[valid_mask] = base_array[valid_mask] >= threshold_val
    return result
e8b4a07483ad7a6beef798708a8d6a615a8c35f6
33,597
def ExperimentTemplate() -> str:
    """A template with Markdown syntax.

    :return: str with Markdown template (experiment header plus an empty
        settings table to be filled with the run's arguments)
    """
    # NOTE(review): the source was recovered from a whitespace-collapsed
    # dump; line breaks inside the template were reconstructed from the
    # Markdown structure — confirm against the original file.
    return """
Experiment
==========

Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
can be used to describe this experiment. For instance, you can find the
automatically generated used settings of this run below.

Current Settings
----------------

| Argument | Value |
| -------- | ----- |
"""
302d41a33dc9bfebfdca53980a87c8c77e8f475a
33,600
import pip
import os
import sys


def get_pip_program(exe=None):
    """
    Gets :epkg:`pip` executable and fixes an issue with :epkg:`Pandoc`.

    @param      exe     path to python executable (defaults to the
                        directory of ``sys.executable``)
    @return             pip executable path (or bare command name on
                        non-Windows when *exe* is None)

    .. faqref::
        :title: How can I check the dependencies?

        The module `pipdeptree <https://github.com/naiquevin/pipdeptree>`_
        prints the dependency tree of the installed packages with the
        required and installed version of each dependency.
    """
    # NOTE(review): reconstructed from a whitespace-collapsed dump; the
    # nesting of the existence checks (especially whether the first
    # FileNotFoundError is guarded) should be confirmed against the
    # original source.
    tried = []  # every candidate path, reported when nothing is found
    if exe is None:
        exe = os.path.dirname(sys.executable)
    major, minor = sys.version_info[0:2]
    if sys.platform.startswith("win"):
        if not exe.lower().endswith("scripts"):
            # Look under <exe>/Scripts, falling back through versioned names.
            pi = os.path.join(exe, "Scripts", "pip.exe")
            tried.append(pi)
            if not os.path.exists(pi):
                pi = os.path.join(exe, "Scripts", "pip%d.exe" % major)
                tried.append(pi)
                if not os.path.exists(pi):
                    # Anaconda is different
                    pi = os.path.join(exe, "Scripts", "pip.exe")
                    tried.append(pi)
                    if not os.path.exists(pi):
                        pi = os.path.join(exe, "Scripts", "pip%d.exe" % major)
                        tried.append(pi)
                        if not os.path.exists(pi):
                            pi = os.path.join(
                                exe, "Scripts", "pip%d.%d.exe" % (major, minor))
                            tried.append(pi)
                            raise FileNotFoundError(
                                "tried (1):\n" + "\n".join(tried) +
                                "\n---- try ---\npython -m pip install -U pip --force")
        else:
            # exe already points at a Scripts directory.
            pi = os.path.join(exe, "pip.exe")
            tried.append(pi)
            if not os.path.exists(pi):
                # Anaconda is different
                pi = os.path.join(exe, "pip.exe")
                tried.append(pi)
                if not os.path.exists(pi):
                    pi = os.path.join(exe, "pip%d.exe" % major)
                    tried.append(pi)
                    if not os.path.exists(pi):
                        pi = os.path.join(exe, "pip%d.%d.exe" % (major, minor))
                        tried.append(pi)
                        if not os.path.exists(pi):
                            raise FileNotFoundError(
                                "tried (2):\n" + "\n".join(tried) +
                                "\n---- try ---\npython -m pip install -U pip --force")
    else:
        if sys.version_info[0] == 2:
            if exe is None:
                return "pip"
            else:
                pi = os.path.join(exe, "pip")
        else:
            major = sys.version_info[0]
            minor = sys.version_info[1]
            if exe is None:
                return "pip%d.%d" % (major, minor)
            else:
                # this does not work because on Linux, the binary is installed on the local path
                # pip3.4 are not in the same place
                # pi = os.path.join(exe, "pip%d.%d" % (major, minor))
                exe = os.path.normpath(os.path.join(os.path.dirname(
                    pip.__file__), "..", "..", "..", "..", "bin"))
                pi = os.path.join(exe, "pip%d.%d" % (major, minor))
                if not os.path.exists(pi):
                    pi = os.path.join(exe, "pip")
                    if not os.path.exists(pi):
                        raise FileNotFoundError(
                            "unable to find pip: {0}\n__file__={1}\nexe={2}".format(
                                pi, pip.__file__, exe))
    return pi
e26f89d48506f1af66a2e1315b52cc8142a5852d
33,601
def bubbleSort(l):
    """Sort *l* in place (ascending) with bubble sort and return it.

    Bug fix: the original loops started at index 1, so element 0 was
    never compared or moved — e.g. [2, 1] came back unsorted.
    """
    n = len(l)
    for sweep in range(n - 1):
        # After each sweep the largest remaining element has bubbled
        # to the end, so the inner range shrinks.
        for j in range(n - 1 - sweep):
            if l[j] > l[j + 1]:
                l[j], l[j + 1] = l[j + 1], l[j]
    return l
55b486dda1f7efdd994903ef68482fc7e22444b5
33,602
def encode_bool(value: bool) -> bytes:
    """Encode a boolean as a single byte: b'\\x01' for True, b'\\x00' for False."""
    return b'\x01' if value else b'\x00'
fdfc695dcede9df5e79df8789837cd30a1deb0f8
33,603
from subprocess import Popen, PIPE, STDOUT


def run_cmd(cmd, shell=False, input=None):
    """Run *cmd* and return a dict of its outputs.

    stderr is merged into stdout (via STDOUT redirection), so the
    returned 'stderr' entry is always None.

    :return: {'stdout': str, 'stderr': None, 'exit_code': int}
    """
    proc = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=STDOUT,
                 shell=shell, universal_newlines=True)
    out, err = proc.communicate(input=input)
    return {'stdout': out, 'stderr': err, 'exit_code': proc.returncode}
c5d91da12d9b34341cf20a3e167be326d8060d35
33,604
import subprocess
import sys


def pip_import(module, pypi_name=None):
    """Import *module*, pip-installing it on ImportError.

    :param module: importable module name
    :param pypi_name: PyPI distribution name when it differs from
        *module* (defaults to *module*)
    :return: the imported module (a second ImportError propagates if
        installation did not help)
    """
    try:
        return __import__(module)
    except ImportError:
        package = pypi_name or module
        subprocess.call([sys.executable, "-m", "pip", "install", package])
        return __import__(module)
fdae11df335a13662467407834774ea3efe55163
33,605
def filterProj4String(p4string):
    """Filter flags that trip pyproj up out of a Proj4 string.

    Removes every '+units=' flag, and every '+ellps=' flag when a
    '+datum=' flag is present (a datum already implies the ellipsoid).
    """
    drop_ellps = "+datum=" in p4string

    def _keep(flag):
        """Decide whether a single flag survives the filter."""
        if flag.startswith("+units="):
            return False
        return not (drop_ellps and flag.startswith("+ellps="))

    outstring = " ".join(flag for flag in p4string.split(" ") if _keep(flag))
    print("String returned: {}".format(outstring))
    return outstring
73d8c5f3255620c9cfe79311a6541c87ed17f4a2
33,606
import argparse


def create_argument_parser():
    """Create argument parser for the evaluate script."""
    arg_parser = argparse.ArgumentParser(
        description='evaluates a dialogue model')
    arg_parser.add_argument(
        '-s', '--stories', type=str, required=True,
        help="file that contains the stories to evaluate on")
    arg_parser.add_argument(
        '-m', '--max_stories', type=int, default=None,
        help="maximum number of stories to test on")
    arg_parser.add_argument(
        '-d', '--core', required=True, type=str,
        help="core model to run with the server")
    arg_parser.add_argument(
        '-u', '--nlu', type=str,
        help="nlu model to run with the server. None for regex interpreter")
    arg_parser.add_argument(
        '-v', '--verbose', default=True,
        help="use verbose logging")
    arg_parser.add_argument(
        '-o', '--output', type=str, default="story_confmat.pdf",
        help="output path for the created evaluation plot")
    return arg_parser
5c26c56721f476b81149f9389474f56d86f7a5bc
33,607
import torch


def remove_init_unsafe_from_d(data, initials, unsafes):
    """Drop samples lying within 1.2x the initial/unsafe ball radii.

    :param data: iterable of points (tensor rows)
    :param initials: (center, radius) describing the initial set
    :param unsafes: (center, radius) describing the unsafe set
    :return: stacked tensor of the surviving points
    """
    init_center = torch.tensor(initials[0])
    unsafe_center = torch.tensor(unsafes[0])
    init_margin = initials[1] * 1.2
    unsafe_margin = unsafes[1] * 1.2
    survivors = [
        point for point in data
        if torch.norm(init_center - point) > init_margin
        and torch.norm(unsafe_center - point) > unsafe_margin
    ]
    return torch.stack(survivors)
e443aa75f9483108f2fa81b603ad5f06cb4ad329
33,609
def flatten_globals(sequence_globals, evaluated=False):
    """Flatten grouped globals into a single name -> value mapping.

    With evaluated=False each stored value is a (expression, units,
    expansion) triple and only the value-expression string is kept;
    with evaluated=True the stored value is kept unchanged.
    """
    flattened = {}
    for group in sequence_globals.values():
        for name, value in group.items():
            if evaluated:
                flattened[name] = value
            else:
                value_expression, _units, _expansion = value
                flattened[name] = value_expression
    return flattened
110e4b9e5af2981c50dd4b8295f542cfaf4c2d83
33,610
def reversed_iterator(iter):
    """Return a reversed iterator over the elements of *iter*.

    The iterator is materialized into a list first, since arbitrary
    iterators cannot be reversed lazily.
    """
    materialized = list(iter)
    return reversed(materialized)
0f0a012573948002777d0706024f207a98d09022
33,612
import argparse


def get_args():
    """Parse and return the command line arguments."""
    arg_parser = argparse.ArgumentParser(
        description='Python cli boilerplate',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('-n', '--num',
                            help='A number',
                            metavar='int',
                            type=int,
                            default=10)
    return arg_parser.parse_args()
4e8ca0c0ac418a6b9ae4e697d1c38143f5f43734
33,613
def std_label(value):
    """Capitalize Crystal's all-uppercase species names.

    E.g. ['FE', 'O'] -> ['Fe', 'O'].
    """
    return [label.lower().capitalize() for label in value]
ffad92d9ed272bf9fabcc650e2e9b1e967d00246
33,614
def is_value(field: str, field_name: str, value: str) -> bool:
    """Return True when *field* equals *value*.

    *field_name* is accepted for interface compatibility but not used.
    """
    return field == value
56d600056c1e3b8fac81112a50fadf09dcbef299
33,615
def same_module(left, right):
    """Return True when *left* and *right* denote the same module.

    Two modules match when their names are equal and they are defined
    in the same cabal(-dev sandbox), in the same file, or nowhere at all.
    """
    if left.name != right.name:
        return False
    shared_cabal = left.cabal and right.cabal and (left.cabal == right.cabal)
    shared_file = (left.location and right.location
                   and (left.location.filename == right.location.filename))
    located_nowhere = not (left.cabal or left.location
                           or right.cabal or right.location)
    return bool(shared_cabal or shared_file or located_nowhere)
2ca767afd576497b2313e2963c6411f7f595a033
33,617
import hashlib


def compute_hash(filepath: str) -> str:
    """Return the hex MD5 digest of a filepath string (not file contents)."""
    return hashlib.md5(filepath.encode()).hexdigest()
ac311fd236a5250402231e506387a2a42073af3e
33,618
def add_pem_headfoot(public_key):
    """Wrap a public key body in PEM BEGIN/END markers.

    Keyword Parameters:
    public_key  -- String, representing the public key text

    >>> add_pem_headfoot('foo')
    '-----BEGIN PUBLIC KEY-----\\nfoo\\n-----END PUBLIC KEY-----'
    """
    return ("-----BEGIN PUBLIC KEY-----\n"
            + public_key
            + "\n-----END PUBLIC KEY-----")
dcb093f942d47f6a11bd2dc82441ba6e82f55b01
33,619
import functools
import operator


def prod(it):
    """Product of an iterable.

    Returns 1 for an empty iterable (the multiplicative identity),
    instead of raising TypeError as the unseeded reduce did.
    """
    return functools.reduce(operator.mul, it, 1)
e20df189d56656f680782579759c5080e6cb75c8
33,620
def many_parents_edges():
    """Node 62 has 18 parents and no children."""
    parents = ['96', '80', '98', '100', '86', '102', '104', '64', '106',
               '108', '110', '112', '114', '116', '118', '122', '70', '94']
    return [(parent, '62') for parent in parents]
7eb0509ce26e210fbf457ee59db504e73668ed97
33,621
def extract_dict_key(dataframe, column, key, new_column=None, separator='.'):
    """Extract values of ``key`` from dicts in ``column`` into ``new_column``.

    Rows whose cell is not a dict keep their value as-is (so None/NaN
    propagate); a missing key yields ``None``.

    :param dataframe: The DataFrame object to work on.
    :type dataframe: :class:`DataFrame <pandas.DataFrame>`
    :param str column: The name of the column which should be extracted.
    :param str key: Key that should be extracted.
    :param str new_column: Name of the new column. By default, ``column``
        is applied as prefix to ``key``; pass ``""`` to use ``key`` alone.
    :param str separator: Separator between ``column`` and ``key`` when
        ``new_column`` is not specified.
    :returns: The extracted DataFrame
    :rtype: :class:`DataFrame <pandas.DataFrame>`
    """
    if new_column != "":
        target = new_column or '{}{}{}'.format(column, separator, key)
    else:
        target = key
    extracted = dataframe[column].apply(
        lambda cell: cell.get(key) if isinstance(cell, dict) else cell
    ).rename(target)
    dataframe.loc[:, target] = extracted
    return dataframe
5567a3e8e837e45b851779942e2c9729d4fe856c
33,622
def compression(path):
    """Infer from the file suffix whether compression is in use.

    '.gz' -> True, '.dil' -> False; any other suffix raises.
    """
    ext = path.suffix
    if ext == ".gz":
        return True
    if ext == ".dil":
        return False
    raise Exception(f"invalid file extension [{ext}], must be .dil or .dil.gz")
37d556c149bf15876b352979e00f12033c40b796
33,623
def ros_unadvertise_service_cmd(service):
    """Create a rosbridge unadvertise_service command object.

    :param service: name of the service
    """
    return {"op": "unadvertise_service", "service": service}
ef78ba4ffbb6fc8b0f13ccaebdfb13651294a330
33,625
def mk_struct(name, label, resn=None, chain=None, color='white',
              style='cartoon', transparency=None):
    """Generate the PyMol code to display a structure.

    :param name: source object to select from
    :param label: name of the PyMol object to create
    :param resn: optional residue-name filter
    :param chain: optional chain filter
    :param color: display color (default 'white')
    :param style: representation style (default 'cartoon')
    :param transparency: optional transparency value; emits a
        'set <style>_transparency=...' line when given
    """
    # NOTE(review): recovered from a whitespace-collapsed dump; the line
    # breaks inside the command template were reconstructed from PyMol
    # command structure — confirm against the original file.
    return """
create %(label)s, %(name)s %(chain)s %(resn)s
show %(style)s, %(label)s
color %(color)s, %(label)s
%(transparency)s
""" % {
        'label': label,
        # ENH: make optional
        'name': name,
        'resn': ("and resn %s" % resn) if resn else '',
        'chain': ("and chain %s" % chain) if chain else '',
        'color': color,
        'style': style,
        'transparency': ("set %s_transparency=%s, %s\n"
                         % (style.rstrip('s'), transparency, label)
                         if transparency is not None else ''),
    }
329ec5c8cf2a4d1d54436471a2757d257fb53cca
33,627
def values_exist(expected: set, actual: set) -> bool:
    """Check that every expected worksheet is present in *actual*.

    :return: True when *expected* is a subset of *actual*.
    """
    return expected.issubset(actual)
54dd7a070b0488efa0d1cda3ba78cd8238f55c6e
33,628
def loopback_ip():
    """Return the IPv4 loopback address for localhost."""
    localhost = "127.0.0.1"
    return localhost
1ddba8f7935b528fbac2f1db1f06f22ce616007b
33,629
import os


def different_args(args, this_config_file, logger):
    """Return the names of tracked args whose values differ from the
    previous run's config file; empty list when nothing differs.

    :param args: argparse.Namespace (read via vars())
    :param this_config_file: path of the previous run's "key:value"
        per-line config file
    :param logger: logger used to report each mismatch
    :raises ValueError: when the old config file is missing or empty,
        signalling the caller to rerun from scratch
    """
    diff_args = []
    # Only these parameters are compared against the old config.
    args_to_write = ["maxdist", "subassembler", "maxcov"]
    old_config_dict = {}
    this_config_dict = vars(args)
    if not os.path.exists(this_config_file):
        raise ValueError(
            "No previous config file found %s; rerunning" % this_config_file)
    with open(this_config_file, "r") as f:
        for line in f:
            (key, val) = line.strip().split(":")
            old_config_dict[key] = val
    if len(old_config_dict) == 0:
        raise ValueError("Old config file empty; rerunning")
    for arg in args_to_write:
        # note that reading from the file makes all old args strings, so we
        # accomodate that
        try:
            if str(this_config_dict[arg]) != old_config_dict[arg]:
                logger.info(
                    "New parameter value for " +
                    "{} ({}) doesn't match old value ({})".format(
                        arg, this_config_dict[arg], old_config_dict[arg]))
                diff_args.append(arg)
        except KeyError:
            logger.info("parameter %s not found in old config", arg)
            # this can happen after updates -- play it safe and redo it
            diff_args.append(arg)
    return diff_args
09305de32e385df3d5e130a66a343c73558eabf1
33,632
import torch


def physical_violation(bdb_layout, bdb_3d):
    """Compute the loss of physical violation: how far each object box
    pokes outside the layout box along each axis.

    :param bdb_layout: 1 x 8 x 3 tensor (layout box corners)
    :param bdb_3d: b x 8 x 3 tensor (object box corners)
    :return: (violation b x 3, zero target b x 3 on CUDA)
    """
    batch = bdb_3d.size(0)
    layout_hi = torch.max(bdb_layout, dim=1)[0].expand(batch, -1)  # b x 3
    layout_lo = torch.min(bdb_layout, dim=1)[0].expand(batch, -1)  # b x 3
    box_hi = torch.max(bdb_3d, dim=1)[0]  # b x 3
    box_lo = torch.min(bdb_3d, dim=1)[0]  # b x 3
    relu = torch.nn.functional.relu
    # Positive wherever a box extends beyond the layout on either side.
    violation = relu(box_hi - layout_hi) + relu(layout_lo - box_lo)
    return violation, torch.zeros(batch, 3).cuda()
6352591534822467d35ac0c72fbfcc1800ad8862
33,633
def clear_stop_word(file_name, sentence_list):
    """Remove stop words from a sentence, in place.

    Parameter : *file_name* of a whitespace-separated stop-word file,
    and *sentence_list*, a list of words (mutated and also returned).
    """
    with open(file_name, "r") as handle:
        stopwords = [word for line in handle for word in line.strip().split()]
    # Collect the occurrences first so we do not mutate the list while
    # scanning it.
    occurrences = [word for word in sentence_list if word in stopwords]
    for word in occurrences:
        sentence_list.remove(word)
    return sentence_list
a20f35c232371e55f3f1d7b52e139f916f4bc6a5
33,635
import re


def string2lines(astring, tab_width=8, convert_whitespace=False,
                 whitespace=re.compile('[\v\f]')):
    """Split *astring* into one-line strings with tabs expanded, no
    newlines, and trailing whitespace stripped.

    Each tab is expanded with between 1 and `tab_width` spaces so the
    next character's index becomes a multiple of `tab_width`.

    Parameters:

    - `astring`: a multi-line string.
    - `tab_width`: the number of columns between tab stops.
    - `convert_whitespace`: convert form feeds and vertical tabs to spaces?
    - `whitespace`: pattern object with the to-be-converted whitespace
      characters (default [\\v\\f]).
    """
    if convert_whitespace:
        astring = whitespace.sub(' ', astring)
    return [line.expandtabs(tab_width).rstrip()
            for line in astring.splitlines()]
add99c9a5844cc8b7a2c9cc63dc18a42ee4e9f10
33,636
def add_default(name, **kwargs):
    """Class decorator: register a validator in the plugin's DEFAULT_META.

    The validator will be added to all subclasses by default.

    :param name: str, name of the validator plugin
    :param kwargs: dict, arguments used to initialize validator class instance
    """
    def decorate(plugin):
        if not hasattr(plugin, "DEFAULT_META"):
            plugin.DEFAULT_META = {}
        validators = plugin.DEFAULT_META.setdefault("validators", [])
        validators.append((name, (), kwargs))
        return plugin
    return decorate
96f1bd3faadb636e500a16cd867445714ffe4a52
33,637
def make_progress(row):
    """Return an HTML Bootstrap progress-bar string for *row*.

    :param row: mapping with 'hits', 'dots', 'nulls' counts and 'tot'
        total, or None (returns "")

    Each segment width is the category's percentage of 'tot', minus a
    0.05 fudge so the three bars never overflow 100% combined.
    """
    # NOTE(review): recovered from a whitespace-collapsed dump; line
    # breaks inside the HTML template were reconstructed — confirm
    # against the original file.
    if row is None:
        return ""
    hits = row["hits"] / float(row["tot"]) * 100.0
    dots = row["dots"] / float(row["tot"]) * 100.0
    # other = row['other'] / float(row['tot']) * 100.0
    nulls = row["nulls"] / float(row["tot"]) * 100.0
    return """<div class="progress">
<div class="progress-bar progress-bar-success" style="width: %.1f%%">
<span>%s</span>
</div>
<div class="progress-bar progress-bar-info" style="width: %.1f%%">
<span>%s</span>
</div>
<div class="progress-bar progress-bar-danger" style="width: %.1f%%">
<span>%s</span>
</div>
</div>""" % (
        hits - 0.05, row["hits"],
        dots - 0.05, row["dots"],
        # other - 0.05, row['other'],
        nulls - 0.05, row["nulls"],
    )
05e4d2e58611bef0c2e753891686bf7aaf9ec8c2
33,639
def _lookup_elements(adm, idRefs): """Lookup multiple ID references""" return [adm.lookup_element(key) for key in idRefs]
a6bcecd31142009b202386dd9b5d573163722d0d
33,640
def setup_ca(reference_fname, indexing=1, skip_resis=[]):
    """Parse a PDB-like reference file and build CA/atom bookkeeping lists.

    most of the code in my_md_analysis assumes 1-based indexing for this,
    but make_plot wants 0-based indexing. in the end, i should switch
    everything over to 0-based, but i'll leave this in as a hack for now.

    :param reference_fname: path to the reference PDB file
    :param indexing: 1 (default) keeps file numbering; 0 shifts every
        returned id/resi down by one
    :param skip_resis: residue numbers whose atoms are filtered out of
        every returned list
    :return: tuple of lists (residues, CA coords, and atom-id lists
        partitioned by hydrogen/NOSP/mainchain/sidechain membership)
    """
    # NOTE(review): recovered from a whitespace-collapsed dump; the exact
    # nesting/ordering was reconstructed and should be confirmed against
    # the original file.
    master_residue_list = []
    reference_coords = []
    ca_atom_id_list = []
    skip_atom_id_list = []
    #
    # We have a few naming conventions to deal with, like
    # 1HD2 vs. HD12. One thing that seems consistent is that
    # the first non-numeric character in the atom name tells
    # us the element.
    #
    # No need for speed here.
    #
    def is_hydro(atom_name):
        # First non-digit character identifies the element.
        c = atom_name[0]
        if c in '1234567890':
            c = atom_name[1]
        return c in 'H'
    def is_nosp(atom_name):
        # Nitrogen/oxygen/sulfur/phosphorus atoms.
        c = atom_name[0]
        if c in '1234567890':
            c = atom_name[1]
        return c in 'NOSP'
    def is_mainchain(atom_name, resn):
        return (resn in 'NAP'.split()) or (atom_name in 'CA C N O H HA'.split())
    hydro_atom_id_list = []
    nonhydro_atom_id_list = []
    nosp_atom_id_list = []
    nonnosp_atom_id_list = []
    mainchain_atom_id_list = []
    sidechain_atom_id_list = []
    mainchain_nonhydro_atom_id_list = []
    sidechain_hydro_atom_id_list = []
    sidechain_nonhydro_atom_id_list = []
    f = open(reference_fname)
    generated_atom_id = 1
    for line in f:
        # This is an annoyingly hardcoded bit. we should use a real PDB parser.
        line = line.replace('NDPH', ' NDP ')
        parts = line.split()
        if parts[0] not in ['ATOM', 'HETATM']:
            continue
        #atom,atom_id,atom_name,resn,chain,resi,x,y,z,occupancy,b,elem_name = parts
        atom, atom_id, atom_name, resn, chain, resi, x, y, z = parts[:9]
        #
        # One little hack to deal with missing chain info. If it's a number,
        # it's not really the chain.
        #
        try:
            junk = float(chain)
            atom, atom_id, atom_name, resn, resi, x, y, z = parts[:8]
        except ValueError:
            pass
        x, y, z = list(map(float, (x, y, z)))
        try:
            resi, atom_id = list(map(int, (resi, atom_id)))
        except ValueError:
            print("Trouble getting resi,atom_id from", resi, atom_id, line.strip())
            raise
        if resi in skip_resis:
            skip_atom_id_list.append(atom_id)
        if is_hydro(atom_name):
            hydro_atom_id_list.append(atom_id)
        else:
            nonhydro_atom_id_list.append(atom_id)
        if is_nosp(atom_name):
            nosp_atom_id_list.append(atom_id)
        else:
            nonnosp_atom_id_list.append(atom_id)
        if is_mainchain(atom_name, resn):
            mainchain_atom_id_list.append(atom_id)
            if not is_hydro(atom_name):
                mainchain_nonhydro_atom_id_list.append(atom_id)
        else:
            sidechain_atom_id_list.append(atom_id)
            if is_hydro(atom_name):
                sidechain_hydro_atom_id_list.append(atom_id)
            else:
                sidechain_nonhydro_atom_id_list.append(atom_id)
        # Only CA atoms contribute to the residue/coordinate lists below.
        if atom_name != 'CA':
            continue
        #
        # 1RX2 claims to model two conformations for ASP 116. I only see one
        # in the PDB file, and it's conformation A.
        #
        if resn == 'AASP' and resi == 116 and '1RX2' in reference_fname:
            resn = 'ASP'
        elif resn == 'AASP':
            print("Unrecognized residue: AASP")
        master_residue_list.append((resi, resn))
        reference_coords.append((x, y, z))
        ca_atom_id_list.append(atom_id)
    f.close()
    results = master_residue_list, reference_coords, ca_atom_id_list, hydro_atom_id_list, nonhydro_atom_id_list, nosp_atom_id_list, nonnosp_atom_id_list, mainchain_atom_id_list, sidechain_atom_id_list, mainchain_nonhydro_atom_id_list, sidechain_hydro_atom_id_list, sidechain_nonhydro_atom_id_list, skip_atom_id_list
    #
    # Now skip the things in the skip list
    #
    _results = []
    for (_i, thing) in enumerate(results):
        if not thing:
            # empty lists
            _results.append(thing)
            continue
        if isinstance(thing[0], type(())):
            # master_residue_list has (1,'MET'), etc.
            thing = [i for i in thing if i[0] not in skip_atom_id_list]
        elif isinstance(thing[0], type(1)):
            thing = [i for i in thing if i not in skip_atom_id_list]
        else:
            print("wha?", thing)
            a = 1 / 0
        _results.append(thing)
    results = _results
    #
    # And now take care of the indexing (0 or 1) problem
    #
    if indexing == 0:
        print("Fixing indexing")
        for thing in results:
            for i in range(len(thing)):
                if isinstance(thing[0], type(())):
                    # master_residue_list has (1,'MET'), etc.
                    thing[i] = (thing[i][0] - 1, thing[i][1])
                elif isinstance(thing[0], type(1)):
                    thing[i] = thing[i] - 1
                else:
                    print("wha?", thing)
                    a = 1 / 0
    return results
9478deea22b51417bbf38ccc8c71e16898ef3e48
33,641
def enumeration(env, node):
    """'Enumeration' statement class for AST.

    interpret - runtime function for Evaluator: simply yields the node's
    element list (*env* is unused).
    """
    return node.elements
94d9aab5ef2f562ebaaaade063c1f09516d07d29
33,642
def LIX_getter(doc):
    """Extract the LIX readability score from a spaCy Doc.

    LIX = tokens/sentences + 100 * long_tokens/tokens, where a long
    token has more than 6 characters.

    Example:
        Doc.set_extension("LIX", getter=LIX_getter)
        fetch result: doc._.LIX
    """
    n_tokens = len(doc)
    n_sentences = len(list(doc.sents))
    n_long = len([token for token in doc if len(token) > 6])
    return n_tokens / n_sentences + n_long * 100 / n_tokens
4cd7a33ba05ce8303d30345b32d350e0fd67feea
33,646
def generate_stopwords(file_name="data/stopwords.txt"):
    """Load the stop-word list.

    Returns a set because membership access is much faster than using
    lists (lists are faster at iterating).

    :param file_name: path to a newline-separated stop-word file
        (parameterized; defaults to the original hard-coded path)
    :return: {set} of stop words
    """
    # `with` guarantees the handle is closed even on error (the original
    # leaked it on exception).
    with open(file_name) as handle:
        return {line.strip("\n") for line in handle}
aab067bc1d1ac98235590270ef3239ff2cfcfdb6
33,647
def integrand_x(x, alpha, beta, l, i):
    """Integrand in the reparametrized q^l_i expression."""
    exponent_a = alpha - 1
    exponent_b = l + beta - i / 2 - 1
    exponent_c = i / 2
    return x ** exponent_a * (1 - x) ** exponent_b * (1 + x) ** exponent_c
4eea4308976868ab715951b3e1c64750b2c87a2f
33,649
def depth_to_location(depth: float):
    """Convert depth (of polyp) to location in colon based on
    https://training.seer.cancer.gov/colorectal/anatomy/figure/figure1.html

    Ranges deliberately overlap at the boundaries, so a depth may map to
    more than one segment.

    :param depth:
    :return: list of segment names
    """
    segments = (
        (None, 4, 'anus'),
        (4, 17, 'rectum'),
        (15, 57, 'sigmoid'),
        (57, 82, 'descending'),
        (80, 84, 'hepatic'),  # I made this up
        (82, 132, 'transverse'),
        (130, 134, 'splenic'),
        (132, 147, 'ascending'),
        (147, None, 'cecum'),
    )
    locations = []
    for lower, upper, name in segments:
        if (lower is None or lower <= depth) and (upper is None or depth <= upper):
            locations.append(name)
    return locations
1481bdc4e83d55668d98d19218a6b842c64bb21c
33,650
from datetime import datetime


def GrADStime_to_datetime(gradsTime):
    """Convert a GrADS time string, e.g. 00:00z01Jan2000, to datetime.

    Parameters
    ----------
    gradsTime : str
        GrADS time string; the format is inferred from its length.

    Returns
    ----------
    re : datetime
    """
    # Length -> strptime format (14/11/8 cover single-digit fields).
    formats = {
        15: "%H:%Mz%d%b%Y", 14: "%H:%Mz%d%b%Y",
        12: "%Hz%d%b%Y", 11: "%Hz%d%b%Y",
        9: "%d%b%Y", 8: "%d%b%Y",
        7: "%b%Y",
    }
    fmt = formats.get(len(gradsTime))
    if fmt is None:
        raise Exception('invalid length of GrADS date/time string')
    return datetime.strptime(gradsTime, fmt)
df21ac4532510e883245be67ba354f4048761800
33,651
def add_decoy_tag(peptides):
    """Append the '_decoy' tag to every peptide in the list."""
    return ["{}_decoy".format(peptide) for peptide in peptides]
cc9825c4eb7b6b1bb8b7857b2522bc90fb0e4c01
33,652
def rejection_sample(fn, condition):
    """Sample from *fn* until a value satisfies *condition*, then return it."""
    sample = fn()
    while not condition(sample):
        sample = fn()
    return sample
28674cecc21c6ef30bf1777dd3f4e595c2db1007
33,653
def fetch_base_path(dir_path: str) -> str:
    """Return the directory portion of a path, including the trailing '/'.

    If the path contains no '/', it is returned unchanged.

    Parameters
    ----------
    dir_path : str
        Absolute or relative path string.

    Returns
    -------
    str
        Everything up to and including the last '/'.
    """
    sep_index = dir_path.rfind('/')
    if sep_index == -1:
        return dir_path
    return dir_path[:sep_index + 1]
3c50a107884335cbbd64e44541fc7deecfc188b6
33,654
def mock_time(value):
    """Return a zero-argument callable that always yields *value*."""
    return lambda: value
fe4bca50a3593557a74c413980160b812baf6e27
33,655
def subtile(img, w, h, cx, cy):
    """Return the (cx, cy) tile of size w x h from the tilemap *img*."""
    tile_rect = (cx * w, cy * h, w, h)
    return img.subsurface(tile_rect)
c5d996922b58e53775b46b88906e7844cda2d11c
33,657
def qrel_entry(quest_id, answ_id, rel_grade):
    """Format one QREL line.

    :param quest_id: question ID
    :param answ_id: answer ID
    :param rel_grade: relevance grade
    :return: tab-separated QREL entry string
    """
    return '\t'.join([str(quest_id), '0', str(answ_id), str(rel_grade)])
dcd349ea183ed2ee5c508890f08118dae3844802
33,658
import os


def get_user_config_path():
    """
    Returns the path to the user configuration directory and creates it if
    it does not exist already.

    :return: Path to the configuration directory of leapp.
    """
    leapp_conf = os.path.join(os.path.expanduser('~'), '.config', 'leapp')
    # exist_ok replaces the old try/except-EEXIST dance (Python >= 3.2).
    os.makedirs(leapp_conf, exist_ok=True)
    return leapp_conf
c38d16f0f09fb11d3b1da628e0c70ac68e7fcf25
33,659
def find_in_sorted_arr(value, array, after=False): """Return position of element in a sorted array. Returns: int: the maximum position i such as array[i] <= value. If after is True, it returns the min i such as value <= array[i] (or 0 if such an indices does not exist). """ ielt = array.searchsorted(value) if ielt == array.size: ielt -= 1 if not after and array[ielt] != value and ielt > 0: ielt -= 1 return ielt
0a724ca72fdb2abe4feaea3a2b0d971f803479c9
33,660
def get_animation_curve_types():
    """
    Returns a list with all animation curve types available in Maya

    :return: list(str)
    """
    suffixes = ('TA', 'TL', 'TT', 'TU', 'UA', 'UL', 'UT', 'UU')
    return ['animCurve' + suffix for suffix in suffixes]
1b5571add93c48496d09007fc96b329b1f839d85
33,661
import sys import platform def _use_appnope(): """Should we use appnope for dealing with OS X app nap? Checks if we are on OS X 10.9 or greater. """ return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9')
f7479fd77a160dafada6e77346bab4a43c7e43d0
33,662
def dequeue(interp, queuename):
    """
    DEQUEUE queuename

    outputs the least recently QUEUEd member of the queue that is the
    value of the variable named ``queuename`` and removes that member
    from the queue.
    """
    queue = interp.get_variable(queuename)
    return queue.pop(0)
cb5141b3779eeedb5ac09d8d14da7403bcdb37c3
33,664
import textwrap
import argparse


def parse_args():
    """Build the argument parser and parse the command line."""
    description = textwrap.dedent(
        """
        Download and extract a symbiflow benchmark release into a VTR-style directory structure.

        If a previous matching symbiflow release tar.gz file is found
        does nothing (unless --force is specified).
        """
    )
    arg_parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    arg_parser.add_argument(
        "--vtr_flow_dir",
        required=True,
        help="The 'vtr_flow' directory under the VTR tree. "
        "If specified this will extract the symbiflow release, "
        "placing benchmarks under vtr_flow/benchmarks/symbiflow ",
    )
    arg_parser.add_argument(
        "--force",
        action="store_true",
        default=False,
        help="Run extraction step even if directores etc. already exist",
    )
    arg_parser.add_argument("--mirror", default="google", choices=["google"], help="Download mirror")
    arg_parser.add_argument(
        "--upgrade_archs",
        action="store_true",
        default=True,
        help="Try to upgrade included architecture files (using the upgrade_archs.py)",
    )
    return arg_parser.parse_args()
d5b96cb54da6cbc759d3f1c1c0f8240574d2fb4d
33,665
def r_local(denis, alt):
    """Local recombination from Sultan eq 21: alpha * n_mol.

    Risbeth & Garriott '69: dissociative recombination is the principal
    E and F region loss mechanism.  Huba '96: RTI not damped by
    recombination in the F region.

    Parameters
    ----------
    denis : sequence of float
        Ion concentrations; indices 2, 3 and 5 are the molecular ions
        summed into n_mol.
    alt : float
        Altitude; below 200 the rate is taken as 0.

    Returns
    -------
    float
        alpha * n_mol with alpha = 2e-7 (Sultan '92; alpha ~ 1e-7 per
        Risbeth & Garriott '69).
    """
    if alt < 200:
        return 0
    # BUG FIX: the original `if i == 2 | i == 3 | i == 5` mixed the
    # bitwise `|` (which binds tighter than `==`) into a chained
    # comparison, so the condition was never true and n_mol stayed 0.
    n_mol = sum(n_i for i, n_i in enumerate(denis) if i in (2, 3, 5))
    return n_mol * 2 * 10 ** (-7)
89fe0f6bfb0558125f1283d5836940c2937f2c68
33,666
def LinearizedRockPhysicsModel(Phi, Clay, Sw, R):
    """LINEARIZED ROCK PHYSICS MODEL

    Linear rock physics model based on multilinear regression.
    Written by Dario Grana (August 2020).

    Parameters
    ----------
    Phi : float or array_like
        Porosity (unitless).
    Clay : float
        Clay volume (unitless).
    Sw : float
        Water saturation (unitless).
    R : array_like
        3x4 regression coefficients matrix.

    Returns
    -------
    Vp, Vs, Rho : float or array_like
        P-wave velocity (km/s), S-wave velocity (km/s), density (g/cc).

    References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling:
    Wiley - Chapter 2.1
    """
    def _regress(row):
        # One regression row: linear combination of Phi, Clay, Sw + intercept.
        return R[row, 0] * Phi + R[row, 1] * Clay + R[row, 2] * Sw + R[row, 3]

    return _regress(0), _regress(1), _regress(2)
62287e8d312c020c443663c33f0bf558ad66aa7b
33,667
def _get_loop_decoys(args): """Wrapper for Fread.automodel_loop(), to be used with map(). args[0] is a the MultiFread object, args[1] are the sequential arguments for Fread.automodel_loop(), args[2] are the keyword arguments for Fread.automodel_loop(). Returns a MultiFread object that is a copy of the input object, which also contains the modelling results, warnings, etc. You can access the results and warnings like so: fread_objects = map(_get_loop_decoys, argslist) for obj in fread_objects: for decoy in obj.results: # do something for warning in obj.warnings: # do something """ frd, seqargs, kwargs = args frd.set_silent() frd.automodel_loop(*seqargs, write_models=False, **kwargs) return frd
71e9cf5bd1be078cab862c783dd709bae1b69f61
33,669
def betti(H):
    """Dimensions (column counts) of each homology space produced by homology()."""
    dims = []
    for basis in H:
        dims.append(basis.shape[1])
    return dims
470fefdfcfea577d2a11b6fdc304f85a84eaf8f9
33,670
import subprocess


def run_command(command):
    """Run a system command.

    Args:
        command (str): Command to be executed (split on single spaces).

    Returns:
        int, bytes, bytes: return code, stdout, stderr.  On failure to
        spawn the process, (1, "Command not found: ...", None).
    """
    try:
        proc = subprocess.Popen(
            command.split(' '),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, err = proc.communicate()
        return proc.returncode, out, err
    except Exception:
        return 1, "Command not found: %s" % command, None
eccc56a5da8d4260c204df79e6aca309c4c36f71
33,675
def getAllUserPresets():
    """getAllUserPresets() -> None

    Stub: would return a list of tuples with all nodename/presetname
    pairs of the current user presets.

    @return: None (placeholder implementation)
    """
    return None
dcca90a2eb9b9574d1c39f470b6ee2be5dfa6960
33,677
def parse_line(line):
    """Split a '1,2,3,...;5'-style line into a list of ints and a minuend int."""
    numbers_part, minuend_part = line.split(';')
    numbers = [int(token) for token in numbers_part.split(',')]
    return numbers, int(minuend_part)
905222663ae3c7b7c3ef9df5d6d7ab783fb28807
33,679
def time2str(t, fmt='%Y-%m-%d %H:%M:%S', microsec=False):
    """Format a pandas Timestamp (or datetime) as a string.

    With microsec=True, '.%f' is appended to the format.  See
    https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
    """
    if microsec is True:
        fmt = fmt + '.%f'
    return t.strftime(fmt)
a0aecef7b052af4b1c291d6d2a4e54467f319af2
33,680
def _try_list_if_string(input_str): """ Attempt to convert string to list if it is a string """ if not isinstance(input_str, str): return input_str val_split = input_str.split(",") if len(val_split) > 1: return [float(sval.strip()) for sval in val_split] return input_str
c1c66dcad87dd4be041f6705e2b6252b8889dcd2
33,683
import re def is_valid_email(field): """ From SO """ return ( re.match( r'(([^<>()\[\]\\.,;:\s@"]+(\.[^<>()\[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$', field, ) is not None )
cbce0cc067cca9edbbf7258209bbd9ab8594977e
33,685
def extract_log_size(raw_log_info):
    """Return the log-size field: the first tab-separated column of a
    stripped raw log line."""
    first_field, _, _ = raw_log_info.strip().partition('\t')
    return first_field
7b98fd94b068f830fe7add7b03264f2936e218b4
33,686
from datetime import datetime def _timehdr(ln: str) -> datetime: """ handles malformed header dates NOTE: must do second=int(float()) due to non-conforming files that don't line up decimal point. """ try: second = int(float(ln[30:36])) except ValueError: second = 0 if not 0 <= second <= 59: second = 0 try: usec = int(float(ln[30:43]) % 1 * 1000000) except ValueError: usec = 0 if not 0 <= usec <= 999999: usec = 0 return datetime(year=int(ln[:6]), month=int(ln[6:12]), day=int(ln[12:18]), hour=int(ln[18:24]), minute=int(ln[24:30]), second=second, microsecond=usec)
bffa7c4d1c047d33f9b2f27e672d4cf1aa901b4b
33,687
from datetime import datetime


def year_gen(min_val, target):
    """Populate a dropdown with year entries from *min_val* through the
    current year (inclusive); returns the tuple of created items."""
    current_year = int("{:%Y}".format(datetime.now()))
    items = []
    for year in range(min_val, current_year + 1):
        items.append(target.add_item(str(year), year))
    return tuple(items)
389ed945bc4662352419a1c8915e1876183cf86a
33,688
def RemoveServiceAccountFromDatasetPolicy(dataset_policy, member, role): """Deauthorize Account for Dataset.""" for entry in dataset_policy.access: if entry.role == role and member in entry.userByEmail: dataset_policy.access.remove(entry) return True return False
b06e6f8a52ba077001ba6faf832994c5cd128df1
33,689
import requests


def get_data_from_url(url):
    """HTTP GET *url* and return the response body text (30 s timeout).

    :argument url: address to request
    """
    response = requests.get(url, timeout=30)
    return response.text
8db57b86acbd4ccaea989ea0f10d31cae2431fe9
33,690
import os
import sys


def identify_toplevel_dir(path):
    """Walk up from *path* until a directory ending in '/creepiest' is
    found; assert it is on sys.path and return it.

    :param path: starting path inside the library tree
    :return: the CREEPIEST top-level directory
    :raises EnvironmentError: if the filesystem root is reached first
    """
    start = path
    while not path.endswith('/creepiest'):
        if path == '/':
            raise EnvironmentError('Could not identify library path starting from {}'.format(start))
        path, _ = os.path.split(path)
    assert path in sys.path, 'CREEPIEST path not in PYTHONPATH: {}'.format(sys.path)
    return path
36c19cb2287bcf3d9497ca88ca7e8bed0ad70a2a
33,693
import subprocess


def cmd_exists(cmd):
    """Check whether *cmd* exists on the system (via the shell builtin `type`)."""
    # https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
    status = subprocess.call(
        ['type ' + cmd],
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return status == 0
02922a72abc71c00b9d72f56be06f550e3f402d0
33,694
import os def make_file_dir_names(model_name, nt=0): """ Export file and directory names which contain model_name. Indices of exported files/directories: 0 - model_name_big 1 - model_dir 2 - input_file 3 - enkf_input_file 4 - true_input_file 5 - true_sgsim_file 6 - sgsim_file 7 - true_log_file 8 - log_file 9 - shell_output_file 10 - init_dist_file_one 11 - init_dist_file_two 12 - init_dist_file_three 13 - observations_file 14 - true_file 15 - true_chem_file 16 - monitor_file 17 - time_out_file 18 - assim_out_file_bef 19 - assim_out_file_aft """ model_name_big = model_name.upper() model_dir = os.environ['HOME'] + "/shematModelsDir/" + model_name + "_model" input_file = model_name_big enkf_input_file = model_name_big + ".enkf" true_input_file = model_name_big + "_TRUE" true_sgsim_file = "sgsim_k_" + model_name + "_true.par" sgsim_file = "sgsim_k_" + model_name + ".par" true_log_file = "logk_" + model_name + "_true.dat" log_file = "logk_" + model_name + ".dat" shell_output_file = model_name + ".out" init_dist_file_one = "init_dist_" + model_name + "_1.dat" init_dist_file_two = "init_dist_" + model_name + "_2.dat" init_dist_file_three = "init_dist_" + model_name + "_3.dat" observations_file = "observations_" + model_name_big + ".dat" true_file = "True" + model_name_big + ".plt" true_chem_file = "True" + model_name_big + "_chem.plt" monitor_file = model_name_big + '_E0_monitor_1.dat' time_out_file = model_name_big + '_E0_time_out_' + str(nt) + '.vtk' assim_out_file_bef = 'assim_variables_E1_bef_' + str(nt).zfill(4) + '.vtk' assim_out_file_aft = 'assim_variables_E1_aft_' + str(nt).zfill(4) + '.vtk' return model_name_big, \ model_dir, \ input_file, \ enkf_input_file, \ true_input_file,\ true_sgsim_file, \ sgsim_file, \ true_log_file, \ log_file, \ shell_output_file, \ init_dist_file_one, \ init_dist_file_two, \ init_dist_file_three, \ observations_file, \ true_file, \ true_chem_file,\ monitor_file,\ time_out_file,\ assim_out_file_bef,\ assim_out_file_aft
b5206072b671ece901f6e30d890c9b874223c5aa
33,696
def receive(socket):
    """Receive an HTTP response from *socket* until EOF (0 bytes read).

    Parameters
    ----------
    socket : socket
        Connected socket created by connect(url, port); may be None.

    Returns
    -------
    str or None
        The accumulated decoded response; "" if socket is None; None on
        any receive/decode error.
    """
    if socket is None:
        return ""
    collected = ""
    try:
        while True:
            data = socket.recv(4096)
            if not data:
                return collected
            collected += data.decode()
    except Exception:
        return None
d00788654d771b16bd86496020ef1a50043c107d
33,697
from typing import List def _out_of_range(matrix: List[List[int]], row: int, col: int) -> bool: """Check if the row/col is out of range.""" return row < 0 or col < 0 or row >= len(matrix) or col >= len(matrix[0])
5385b19fd349051a4f869f4669ee16a2861e1e50
33,698
import re


def encodePythonIdentifierToC(value):
    """Encode a Python identifier string into a C-safe identifier.

    Python identifiers allow far more characters than C does; the
    encoding is designed to be reversible: '.' becomes '$' and any other
    illegal character becomes '$$<ord>$'.
    """
    def _escape(match):
        ch = match.group()
        if ch == ".":
            return "$"
        return "$$%d$" % ord(ch)

    return "".join(re.sub("[^a-zA-Z0-9_]", _escape, ch) for ch in value)
07b52b5d9dcf83de361cd48f2e710a71f16269de
33,699
import math


def dryness_formula(days: int, num_storys: int) -> int:
    """Dryness score in [0, 1000]: higher means fewer storys per month.

    255 storys/month -> 0; halving the rate adds ~125 up to 1000 at
    zero storys.
    """
    monthly_rate = num_storys / (days + 1) * 31 + 1
    raw = math.log2(256 / monthly_rate) / 8 * 1000
    return min(1000, max(0, round(raw)))
0cb6721cb3ea52a926ba43d8fbf81d5343e24172
33,700
def substitute(line, charges, current_residuum):
    """Look up an atom's new charge and splice it into the line.

    Args:
        line: string whose columns 0-6 hold the atom name and columns
            24-33 hold the original charge.
        charges: DataFrame with columns atom, residue, q.
        current_residuum: residuum name the atom belongs to.

    Returns:
        The line with columns 24-33 replaced by the new charge.
    """
    atom_name = line[0:7].strip()
    # Select the row matching both atom name and the residuum we are in.
    row_mask = (charges.atom == atom_name) & (charges.residue == current_residuum)
    new_q = charges[row_mask].q.values[0]
    charge_field = '{: 1.6f}'.format(new_q) + ' '
    return line[:24] + charge_field + line[34:]
755db68832818d1c294e79ab535418d56bea06d7
33,701
def process_video(self, url):
    """Background task stub that would run a long job with progress reports."""
    def progress_cb(done, total):
        # Forward progress to the task state (Celery-style update_state).
        self.update_state(state='PROGRESS',
                          meta={'current': done, 'total': total})

    # tag.tag_and_upload(url, progress_cb)
    return {'current': 100, 'total': 100, 'status': 'Task completed!',
            'result': 42}
da41cda5377a9e97dc3c5a4ac8df7663f36cccd1
33,704
def form_bowtie_build_cmd_list(bowtie_build_fp, input_contigs_fasta, output_index_fp):
    """Format the argument list for a bowtie2-build subprocess call.

    Args:
        bowtie_build_fp (str): path to the bowtie2-build executable.
        input_contigs_fasta (str): fasta file input used for index
            construction.
        output_index_fp (str): base name used as the output index path.

    Returns:
        list: [bowtie_build_fp, input_contigs_fasta, output_index_fp]

    Raises:
        ValueError: if any of the three arguments is empty/None.
    """
    # BUG FIX: the original used `x is ''` which tests object identity,
    # not equality (and is a SyntaxWarning on CPython >= 3.8); use `==`.
    if bowtie_build_fp == '':
        raise ValueError('bowtie2_build_path name is empty')

    if output_index_fp == '':
        raise ValueError('output file name invalid. index_output_fp is None')

    if input_contigs_fasta == '' or input_contigs_fasta is None:
        raise ValueError('no fasta file found')

    # required arguments, in bowtie2-build's positional order
    return [bowtie_build_fp, input_contigs_fasta, output_index_fp]
28af32e6a7626d8bd5647fa12194f0afddb31fbc
33,705
from os.path import basename
import tempfile
import tarfile


def unpack_tarball(archive, tmpdir_prefix='tmp_'):
    """Extract an archive into a fresh temporary directory.

    Args:
        archive (str): path of the archive to open.
        tmpdir_prefix (str): prefix of the temporary directory created
            for the extraction.

    Returns:
        tuple: (tmpdir, files) — the temporary directory path and the
        member names extracted from the tarfile.
    """
    extraction_dir = tempfile.mkdtemp(prefix=tmpdir_prefix + basename(archive) + '_')
    with tarfile.open(archive) as arch:
        arch.extractall(path=extraction_dir)
        members = arch.getnames()
    return extraction_dir, members
314d095d423878c6483835b926f4ec595b742628
33,707
import math def test3(x, sin=math.sin): """ >>> %timeit test3(123456) 1000000 loops, best of 3: 306 ns per loop """ return sin(x)
42ab8ee27fef2d964652d61dbd45ddad883dfd8b
33,708
def check_datasets_compatible(dataset1, dataset2): """ Used for cross-corpus datasets that are combined from two datasets. Checks if two datasets have the same class names and original shape. The first entry of the original shape is ignored, since the number of samples does not matter. Returns: class_names, original_shape Raises: Exception if datasets are not compatible """ # check class names class_names1 = dataset1['class_names'] class_names2 = dataset2['class_names'] msg = f'Class names are not equal: {class_names1}, {class_names2}' e = Exception(f'Datasets are not compatible!\n{msg}') if len(class_names1) != len(class_names2): raise e for i in range(len(class_names1)): if class_names1[i] != class_names2[i]: raise e # check if both have a shape or not has_shape1 = has_shape2 = False if 'specs' in dataset1 and 'original_shape' in dataset1['specs']: has_shape1 = True if 'specs' in dataset2 and 'original_shape' in dataset2['specs']: has_shape2 = True if has_shape1 and not has_shape2: raise Exception( 'Dataset 1 has a original_shape but dataset 2 does not!') if has_shape2 and not has_shape1: raise Exception( 'Dataset 2 has a original_shape but dataset 1 does not!') # check shapes original_shape = None if has_shape1 and has_shape2: shape1 = dataset1['specs']['original_shape'] shape2 = dataset2['specs']['original_shape'] msg = f'Shapes are not equal: {shape1}, {shape2}' e = Exception(f'Datasets are not compatible!\n{msg}') if len(shape1) != len(shape2): raise e for i in range(1, len(shape1)): if shape1[i] != shape2[i]: raise e original_shape = shape1 return class_names1, original_shape
5cecca002ebcba6b6733acf8b0b521e61e9937db
33,709
import numbers


def human_bytes(size):
    """
    Given a human-readable byte string (e.g. 2G, 10GB, 30MB, 20KB),
    return the number of bytes.  Integers pass through unchanged.

    Raises ValueError for an unknown unit ('Invalid units') or a
    non-numeric quantity ('Invalid value').

    Notes
    -----
    Based on:
    https://gist.github.com/beugley/ccd69945346759eb6142272a6d69b4e0
    """
    if isinstance(size, numbers.Integral):
        return size

    text = size
    if text[-1] == 'B':
        text = text[:-1]
    if text.isdigit():
        return int(text)

    digits, unit = text[:-1], text[-1]
    if not digits.isdigit():
        raise ValueError('Invalid value')

    multipliers = {'G': 1073741824, 'M': 1048576, 'K': 1024}
    if unit not in multipliers:
        raise ValueError('Invalid units')
    return int(digits) * multipliers[unit]
198b9e0a1179c2f992cb4f1cb4a85980982a756a
33,710
def largest_palindrome_number(lo, hi):
    """Largest palindromic product a*b with lo <= a, b < hi (0 if none)."""
    best = 0
    for a in range(hi - 1, lo - 1, -1):
        for b in range(hi - 1, lo - 1, -1):
            product = a * b
            digits = str(product)
            if digits == digits[::-1] and product > best:
                best = product
    return best
232a8d31413e352a601d4474b80141f1e80b3a6e
33,714
def _find_framework_name(package_name, options): """ :param package_name: the name of the package :type package_name: str :param options: the options object :type options: dict :returns: the name of framework if found; None otherwise :rtype: str """ return options.get(package_name, {}).get('framework-name', None)
3b345a5ccbed309ad7a706dce621a8fd405dd9e4
33,717
import operator


def finalize_tsv(entries):
    """Recalculate abundances/quality averages and sort TSV entries.

    *entries* maps a key to a tuple whose index 0 is a count and index 2
    a summed quality; remaining indices are passed through unchanged.
    """
    # Total count across all entries (denominator for abundance %).
    total = sum(values[0] for values in entries.values())

    # Sort by the value tuples, descending.
    ordered = sorted(entries.items(), key=operator.itemgetter(1), reverse=True)

    finalized = []
    for _, values in ordered:
        count = values[0]
        abundance = count / total * 100
        quality_avg = values[2] / count
        finalized.append((count, abundance, quality_avg) + values[3:])
    return finalized
4ea3915ec42b17896a732cd54d92b02063a9084e
33,718
def make_number_formatter(decimal_places):
    """
    Build a number-formatting pattern displaying values with the given
    number of decimal places (positive;negative forms).
    """
    fraction = '0' * decimal_places
    return '#,##0.{f};-#,##0.{f}'.format(f=fraction)
9d8cfbfb56d01a170f6754925f26f1433a9627e1
33,719
def index():
    """Return today's news payload."""
    return 'koowa2'
acf48e5ff5d94c4d5b42b9598d0d767bbab5b118
33,720
def convert_node(graph, tree_node):
    """
    Add a tree node to a graph.

    Parameters
    ----------
    graph : Graph
        The graph where the node should be added.
    tree_node : Node
        The node to be added.

    Returns
    -------
    node_id : int or tuple
        The identifier of the added node: the reference index for a leaf,
        otherwise the tuple of child identifiers.
    distance : float
        The distance of the given node to its parent node.
    """
    if tree_node.is_leaf():
        return tree_node.index, tree_node.distance

    # Recurse into children; each gives (identifier, distance-to-parent).
    child_results = [convert_node(graph, child) for child in tree_node.children]
    node_id = tuple(child_id for child_id, _ in child_results)

    # Connect this node to each child, storing distance as an edge attribute.
    for child_id, child_dist in child_results:
        graph.add_edge(node_id, child_id, distance=child_dist)

    return node_id, tree_node.distance
468b0e87a78e1f289f583f657b04e03132aa123d
33,722