content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def decode_modified_utf8(s: bytes) -> str:
    """
    Decodes a bytestring containing modified UTF-8 as defined in section
    4.4.7 of the JVM specification.

    :param s: bytestring to be converted.
    :returns: A unicode representation of the original string.
    """
    s_out = []
    s_len = len(s)
    s_ix = 0

    while s_ix < s_len:
        b1 = s[s_ix]
        s_ix += 1

        if b1 == 0:
            raise UnicodeDecodeError(
                'mutf-8',
                s,
                s_ix - 1,
                s_ix,
                'Embedded NULL byte in input.'
            )
        if b1 < 0x80:
            # ASCII/one-byte codepoint.
            s_out.append(chr(b1))
        elif (b1 & 0xE0) == 0xC0:
            # Two-byte codepoint.
            if s_ix >= s_len:
                raise UnicodeDecodeError(
                    'mutf-8',
                    s,
                    s_ix - 1,
                    s_ix,
                    '2-byte codepoint started, but input too short to'
                    ' finish.'
                )

            s_out.append(
                chr(
                    (b1 & 0x1F) << 0x06 |
                    (s[s_ix] & 0x3F)
                )
            )
            s_ix += 1
        elif (b1 & 0xF0) == 0xE0:
            # Three-byte codepoint.
            if s_ix + 1 >= s_len:
                raise UnicodeDecodeError(
                    'mutf-8',
                    s,
                    s_ix - 1,
                    s_ix,
                    '3-byte or 6-byte codepoint started, but input too'
                    ' short to finish.'
                )

            b2 = s[s_ix]
            b3 = s[s_ix + 1]

            if b1 == 0xED and (b2 & 0xF0) == 0xA0:
                # Possible six-byte codepoint.
                if s_ix + 4 >= s_len:
                    raise UnicodeDecodeError(
                        'mutf-8',
                        s,
                        s_ix - 1,
                        s_ix,
                        '3-byte or 6-byte codepoint started, but input too'
                        ' short to finish.'
                    )

                b4 = s[s_ix + 2]
                b5 = s[s_ix + 3]
                b6 = s[s_ix + 4]

                if b4 == 0xED and (b5 & 0xF0) == 0xB0:
                    # Definite six-byte codepoint.
                    s_out.append(
                        chr(
                            0x10000 |
                            (b2 & 0x0F) << 0x10 |
                            (b3 & 0x3F) << 0x0A |
                            (b5 & 0x0F) << 0x06 |
                            (b6 & 0x3F)
                        )
                    )
                    s_ix += 5
                    continue

            s_out.append(
                chr(
                    (b1 & 0x0F) << 0x0C |
                    (b2 & 0x3F) << 0x06 |
                    (b3 & 0x3F)
                )
            )
            s_ix += 2
        else:
            raise RuntimeError

    return u''.join(s_out)
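A quick usage sketch (editor's illustration, not part of the original snippet): modified UTF-8 encodes NUL as the two-byte sequence C0 80 and supplementary-plane characters as a six-byte surrogate pair, so both calls below exercise the non-standard paths.

# Hypothetical inputs chosen for illustration.
assert decode_modified_utf8(b'A\xc0\x80B') == 'A\x00B'  # embedded NUL via the two-byte form
assert decode_modified_utf8(b'\xed\xa0\xbd\xed\xb8\x80') == '\U0001F600'  # six-byte surrogate pair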
bb51073289d8592eb71c8132f8350bad1b36bf27
14,876
def get_links(links, row):
    """
    Process & flatten the links section in the yaml. Values within the
    subgroup are comma separated & the subgroups are pipe separated.
    """
    l_name_str = []
    l_backref_str = []
    l_label_str = []
    l_target_str = []
    l_multiplicity_str = []
    l_required_str = []
    g_exclusive_str = []
    g_required_str = []

    for link in links:
        if isinstance(link, dict) and link != {}:
            if 'subgroup' in link:
                sub_name = []
                sub_backref = []
                sub_label = []
                sub_target = []
                sub_multi = []
                sub_req = []

                for l in link['subgroup']:
                    sub_name.append(l['name'])
                    sub_backref.append(l['backref'])
                    sub_label.append(l['label'])
                    sub_target.append(l['target_type'])
                    sub_multi.append(l['multiplicity'])
                    sub_req.append(str(l['required']))

                l_name_str.append(', '.join(sub_name))
                l_backref_str.append(', '.join(sub_backref))
                l_label_str.append(', '.join(sub_label))
                l_target_str.append(', '.join(sub_target))
                l_multiplicity_str.append(', '.join(sub_multi))
                l_required_str.append(', '.join(sub_req))
                g_exclusive_str.append(str(link['exclusive']))
                g_required_str.append(str(link['required']))
            else:
                try:
                    l_name_str.append(link['name'])
                    l_backref_str.append(link['backref'])
                    l_label_str.append(link['label'])
                    l_target_str.append(link['target_type'])
                    l_multiplicity_str.append(link['multiplicity'])
                    l_required_str.append(str(link['required']))
                    g_exclusive_str.append('')
                    g_required_str.append('')
                except KeyError:
                    pass

    row['<link_name>'] = ' | '.join(l_name_str)
    row['<link_backref>'] = ' | '.join(l_backref_str)
    row['<link_label>'] = ' | '.join(l_label_str)
    row['<link_target>'] = ' | '.join(l_target_str)
    row['<link_multiplicity>'] = ' | '.join(l_multiplicity_str)
    row['<link_required>'] = ' | '.join(l_required_str)
    row['<link_group_exclusive>'] = ' | '.join(g_exclusive_str)
    row['<link_group_required>'] = ' | '.join(g_required_str)

    return row
8d2233ffc5180cb2fc9411665e6b5021f7961a76
14,877
def compile_column(name: str, data_type: str, nullable: bool) -> str:
    """Create column definition statement."""
    null_str = 'NULL' if nullable else 'NOT NULL'
    return '{name} {data_type} {null},'.format(name=name, data_type=data_type, null=null_str)
e1db7bfc78509d394de645b90394f8d796f97a5b
14,878
def convert_255_to_hex(color255):
    """
    >>> color255 = [255, 51, 0]

    target_rgb01 = pt.FALSE_RED[0:3]
    target_rgb = np.array([[target_rgb01]]).astype(np.float32) / 255
    target_lab = vt.convert_colorspace(target_rgb, 'lab', 'rgb')

    # Find closest CSS color in LAB space
    dist_lab = {}
    dist_rgb = {}
    css_colors = ub.map_vals(convert_hex_to_255, mcolors.CSS4_COLORS)
    for k, c in css_colors.items():
        rgb = np.array([[c]]).astype(np.float32) / 255
        lab = vt.convert_colorspace(rgb, 'lab', 'rgb')
        dist_lab[k] = np.sqrt(((target_lab - lab) ** 2).sum())
        dist_rgb[k] = np.sqrt(((target_rgb - rgb) ** 2).sum())

    best_keys = ub.argsort(dist_lab)
    ub.odict(zip(best_keys, ub.take(dist_lab, best_keys)))
    """
    colorhex = '0x' + ''.join(['%02x' % c for c in color255])
    return colorhex
355ad47715a3c188b1b6660a76d816b0124a3f69
14,880
import logging


def group_samples(samples, tokenizer):
    """Groups generated and ground truth texts."""
    groups = {}
    for i, row in enumerate(samples):
        gt = tokenizer.decode(row['ground_truth_text'])
        sample = tokenizer.decode(row['sample_tokens'])
        if gt not in groups:
            groups[gt] = (gt.split(), [sample.split()])
        else:
            groups[gt][-1].append(sample.split())
        if (i + 1) % 100 == 0:
            logging.info('Processed %d samples', i + 1)
    return groups
b8bbdb02157d20fd3a9aed774c3a90a24fe18e86
14,881
import io
import os


def filter_alt_hap_chromosomes(inputfile, outputfile):
    """
    Filter chromosomes with names containing alt or hap
    as preparation for STAR map index
    """
    line_buffer = io.StringIO()
    keep = False
    just_link = True
    with open(inputfile, 'r') as fasta:
        for line in fasta:
            if line.startswith('>'):
                if 'alt' in line or 'hap' in line:
                    keep = False
                    just_link = False
                else:
                    keep = True
            if keep:
                line_buffer.write(line)
    if just_link:
        os.symlink(inputfile, outputfile)
    else:
        with open(outputfile, 'w') as fasta:
            _ = fasta.write(line_buffer.getvalue())
    return outputfile
249ef2b6b6b22202f31f406ec1d107dc0fa5b356
14,882
def xy_data(dataset, variable):
    """X-Y line/circle data related to a dataset"""
    # import time
    # time.sleep(5)  # Simulate expensive I/O or slow server
    if dataset == "takm4p4":
        return {
            "x": [0, 1e5, 2e5],
            "y": [0, 1e5, 2e5]
        }
    else:
        return {
            "x": [0, 1e5, 2e5],
            "y": [0, 3e5, 1e5]
        }
153e6ccd16c86bebb5c4597a02e64d0f90c10e63
14,883
from datetime import datetime


def get_datetime_utcnow():
    """Returns the current datetime in UTC"""
    return datetime.utcnow()
a64843d111344b30eacc5e99258e28f12376a321
14,884
import os

import numpy as np
from math import sin, cos


def eddy_rotate_bvecs(in_bvec, eddy_params):
    """
    Rotates the input bvec file accordingly with a list of parameters sourced
    from ``eddy``, as explained `here
    <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/EDDY/Faq#Will_eddy_rotate_my_bevcs_for_me.3F>`_.
    """
    name, fext = os.path.splitext(os.path.basename(in_bvec))
    if fext == '.gz':
        name, _ = os.path.splitext(name)
    out_file = os.path.abspath('%s_rotated.bvec' % name)
    bvecs = np.loadtxt(in_bvec).T
    new_bvecs = []

    params = np.loadtxt(eddy_params)

    if len(bvecs) != len(params):
        raise RuntimeError(('Number of b-vectors and rotation '
                            'matrices should match.'))

    for bvec, row in zip(bvecs, params):
        if np.all(bvec == 0.0):
            new_bvecs.append(bvec)
        else:
            ax = row[3]
            ay = row[4]
            az = row[5]

            Rx = np.array([[1.0, 0.0, 0.0],
                           [0.0, cos(ax), -sin(ax)],
                           [0.0, sin(ax), cos(ax)]])
            Ry = np.array([[cos(ay), 0.0, sin(ay)],
                           [0.0, 1.0, 0.0],
                           [-sin(ay), 0.0, cos(ay)]])
            Rz = np.array([[cos(az), -sin(az), 0.0],
                           [sin(az), cos(az), 0.0],
                           [0.0, 0.0, 1.0]])
            R = Rx.dot(Ry).dot(Rz)

            invrot = np.linalg.inv(R)
            newbvec = invrot.dot(bvec)
            new_bvecs.append(newbvec / np.linalg.norm(newbvec))

    np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
    return out_file
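A small aside (editor's note, not from the original source): because Rx, Ry, and Rz are pure rotations, their product R is orthogonal, so the matrix inverse above could equivalently be a transpose. A quick numerical check under that assumption:

# Sketch only: verifies inv(R) == R.T for the composed rotation matrix.
import numpy as np
from math import sin, cos

ax, ay, az = 0.1, -0.2, 0.3  # arbitrary test angles
Rx = np.array([[1, 0, 0], [0, cos(ax), -sin(ax)], [0, sin(ax), cos(ax)]])
Ry = np.array([[cos(ay), 0, sin(ay)], [0, 1, 0], [-sin(ay), 0, cos(ay)]])
Rz = np.array([[cos(az), -sin(az), 0], [sin(az), cos(az), 0], [0, 0, 1]])
R = Rx.dot(Ry).dot(Rz)
assert np.allclose(np.linalg.inv(R), R.T)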
846e2af6fa072c711b6ca69462e58c6e6a0b3a29
14,886
def expand_grid(grid):
    """Expand grid by one in every direction (assumes a square grid)."""
    new_length = len(grid) + 2
    # Padding row; renamed from `row` to avoid being shadowed by the
    # comprehension variable below. Its length uses len(grid), so the
    # grid is assumed square.
    pad_row = [0 for _ in range(new_length)]
    new_grid = [[0] + row + [0] for row in grid]
    return [pad_row[:]] + new_grid + [pad_row[:]]
773ccd3f0bb308ed294f6c3b202818e9a8f7511a
14,887
def stratify(all_labels, types):
    """
    Divide label into three categories.
    """
    coarse = types[:9]
    fine = types[9:130]
    return ([l for l in all_labels if l in coarse],
            [l for l in all_labels if (l in fine) and (l not in coarse)],
            [l for l in all_labels if (l not in coarse) and (l not in fine)])
699a632786249d0f1ea59db34bfe4b50c7a93080
14,888
def dbdisconnect(connection) -> bool:
    """Close connection to SQLite-Database

    :param connection:
    :return: Result of success (true/false)
    """
    if connection:
        connection.close()
        return True
    return False
6c5d17d14e898696885730ef86fad9eab03af02f
14,889
def GetAllocationAffinity(args, client):
    """Returns the message of allocation affinity for the instance."""
    if not args.IsSpecified('allocation_affinity'):
        return None

    type_msgs = (client.messages.
                 AllocationAffinity.ConsumeAllocationTypeValueValuesEnum)

    if args.allocation_affinity == 'none':
        allocation_type = type_msgs.NO_ALLOCATION
        allocation_key = None
        allocation_values = []
    elif args.allocation_affinity == 'specific':
        allocation_type = type_msgs.SPECIFIC_ALLOCATION
        # Currently, the key is fixed and the value is the name of the
        # allocation. The value being a repeated field is reserved for future
        # use when the user can specify more than one allocation name from
        # which the VM can take capacity.
        allocation_key = args.allocation_label.get('key', None)
        allocation_values = [args.allocation_label.get('value', None)]
    else:
        allocation_type = type_msgs.ANY_ALLOCATION
        allocation_key = None
        allocation_values = []

    return client.messages.AllocationAffinity(
        consumeAllocationType=allocation_type,
        key=allocation_key,
        values=allocation_values)
c52f4a95120aa2ca13b2ec37857ad2f398634d77
14,890
def boom_7_for(number):
    """Iterate from zero to number with for; if a number is divisible by 7
    or contains 7 as a digit we'll add 'BOOM' to the list,
    else we'll add the number"""
    ls = []
    for num in range(number + 1):
        if '7' in str(num) or num % 7 == 0:
            ls.append("BOOM")
        else:
            ls.append(num)
    return ls
ca3126849aa48d9b1578678d5679837304037fb7
14,891
def lcase(i):
    """
    >>> lcase("Cat")
    'cat'
    """
    return i.lower()
88eb1d7681c442757a372aeb06338412eac869ae
14,892
from typing import List


def transla(name_row: List[str], language_1: List[str], language_2: List[str]):
    """
    :param name_row: The name in row
    :param language_1: List of the first language
    :param language_2: List of the second language
    :return: translated name for each row

    Usage:
        gdf['type_eng'] = gdf['type'].apply(transla, args=(lst_1, lst_2))
    """
    b = [(a[0], a[1]) for a in zip(language_1, language_2)]
    for tup in b:
        if tup[0].lower() == name_row.lower():  # type: ignore
            return tup[1].lower()
038536ed52970a077aca4e98961f5ef52c98e2c8
14,893
import os


def preflight_check(blacklist_file, geoip_file, root_domains_file):
    """
    Checks the existence of all the lookup files. Returns False if one is missing.
    :return: Bool
    """
    if not os.path.isfile(blacklist_file):
        return False
    elif not os.path.isfile(geoip_file):
        return False
    elif not os.path.isfile(root_domains_file):
        return False
    else:
        return True
111b4f88f2d52dad117195d082c94ca7ac4e5d7f
14,894
def rdict(x):
    """
    recursive conversion to dictionary
    converts objects in list members to dictionary recursively
    attributes beginning with '_' are converted to fields excluding the '_'
    """
    if isinstance(x, list):
        l = [rdict(y) for y in x]
        return l
    elif isinstance(x, dict):
        x2 = {}
        for k, v in x.items():
            if k.endswith('__'):
                continue
            if k.startswith('_'):
                k2 = k[1:]
                x2[k2] = rdict(v)
            else:
                x2[k] = rdict(v)
        return x2
    else:
        if hasattr(x, '__dict__'):
            d = x.__dict__
            toremove = []
            # convert _key -> key; iterate over a snapshot since we
            # mutate d inside the loop (a RuntimeError on Python 3 otherwise)
            for k, v in list(d.items()):
                if k.startswith('_'):
                    k2 = k[1:]
                    d[k2] = v
                    toremove.append(k)
                if k.endswith('__'):  # internal use only
                    toremove.append(k)
            # remove items with a None value
            for k, v in d.items():
                if v is None:
                    toremove.append(k)
            for k in set(toremove):
                del d[k]
            # go deep
            for k, v in d.items():
                d[k] = rdict(v)
            return d
        else:
            return x
5da3d680b9ec191e5962ac4ce4469cbfef608ed4
14,895
import base64


def diff_kubernetes_secret(secret, desired_fields):
    """Computes a set of changed fields (either added, removed, or modified)
    between the given existing secret and the set of desired fields.

    :param secret: an existing secret as a KubernetesAPIResponse containing
        encoded secret data
    :type secret: KubernetesAPIResponse
    :param desired_fields: a dict of desired fields
    :type desired_fields: dict[str, str]
    :return: set[str]
    """
    current_keys = set(secret.data.keys())
    desired_keys = set(desired_fields.keys())

    differences = current_keys.symmetric_difference(desired_keys)

    for field in current_keys.intersection(desired_keys):
        decoded_bytes = base64.b64decode(secret.data[field])
        decoded_str = decoded_bytes.decode('utf-8')
        if decoded_str != desired_fields[field]:
            differences.add(field)

    return differences
eb3d219a40bc68ba00d3d876aade936f8b062e75
14,896
def find_indexes_where_lists_differ(list1: list, list2: list) -> list:
    """This function returns the indexes where the two input lists differ.
    The input lists are expected to have same length

    Args:
        list1 (list): first input list
        list2 (list): second input list
    Returns:
        out_list (list): output list containing the indexes where the two input lists differ
    Raises:
        AssertionError: if the two input lists do not have the same length
    """
    assert len(list1) == len(list2), "The two input lists must have same length"
    out_list = [idx for idx, (first, second) in enumerate(zip(list1, list2)) if first != second]
    return out_list
888d850b692a1920058f53a4617dcd08d5ed31cc
14,897
def removeprefix(self: str, prefix: str) -> str:
    """
    Backport of `removeprefix` from PEP-616 (Python 3.9+)
    """
    if self.startswith(prefix):
        return self[len(prefix):]
    else:
        return self
109770b8467a3c1e86156e40f9735ade8f2d08aa
14,898
import hashlib


def hash_string(string):
    """
    returns the SHA-1 hex digest of the UTF-8 encoded string
    """
    return hashlib.sha1(string.encode(encoding="utf-8")).hexdigest()
87b31dbf655077234eafcf98b7acbe573e7c0bf8
14,900
def values_of(choices):
    """
    Returns a tuple of values from choices options represented as a tuple of
    tuples (value, label). For example:

    .. sourcecode:: python

        >>> values_of((
        ...     ('1', 'One'),
        ...     ('2', 'Two'),))
        ('1', '2')

    :rtype: tuple
    """
    return tuple([value for value, label in choices])
1b152cfab229d2eddd5bf0ff3cbf53954b1934e5
14,901
def get_minio_notification_response(
    minio_client, bucket_name: str, prefix: str, suffix: str, events: list[str]
):
    """Start listening to minio events. Copied from minio-py."""
    query = {"prefix": prefix, "suffix": suffix, "events": events}
    # pylint: disable=protected-access
    return minio_client._url_open(
        "GET", bucket_name=bucket_name, query=query, preload_content=False
    )
d28bf1bb40a7702a7174972b300c44701c0e45d6
14,902
import re


def estimate_investment_net_worth(results):
    """Currently only using investment table to calculate net worth"""
    key_codes = {
        "A": [1, 1000],
        "B": [1001, 2500],
        "C": [2501, 5000],
        "D": [5001, 15000],
        "E": [15001, 50000],
        "F": [50001, 100000],
        "G": [100001, 1000000],
        "H1": [1000001, 5000000],
        "H2": [
            # This is inaccurate as there is no upper bound
            5000000,
            1000000000,
        ],
        "J": [1, 15000],
        "K": [15001, 50000],
        "L": [50001, 100000],
        "M": [100001, 250000],
        "N": [250001, 500000],
        "O": [500001, 1000000],
        "P1": [1000001, 5000000],
        "P2": [5000001, 25000000],
        "P3": [25000001, 50000000],
        "P4": [
            # This is inaccurate as there is no upper bound
            50000001,
            1000000000,
        ],
    }
    gross_values = []
    for k, v in results["sections"]["Investments and Trusts"]["rows"].items():
        if v["C1"]["text"] != "" and v["C1"]["text"] != "•":
            gross_values.append(key_codes[v["C1"]["text"]])
        if v["D3"]["text"] != "" and v["D3"]["text"] != "•":
            gross_values.append(key_codes[v["D3"]["text"]])
    low = sum(x[0] for x in gross_values)
    high = sum(x[1] for x in gross_values)
    cd = {}
    cd["investment_net_worth"] = (low, high)

    net_change = []
    for k, v in results["sections"]["Investments and Trusts"]["rows"].items():
        B1, D4 = v["B1"]["text"], v["D4"]["text"]
        for code in [B1, D4]:
            if code != "" and code != "•":
                net_change.append(key_codes[code])
    low = sum(x[0] for x in net_change)
    high = sum(x[1] for x in net_change)
    cd["income_gains"] = (low, high)

    liabilities_total = []
    try:
        for k, v in results["sections"]["Liabilities"]["rows"].items():
            if (
                v["Value Code"]["text"] != ""
                and v["Value Code"]["text"] != "•"
            ):
                liabilities_total.append(key_codes[v["Value Code"]["text"]])
        low = sum(x[0] for x in liabilities_total)
        high = sum(x[1] for x in liabilities_total)
        cd["liabilities"] = (low, high)
    except:
        cd["liabilities"] = (0, 0)

    try:
        salaries = []
        for k, v in results["sections"]["Non-Investment Income"][
            "rows"
        ].items():
            if v["Income"]["text"] != "" and v["Income"]["text"] != "•":
                salary = v["Income"]["text"].replace(",", "").strip("$")
                if not re.match(r"^-?\d+(?:\.\d+)?$", salary) is None:
                    salaries.append(float(salary))
        cd["salary_income"] = sum(salaries)
    except:
        cd["salary_income"] = 0
    return cd
a5b0134ab428f0b0f3f3f3ba0d2391325fff87b1
14,904
def objc_provider_framework_name(path):
    """Returns the name of the framework from an `objc` provider path.

    Args:
        path: A path that came from an `objc` provider.

    Returns:
        A string containing the name of the framework (e.g., `Foo` for
        `Foo.framework`).
    """
    return path.rpartition("/")[2].partition(".")[0]
cafb53a64cc8654a992d325faa34721a19d5a38a
14,905
import sys


def average(arr):
    """average of the values, must have more than 0 entries.

    :param arr: list of numbers
    :type arr: number[] a number array
    :return: average
    :rtype: float
    """
    if len(arr) == 0:
        sys.stderr.write("ERROR: no content in array to take average\n")
        sys.exit()
    if len(arr) == 1:
        return arr[0]
    return float(sum(arr)) / float(len(arr))
e3560867ccd709d7c8af56f21d551e140e09450b
14,907
import re


def clean_server_response(d):
    """cleans the server response by replacing:
        '-'     -> None
        '1,000' -> 1000

    :param d: response dictionary
    """
    for key, value in d.items():
        if isinstance(value, str):
            if re.match('-', value):
                value = None
            elif re.search(r'^[0-9,.]+$', value):
                # strip the thousands separator and cast to float
                value = float(re.sub(',', '', value))
            else:
                value = str(value)
            d[key] = value
    return d
55a3a02e1be3502ebc8a7a7b5511e57ab3ec24f5
14,908
from multiprocessing import Process


def run_in_subprocess(func, *args, **kwargs):
    """
    Multiprocessing helper; use when return values and other details
    don't need handling.
    :param func: the function delegated to the process
    :return: the multiprocessing Process object
    """
    process = Process(target=func, args=args, kwargs=kwargs)
    process.daemon = True
    process.start()
    return process
a2a3586ab8975102b38f6caf2b07984287cc4f7c
14,909
import json


def extract_session_parameters(response_json: str):
    """Extracts session parameters from the json response of a Dialogflow webhook."""
    response = json.loads(response_json)
    return response["session_info"]["parameters"]
45cde22d7271512e4f90fc3078c327c9ccbd86b0
14,913
def scalar_multiply(c, u):
    """
    return the vector u scaled by the scalar c
    """
    return tuple(c * a for a in u)
9ace577e852179893e90544754b8010d3476f3ae
14,914
def grouper_orig(cc):
    """grouper based on gaps in timestamps"""
    diffs = cc.timestamps().diff()
    grpr_orig = (diffs > cc.timedelta).cumsum()
    grpr_orig.name = 'g_orig'
    return grpr_orig
ecc755be0d107a3ac86c35069546ac920c4d42ce
14,917
def flip_2d(arr):
    """
    Flip a board layout (nxm becomes mxn)
    @param {Array} arr Rectangle layout
    @return {Array} Rectangle layout flipped along diagonal
    """
    return tuple(zip(*arr[::]))
7204265236d367ef03b858560b1f2fe31f14388c
14,918
def make_save_string(file_name):
    """
    Returns a complete save string for saving a model.
    ======================================
    Input:
        file_name (string) - Filename used in save string.
    Output:
        save_string (string) - Save string for specified model.
    """
    # Filepath for saved file
    filepath = r'C:\Developer\electric_motor_thermal_modelling\Models'
    filepath_full = filepath + '\\' + file_name
    return filepath_full
fb00690f4c5a4d7063220918709a8a45daa0a538
14,919
import numpy


def _kl_divergence(xs, p1, p2):
    """Calculate Kullback-Leibler divergence of p1 and p2, which are assumed
    to be values of two different density functions at the given positions
    xs. Return divergence in nats."""
    with numpy.errstate(divide='ignore', invalid='ignore'):
        kl = p1 * (numpy.log(p1) - numpy.log(p2))
    kl[~numpy.isfinite(kl)] = 0  # small numbers in p1 or p2 can cause NaN/-inf, etc.
    return numpy.trapz(kl, x=xs)
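A hedged sanity check (editor's illustration, not from the original source): for two unit-variance Gaussians the closed form is KL = (mu1 - mu2)^2 / 2 nats, which the numerical integral should approximate on a fine grid.

import numpy

xs = numpy.linspace(-10, 10, 10001)
mu1, mu2 = 0.0, 1.0
p1 = numpy.exp(-(xs - mu1) ** 2 / 2) / numpy.sqrt(2 * numpy.pi)
p2 = numpy.exp(-(xs - mu2) ** 2 / 2) / numpy.sqrt(2 * numpy.pi)
assert abs(_kl_divergence(xs, p1, p2) - (mu1 - mu2) ** 2 / 2) < 1e-3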
433c10350bac2f92b9aafeb61d9fe7823f763073
14,920
def fraction_str(*numbers):
    """normalized numbers list by sum and return as str

    Parameters
    ----------
    - numbers: numbers to print fractions of
    """
    total = sum(numbers)
    fractions = [number / float(total) for number in numbers]
    return '(' + ', '.join('{:0.03f}'.format(item) for item in fractions) + ')'
150a35b6cfcbf623ee91d6b9db416ec359c850d0
14,921
def v7_multimax(iterable):
    """Return a list of all maximum values.

    Or we could make a new list out of the given iterable and then
    find the max and loop over it again just as we did before
    """
    iterable = list(iterable)
    max_item = max(iterable, default=None)
    return [
        item
        for item in iterable
        if item == max_item
    ]
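Usage sketch (editor's illustration): duplicates of the maximum are all kept, and an empty iterable yields an empty list rather than raising, thanks to `max(..., default=None)`.

assert v7_multimax([3, 1, 3, 2]) == [3, 3]
assert v7_multimax([]) == []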
6997aa3c7ec0cd549e25c4016d64ce32c6fa0f61
14,923
def _get_job_dir(jenkins_directory, job_name):
    """
    Returns the directory for a job configuration file relative
    to the jenkins home directory.
    """
    return jenkins_directory + '/jobs/' + job_name
b77a56efa394b4d43209b7f47da96a4f5f0ca96b
14,924
import tempfile
import os


def mkstemp(*args, **kwargs):
    """
    Helper function which does exactly what ``tempfile.mkstemp()`` does but
    accepts another argument, ``close_fd``, which, by default, is true and
    closes the fd before returning the file path. Something commonly done
    throughout Salt's code.
    """
    if "prefix" not in kwargs:
        kwargs["prefix"] = "__hubble.tmp."
    close_fd = kwargs.pop("close_fd", True)
    fd_, f_path = tempfile.mkstemp(*args, **kwargs)
    if close_fd is False:
        return fd_, f_path
    os.close(fd_)
    del fd_
    return f_path
e1867f2b72c8e5165da32154c08cdec321e796cd
14,925
def check_n_levels(parsed, n):
    """Make sure there are n levels in parsed"""
    return len(parsed) == n
f4040e4cf53730e61e5c06fed5dbe8f271020c6a
14,926
import re
import glob


def find_files(input_path, framerange=None):
    """
    Discovers files on the filesystem.
    :param input_path: Path to the file sequence
    :param framerange: optional framerange
    :return: array of single file paths
    """
    files = []
    if '%' not in input_path:
        return [input_path]
    if framerange:
        for part_range in framerange.split(','):
            if '-' in part_range:
                first, last = part_range.split('-')
                for i in range(int(first), int(last) + 1):
                    files.append(input_path % i)
            else:
                files.append(input_path % int(part_range))
    else:
        # NOTE: the original pattern used the character class '[exr|EXR]',
        # which matches a single character; '(?:exr|EXR)' matches the
        # intended extension.
        input_path = re.sub(r'(\%0[4-8]d)(\.(?:exr|EXR))', r'*\2', input_path)
        files = glob.glob(input_path)
        files = sorted(files)
    return files
8f2818b6c8b72f344c70adee0e79ac52c2313902
14,927
import pathlib


def _parse_requirements(path: pathlib.Path):
    """Read and strip comments from a requirements.txt-like file."""
    lines = [line.strip() for line in path.read_text().splitlines() if line]
    return [line for line in lines if not line.startswith('#')]
af010076d79cca83bfbc6590b833e1bf5f68ddcd
14,928
import configparser
from typing import Optional


def input_with_default(label: str, defaults: configparser.ConfigParser,
                       fallback: str = "", key: Optional[str] = None) -> str:
    """Return the string if non empty otherwise use the default."""
    key = label.replace(" ", "_") if key is None else key
    default = defaults.get("DEFAULT", key, fallback=fallback)
    prompt = f"{label}: " if default == "" or default.startswith("{") else f"{label} ({default}): "
    value = input(prompt).strip()
    if value == "":
        return default
    return value
1e900f2164c5ef975b16d74d39c7e587b0444fe3
14,930
import torch
import numpy as np


def one_hot(labels, dim):
    """Convert label indices to one-hot vector"""
    batch_size = labels.size(0)
    out = torch.zeros(batch_size, dim)
    out[np.arange(batch_size), labels.long()] = 1
    return out
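Usage sketch (editor's illustration): each row of the result gets a single 1 at its label's index.

import torch

labels = torch.tensor([0, 2, 1])
print(one_hot(labels, dim=3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])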
ba7cefef066229bd578fb7cdec4f4033f353cd73
14,931
from typing import Iterable


def detokenize(tokens: Iterable[str], with_treebank: bool = True) -> str:
    """
    Given a list of tokens, join them together into a string.
    with_treebank = True is typically used when rendering utterances, so we
    don't need to deal with things like "andrew's"
    with_treebank = False is typically for rendering express.
    """
    if with_treebank:
        # collapse double spaces left over from joining empty tokens
        return " ".join(tokens).replace("  ", " ")
    return "".join(tokens)
d6a90c5181a18fe45118b83bdf605c534ceeeac8
14,932
def pair(k1, k2):
    """
    Cantor pairing function
    """
    # Integer arithmetic avoids float precision loss for large inputs;
    # (k1 + k2) * (k1 + k2 + 1) is always even, so // is exact.
    z = (k1 + k2) * (k1 + k2 + 1) // 2 + k2
    return z
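A hedged companion sketch (editor's illustration): the Cantor pairing is a bijection on pairs of non-negative integers, so it can be inverted; `unpair` below is an assumed helper name, not part of the original snippet.

from math import isqrt

def unpair(z):
    # Recover (k1, k2) from z = pair(k1, k2).
    w = (isqrt(8 * z + 1) - 1) // 2   # index of the diagonal containing z
    t = w * (w + 1) // 2              # first value on that diagonal
    k2 = z - t
    k1 = w - k2
    return k1, k2

assert pair(3, 4) == 32
assert unpair(pair(3, 4)) == (3, 4)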
83de1d237049ac8f76ec09f7b23037ab581792d5
14,933
import os
from typing import Counter


def test_count(path):
    """Counts terms in each test file"""
    legit_docs = []  # stores term frequencies of each legit file
    spam_docs = []   # stores term frequencies of each spam file

    path_legit = path + "/test/legitimate"
    for root, dirs, filenames in os.walk(path_legit):
        for filename in filenames:
            if filename.endswith('.txt'):
                filepath = path_legit + "/" + filename
                file = open(filepath, "r")
                text = file.read().split()
                legit_docs.append(Counter(text))  # append term frequency dictionary to the list
                file.close()

    path_spam = path + "/test/spam"
    for root, dirs, filenames in os.walk(path_spam):
        for filename in filenames:
            if filename.endswith('.txt'):
                filepath = path_spam + "/" + filename
                file = open(filepath, "r")
                text = file.read().split()
                spam_docs.append(Counter(text))  # append term frequency dictionary to the list
                file.close()

    return (legit_docs, spam_docs)
f9887e549acff96b73be6266edabe1938478a836
14,934
import json


def rpc_error(message='Invalid Request'):
    """
    Generates an rpc error message
    """
    return json.dumps({"result": None, 'error': {'message': message}, 'id': 1})
4aa061e25be938b882cac71cb935f2153ad958cf
14,935
import os

import yaml


def get_config(confname="main.yaml", confdir=""):
    """Try to retrieve the best configuration file

    :param confname: name of the configuration file
    :param confdir: name of the configuration directory
    :return: a dictionary containing the configuration
    """
    _global_config = {}
    for filename in (
        confname,
        os.path.join(os.getcwd(), ".{}".format(confname)),
        os.path.join(os.getenv("HOME", "/tmp"), ".{}".format(confname)),  # nosec
        "/etc/spider/{0}/{1}".format(confdir, confname),
        os.path.join(confdir, confname),
    ):
        try:
            _global_config = yaml.safe_load(open(filename))
        except FileNotFoundError:
            continue
    if not _global_config:
        raise BaseException("Could not find a usable configuration file {}/{}".format(confdir, confname))
    return _global_config
f3f5fbcd9c84be701d39b26f798cf13752f6ceb7
14,937
def gen_input_config():
    """
    Generates an input configuration for providing to a vault client in tests
    """
    def _gen_input_config(
        vault_secrets={"acme.user": "secret/acme/server/user", "acme.pwd": "secret/acme/server/user"}
    ):
        input_config = {
            "acme": {"host": "https://acme.com", "cert_path": "/secret/cert"},
            "vault_secrets": vault_secrets,
        }
        return input_config
    return _gen_input_config
2d238e91de03641889c0aac9a41d2284294af15d
14,938
def celsius_to_fahrenheit(bot, trigger):
    """
    Convert Celsius to Fahrenheit
    """
    try:
        celsius = float(trigger.group(2))
    except ValueError:
        bot.reply(
            "Could not understand input. Please input the temperature in Celsius, e.g. !fa 100"
        )
        return False
    if celsius is not None:
        bot.reply(
            "%.1f degrees Celsius is equal to %.1f degrees Fahrenheit"
            % (celsius, (celsius * 1.8 + 32))
        )
04cfa7b0e7b5a2b2b784dc7b1d158efe1f3dfbb2
14,940
def create_init_weights_fn(initialization_fn):
    """Returns a function that wraps :func:`initialization_function` and
    applies it to modules that have the :attr:`weight` attribute.

    Args:
        initialization_fn (callable): A function that takes in a tensor
            and initializes it.

    Returns:
        Function that takes in PyTorch modules and initializes
        their weights. Can be used as follows:

        .. code-block:: python

            init_fn = create_init_weights_fn(variance_scaling_)
            network.apply(init_fn)
    """
    if initialization_fn is not None:
        def init_weights(m):
            if hasattr(m, "weight"):
                initialization_fn(m.weight)
        return init_weights
    else:
        return lambda m: None
9b8344bf86ff02181228f2bf6412d1c5e867c876
14,941
def extract_bits(n, n_bits, offset_from_lsb):
    """Extract a number of bits from an integer.

    Example:
        >>> bin(extract_bits(0b1101011001111010, n_bits=5, offset_from_lsb=7))
        '0b1100'

        0b1101011001111010
              ^^^^^<- 7 ->

    The bits marked with ^ will be extracted. The offset is counted from the
    LSB, with the LSB itself having the offset 0.
    """
    try:
        bitmask = (2**n_bits - 1) << offset_from_lsb
    except TypeError as err:
        raise ValueError(err)
    return (n & bitmask) >> offset_from_lsb
08a92d319975915b3b0f453144cc73cba620c5b1
14,943
import torch


def tanh_clip(x, clip_val=10.0):
    """soft clip values to the range [-clip_val, +clip_val]"""
    if clip_val is not None:
        x_clip = clip_val * torch.tanh((1.0 / clip_val) * x)
    else:
        x_clip = x
    return x_clip
48205e984549b1bdf38b66155a1fd7f57c1bafa9
14,945
def _category_errmsg(particle, category: str) -> str:
    """
    Return an error message when an attribute raises an
    `~plasmapy.utils.InvalidElementError`,
    `~plasmapy.utils.InvalidIonError`, or
    `~plasmapy.utils.InvalidIsotopeError`.
    """
    article = 'an' if category[0] in 'aeiouAEIOU' else 'a'
    errmsg = (
        f"The particle {particle} is not {article} {category}, "
        f"so this attribute is not available.")
    return errmsg
c9d916f6808d989aee3b9523f2322c5a2586367a
14,946
def get_species_units(input):
    """
    Retrieve units for GEOS-CF species and derived values
    """
    if 'NOy/' in input:
        units = 'unitless'
    elif 'pm25' in input:
        units = r'$\mu$g m$^{-3}$'
    elif 'Dust' in input:
        units = r'$\mu$g m$^{-3}$'
    elif 'NOy' in input:
        units = 'ppbv'
    else:
        units = 'v/v'
    return units
5daa3a065afd99cc713e3084c4e694d9fa789ccc
14,950
def get_git_sha_from_dockerurl(docker_url: str, long: bool = False) -> str:
    """
    We encode the sha of the code that built a docker image *in* the docker
    url. This function takes that url as input and outputs the sha.
    """
    parts = docker_url.split("/")
    parts = parts[-1].split("-")
    sha = parts[-1]
    return sha if long else sha[:8]
4bb598da76ada2fdf34fdc3ff24bf046dbeaf9bf
14,951
def get_class_name(obj):
    """
    Dunders in python are ugly, this gets the class name of an object
    """
    return obj.__class__.__name__
ad1940f84f58031751be503dc686adef3cfcb48f
14,952
def get_token(context):
    """
    Retrieves the token the lambda was assigned when it was invoked.

    :param context: AWS Lambda context
    :type context: object
    :return: Lambda token, usually a UUID
    :rtype: str
    """
    # If that fails, fall back to the requestID
    # http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
    return context.aws_request_id
83fced7364d6fad8d632edfb0d82efb7bb7cf5de
14,954
def nonlinearitywarning(band, bin_ix, events, verbose=0):
    """
    Flag count rates above the 10% local nonlinearity dropoff, per the
    calibration paper.

    :param band: The band that is being used, either 'FUV' or 'NUV'.
    :type band: str
    :param bin_ix: Array indices designating which events are in the time bin
        of interest.
    :type bin_ix: numpy.ndarray
    :param events: Set of photon events to check if they are in the
        non-linearity regime.
    :type events: dict
    :param verbose: Verbosity level, a value of 0 is minimum verbosity.
    :type verbose: int
    :returns: bool -- Returns True/False whether a given set of events are at
        the non-linearity regime.
    """
    cps_10p_rolloff = {'NUV': 311, 'FUV': 109}
    cps = events['flat_counts'][bin_ix] / events['exptime'][bin_ix]
    return True if cps >= cps_10p_rolloff[band] else False
2a85bf6c6100e39caef169f57b26c72b6d22e257
14,955
import os
import glob


def remove_file(file_path):
    """Remove a feature/raster and all of its ancillary files

    This function was grabbed from python_common, as it's the only
    function from that module that is called within geofunctions.

    Args:
        file_path (str): Filepath of the raster or shapefile to remove

    Returns:
        bool: True on success
    """
    file_ws = os.path.dirname(file_path)
    for file_name in glob.glob(os.path.splitext(file_path)[0] + ".*"):
        os.remove(os.path.join(file_ws, file_name))
    return True
38a7b6d2154467505aede55c0fbb8dd7e508856e
14,956
def cx2x(cx, e):
    """Transform from *cx* column index value to *x* value, using the
    *e.css('cw')* (column width) as column measure."""
    if cx is None:
        x = 0
    else:
        x = e.parent.pl + cx * (e.css('cw', 0) + e.gw)
    return x
5d04d7f017c0fa54e19c649133139b2e4439562f
14,957
def q(s):
    """
    Quote the given string
    """
    return "'" + str(s) + "'"
0c0a1477e740b430d5e6997c0115ef317457526c
14,958
def speaker_companies(talk):
    """Return a sorted list of the companies of the speakers of the talk."""
    companies = sorted(
        set(speaker.user.attendeeprofile.company
            for speaker in talk.speakers.all()
            if speaker.user.attendeeprofile))
    return companies
39a1cbf046779dd3ad2e798c950fa3d8211a08e1
14,959
def _diff_count(string1, string2):
    """
    Count the number of characters by which two strings differ.
    """
    assert isinstance(string1, str)
    assert isinstance(string2, str)
    if string1 == string2:
        return 0
    minlen = min(len(string1), len(string2))
    diffcount = abs(len(string1) - len(string2))
    for ii in range(0, minlen):
        if string1[ii] != string2[ii]:
            diffcount += 1
    return diffcount
82c9ef604db3afee79266e4eb7851164ff43bfd1
14,962
def calc_basic_indispensability(p, n):
    """
    >>> calc_basic_indispensability(3, 0)
    2
    >>> calc_basic_indispensability(2, 1)
    0
    >>> calc_basic_indispensability(5, 4)
    2
    >>> calc_basic_indispensability(7, 5)
    2
    >>> calc_basic_indispensability(13, 4)
    10
    """
    table = {
        2: [1, 0],
        3: [2, 0, 1],
        5: [4, 0, 1, 3, 2],
        7: [6, 0, 4, 1, 5, 2, 3],
        11: [10, 0, 6, 3, 9, 1, 7, 2, 8, 4, 5],
        13: [12, 0, 7, 3, 10, 1, 8, 4, 11, 2, 9, 5, 6],
        17: [16, 0, 9, 4, 13, 2, 11, 6, 15, 1, 10, 5, 14, 3, 12, 7, 8],
        19: [18, 0, 10, 3, 13, 6, 16, 1, 11, 4, 14, 7, 17, 2, 12, 5, 15, 8, 9],
        23: [22, 0, 12, 6, 18, 3, 15, 9, 21, 1, 13, 7, 19, 2, 14, 8, 20, 4, 16, 5, 17, 10, 11],
        29: [28, 0, 15, 7, 22, 4, 19, 11, 26, 1, 16, 8, 23, 5, 20, 12, 27, 2, 17, 9, 24, 3, 18, 10, 25, 6, 21, 13, 14],
        31: [30, 0, 16, 5, 21, 10, 26, 3, 19, 8, 24, 13, 29, 1, 17, 6, 22, 11, 27, 2, 18, 7, 23, 12, 28, 4, 20, 9, 25, 14, 15],
    }
    return table[p][n]
44189ce2c5244bed5e56d9efde616afebc47c1f2
14,963
def getInversePermutation(permutation):
    """Invert a given permutation vector.

    :param list | tuple | np.ndarray permutation: Permutation vector to invert.
    :return: Inverted permutation vector.
    :rtype: list
    """
    inverse = [0] * len(permutation)
    for i, p in enumerate(permutation):
        inverse[p] = i
    return inverse
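Usage sketch (editor's illustration): the inverse satisfies inverse[p[i]] == i, so composing the two gives the identity.

perm = [2, 0, 1]
inv = getInversePermutation(perm)  # [1, 2, 0]
assert [inv[p] for p in perm] == list(range(len(perm)))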
f8fe339f2b0a7fc8180252b99f3fce742f215a13
14,964
def is_image(content_type):
    """is the given content_type string for an image?

    Args:
        content_type: string containing Content-Type HTTP header value

    Returns:
        Boolean
    """
    return (str(content_type).count("image") > 0)
c69f163e09d8b68e179dd8765ef8401d988ac433
14,967
import numpy


def matrix_multiply(m1, m2):
    """
    Multiplies the input matrices.

    Inputs:
        m1, m2: the input matrices
    Returns:
        result: matrix product of m1 and m2 in a list of floats
    """
    product = numpy.matmul(m1, m2)
    if type(product) == numpy.int64:
        return float(product)
    else:
        result = list(product)
        return result
0447553f7fa7968cbb13fcaf7319391650377e49
14,968
import base64


def read_encoded(filename: str) -> str:
    """Read a file and return the base64 encoding of its contents."""
    with open(filename, 'rb') as infile:
        return base64.b64encode(infile.read()).decode('utf-8')
4d2c5c8f36b2ebe67a67714f6313a32d4d6bf005
14,969
import re


def strip_irc_colors(data):
    """Strip mirc colors from string. Expects data to be decoded."""
    return re.sub(r'[\x02\x0F\x16\x1D\x1F]|\x03(\d{1,2}(,\d{1,2})?)?', '', data)
2c4e480cc00e3704a0086522c8bb7409fca0af6e
14,970
def normalised_ellipse_mask(ellipse):
    """Return a normalized copy of the supplied ellipse.

    Here 'normalised' means that the rotation is as close to zero as possible.

    Examples:
        >>> normalised_ellipse_mask(
        ...     ((1, 2), (100, 200), 90)
        ... )
        ((1, 2), (200, 100), 0)
    """
    # Don't overwrite the original, we'll return a new ellipse.
    centre, extents, rotation = ellipse
    centre = list(centre[:])
    extents = list(extents[:])

    # Get the rotation as close to zero as possible.
    while rotation > 45:
        extents[0], extents[1] = extents[1], extents[0]
        rotation -= 90
    while rotation < -45:
        extents[0], extents[1] = extents[1], extents[0]
        rotation += 90

    return tuple(centre), tuple(extents), rotation
af5316ace76642667340a986f3d1c2d69934a333
14,971
from functools import reduce


def xor(*values):
    """An `exclusive or`_ operation on a list of values.

    .. _exclusive or: https://en.wikipedia.org/wiki/Exclusive_or

    :rtype: bool
    :returns: ``True`` when an odd number of values are ``True`` or ``False``
        when an even number of values are ``True``.

    .. note:: The provided values *must* support casting as a ``bool``.
    """
    return reduce(lambda x, y: bool(x) ^ bool(y), values)
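Usage sketch (editor's illustration): the reduction makes this a parity check over any number of arguments.

assert xor(True, False) is True
assert xor(True, True) is False
assert xor(1, 1, 1) is True  # odd number of truthy values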
d5dfbff2648f208dc192fd06da0c988c6bb9baaf
14,973
def probability_of_sum(total: int, dice1, dice2):
    """
    Brief: Basic probability - Dice cast
    Suppose a pair of fair 6-sided dice are thrown. What is the probability
    that the sum of the rolls is 6? (Answer as a simple fraction of integers)
    reference: https://statweb.stanford.edu/~susan/courses/s60/split/node65.html
    """
    n = dice1.shape[0]
    m = dice2.shape[0]
    comb = n * m
    count = 0
    for i in dice1:
        for j in dice2:
            s = int(i + j)  # renamed from `sum`, which shadowed the builtin
            if s == total:
                count += 1
    prob = count / comb
    print("{:.2%}".format(prob))
    return prob  # originally `return print(...)`, which always returned None
b48e9db4dd40048d24a3dfb481406346076b7132
14,974
def get_source_with_id(result):
    """Return a document's `_source` field with its `_id` added.

    Parameters
    ----------
    result : dict
        A document from a set of Elasticsearch search results.

    Returns
    -------
    dict
        The document's `_source` field updated with the doc's `_id`.
    """
    result['_source'].update({'_id': result['_id']})
    return result['_source']
be9b25ad65a8474aa41d3f927664abdb89a674d5
14,975
def apply_x_frame(response):
    """Include X-Frame-Options header in http response to protect against clickjacking."""
    response.headers["X-Frame-Options"] = "SAMEORIGIN"
    return response
3cc3b572eb64e856512fd8b41191f601bcbcc96c
14,976
def multiply_probs(ln_pa, ln_pb):
    """
    Returns the log probability of multiplied probabilities.
    Pc = Pa * Pb => ln(Pc) = ln(Pa) + ln(Pb)
    """
    return ln_pa + ln_pb
5e541072c9aaf327536196ab4ffc9e3bf488ccf0
14,977
import subprocess


def del_ovs_port(bridge, port):
    """
    Remove port from OVS bridge.
    """
    cmd = "sudo ovs-vsctl del-port {0} {1}".format(bridge, port)
    return subprocess.call(cmd.split(" "))
8682c5bda58a78f42b5025ae3ef0db6e0d0576db
14,978
def get_height(image):
    """get_height(image) -> integer height of the image (number of rows).

    Input image must be rectangular list of lists. The height is
    taken to be the number of rows.
    """
    return len(image)
3aa94c4b2458d2a233f32ee10889e52566c04ecb
14,979
import torch


def normalize_image_into_standard_form(img):
    """the input image should be in gray x,y"""
    img = (img - img.min()) / (img.max() - img.min())
    sz = [1, 1] + list(img.shape)
    img_t = torch.Tensor(img)
    img_t = img_t.view(sz)
    return img_t
8da901135cffc3cc36d5be969ee2e2f94f75f74e
14,981
def is_power_of_two(n: int) -> bool:
    """
    >>> is_power_of_two(1024)
    True
    >>> is_power_of_two(5)
    False
    >>> is_power_of_two(0)
    False
    """
    # n & (n - 1) clears the lowest set bit; the guard excludes 0 (and
    # negatives), for which the bit trick alone would wrongly return True.
    return n > 0 and n & (n - 1) == 0
e56997fd768c5b6e74f179969b1681a1e0c33e78
14,982
import tempfile
import pathlib
import subprocess
import os


def input_from_file(starting_text=''):
    """Ask user for input by opening a file in their preferred editor.

    The file is filled with ``starting_text``. If the user quits the editor
    without saving (or if an error occurs), ``None`` is returned.
    """
    with tempfile.NamedTemporaryFile('w+') as tmp:
        path = pathlib.Path(tmp.name)
        path.write_text(starting_text)
        with subprocess.Popen(f"$EDITOR {path}", shell=True) as process:
            process.wait()
            if process.returncode != os.EX_OK:
                return None
        return path.read_text()
131f643fa7d3181ab13469364761801512c6b273
14,983
import os


def output(data):
    """Takes a dictionary |data| of filenames and execution counts and
    generates a LCOV coverage output."""
    out = ''
    for filename, counts in data.items():  # was iteritems(), Python 2 only
        out += 'SF:%s\n' % (os.path.abspath(filename))
        for line, count in enumerate(counts):
            if count is not None:
                out += 'DA:%d,%s\n' % (line + 1, count)
        out += 'end_of_record\n'
    return out
13d6f2494ced8519be39d4c66d2e103fc21142c9
14,984
def format_rows_to_columns(forecast_dict):
    """
    Helper function to reformat list of courses from Student object's
    self.plan (student_object.py) into a format appropriate for forecast.html
    i.e. rows from student object will be converted into columns for
    forecast.html
    """
    # these lists will contain the info for the columns but will still need
    # to be parsed as columns later. for now, they are lists of lists, where
    # the inner lists represent different years. i.e. fall_col will have the
    # form: fall_col = [ [year 1 fall courses], [year 2 fall courses], etc]
    fall_col = []
    winter_col = []
    spring_col = []
    summer_col = []

    student_plan = forecast_dict

    # grabs the inner lists from the dictionary that represent each term from
    # their corresponding year (the key represents the year, the value
    # represents the courses taken that year, with its inner lists being each
    # term in that year). stores all the lists of the Fall term courses from
    # each year in "fall_col", each year's Winter terms in winter_col, and so on.
    for year in student_plan:
        for term_i in range(len(student_plan[year])):
            term = student_plan[year][term_i]
            if term_i == 0:
                fall_col.append(term)
            elif term_i == 1:
                winter_col.append(term)
            elif term_i == 2:
                spring_col.append(term)
            elif term_i == 3:
                summer_col.append(term)

    # formatted_columns is a list of sub-lists, one per year. These sub-lists
    # represent the courses taken in the 1st year, 2nd year, and so on. They
    # are formatted to display as columns rather than rows. For example:
    # example = [
    #     [["CIS 210", "CIS 211", "CIS 212", ""], ["CIS 110", "CIS 111", "CIS 199", ""]],
    #     [["CIS 313", "CIS 315", "CIS 425", ""], ["CIS 314", "MATH 343", "CIS 471", ""]]
    # ]
    # This example list represents a possible course-plan for the 1st and 2nd
    # year, where:
    #   Year 1 Fall Term the student is taking CIS 210 and CIS 110
    #   Year 1 Winter Term the student is taking CIS 211 and CIS 111
    #   Year 1 Spring Term the student is taking CIS 212 and CIS 199
    #   Year 1 Summer Term the student is taking no courses
    #   Year 2 Fall Term the student is taking CIS 313 and CIS 314
    #   Year 2 Winter Term the student is taking CIS 315 and MATH 343
    #   Year 2 Spring Term the student is taking CIS 425 and CIS 471
    #   Year 2 Summer Term the student is taking no courses
    # Assumes a maximum of 4 courses allowed per term
    formatted_columns = []
    for year in student_plan:
        # This is hardcoding the max number of courses taken each term
        # (4 lists of empty strings to be in line with the established
        # 16 credit max set in student_objects)
        formatted_columns.append([
            ["", "", "", ""], ["", "", "", ""], ["", "", "", ""], ["", "", "", ""]
        ])

    # In formatted_columns[i][j][t]: i is the Year, j is the course slot in
    # that term, and t hard-codes the term (0=Fall, 1=Winter, 2=Spring, 3=Summer).
    fall_col_len = len(fall_col)
    for i in range(fall_col_len):
        fall_col_i_len = len(fall_col[i])
        for j in range(fall_col_i_len):
            formatted_columns[i][j][0] = fall_col[i][j].name

    winter_col_len = len(winter_col)
    for i in range(winter_col_len):
        winter_col_i_len = len(winter_col[i])
        for j in range(winter_col_i_len):
            formatted_columns[i][j][1] = winter_col[i][j].name

    spring_col_len = len(spring_col)
    for i in range(spring_col_len):
        spring_col_i_len = len(spring_col[i])
        for j in range(spring_col_i_len):
            formatted_columns[i][j][2] = spring_col[i][j].name

    summer_col_len = len(summer_col)
    for i in range(summer_col_len):
        summer_col_i_len = len(summer_col[i])
        for j in range(summer_col_i_len):
            formatted_columns[i][j][3] = summer_col[i][j].name

    return formatted_columns
e7e5fc765641543130be4c103e77b73ed1e2b53e
14,986
def get_query_range(count: int, page: int):
    """Generate query for range of the search results

    :type count: ``int``
    :param count: Max amount of the search results

    :type page: ``int``
    :param page: Current page, depends on count

    :return: A query range
    :rtype: ``str``
    """
    if page < 1:
        raise ValueError('page value can\'t be less than 1')
    if count < 1:
        raise ValueError('max results value can\'t be less than 1')
    return f'from={(page - 1) * count + 1}&to={page * count}'
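Usage sketch (editor's illustration): pages are 1-based and the resulting range is inclusive on both ends.

assert get_query_range(count=50, page=1) == 'from=1&to=50'
assert get_query_range(count=50, page=2) == 'from=51&to=100'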
c29285c56eed77275d2ec28a45de148ce3b7f591
14,987
def chunkify(l, chunk_size):
    """Return list of evenly sized lists"""
    book_chunks = []
    for i in range(0, len(l), chunk_size):
        book_chunks.append(l[i:i + chunk_size])
    return book_chunks
d205d8a916582e86d736e1372a18d7dd173e4683
14,989
def sort_and_pick_median(seq):
    """Standard and most apparent way or algorithm to find median of some
    sequence or list, O(nlog(n))

    :param seq: list, string, tuple, or other type
    :return: the value of the median of the sequence
    """
    i = (len(seq) - 1) // 2
    return sorted(seq)[i]
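Usage sketch (editor's illustration): with `(len(seq) - 1) // 2`, an even-length sequence yields the lower of the two middle elements rather than their mean.

assert sort_and_pick_median([5, 1, 3]) == 3
assert sort_and_pick_median([4, 1, 3, 2]) == 2  # lower middle, not 2.5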
7f8566493d8764a9b6e68cbf798117ae2cdbc9dc
14,991
def dataDirection_1(datas, offset=0):
    """
    Positive-orient a smaller-is-better (min-type) indicator.
    :param datas: an iterable of values
    :param offset: guards against division-by-zero errors
    """
    def normalization(data):
        return 1 / (data + offset)

    return list(map(normalization, datas))
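Usage sketch (editor's illustration): smaller inputs map to larger scores, which is the point of positive-orienting a min-type indicator.

assert dataDirection_1([1, 2, 4]) == [1.0, 0.5, 0.25]
assert dataDirection_1([0, 1], offset=1) == [1.0, 0.5]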
d5a70e8e3b0e232d6e3f7a420b815a2323efe065
14,992
def single_file_output(in_file):
    """
    Extract sample name from file
    ie. /path/to/test.fa --> test

    :param in_file: path/to/file (str)
    :return: extracted sample name (str)
    """
    return in_file.split('/')[-1].split('.')[-2]
4dd5c5f0de584f479fdf91e88cdede33c8072f10
14,993
def single_exon_transcript(single_exon_dict, transiddict):
    """Give single exon reads a transcript.

    Parameters
    ----------
    single_exon_dict : dict
        read id is key, value is list [chromosome,start,end,strand]
    transiddict : dict
        read id is key, transcript id is value

    Returns
    -------
    tuple of dict
        a dictionary of single exon reads (read id is key, transcript id is
        value) and a dictionary of multimapping reads (read id is key, set of
        transcript ids is value)
    """
    single_exon_reads_transcripts = {}
    single_exon_reads_transcripts_multi = {}
    for read in single_exon_dict.keys():
        if read in single_exon_reads_transcripts.keys():
            print("Read " + read + " has multimapping!")
            single_exon_reads_transcripts_multi.setdefault(read, set()).add(transiddict[read])
            continue
        else:
            single_exon_reads_transcripts[read] = transiddict[read]
    return single_exon_reads_transcripts, single_exon_reads_transcripts_multi
215e7e2c67ab5f880db58a3a5efd03c5397e9ab4
14,994
def VerifyFileID(fi_disk, fi_ours):
    """Verifies that two file IDs are matching.

    Differences in the inode/device are not accepted, but an older
    timestamp for fi_disk is accepted.

    @param fi_disk: tuple (dev, inode, mtime) representing the actual file data
    @param fi_ours: tuple (dev, inode, mtime) representing the last written file data
    @rtype: boolean
    """
    (d1, i1, m1) = fi_disk
    (d2, i2, m2) = fi_ours
    return (d1, i1) == (d2, i2) and m1 <= m2
c32acadd21b249904374eb44bcab3db42fe3972c
14,995
import os


def get_migration_filenames(app_dir):
    """
    Return list of filenames of all migrations in the given app dir.
    """
    filenames = []
    migrations_dir = os.path.join(app_dir, 'migrations')
    for filename in os.listdir(migrations_dir):
        if not filename.endswith('.py'):
            continue
        if filename == '__init__.py':
            continue
        filenames.append(filename)
    return filenames
21d7e477c27b83ef74d851476c06a4d9e6faad73
14,997
import six


def list_as_string(option):
    """Returns the argument as a string.

    Useful for ensuring that ConfigObj options are always returned as a
    string, despite the presence of a comma in the middle.

    Example:
    >>> print(list_as_string('a string'))
    a string
    >>> print(list_as_string(['a', 'string']))
    a, string
    >>> print(list_as_string('Reno, NV'))
    Reno, NV
    """
    # Check if it's already a string.
    if option is not None and not isinstance(option, six.string_types):
        return ', '.join(option)
    return option
0bd269fb455ce8eb20c4c0838fd4611832686baf
14,999
import subprocess


def _linux_partition_mount_point(part_name):
    """Given a partition name, returns the mount point for the
    partition, or None if not mounted.
    """
    if not part_name.startswith('/dev'):
        part_name = '/dev/' + part_name
    cmd = ['findmnt', part_name, '--noheadings', '--output', 'TARGET']
    try:
        out = subprocess.check_output(cmd)
        return out.strip()
    except subprocess.CalledProcessError:
        # Not mounted...
        return None
b859433e2a02eca7a78b5fd68d196a998f29b1ad
15,000
def nip(string):
    """
    Rather than escaping special chars like above, this simply deletes them.
    For use in CSS classes and other restrictive environments.
    N.B. THIS ALSO PUTS EVERYTHING IN LOWERCASE FOR CONSISTENCY
    """
    out = ""
    for char in string.lower():
        if char.isalnum() or char in "_-":
            out += char
    if out == "":
        out = "BADSTRING"
    return out
cf01e0d2736bbbf1b8046d23bc1f1d2dbfba6da9
15,001
def rewriter(field, rules):
    """Create a template field function that rewrites the given field
    with the given rewriting rules.

    ``rules`` must be a list of (pattern, replacement) pairs.
    """
    def fieldfunc(item):
        value = item._values_fixed[field]
        for pattern, replacement in rules:
            if pattern.match(value.lower()):
                # Rewrite activated.
                return replacement
        # Not activated; return original value.
        return value
    return fieldfunc
c9cfba499f1dc8683df3918a9f497ec69ad07700
15,002
def get_response_headers(response):
    """
    Django 3.2+ comes with changes to the response headers, and the
    _headers attribute is not present in the newer versions. So this
    method checks if it's present in the response or not, else returns
    the newer headers attribute from the response.
    """
    if hasattr(response, '_headers'):
        response_headers = response._headers
    else:
        response_headers = response.headers
    return response_headers
a4b135dffbe9ee85c602ea665e43590ff8beedd1
15,003
def similarity(event, places):
    """
    Return a list of dissimilar events to the given event. This is the
    successor function used in the search tree: a node's successors are the
    list of dissimilar options for the next choice.

    event (dict): A potential place/event.
    places (list): All potential places.
    return: List of dissimilar places.
    """
    dissimilar = []
    for place in places:
        similarity = 0
        if place["price"] == event["price"]:
            similarity += 1.
        for type1 in event["types"]:
            for type2 in place["types"]:
                if type1 == type2:
                    similarity += 1.0 / float(len(place["types"]))
        # 1.67 is the empirically generated threshold for similarity
        # The empirical process behind this is described in the paper
        if similarity <= 1.7:
            dissimilar.append(place)
    return dissimilar
ca9a171abe37e7e9d48a604ca92c56092d4fa8c8
15,004
def define_band_colors():
    """
    Define dictionary of colors to be used in plots (e.g. histograms)
    for representing data in specific bands
    """
    band_color_rep = {'B': 'blue',
                      'G': 'green',
                      'R': 'red',
                      'N': 'black',
                      'A': None}
    return band_color_rep
a17bf7d0a2742e2cf2c94beec3f97b27e1c640e3
15,005
def get_varinfo_from_table(discipline, parmcat, parmnum):
    """
    Return the GRIB2 variable information given values of `discipline`,
    `parmcat`, and `parmnum`. NOTE: This function allows for all arguments
    to be converted to a string type if arguments are integer.

    Parameters
    ----------
    **`discipline`**: `int` or `str` of Discipline code value of a GRIB2 message.

    **`parmcat`**: `int` or `str` of Parameter Category value of a GRIB2 message.

    **`parmnum`**: `int` or `str` of Parameter Number value of a GRIB2 message.

    Returns
    -------
    **`list`**: containing variable information. "Unknown" is given for item
    of information if variable is not found.
        - list[0] = full name
        - list[1] = units
        - list[2] = short name (abbreviated name)
    """
    if isinstance(discipline, int):
        discipline = str(discipline)
    if isinstance(parmcat, int):
        parmcat = str(parmcat)
    if isinstance(parmnum, int):
        parmnum = str(parmnum)
    try:
        tblname = 'table_4_2_' + discipline + '_' + parmcat
        modname = '.section4_discipline' + discipline
        exec('from ' + modname + ' import *')
        return locals()[tblname][parmnum]
    except (ImportError, KeyError):
        return ['Unknown', 'Unknown', 'Unknown']
d6e3b08dfcbfb023a2725ef1ffbb44448881fa64
15,006