content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
def sum_divisible_by_n(target, n):
    """
    Note that for 5: 5+10+15+...+995 = 5*(1+2+...+199)
    Also note: 1+2+3+...+p = ½*p*(p+1)
    """
    p = target // n
    return n * (p * (p + 1)) // 2
3dc42da324d19813242b27ff9794ce85163ab7f0
15,562
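A quick sanity check of sum_divisible_by_n above, using the classic "multiples of 3 or 5 below 1000" exercise; the inclusion-exclusion step is my own illustration, not part of the row above:

# Multiples of 3 or 5 below 1000, by inclusion-exclusion.
# target=999 makes the bound exclusive of 1000 itself.
below = 999
total = (sum_divisible_by_n(below, 3)
         + sum_divisible_by_n(below, 5)
         - sum_divisible_by_n(below, 15))
print(total)  # 233168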
def invalid_ds_vsby(i, v):
    """Checks if visibility is inconsistent with DS or SS"""
    if i == '+' and v >= 0.3:
        return True
    elif i == '' and not 0.3 < v < 0.6:
        return True
    return False
c4ee05f4ce8b19f76dfdc1847dc8f3eaf7e1b11b
15,564
def get_valid_test_pipeline():
    """Return an arbitrary pipeline definition."""
    return {
        'sg1': [
            'step1',
            'step2',
            {'name': 'step3key1',
             'in': {'in3k1_1': 'v3k1', 'in3k1_2': 'v3k2'}},
            'step4'
        ],
        'sg2': False,
        'sg3': 77,
        'sg4': None
    }
23611472d2cb0d513c59cec42d45893cb328303f
15,565
import torch

def instance_propagate(annots_inst, masks_inst, is_sequence=True):
    """
    Propagate the last valid instance to the last position.

    Args:
        annots_inst: (inst, [seq,] ..., dim)
        masks_inst: (inst, [seq,] ...)
    Returns:
        new_annots, new_masks
    Notes:
        inst-first enforced; if ndim > 3, then dim(1)=seq is assumed.
        Useful when multiple instances are available for each sample.
    """
    if is_sequence:
        has_insts = (masks_inst.sum(1) > 0).float()
        has_insts = has_insts.unsqueeze(1).expand_as(masks_inst)
    else:
        has_insts = masks_inst
    new_annots, new_masks = [annots_inst[0]], [masks_inst[0]]
    for annot_inst, mask_inst, has_inst in zip(
            annots_inst[1:], masks_inst[1:], has_insts[1:]):
        new_mask = has_inst * mask_inst + (1 - has_inst) * new_masks[-1]
        new_masks.append(new_mask)
        has_inst = has_inst.unsqueeze(-1)
        new_annot = has_inst * annot_inst + (1 - has_inst) * new_annots[-1]
        new_annots.append(new_annot)
    new_annots = torch.stack(new_annots)
    new_masks = torch.stack(new_masks)
    return new_annots, new_masks
f8f231b1e2601df2c634458a8db34407f910cd51
15,566
def get_duration_in_time(duration):
    """
    Calculate the duration in hh:mm:ss and return it

    @param duration: timestamp from the system
    @return: formatted string with readable hours, minutes and seconds
    """
    seconds = int(duration % 60)
    minutes = int((duration / 60) % 60)
    hours = int((duration / 3600) % 24)
    output = "{:0>2}:{:0>2}:{:0>2}".format(hours, minutes, seconds)
    return output
bcd6413b32183688c8b2aac428c5f9bfd5d34b8e
15,570
def elimate_leading_whitespace(source, target=None):
    """
    Return the count of whitespace characters before the first `target`;
    if `source` does not match the pattern <whitespace>*<target>, return 0.
    """
    if not source:
        return 0
    i, length = 0, len(source)
    while i < length:
        if source[i] not in ' \t':
            if (target and source[i] == target) or target is None:
                return i
            return 0
        i += 1
    return 0
474bb0094bb8c7f39dd76aa85b72c26af321eb1a
15,572
def set_(data_type):
    """Create an alias for a SetType that contains this data type"""
    return frozenset([data_type])
3ffbe4e111506c5897793cfe423cbbe55137de53
15,573
import subprocess

def get_tag():
    """Get the git tag currently checked out."""
    p = subprocess.Popen(["git", "describe", "--tags"], stdout=subprocess.PIPE)
    tag = p.communicate()[0].decode("utf-8").strip()
    return tag
b9acdc3fe2252c13d885d83ffa0d81ba7de3235c
15,576
from bs4 import BeautifulSoup

def tag(tagname, attrs=None, text=None, dtrs=None):
    """Return a soup Tag element."""
    attrs = {} if attrs is None else attrs
    dtrs = [] if dtrs is None else dtrs
    newtag = BeautifulSoup('', features='lxml').new_tag(tagname, attrs=attrs)
    if text is not None:
        newtag.append(text)
    for dtr in dtrs:
        newtag.append(dtr)
    return newtag
911ba6fe1d26b0acfd9ca0eec5f2bfb0369e6e2e
15,577
import math

def marginal_parameter_likelihood(p: float, lambda_: float, x: int, tx: float, T: float) -> float:
    """Computes the marginal likelihood of the parameters lambda and p, given the transaction history.

    See http://brucehardie.com/papers/018/fader_et_al_mksc_05.pdf equation (3).

    Args:
        p (float): The churn parameter ``p``.
        lambda_ (float): The time interval parameter ``lambda``.
        x (int): The number of events observed.
        tx (float): The time of the last transaction observed.
        T (float): The current time (``asof_time``).

    Returns:
        float: The likelihood
    """
    z1 = (1 - p) ** x * lambda_ ** x * math.exp(-lambda_ * T)
    delta = int(x > 0)
    z2 = delta * p * (1 - p) ** (x - 1) * lambda_ ** x * math.exp(-lambda_ * tx)
    return z1 + z2
a90140259da58247b13f4ce165f035076c9816be
15,578
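A hedged usage sketch for marginal_parameter_likelihood: evaluating equation (3) for a customer with x=2 purchases, the last at tx=30.0, observed through T=52.0. The parameter values are illustrative only, not fitted estimates:

# Illustrative parameters; fitting would maximize this over all customers.
L = marginal_parameter_likelihood(p=0.1, lambda_=0.05, x=2, tx=30.0, T=52.0)
print(L)  # a small positive likelihood value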
import os

def app_url(content):
    """Substitute $APP_URL in `content` with the APPLICATION_URL environment variable."""
    url = os.environ['APPLICATION_URL']
    return content.replace('$APP_URL', url)
ae13ae104915f1038e66b05fd290984574bfb5f5
15,580
def task_id_ranges_format(value):
    """Parse a number range in 'start-end' format."""
    try:
        start, end = [int(i) for i in value.split('-')]
    except ValueError:
        message = ("Incorrectly formatted task ID range. "
                   "Argument values should be numbers in the format 'start-end'")
        raise ValueError(message)
    return {'start': start, 'end': end}
9081765efc6b6240a243798b635573d6a44be470
15,581
import math

def __rotate(origin: tuple, point: tuple, angle: float):
    """
    Rotates a point counterclockwise by a given angle around a given origin.

    :param origin: Landmark in the (X, Y) format of the origin from which to count angle of rotation
    :param point: Landmark in the (X, Y) format to be rotated
    :param angle: Angle by which the point shall be rotated, in radians
    :return: New landmarks (coordinates)
    """
    ox, oy = origin
    px, py = point
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    return qx, qy
af8acd38d07042c1ab8d9ae3fe05afb7a1fea623
15,582
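A small check of __rotate (my own example): rotating (1, 0) by 90 degrees counterclockwise about the origin should land on (0, 1), up to floating-point noise:

qx, qy = __rotate((0.0, 0.0), (1.0, 0.0), math.pi / 2)
print(round(qx, 9), round(qy, 9))  # 0.0 1.0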
def precision_ranges(result2rank, total_terms):
    """Computes precision at standard cutoff ranks: [5, 10, 15, 20, 30, 100, 200, 500, 1000]

    Args:
        result2rank: A dict of source to ranks of good translation candidates.
        total_terms: The expected term count.
    Returns:
        A dict containing a precision value for each cutoff rank
    """
    map_of_prec = dict()
    for cutoff in [5, 10, 15, 20, 30, 100, 200, 500, 1000]:
        map_of_prec[cutoff] = sum(
            1.0 for ranks in result2rank.values()
            if len([r for r in ranks if r <= cutoff]) > 0) / total_terms
    return map_of_prec
7e1c60030933530c1d1b1bd09387270174ae2aad
15,583
def count_index(sequence):
    """The index of `sequence` in a kMer profile."""
    nucleotide_to_binary = {
        'A': 0x00, 'a': 0x00,
        'C': 0x01, 'c': 0x01,
        'G': 0x02, 'g': 0x02,
        'T': 0x03, 't': 0x03
    }
    binary = 0x00
    for b in sequence:
        binary = (binary << 2) | nucleotide_to_binary[b]
    return binary
30821109c85aa36cf2cc7a6e2fbfe2145ecfc58f
15,584
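A quick illustration of the 2-bit encoding in count_index (my own example): each base contributes two bits, so 'ACGT' maps to 0b00011011:

# (((0 << 2 | 1) << 2 | 2) << 2 | 3) == 0b00011011 == 27
print(count_index('ACGT'))  # 27
print(count_index('aa'))    # 0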
def newends_OOM(adapters, ends):
    """Only works for small datasets like the examples"""
    while not all(val == adapters[-1] for val in ends):
        result = []
        for val in ends:
            if val == adapters[-1]:
                result.append(val)
            else:
                for n in range(1, 4):
                    if val + n in adapters:
                        result.append(val + n)
        print(len(result))
        ends = result
    return len(ends)
410a35a5107ba375fec4c9e247aa02ef34c2013f
15,586
import re

def extract_cursor(html) -> str:
    """
    Extract the token for the next page.

    :param html:
    :return:
    """
    cursor = re.findall(r'cursor=(\d+)', html)
    if len(cursor) > 0:
        return cursor[0]
    else:
        return ""
ec610ac123d0527c17eac56c83fa792d1f8bc1ff
15,589
import socket

def get_local_network(private=False, subnet=100):
    """
    Returns the IP address of the local network.

    Defaults to the local area network. If `private`, defaults to subnet
    '100' for the ROACH network.
    """
    if private:
        IP = socket.gethostbyname("192.168." + str(subnet) + ".1")
    else:
        IP = socket.gethostbyname(socket.gethostname())
    return '.'.join(IP.split('.')[:-1])
c007005bcf2e377b0c1789b5eeb43fd43c4e46e4
15,590
import random

def newid():
    """Generate a new random object ID."""
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
    return ''.join(random.choice(alphabet) for _ in range(16))
5d59b77e33712948ac30e2bbe5514dea992ab606
15,591
import os
import sys

def readScript(args):
    """
    Either args.file contains the provided file name,
    or the script was passed through a pipe.
    """
    script = None
    # Check the input type: is_pipe is True if the input is piped via stdin.
    is_pipe = not os.isatty(sys.stdin.fileno())
    if is_pipe:
        script = ""
        for line in sys.stdin:
            script = script + line
        args.file = True
        # Reopen stdin for further input. Note that fd 1 is stdout; a usable
        # interactive stdin would normally come from fd 0 or '/dev/tty'.
        sys.stdin.close()
        sys.stdin = os.fdopen(1)
    elif bool(args.file):
        with open(args.file, "r") as file_handler:
            script = file_handler.read()
    return script
ff04538d900fc2170cb407ca9fdd4dc0b117f4d0
15,592
def make_text_objs(text, font, color):
    """
    Function creates a text.

    text  -> string; content of text
    font  -> Font object; face of font
    color -> tuple of color (red, green, blue); colour of text

    Returns the surface object and rectangle object.
    """
    surf = font.render(text, True, color)
    return surf, surf.get_rect()
d5c0a41982f6d6979063dcb24aafc186ff132f1c
15,594
def get_string(element):
    """Helper for safely pulling string from XML"""
    return None if element is None else element.string
ff2dbec31f9c3c91c8cf6ca08a36313839265bbd
15,595
def dt_to_us_from_epoch(dt):
    """Convert datetime.datetime object to microseconds since the epoch.

    :param dt: datetime.datetime object.
    :returns: microseconds since the epoch as a string.
    """
    return '{:.0f}'.format(dt.timestamp() * 1e6)
60d4befd19666ad4799bf45c00a083b3443e9c82
15,596
def form_errors(form):
    """Displays errors on a form using the form_errors template"""
    return {"form": form}
762f8bd40d689e9846f970c47ef2d154c6503ace
15,600
def theoretical_yield(actual_yield, mole_ratio_top, mole_ratio_bottom):
    """Finds the theoretical yield.

    actual_yield:
        The yield given in the equation; if it is in grams, convert it to moles before using it.
    mole_ratio_top:
        From the balanced equation given, the number of moles of the wanted element.
    mole_ratio_bottom:
        From the balanced equation given, the number of moles of the given element.
    """
    yielded = actual_yield * (mole_ratio_top / mole_ratio_bottom)
    return yielded
a601732b073ad9c7adff63fb52739b819425fe7e
15,601
import unittest

def tests():
    """Used by test_suite below."""
    return unittest.TestLoader().discover(
        "vaeseq/", "*_test.py", top_level_dir=".")
fb3680800ade9e55f596426eca7455d62f8903d2
15,602
import pickle

def load(to_file=""):
    """Load a pickled object from the file at `to_file`."""
    with open(to_file, mode="rb") as f:
        dd = pickle.load(f)
    return dd
b4351183e94b2ce2f40510e1af768f6e0751a3c0
15,603
from bs4 import BeautifulSoup

def get_issue_metadata(issue_soup: BeautifulSoup, name: str) -> str:
    """Return the value of the key."""
    if len(issue_soup.find_all("dd", id=name)) > 0:
        if name != "issue_indicia_publisher" and name != "issue_brand":
            return issue_soup.find_all("dd", id=name)[0].contents[0].strip()
        else:
            try:
                return issue_soup.find_all("dd", id=name)[0].find("a").contents[0]
            except (AttributeError, IndexError):
                return ""
    else:
        return ""
2643d36f9d79389048bc9ef6a3bd5de9c900d88f
15,604
import uuid

def high_low_2_uuid(uuid_high, uuid_low):
    """Combine high and low bits of a split UUID.

    :param uuid_high: The high 64 bits of the UUID.
    :type uuid_high: int
    :param uuid_low: The low 64 bits of the UUID.
    :type uuid_low: int
    :return: The UUID.
    :rtype: :py:class:`uuid.UUID`
    """
    return uuid.UUID(int=(uuid_high << 64) + uuid_low)
b0c7c53bc4b61085574bcda1d5a8c616f0db8c92
15,605
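A round-trip check for high_low_2_uuid; the splitting step is my own illustration:

import uuid

original = uuid.uuid4()
high = original.int >> 64              # top 64 bits
low = original.int & ((1 << 64) - 1)   # bottom 64 bits
assert high_low_2_uuid(high, low) == original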
def _make_pr(source_repo, source_branch, base_ref, base_url=''):
    """Create a PR JSON object."""
    return {
        'head': {
            'repo': {
                'full_name': source_repo,
            },
            'ref': source_branch,
        },
        'base': {
            'ref': base_ref,
            'repo': {
                'clone_url': base_url,
            },
        },
    }
d32e9748608bea7f491db39a750f64cea463b50b
15,607
def _isFloat(argstr):
    """Returns True if and only if the given string represents a float."""
    try:
        float(argstr)
        return True
    except ValueError:
        return False
414e802de9557b531e881a5c8430a9a2cb295339
15,609
import io
import re
import struct

def get_size(data: bytes):
    """
    Returns size of given image fragment, if possible.

    Based on image_size script by Paulo Scardine:
    https://github.com/scardine/image_size
    """
    size = len(data)
    # GIFs
    if size >= 10 and data[:6] in (b'GIF87a', b'GIF89a'):
        w, h = struct.unpack("<HH", data[6:10])
        return int(w), int(h)
    # PNGs
    if size >= 24 and data.startswith(b'\211PNG\r\n\032\n') and data[12:16] == b'IHDR':
        w, h = struct.unpack(">LL", data[16:24])
        return int(w), int(h)
    # older PNGs
    if size >= 16 and data.startswith(b'\211PNG\r\n\032\n'):
        w, h = struct.unpack(">LL", data[8:16])
        return int(w), int(h)
    # WebP
    if size >= 30 and data[:4] == b'RIFF' and data[8:12] == b'WEBP':
        webp_type = data[12:16]
        if webp_type == b'VP8 ':  # Lossy WebP (old)
            w, h = struct.unpack("<HH", data[26:30])
        elif webp_type == b'VP8L':  # Lossless WebP
            bits = struct.unpack("<I", data[21:25])[0]
            w = int(bits & 0x3FFF) + 1
            h = int((bits >> 14) & 0x3FFF) + 1
        elif webp_type == b'VP8X':  # Extended WebP
            w = int((data[26] << 16) | (data[25] << 8) | data[24]) + 1
            h = int((data[29] << 16) | (data[28] << 8) | data[27]) + 1
        else:
            w = 0
            h = 0
        return w, h
    # SVG
    if b'<svg' in data:
        start = data.index(b'<svg')
        end = data.index(b'>', start)
        svg = str(data[start:end + 1], 'utf8')
        w = re.search(r'width=["\'](\d+)', svg)
        h = re.search(r'height=["\'](\d+)', svg)
        return int(w.group(1) if w else 0), int(h.group(1) if h else 0)
    # JPEG
    if size >= 2 and data.startswith(b'\377\330'):
        with io.BytesIO(data) as inp:
            inp.seek(0)
            inp.read(2)
            b = inp.read(1)
            while b and ord(b) != 0xDA:
                while ord(b) != 0xFF:
                    b = inp.read(1)
                while ord(b) == 0xFF:
                    b = inp.read(1)
                if 0xC0 <= ord(b) <= 0xC3:  # SOF marker: frame header holds dimensions
                    inp.read(3)
                    h, w = struct.unpack(">HH", inp.read(4))
                    return int(w), int(h)
                # Skip over this segment using its declared length.
                inp.read(int(struct.unpack(">H", inp.read(2))[0]) - 2)
                b = inp.read(1)
    return 0, 0
1b45563b0f59f5670638d554821406389b5333a6
15,612
def category_grouping(data):
    """
    Each of the features "TrafficType", "OperatingSystems", and "Browser"
    contains categorical values with less than 1% (123) of the overall
    datapoints. Since these "categorical outliers" could potentially skew a
    clustering algorithm, we combine each value with ten or fewer datapoints
    into a single "Other" value.

    Parameters:
        data: The dataset in question.
    Returns:
        data: The transformed dataset.
    """
    data['TrafficType'] = data['TrafficType'].apply(
        lambda x: 'Other' if x in ['7', '9', '12', '14', '15', '16', '17', '18', '19'] else x)
    data['OperatingSystems'] = data['OperatingSystems'].apply(
        lambda x: 'Other' if x in ['4', '5', '6', '7', '8'] else x)
    data['Browser'] = data['Browser'].apply(
        lambda x: 'Other' if x in ['3', '7', '9', '11', '12', '13'] else x)
    data['VisitorType'] = data['VisitorType'].apply(
        lambda x: x if x == 'Returning_Visitor' else 'New_or_Other')
    return data
25a461688c13b8e583cc2748c252b4035c789f48
15,613
def srgb_to_linear(c):
    """Convert SRGB value of a color channel to linear value."""
    assert 0 <= c <= 1
    if c <= 0.03928:
        return c / 12.92
    else:
        return ((c + 0.055) / 1.055) ** 2.4
805960e67b40923608d51cab2a1915aae3d1e3ba
15,614
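A quick check of srgb_to_linear at the endpoints and midpoint (my own example); the transfer function fixes 0 and 1 and darkens mid-range values:

print(srgb_to_linear(0.0))  # 0.0
print(srgb_to_linear(1.0))  # 1.0
print(srgb_to_linear(0.5))  # approximately 0.214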
def _df_to_html(df):
    """Converts DataFrame to HTML table with classes for formatting.

    Parameters
    ----------
    df : pandas.DataFrame

    Returns
    -------
    str
        HTML table for display.
    """
    classes = ['table', 'table-hover']
    html_raw = '<div id="config_table">{src}</div>'
    src = df.to_html(index=False, classes=classes, justify='center', escape=False)
    html = html_raw.format(src=src)
    return html
e1310947ff84178e0da32a8be4cda35d9ea16326
15,615
def choose_theory(proc_keyword_dct, spc_mod_dct_i):
    """Choose between theories set in models.dat and in run.dat."""
    if proc_keyword_dct['geolvl']:
        # thy_info = tinfo.from_dct(thy_dct.get(
        #     proc_keyword_dct['geolvl']))
        spc_mod_dct_i = None
    # else:
    #     thy_info = spc_mod_dct_i['vib']['geolvl'][1][1]
    # return thy_info, spc_mod_dct_i
    return spc_mod_dct_i
e548775d4376a6cd4d364c4820db4999dcc79062
15,618
import collections
import csv
from typing import Dict

def from_csv(path: str) -> Dict[str, str]:
    """Load a CSV into a dictionary."""
    result = collections.OrderedDict({})  # type: Dict[str, str]
    with open(path, "r") as csv_file:
        reader = csv.reader(csv_file)
        for row in reader:
            assert len(row) == 2
            key, value = row
            result[key] = value
    return result
270cd3b3afe9927fffc737b57b8f49a9e6e051e4
15,619
def get_NICMOS3_G141_WCS():
    """
    Defines parameters for the NICMOS/G141 slitless mode.

    @return: slitless mode parameters
    @rtype: dictionary
    """
    wcs_keys = {}

    # WCS from NICMOS G141 image n6le01upq
    # / World Coordinate System and Related Parameters
    wcs_keys['grism'] = [
        ['WCSAXES', 2, 'number of World Coordinate System axes'],
        ['CRPIX1', 128.0, 'x-coordinate of reference pixel'],
        ['CRPIX2', 128.0, 'y-coordinate of reference pixel'],
        ['CRVAL1', 1.291877276104E+02, 'first axis value at reference pixel'],
        ['CRVAL2', 9.136572816451E-01, 'second axis value at reference pixel'],
        ['CTYPE1', 'RA---TAN', 'the coordinate type for the first axis'],
        ['CTYPE2', 'DEC--TAN', 'the coordinate type for the second axis'],
        ['CD1_1', -3.01591E-05, 'partial of first axis coordinate w.r.t. x'],
        ['CD1_2', 4.7564E-05, 'partial of first axis coordinate w.r.t. y'],
        ['CD2_1', 4.76877E-05, 'partial of second axis coordinate w.r.t. x'],
        ['CD2_2', 3.00809E-05, 'partial of second axis coordinate w.r.t. y'],
    ]

    wcs_keys['drizzle'] = [
        ['DRZCNUM', 10, 'Number of coefficients per coordinate'],
        ['DRZSCALE', 0.2, 'Scale for drizzling'],
        ['DRZ2X01', 0.0, 'Drizzle coefficient 01 in X'],
        ['DRZ2X02', 1.0018288, 'Drizzle coefficient 02 in X'],
        ['DRZ2X03', 0.0, 'Drizzle coefficient 03 in X'],
        ['DRZ2X04', 8.034670000000001E-06, 'Drizzle coefficient 04 in X'],
        ['DRZ2X05', 1.32241E-05, 'Drizzle coefficient 05 in X'],
        ['DRZ2X06', 5.83064E-06, 'Drizzle coefficient 06 in X'],
        ['DRZ2X07', 0.0, 'Drizzle coefficient 07 in X'],
        ['DRZ2X08', 0.0, 'Drizzle coefficient 08 in X'],
        ['DRZ2X09', 0.0, 'Drizzle coefficient 09 in X'],
        ['DRZ2X10', 0.0, 'Drizzle coefficient 10 in X'],
        ['DRZ2Y01', 0.0, 'Drizzle coefficient 01 in Y'],
        ['DRZ2Y02', -0.000893359, 'Drizzle coefficient 02 in Y'],
        ['DRZ2Y03', 0.99816635, 'Drizzle coefficient 03 in Y'],
        ['DRZ2Y04', -1.80668E-05, 'Drizzle coefficient 04 in Y'],
        ['DRZ2Y05', 5.989E-07, 'Drizzle coefficient 05 in Y'],
        ['DRZ2Y06', -1.15787E-05, 'Drizzle coefficient 06 in Y'],
        ['DRZ2Y07', 0.0, 'Drizzle coefficient 07 in Y'],
        ['DRZ2Y08', 0.0, 'Drizzle coefficient 08 in Y'],
        ['DRZ2Y09', 0.0, 'Drizzle coefficient 09 in Y'],
        ['DRZ2Y10', 0.0, 'Drizzle coefficient 10 in Y'],
    ]

    wcs_keys['direct'] = None
    wcs_keys['dimension'] = [256, 256]

    return wcs_keys
6f342930d4daa4c76de5ce85cbc44a976d0e6bd0
15,620
def ask_for_matrix(initial_message, second_message):
    """Obtain an input in the way supported by the task"""
    def take_input(message, type_of_value):
        return (type_of_value(x) for x in input(message).split())

    n, _ = take_input(initial_message, int)
    values = [list(take_input(second_message if i == 0 else '', float))
              for i in range(n)]
    return values
873a9b767f11eaaf59165fbc4d1a3dcb53adbd29
15,621
def ign2arr(ign_poses, robot_name):
    """Convert Ignition state poses into array"""
    arr = []
    prev_seconds = None
    for timestamp, data in ign_poses:
        if robot_name in data:
            pos = data[robot_name]
            # store only one position per simulation second
            if prev_seconds != timestamp.seconds:
                # ignore the first sample, which is probably not on a time
                # boundary (we cannot prove it)
                if prev_seconds is not None:
                    arr.append((int(round(timestamp.total_seconds())),
                                pos.x, pos.y, pos.z))
                prev_seconds = timestamp.seconds
    return arr
591bcd0edf981b1ff36cb725317780b5cdbf8faa
15,622
def separate_coords(df):
    """Separate the coordinates into a list of 'lats' and 'longs'."""
    return (df['coordinates'].apply(lambda x: x['latitude']),
            df['coordinates'].apply(lambda x: x['longitude']))
5141431e5d1d9a2a60e31867c07b50a645d48165
15,623
import math

def calc_gps_distance(lat1, long1, lat2, long2):
    """
    All calculations need to be done in radians, instead of degrees. Since
    most GPS coordinates tend to use degrees, we convert to radians first,
    and then use the Haversine formula. The Haversine formula gives the
    shortest great-circle distance between any two points, i.e.
    as-the-crow-flies distance using a reasonably focussed crow.

    WARNING: The calculation is done in kilometres. But, at the street
    level, kilometres is not useful. So, we convert to metres and return!

    >>> calc_gps_distance(53.34376885732333, -6.240988668839767, 53.34376349, \
    -6.24099402)
    0.6945396560484981
    >>> calc_gps_distance(53.34376885732333, -6.240988668839767, 0, 0)
    5959609.740337647
    >>> calc_gps_distance(90, 0, 0, 0)
    10007543.398
    """
    radius_of_earth = 6371  # in kilometres
    delta_latitude = math.radians(lat2 - lat1)
    delta_longitude = math.radians(long2 - long1)
    rad_lat1 = math.radians(lat1)
    rad_lat2 = math.radians(lat2)
    a = math.sin(delta_latitude / 2) * math.sin(delta_latitude / 2) + \
        math.cos(rad_lat1) * math.cos(rad_lat2) * \
        math.sin(delta_longitude / 2) * math.sin(delta_longitude / 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    distance = radius_of_earth * c
    distance_in_metres = distance * 1000
    return distance_in_metres
4c38ad7a7d468b137834d87959d1c45bd00df9fb
15,624
def get_frame_description(header, i):
    """
    Return the description of plane `i` from the header, if present.

    :param header: header mapping to look up
    :param i: plane index
    :return: the description string, or None
    """
    planeX = "PLANE" + str(i)
    # Return the description
    if planeX in header:
        return header[planeX]
    else:
        return None
33d4b57cd936e1d0eb4b3557b8b8496b5e67db08
15,625
def render_preamble():
    """Renders HTML preamble.

    Include this in the HTML of each cell to make sure that
    #NOTEBOOK_FILES# in links is correctly substituted.
    """
    return ""
    # return """<script>document.radiopadre.fixup_hrefs()</script>"""
4aa366901a6ab67b30a29beb7a25844b7122a306
15,626
import sys

def get_contacts(filename):
    """
    Return two lists, names and emails, containing the names and email
    addresses read from filename.
    """
    try:
        names = []
        emails = []
        with open(filename, mode='r', encoding='utf-8') as contacts_file:
            for a_contact in contacts_file:
                names.append(a_contact.split()[0])
                emails.append(a_contact.split()[1])
        return names, emails
    except IOError:
        # error: sys.exit(1)
        sys.exit("Error: cannot access contacts.txt.")
df04f03ed0fa79a5b593a187bb51b3ae77f2d0c6
15,627
def fibonacci(n):
    """returns a list of the first n fibonacci values"""
    n0 = 0
    n1 = 1
    fib_list = []
    if type(n) != type(0) or n <= 0:
        raise Exception("'%s' is not a positive int" % str(n))
    for i in range(n):
        fib_list.append(n1)
        (n0, n1) = (n1, n0 + n1)
    return fib_list
eb680c89d9d66647b24b5d27dbb34e1a8bb4352c
15,629
def sanitize_mobile_number(number):
    """Add country code and strip leading zeroes from the phone number."""
    if str(number).startswith("0"):
        return "+254" + str(number).lstrip("0")
    elif str(number).startswith("254"):
        # Replace the country-code prefix directly; lstrip("254") would also
        # strip any following 2/5/4 digits.
        return "+254" + str(number)[3:]
    else:
        return number
8f08563f015d77722f5dec0d07686956b4f46019
15,630
def total_seconds(timedelta):
    """
    Some versions of python don't have the timedelta.total_seconds() method.
    """
    if timedelta is None:
        return None
    return (timedelta.days * 86400) + timedelta.seconds
800c70a2855034563ab9baf1ca12032677889f5b
15,631
import os

def make_dir(directory_path, new_folder_name):
    """Creates an expected directory if it does not exist"""
    directory_path = os.path.join(directory_path, new_folder_name)
    if not os.path.exists(directory_path):
        os.makedirs(directory_path)
    return directory_path
bae473087c00e44cbaa1b65362ef17efcece7973
15,632
def find_minimal_helices(nturn_starts):
    """
    Find helices on the basis of the n-turn beginnings lists.

    Minimal helices are defined as consecutive n-turns.
    """
    min_helices = {
        "3-min_helices": [],
        "4-min_helices": [],
        "5-min_helices": []
    }
    for n in [3, 4, 5]:
        name = str(n) + "-turn"
        list_nturns = nturn_starts[name]
        for i in range(len(list_nturns) - 1):
            if list_nturns[i + 1] == list_nturns[i] + 1:
                helix_name = str(n) + "-min_helices"
                min_helices[helix_name].append(list_nturns[i])
    return min_helices
2ceaa35efdd09de8b943c96e1caa70d86fcc8832
15,636
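A small worked example for find_minimal_helices (my own data): 3-turns starting at residues 2 and 3 are consecutive, so residue 2 begins a minimal 3-helix:

nturn_starts = {"3-turn": [2, 3, 7], "4-turn": [], "5-turn": []}
print(find_minimal_helices(nturn_starts))
# {'3-min_helices': [2], '4-min_helices': [], '5-min_helices': []}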
def get_max_seq_len(indexed_pairs):
    """Return max sequence length computed from index pairs.

    Indexes already include SOS/EOS/SEP.
    """
    max_seq_len = 0
    # max_seq = ''
    for indexed_pair in indexed_pairs:
        max_seq_len = max(max_seq_len, len(indexed_pair[0]))
        max_seq_len = max(max_seq_len, len(indexed_pair[1]))
        # if len(indexed_pair[0]) > max_seq_len:
        #     max_seq_len = len(indexed_pair[0])
        #     max_seq = indexed_pair[0]
        # if len(indexed_pair[1]) > max_seq_len:
        #     max_seq_len = len(indexed_pair[1])
        #     max_seq = indexed_pair[1]
    # print(max_seq_len, max_seq)
    return max_seq_len
1830334bdea69cad996caf78e9225f70eda48dd0
15,637
import inspect

def class_vars(obj):
    """Code from https://github.com/devsisters/DQN-tensorflow/blob/master/dqn/base.py"""
    # Note: callable(k) tests the attribute *name* (a str), so it is always
    # False here; kept as in the referenced source.
    return {k: v for k, v in inspect.getmembers(obj)
            if not k.startswith('__') and not callable(k)}
94112e3b5746e9c16294701a0446febb9a39da13
15,638
def get_authorization_key(request):
    """Get the Authorization Key from the request"""
    auth = request.headers.get('Authorization')
    if auth:
        auth = auth.split()
        if len(auth) == 2:
            if auth[0].lower() == 'key':
                return auth[1]
    return None
5cba14bdebb4b203c773c1e0832373114e554c78
15,639
def reactor_efficiency(voltage, current, theoretical_max_power):
    """Assess reactor efficiency zone.

    :param voltage: voltage value (integer or float)
    :param current: current value (integer or float)
    :param theoretical_max_power: power that corresponds to a 100% efficiency (integer or float)
    :return: str one of 'green', 'orange', 'red', or 'black'

    Efficiency can be grouped into 4 bands:
    1. green  -> efficiency of 80% or more,
    2. orange -> efficiency of less than 80% but at least 60%,
    3. red    -> efficiency below 60%, but still 30% or more,
    4. black  -> less than 30% efficient.

    The percentage value is calculated as
    (generated power / theoretical max power) * 100
    where generated power = voltage * current
    """
    generated_power = voltage * current
    efficiency = (generated_power / theoretical_max_power) * 100
    if efficiency >= 80:
        return 'green'
    if 60 <= efficiency < 80:
        return 'orange'
    if 30 <= efficiency < 60:
        return 'red'
    return 'black'
b58fe806da2bcfdabc12bd3f5b36a0b296ce7142
15,640
def _bubbled_up_groups_from_units(group_access_from_units):
    """
    Return {user_partition_id: [group_ids]} to bubble up from Units to Sequence.

    This is to handle a special case: If *all* of the Units in a sequence
    have the exact same group for a given user partition, bubble that value
    up to the Sequence as a whole.

    For example, say that every Unit in a Sequence has a group_access that
    looks like:
        { ENROLLMENT: [MASTERS] }
    (where both constants are ints). In this case, an Audit user has nothing
    to see in the Sequence at all, and it's not useful to give them an empty
    shell. So we'll act as if the Sequence as a whole had that group setting.

    Note that there is currently no way to set the group_access setting at
    the sequence level in Studio, so course teams can only manipulate it for
    individual Units.
    """
    # If there are no Units, there's nothing to bubble up.
    if not group_access_from_units:
        return {}

    def _normalize_group_access_dict(group_access):
        return {
            user_partition_id: sorted(group_ids)  # sorted for easier comparison
            for user_partition_id, group_ids in group_access.items()
            if group_ids  # Ignore empty groups
        }

    normalized_group_access_dicts = [
        _normalize_group_access_dict(group_access)
        for group_access in group_access_from_units
    ]
    first_unit_group_access = normalized_group_access_dicts[0]
    rest_of_seq_group_access_list = normalized_group_access_dicts[1:]

    # If there's only a single Unit, bubble up its group_access.
    if not rest_of_seq_group_access_list:
        return first_unit_group_access

    # Otherwise, go through the user partitions and groups in our first unit
    # and compare them to all the other group_access dicts from the units in
    # the rest of the sequence. Only keep the ones that match exactly and do
    # not have empty groups.
    common_group_access = {
        user_partition_id: group_ids
        for user_partition_id, group_ids in first_unit_group_access.items()
        if group_ids and all(
            group_ids == group_access.get(user_partition_id)
            for group_access in rest_of_seq_group_access_list
        )
    }
    return common_group_access
a4e3c5ee563d65bc3b8c787512efe356abfdfbd6
15,641
def is_list_with_max_len(value, length):
    """
    Is the list of given length or less?

    :param value: The value being checked
    :type value: Any
    :param length: The length being checked
    :type length: Nat
    :return: True if the list is of the length or less, False otherwise
    :rtype: bool
    """
    return isinstance(value, list) and len(value) <= length
fd5016617d264b79ee4e4a0dae7782776d997fc5
15,642
def exists_transcript_id(db, transcript_id):
    """
    Search bar.

    MongoDB:
        db.transcripts.find({'transcript_id': 'ENST00000450546'}, fields={'_id': False})
    SciDB:
        aggregate(filter(transcript_index, transcript_id = 'ENST00000450546'), count(*));
    SciDBnew:
        res = bb.get_transcript(transcript_id='RP11-150', exact_match=True)  # needs to be updated
        bool(res.empty)  # True or False
    """
    res = db.get_transcripts(str(db.namespace), transcript_eid=str(transcript_id))
    if res.empty:
        return None
    else:
        return res
3a9588040e6e3fb7c2c0a78c0038aa873c1670b6
15,643
def split_data_to_chunks(data: list, max_chunk_size: int, overlapping_size: int):
    """
    Because GP can take very long to finish, we split data into smaller
    chunks and train/predict these chunks separately.

    :param data:
    :param max_chunk_size:
    :param overlapping_size:
    :return: list of split data
    """
    chunks = list()
    n = len(data)
    i = 0
    while True:
        next_i = min(i + max_chunk_size, n)
        chunks.append(data[i:next_i])
        if n <= next_i:
            break
        i = next_i - overlapping_size
    return chunks
370e9fe9a17c58d7dca202bb4c822f9b8b662fae
15,644
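A quick illustration of the overlap behavior in split_data_to_chunks (my own example): with chunk size 4 and overlap 1, the last element of each chunk reappears at the start of the next:

print(split_data_to_chunks(list(range(10)), max_chunk_size=4, overlapping_size=1))
# [[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]]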
def read_moves(file_name):
    """Read moves from file.

    Move features are pipe-separated. Returns moves and keys as lists
    (every move is a dict at this point).
    """
    moves = []
    with open(file_name, encoding='utf-8') as f:
        first_line = f.readline()
        first_line = first_line[1:]  # remove '#' at the beginning
        keys = [w.strip() for w in first_line.split('|')]
        for line in f:
            if line.startswith('#') or not line.strip():
                continue
            m = {}
            vals = [v.strip() for v in line.split('|')]
            for k, v in zip(keys, vals):
                m[k] = eval(v)
            moves.append(m)
    return moves, keys
a87b73feaf7d49b3ece716fe5732f5647e5dcd25
15,645
def number_of_constituents(bc_class):
    """
    Calculates the number of constituents.

    Args:
        bc_class: The ADH simulation class that holds all simulation information

    Returns:
        The number of transport constituents
    """
    num_trn = 0
    cn = bc_class.constituent_properties
    if cn.salinity:
        num_trn += 1
    if cn.temperature:
        num_trn += 1
    if cn.vorticity:
        num_trn += 1
    if not cn.general_constituents.empty:
        num_trn += len(cn.general_constituents.index)
    if not cn.sand.empty:
        num_trn += len(cn.sand.index)
    if not cn.clay.empty:
        num_trn += len(cn.clay.index)
    return num_trn
b290bc6ef6f4b02889dcc82d91120f44bff5f650
15,646
import os

def get_cols(datadir=".", infiles=None):
    """
    Given a directory where simulation data are stored and the name of the
    input files, return the name of each body's output columns.

    Parameters
    ----------
    datadir : str
        Name of directory where simulation results are kept
    infiles : list
        list containing input file names for each body.
        Compute using get_infiles function

    Returns
    -------
    data_cols : dict
        dictionary containing output variable names for each body like the
        following: data_cols["body0"] = ["Time", "Radius", ...]
    """
    if infiles is None:
        raise IOError("infiles is None! Must be list of input files.")

    # Dict to hold all columns for each body [like time, semi, ecc ...]
    data_cols = {}

    # Loop over files corresponding to each body
    for infile in infiles:
        with open(os.path.join(datadir, infile)) as f:
            lines = f.readlines()

        # Loop over all lines in the input file
        for ii in range(0, len(lines)):
            line = lines[ii]
            # Remove all kinds of whitespace from sides
            line = str(line).strip(' \t\n\r')

            # Is this the saOutputOrder line and it isn't commented out?
            if line.find("saOutputOrder") != -1 and line[0] != "#":
                cols = line.split()[1:]  # ignore saOutputOrder

                # Add all lines below it that have a "$",
                # the line continuation character
                while "$" in str(lines[ii]).strip(' \t\n\r'):
                    # Move to next line
                    ii = ii + 1
                    cols = cols + str(lines[ii]).strip(' \t\n\r').split()

                # Remove any - if there are any
                # Also ignore commented out (#) stuff
                good_cols = []
                for jj in range(0, len(cols)):
                    if "#" in cols[jj]:
                        # First time this goes, rest of stuff is ignored
                        break
                    # Get rid of - sign if it's there
                    cols[jj] = cols[jj].replace("-", "")
                    # Column name is good and processed, so add it
                    good_cols.append(cols[jj])

                # Get rid of $ sign if it's there
                if "$" in good_cols:
                    good_cols.remove("$")

                # Save the columns, break out of this infile!
                data_cols[infile] = good_cols
                break

    return data_cols
d79bd112a76e28adb7df994d4a4a0998bc9a41ce
15,647
def is_hydrophilic(atom):
    """
    Checks whether an atom belongs to a hydrophilic residue.

    :param atom: atom
    :return: True if the atom is hydrophilic, otherwise False
    """
    hydrophilic_atoms = ['H', 'N', 'S', 'O']
    hydrophilic_residues = ['GLU', 'ASP', 'ASN', 'QLN', 'HIS', 'GLN', 'SER']
    if atom.symbol in ['CD', 'CZ'] and atom.residue == 'ARG':
        return True
    if atom.symbol in ['CE2', 'CD1'] and atom.residue == 'TRP':
        return True
    if atom.symbol == 'CD' and atom.residue == 'PRO':
        return True
    if atom.symbol == 'CB' and atom.residue == 'MET':
        return True
    if atom.symbol in ['CD', 'CE'] and atom.residue == 'LYS':
        return True
    if atom.symbol == 'CZ' and atom.residue == 'TYR':
        return True
    if atom.symbol != 'CE' and atom.residue == 'MET':
        return True
    if atom.symbol == 'C' or atom.symbol == 'CA':
        return True
    if atom.residue in hydrophilic_residues:
        return True
    if atom.atomType in hydrophilic_atoms:
        return True
    return False
ac366f13934ce859eb9822855481fa7c2a49d21c
15,648
def after_folder_option(location):
    """location: folder full path"""
    return True
a2cdb5de2a6852964ecc7e71acbcd0ee2c6b92a8
15,649
def circle_distance_circle(circle, other):
    """
    Give the distance between two circles.

    The circles must have members 'center' and 'r', where the latter is the
    radius.
    """
    d = abs(circle.center - other.center) - circle.r - other.r
    if d < 0.0:
        d = 0.0
    return d
c76146d1ec9003be5345b14b12563dfb8bac7798
15,650
def class_to_json(obj):
    """
    Returns the dictionary description with simple data structure (list,
    dictionary, string, integer and boolean) for JSON serialization of an
    object.
    """
    return obj.__dict__
8bac6f68d8fc18b1a80800943bede4106f618e6c
15,651
def parse_txt(txtfile):
    """Text data format:

    original tweet 1 ||| support ||| true
    reply tweet 1 ||| deny
    reply tweet 2 ||| query
    <newline>
    original tweet 2 ||| support ||| false
    reply tweet 3 ||| deny
    reply tweet 4 ||| comment
    <newline>
    ...
    """
    raw = open(txtfile, 'r').read()

    def parse_orig(l):
        x, y, v = l.strip().split('|||')
        return v.strip(), [x.strip().split(), y.strip()]

    def parse_reply(l):
        x, y = l.strip().split('|||')
        return [x.strip().split(), y.strip()]

    def parse_thread(t):
        t = t.strip().split('\n')
        v, orig = parse_orig(t[0])
        replies = []
        for reply in t[1:]:
            replies.append(parse_reply(reply))
        return [[orig] + replies, v]

    if raw.strip() == '':
        return []
    records = [parse_thread(thread) for thread in raw.strip().split('\n\n')]
    return records
b1c96a759fd8929e58a43c2c2c2c68df2632ee13
15,652
def get_product_by_id(product_id):
    """
    Gets a product by its id.

    :return:
    """
    product = {"name": "test_product"}
    return product
471dafb41f4f77791f55b59c240c2ada4893fa03
15,653
import fcntl
import socket
import struct

def get_ip(iface=''):
    """
    The get_ip function retrieves the IP for the network interface BSDPY is
    running on.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sockfd = sock.fileno()
    SIOCGIFADDR = 0x8915
    # struct.pack requires bytes for the '16s'/'14s' fields on Python 3.
    ifreq = struct.pack('16sH14s', iface.encode(), socket.AF_INET, b'\x00' * 14)
    try:
        res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
    except OSError:
        return None
    ip = struct.unpack('16sH2x4s8x', res)[2]
    return socket.inet_ntoa(ip)
0a7c30460ab860c1c2ccd3de9371985508456399
15,655
def normalize_row(row, max_cols):
    """Padding to equalize cell string lengths"""
    r = ""
    for i, max_col in enumerate(max_cols):
        r += row[i] + (max_col - len(row[i]) + 1) * " "
    return r + "\n"
c20dbdcf0b786bff13e04c5b1d04c5b0ba46162d
15,657
def is_single(text):
    """Determine whether the text contains only a single item (no separators)."""
    indicators = [', ', ' y ', ' e ']
    return not any(ind in text for ind in indicators)
edfa05e953d3c5816d4294d29d4b1725a1675a3b
15,658
def safe_equals(left: object, right: object) -> bool:
    """Safely check whether two objects are equal."""
    try:
        return bool(left == right)
    except Exception:
        return False
0ba9bb81e6b5ef8580b4677c74e82a40522d5aeb
15,659
def get_minutes_remain(minutes: int) -> int:
    """
    Returns minutes remaining after converting to hours.

    :param minutes: Total minutes before converting to hours
    :return: minutes after converting to hours
    """
    return minutes % 60
abf025a83804a03d2c41b88eaac35606a5eddc4c
15,660
def fmripop_save_imgdata(args, out_img, params_dict, output_tag=''):
    """Saves the output 4D image."""
    # Output filename
    # NOTE: this line is not general enough, but it'll do for now
    output_filename, _ = args.niipath.split(".nii.gz")
    output_tag = '_confounds-removed' + output_tag + '.nii.gz'
    output_filename += output_tag

    # Save the clean data in a separate file
    out_img.to_filename(output_filename)
    params_dict['outputpath'] = output_filename
    return params_dict
2426c8a08740a0aa243091f5b62f60c1c0ab467e
15,661
def rb_join_arg(li=None, identif="default", pos=0):
    """Construct url argument with identifier and li as list."""
    if not li:
        return ""
    letter = "?" if pos == 0 else "&"
    return "{letter}{ident}={arg}".format(letter=letter, ident=identif,
                                          arg=','.join(li))
fefb80814ded2fd53ccf99c92a6ec9b538d85a28
15,663
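A usage sketch for rb_join_arg (my own example): pos=0 starts a query string, any other pos appends to one:

print(rb_join_arg(["5", "7"], "ids"))    # ?ids=5,7
print(rb_join_arg(["a"], "tag", pos=1))  # &tag=a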
def get_topics(bag):
    """
    Get an alphabetical list of all the unique topics in the bag.

    @return: sorted list of topics
    @rtype: list of str
    """
    return sorted(set(c.topic for c in bag._get_connections()))
863faf144cef064324bb0dff41a9ac70464837ee
15,664
import requests
from bs4 import BeautifulSoup

def get_product_urls(page_lim=10):
    """Get urls of products from product listed web page"""
    urls = []
    for i in range(page_lim):
        url = 'http://www.cosme.net/item/item_id/1064/products/page/{0}'.format(i)
        r = requests.get(url)
        html = r.text
        soup = BeautifulSoup(html)
        item_spans = soup.find_all('span', {'class': 'item'})
        for item_span in item_spans:
            a = item_span.a
            if a is None:
                continue
            urls.append(a['href'])
    return urls
2199c00a0e38280dd5bfdc84c583aaaf9897b316
15,666
from typing import Optional

def format_ipfs_cid(path: str) -> Optional[str]:
    """Format IPFS CID properly."""
    if path.startswith('Qm'):
        return path
    elif path.startswith('ipfs://'):
        return path.replace('ipfs://', '')
    return None
eca4a79bc2ba4151495831b51bbd50df68f73025
15,667
def openUnicode(file, mode='r', buffering=-1, encoding=None, errors=None,
                newline=None, closefd=True, opener=None):
    """
    Unicode auto-detection by BOM.

    :return: Returns open() with the correct encoding and with the missing
        first character, if it is BOM.
    """
    detect = False
    if "r" in mode:
        with open(file, "rb") as f:
            bom = f.read(4)
        detect = True
        if bom[0] == 0xEF and bom[1] == 0xBB and bom[2] == 0xBF:
            encoding = "utf-8"
        elif bom[0] == 0x00 and bom[1] == 0x00 and bom[2] == 0xFE and bom[3] == 0xFF:
            encoding = "utf-32be"
        elif bom[0] == 0xFE and bom[1] == 0xFF:
            encoding = "utf-16be"
        elif bom[0] == 0xFF and bom[1] == 0xFE:
            if bom[2] == 0x00 and bom[3] == 0x00:
                encoding = "utf-32le"
            else:
                encoding = "utf-16le"
        else:
            detect = False
    res = open(file=file, mode=mode, buffering=buffering, encoding=encoding,
               errors=errors, newline=newline, closefd=closefd, opener=opener)
    if detect:
        res.read(1)  # Skip the BOM character.
    return res
c7af92a9acde80e5f1a14a304cb135b075b71d7b
15,668
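A usage sketch for openUnicode (hypothetical file name): writing a UTF-8 BOM file and reading it back should yield the text without the BOM character:

with open('demo.txt', 'w', encoding='utf-8-sig') as f:
    f.write('héllo')
print(openUnicode('demo.txt').read())  # héllo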
def insertion_sort(integers):
    """Iterate over the list of integers. With each iteration, place the new
    element into its sorted position by shifting over elements to the left of
    the pointer until the correct location is found for the new element.

    Sorts a copy of the list. O(n^2) running time, O(1) space.
    """
    integers_clone = list(integers)

    for i in range(1, len(integers_clone)):
        j = i
        # Check j > 0 first so index j - 1 never wraps around to the end.
        while j > 0 and integers_clone[j] < integers_clone[j - 1]:
            integers_clone[j], integers_clone[j - 1] = \
                integers_clone[j - 1], integers_clone[j]
            j -= 1

    return integers_clone
ee7aa8920406f7c74870e346e486f29486894df1
15,670
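A quick demonstration of insertion_sort (my own example) showing the input list is left untouched:

data = [5, 2, 4, 6, 1, 3]
print(insertion_sort(data))  # [1, 2, 3, 4, 5, 6]
print(data)                  # [5, 2, 4, 6, 1, 3] (original unchanged)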
import curses

def init_screen(args):
    """Set up the ncurses screen and return the screen object."""
    screen = curses.initscr()
    curses.curs_set(0)
    curses.start_color()
    for pair in range(1, 8):
        curses.init_pair(pair, pair, args.background)
    screen.clear()
    return screen
e46a5a3bee5f498219fbbf2b8b4df50abffa07fe
15,671
def get_settings():
    """
    Returns some nice default settings for matplotlib to be used with
    `matplotlib.pyplot.rc_context`.
    """
    return {'axes.labelsize': 32,
            'xtick.major.size': 10,
            'xtick.major.width': 1.5,
            'xtick.labelsize': 24,
            'ytick.major.size': 10,
            'ytick.major.width': 1.5,
            'ytick.labelsize': 24,
            'legend.fontsize': 18,
            'lines.linewidth': 4,
            'lines.markersize': 10,
            'figure.figsize': (12, 8)}
7864c0bcb3b7ab427f6046b3358ee81f70646dd5
15,672
def patron_pid_minter(record_uuid, data):
    """Dummy patron minter."""
    return None
c4ab8008467d54326911634bb51335f3b86cc96e
15,673
import os

def get_output_number(dst):
    """Gives the last output folder number.

    Returns:
        int: Last output folder number
    """
    data = os.listdir(dst)
    if data:
        last_record = sorted(data)[-1]
        hyphen_index = last_record.rfind("-")
        return int(last_record[hyphen_index + 1:])
    return 0
292f42e0f73eb11b45d611eaa270282ba04de509
15,674
import sys

def start_logging(filename, params):
    """Logs the output of the execution in the specified file.

    :param filename: The name of the log file
    :type filename: str
    """
    f = open(f'{params["logs_path"]}/experiment-{filename}.txt', 'w')
    sys.stdout = f
    return f
5edb40d87fa1b1c121670abc5aaa5ebdca0a2f60
15,675
import os

def expand_path(path: str) -> str:
    """Expand variables and ~"""
    return os.path.expanduser(os.path.expandvars(path))
88242835c8fa37c9bbb877ebc6d48f94b1f675d3
15,676
from typing import List

def primary() -> List[str]:
    """Primary color scheme."""
    return ["#00A58D", "#008A8B", "#9FCD91", "#09505D", "#00587C"]
8527c8a649e554e077a57172dfb0d529fff4036a
15,677
import warnings

def pull_halo_output(h5file, clusterID, apertureID, dataset):
    """
    Function to extract a dataset from a Bahamas snapshot output.

    :param h5file: The h5py File object to extract the data from
    :param clusterID: The number of cluster in the order imposed by FoF
    :param apertureID: int(0-22) The index of the spherical aperture centred on the CoP
    :param dataset: The name of the dataset to pull
    :return: None if the cluster does not exist in the file, the dataset as
        np.ndarray if it exists
    """
    if f'halo_{clusterID:05d}' not in h5file:
        warnings.warn(f"[-] Cluster {clusterID} not found in snap output.")
        return None
    else:
        return h5file[f'halo_{clusterID:05d}/aperture{apertureID:02d}/{dataset}'][...]
d389af763c7dc7a3c6e54a1f61f4906c7fa4dc0e
15,679
def rt_delta(maxdiff, precision=5):
    # type: (float, int) -> float
    """Return the delta tolerance for the given retention time.

    Keyword Arguments:
        maxdiff -- maximum time difference between a feature edge and an
            adjacent frame to be considered part of the same feature
        precision -- number of decimal digits to use with floats (e.g. a
            precision of 2 forces a difference of 0.01 between any two
            consecutive float numbers) [default: 5]
    """
    return round(maxdiff, precision)
ad04d4f0405544d1cf137b7b3c8aa25ac670365f
15,680
def positive_negative_basic_reward_function(causal_prefetch_item):
    """
    Consistent positive reward based on something being used within 128
    steps; (1, -1) rewards.
    """
    if causal_prefetch_item is None:
        return -1
    return 1
beaefcaebe806329d17ec8fa584e2a0f102b978c
15,681
def create_template_dict(dbs):
    """Generate a Template which will be returned by Executor Classes."""
    return {db: {'keys': [], 'tables_not_found': []} for db in dbs}
01dbd3733ec77fef0323eea35bc67064b19093c9
15,682
def get_current_application(request):
    """Get current application."""
    try:
        app_name = request.resolver_match.namespace
        if not app_name:
            app_name = "home"
    except Exception:
        app_name = "home"
    return app_name
310004714da3129cafb2bc635837920a7457fbe7
15,683
import os

def check_block_directory():
    """
    Check whether the directory needed to store log files exists.

    Args:
        None
    Returns:
        True on success, False on failure
    """
    directory = str(os.getenv('DIRECTORY'))
    if os.path.isdir(directory):
        return True
    else:
        return False
563c42679a12c259bbf90294c02150c8eb6cf353
15,685
def build_json(image_content):
    """Builds the JSON request body for a Vision API face-detection call."""
    json_data = {
        'requests': [{
            'image': {
                'content': image_content
            },
            'features': [{
                'type': 'FACE_DETECTION',
                'maxResults': 1,
            }]
        }]
    }
    return json_data
bf6f45187d2e6b2c4a50ccf7ecfbeeb862f353ff
15,686
def missing_summary(df):
    """
    Takes in a dataframe and returns a summary of all missing values.

    Parameters
    ----------
    df : dataframe
        Dataframe to calculate the missing summary from.

    Returns
    -------
    df_miss : dataframe
        Missing values summary
    """
    # Copy for output
    df_out = df.copy()

    # Create a new summary dataframe for each column.
    df_miss = df_out.notnull().sum().reset_index()
    df_miss["Missing"] = df_out.isnull().sum().values
    df_miss["Percentage Missing"] = (
        (df_miss["Missing"] / df_out.shape[0]) * 100
    ).round(1)

    # Rename all the columns
    df_miss.columns = ["Column", "Not-Null", "Missing", "Perc Missing (%)"]

    return df_miss
4c6bb35e9d01827667d7b5ecbbc0b1ebcc8231bb
15,687
import hashlib

def checksum(file, method='sha1', chunk_size=4096):
    """Calculate the checksum of a file.

    Args:
        file: str, path-like, or file-like bytes object
    """
    try:
        fh = open(file, 'rb')
    except TypeError:
        fh = file
    try:
        h = hashlib.new(method)
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            h.update(chunk)
        return h.hexdigest()
    finally:
        if fh != file:
            fh.close()
9638985bec0c95d2c9cd30a93bf48f93381f99de
15,689
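A usage sketch for checksum with an in-memory file-like object (my own example; the SHA-1 of b"hello world" is a well-known test vector):

import io

print(checksum(io.BytesIO(b"hello world")))
# 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed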
import argparse
import os
import sys

def user_input():
    """Handle PBclust command line arguments."""
    parser = argparse.ArgumentParser(
        description="Cluster protein structures based on their PB sequences.")
    # mandatory arguments
    parser.add_argument("-f", action="append", required=True,
                        help="name(s) of the PBs file (in fasta format)")
    parser.add_argument("-o", action="store", required=True,
                        help="name for results")
    # --clusters or --compare arguments (mutually exclusive)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--clusters", action="store", type=int,
                       help="number of wanted clusters")
    group.add_argument("--compare", action="store_true", default=False,
                       help="compare the first sequence versus all others")
    # get all arguments
    options = parser.parse_args()
    # test if the number of clusters is valid
    if options.clusters is not None and options.clusters <= 0:
        parser.error("Number of clusters must be > 0.")
    # check if input files exist
    for name in options.f:
        if not os.path.isfile(name):
            sys.exit("{0}: not a valid file. Bye".format(name))
    return options
bcae7a5e7e186fac62fc0bd401deb83d356a6cd6
15,692
def week(make_week):
    """Fixture creating a week."""
    return make_week()
52dda51ed415d75f966e302c930cc54119005307
15,693
def tf_score(word, sentence):
    """
    Formula: (Number of times term w appears in the document) /
    (Total number of terms in the document).
    """
    word_frequency_in_sentence = 0
    # Count terms, not characters, to match the formula in the docstring.
    len_sentence = len(sentence.split())
    for word_in_sentence in sentence.split():
        if word == word_in_sentence:
            word_frequency_in_sentence = word_frequency_in_sentence + 1
    tf = word_frequency_in_sentence / len_sentence
    return tf
e93efbd52dfcdf05f771ab4d3f353e3f837dda96
15,694
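A small check of tf_score (my own example, using the term-count denominator from the docstring formula): "cat" appears 2 times among 7 terms:

print(tf_score("cat", "the cat sat on the cat mat"))  # 2/7 = 0.2857...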
def main():
    """Computational pathology toolbox by TIA Centre."""
    return 0
7a0c5a83b018860f683407bd9517d4d28b1c2443
15,695
import sys

def compare_sequence_dicts(fasta_dict, bam_dict):
    """Compares a FASTA and BAM sequence dictionary, and prints any
    differences. Returns true if all required sequences are found, false
    otherwise."""
    if fasta_dict == bam_dict:
        return True

    sys.stderr.write("Sequence dictionaries in FASTA/BAM files differ:\n")
    common = set(fasta_dict) & set(bam_dict)
    if not common:
        sys.stderr.write("FATAL ERROR: No sequences in common!\n")
        return False

    # Check that the lengths of required sequences match (fatal error)
    different = []
    for key in sorted(common):
        if fasta_dict[key] != bam_dict[key]:
            different.append((key, fasta_dict[key], bam_dict[key]))
    if different:
        sys.stderr.write("FATAL ERROR: Length of required sequences differ:\n")
        for values in different:
            sys.stderr.write("    - %s: %i bp vs %i bp\n" % values)

    # Check for sequences only found in the BAM file (fatal errors)
    bam_only = set(bam_dict) - common
    if bam_only:
        sys.stderr.write("FATAL ERROR: Sequences missing from FASTA dictionary:\n")
        for key in bam_only:
            sys.stderr.write("    - %s = %i bp\n" % (key, bam_dict[key]))

    # Check for sequences only found in the FASTA file (warning only)
    fasta_only = set(fasta_dict) - common
    if fasta_only:
        sys.stderr.write("WARNING: FASTA file contains extra sequences:\n")
        for key in fasta_only:
            sys.stderr.write("    - %s = %i bp\n" % (key, fasta_dict[key]))

    sys.stderr.write("\n")
    return not (different or bam_only)
476dac9f4aa025f586f5fbe33d52062260920677
15,696