content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def slave_passes_blacklist(slave, blacklist):
    """Return True if the slave's attributes match no blacklist entry.

    :param slave: A single mesos slave dict with an 'attributes' mapping
    :param blacklist: A list of lists like
        [["location_type", "location"], ["foo", "bar"]]
    :returns: boolean, True if the slave gets passed the blacklist
    """
    attrs = slave['attributes']
    return not any(attrs.get(loc_type) == loc for loc_type, loc in blacklist)
4aff5e7b7200cf9046f510ec554d575782700735
154,274
from typing import List
import re


def number2kansuuzi(tokens: List[str]) -> List[str]:
    """Rewrite 5-8 digit yen amounts using the 万 (10,000) kanji unit.

    >>> number2kansuuzi(['大引け', 'は', '147円', '高', 'の', '14000円'])
    ['大引け', 'は', '147円', '高', 'の', '1万4000円']
    >>> number2kansuuzi(['10000円', '台'])
    ['1万円', '台']
    """
    converted = []
    for token in tokens:
        if re.match(r'\d{5,8}円', token) is None:
            converted.append(token)
            continue
        major_part = token[:-5]
        minor_part = token[-5:-1]
        if int(minor_part) == 0:
            converted.append('{}万円'.format(major_part))
        else:
            converted.append('{}万{}円'.format(major_part, minor_part))
    return converted
39175f36e755bac96d0e0f978e3d436efd499b51
607,359
def _is_undefok(arg, undefok_names): """Returns whether we can ignore arg based on a set of undefok flag names.""" if not arg.startswith('-'): return False if arg.startswith('--'): arg_without_dash = arg[2:] else: arg_without_dash = arg[1:] if '=' in arg_without_dash: name, _ = arg_without_dash.split('=', 1) else: name = arg_without_dash if name in undefok_names: return True return False
a058a89836e32e749c3aeb0d583553f90e079185
599,526
def needs_batch_dim(image):
    """Determines whether an image has or is missing a batch dimension.

    Returns True for 2D images and for 3D images whose leading dimension
    is not 1; False otherwise.
    """
    if not hasattr(image, 'shape'):
        raise TypeError(
            "Can only determine batch dimensions for numpy arrays or tensors.")
    rank = len(image.shape)
    if rank == 2:
        return True
    if rank == 3 and image.shape[0] != 1:
        return True
    return False
4452af473d2a308666458e1abb8e265d8aecb976
366,967
def remove_keys_filter(row, keys):
    """Remove the given keys from the row (in place) and return it."""
    for unwanted in keys:
        row.pop(unwanted, None)
    return row
38b0020c931c8c62949b613941daa8270bc1c052
314,574
def is_letter(_code: int) -> bool:
    """Return True if the character code is an ASCII letter or a space.

    :param _code: integer character code (as from ``ord``)
    :returns: True for A-Z (65-90), a-z (97-122) or space (32)
    """
    # NOTE: the original named these ranges backwards ("lower" for 65-90,
    # "upper" for 97-122); names are fixed here, behavior is unchanged.
    upper: bool = 65 <= _code <= 90
    lower: bool = 97 <= _code <= 122
    space: bool = _code == 32
    return upper or lower or space
61f6a69c7e43d5a4e1e53c610e82b5c5b7634304
175,807
import asyncio


async def review_embed(bot, ctx, embed) -> bool:
    """Send the embed as a preview and wait for the author's review.

    Returns True only when the author reacts 👍 within 30 seconds;
    a 👎 reaction or a timeout yields False.
    """
    preview = await ctx.send("Preview:\nYes | No", embed=embed, delete_after=35)
    await preview.add_reaction("👍")
    await preview.add_reaction("👎")

    def check(reaction, user):
        return user.id == ctx.author.id and str(reaction.emoji) in ["👍", "👎"]

    try:
        reaction, user = await bot.wait_for("reaction_add", timeout=30, check=check)
    except asyncio.TimeoutError:
        return False
    return str(reaction.emoji) == "👍"
52205d7ff693db486aa886f14b230194470e6f46
265,750
def first_index_not_in_set(seq, items):
    """Return the index of the first element of seq NOT in items, or None.

    NOTE: the original docstring claimed the opposite ("first occurrence
    of any of items in seq"); the code finds the first element that is
    absent from items, which matches the function name.
    """
    for i, s in enumerate(seq):
        if s not in items:  # idiom fix: `s not in` over `not s in`
            return i
    return None  # made explicit; the original fell off the end
22a72425ea961ebfef5351c7332af6ddb09b4d02
340,139
def apmapr(a, a1, a2, b1, b2):
    """Vector linear transformation.

    Map the value ``a`` from the input range ``a1, a2`` onto the output
    range ``b1, b2``, clamping the result to ``[b1, b2]``. It is assumed
    that ``a1 < a2`` and ``b1 < b2``.

    Parameters
    ----------
    a : float
        The value to be mapped.
    a1, a2 : float
        The numbers specifying the input data range.
    b1, b2 : float
        The numbers specifying the output data range.

    Returns
    -------
    b : float
        Mapped value.
    """
    slope = (b2 - b1) / (a2 - a1)
    mapped = (a - a1) * slope + b1
    return max(b1, min(b2, mapped))
52615600981a96b75d3ace32a2dd9d4c2d350e43
487,667
def euclidean_dist(p1, p2):
    """Return the euclidean distance between points p1 and p2 in
    n-dimensional space.

    :raises ValueError: if the points differ in dimensionality.
    """
    if len(p1) != len(p2):
        raise ValueError("Points must have the same number of dimensions.")
    squared_total = sum((a - b) ** 2 for a, b in zip(p1, p2))
    return squared_total ** 0.5
3f2ff5da253e14765074b97e322bc3042fbe58b9
606,272
from typing import List


def strip_common_prefix(parts: List[List[str]]) -> str:
    """Find and remove the prefix common to all strings.

    Returns the last element of the common prefix; the lists in ``parts``
    are modified in place so each keeps that last common element.
    Raises ValueError if no common prefix exists (the original raised a
    bare Exception; ValueError is more specific and still caught by any
    ``except Exception`` handler).

    >>> paths = [["a", "b"], ["a", "b", "c"]]
    >>> strip_common_prefix(paths)
    'b'
    >>> paths
    [['b'], ['b', 'c']]
    """
    # Length of the longest prefix shared by every list.
    common_len = 0
    for idx in range(min(len(x) for x in parts)):
        if len({x[idx] for x in parts}) != 1:
            break
        common_len += 1
    if common_len <= 0:
        msg = "No common prefix:\n"
        for path in parts:
            msg += f"{path}\n"
        raise ValueError(msg)
    last_common = parts[0][common_len - 1]
    # Keep the last common element at the head of every list.
    for idx in range(len(parts)):
        parts[idx] = parts[idx][common_len - 1:]
    return last_common
92cc5e3a7e2cd65ed967ddd9ea57329d04b14cee
152,654
from typing import Iterable
import re


def filter_regex(regex: str, data: Iterable[str]) -> Iterable[str]:
    """Lazily yield only the values of ``data`` that match ``regex``.

    The pattern is compiled once and ``search`` semantics are used, so a
    match anywhere in the string qualifies.
    """
    pattern = re.compile(regex)
    return (datum for datum in data if pattern.search(datum))
4d7eeafeeaf9bd39add157159dfc0cf1f130432b
363,696
def partition_seq(seq, size):
    """Split a sequence into an iterable of subsequences.

    All subsequences are of the given size, except the last one, which
    may be smaller. If the input list is modified while the returned
    iterable is consumed, the behavior of the program is undefined.

    :param seq: the list to split
    :param size: the desired size of the sublists, must be > 0
    :type size: int
    :return: a lazy iterable of sublists

    >>> list(partition_seq("abcde", 2))
    ['ab', 'cd', 'e']
    >>> list(partition_seq("", 1))
    []
    >>> list(partition_seq("abcde", 0))
    Traceback (most recent call last):
    ...
    ValueError: Size must be greater than 0
    """
    if size < 1:
        raise ValueError('Size must be greater than 0')
    # The start positions are computed eagerly (at call time), while the
    # slices themselves are taken lazily as the generator is consumed.
    starts = range(0, len(seq), size)
    return (seq[start:start + size] for start in starts)
599e48da0db19b7fe7a5730a0659ac7ca130abf2
654,424
def all_diff(*values):
    """Return True if all values are different, False otherwise.

    Bug fix: the original compared the two lengths with ``is``, which only
    works by accident for CPython's small interned ints (-5..256) and can
    return False for larger collections even when all values differ.
    """
    return len(values) == len(set(values))
6bda2a82c1971d16177d73ca84dfe01795f265ad
546,436
import re


def isIdentPub(s):
    """Return True if string s is a valid python public identifier.

    A public identifier starts with a letter (not an underscore) and
    continues with word characters only; returns False otherwise.
    """
    return bool(re.match(r'^[a-zA-Z]\w*$', s))
361ce246de4efd8e005938d3b9d3b810787d9dec
327,813
import torch


def rms(samples):
    """Root Mean Square (RMS) of a tensor of samples."""
    mean_square = torch.mean(samples ** 2)
    return torch.sqrt(mean_square)
989d5faae35b09f1860ab7e327dbe1a7f24b765d
90,025
def node_is_inline(node):
    """Test if a node is in an "inline" context (its parent is a paragraph)."""
    inline_parent_tags = ['paragraph']
    return node.parent.tagname in inline_parent_tags
274c64c4e2109240b9928d79e5f25755437eefc3
383,434
import six def _ExtractKey(option): """Helper to extract a key for an option dictionary.""" if " " in option: # The option was quoted so it is a value. return None if six.ensure_str(option, "utf-8").startswith("--"): return option[2:] elif six.ensure_str(option, "utf-8").startswith("-"): return option[1:] return None
3ba8c6318b7a92d60514621ba2f162bb37d970d3
240,939
def _IsCurrentCommitTagged(raw): """True if the current commit is tagged, otherwise False""" # If this condition hits, then we are currently on a tagged commit. return (len(raw.split("-")) < 4)
8b72de72230e4f9f0013ec4c1a8d705dde7e66fe
163,977
import re


def preprocess_ents(label, text):
    """Preprocess an entity.

    Strips any "<prefix>:" from the text, trims leading whitespace, and
    collapses runs of disallowed characters into single spaces.

    Parameters
    ----------
    label: str:
        The named entity label
    text: str:
        The named entity text

    Returns
    -------
    tuple:
        returns a tuple of strings containing the label and text
    """
    pattern = ':'
    match = re.search(pattern, text)
    if match:
        text = text[match.end():]
    # Fix: raw strings for the regexes ('\s' in a plain string is an
    # invalid escape sequence and warns on modern Python).
    text = re.sub(r'^\s+', '', text)
    # remove non-alphanumeric characters (kept: . _ , @)
    text = re.sub(r'[^A-Za-z0-9._,@]+', ' ', text)
    return label, text
17d99929266a630733faa101d227a318db561990
640,948
def get_role(action):
    """Maps action from input to a role (promote or demote).

    Returns None for any unrecognized action.
    """
    action_to_role = {"demote": 'user', "promote": 'admin'}
    return action_to_role.get(action)
1291927260b667d58572e2f4ea44f1063c76a744
126,079
import re


def get_single_junction_overhang(cigar):
    """Return the number of reads left/right of a junction as indicated by
    the LEFTMOST N in a cigar string.

    Returns -1, -1 for reads that don't span junctions.

    :param cigar: string
    :return left: int
    :return right: int
    """
    junction_pattern = r"(\d+)M[\d]+N(\d+)M"
    hits = re.findall(junction_pattern, cigar)
    if not hits:
        return -1, -1
    left, right = hits[0]
    return int(left), int(right)
96331b12ba05eb13ae783aab76589a5556fb1166
31,947
def fixed_len(s, length, append=None, no_linebreaks=False, align_right=False):
    """Return the given string with a new length of ``length``.

    If the given string is too long, it is truncated and ellipsis are
    added. If it is too short, spaces are added (on the left when
    align_right is set). Line breaks are optionally removed.

    Bug fix: the original ``return s + append if append else ''`` parses
    as ``(s + append) if append else ''`` and silently returned an empty
    string whenever ``append`` was not given; same for the empty-input
    branch. Fixed by resolving the suffix first.

    Args:
        s (str): The string to fix in length
        length (int): The new length of the string
        append (str): The string to append
        no_linebreaks (bool): Whether line breaks should be filtered out
        align_right (bool): Pad on the left instead of the right

    Returns:
        The string with a fixed length plus the append string
    """
    suffix = append if append else ''
    if not s:
        return " " * length + suffix
    if no_linebreaks:
        s = s.replace('\n', ' ').replace('\r', '')
    if len(s) > length:
        s = f"{s[:length-2]}.."
    elif align_right:
        s = s.rjust(length, ' ')
    else:
        s = s.ljust(length, ' ')
    return s + suffix
e872e10e5f28e9aa275516ccf93b70a174dfa5d1
259,499
def mag(initial, current):
    """Calculate the magnification of a specified value.

    **Parameters**

        initial: *float*
            initial value (magnification of 1)
        current: *float*
            current value

    **Returns**

        magnification: *float*
            the magnification of the current value
    """
    numerator = float(initial)
    denominator = float(current)
    return numerator / denominator
abc8d3603f11e62f57a62c47dc372b4b9ea19b0c
701,722
def parse_vars(vars):
    """Transform a list of NAME=value environment variables into a dict.

    Only the first '=' splits; later duplicates of a name win.
    """
    return dict(var.split("=", 1) for var in vars)
e2c6ae05cdf0151caaf8589eb7d7df90dcdd99a1
709,480
from typing import Any


def repr_or_str(o: Any) -> str:
    """repr_or_str function

    Returns a string representation of the input:

    - If input is bytes returns the hex representation
    - If input is str returns the string
    - If input is None returns empty string
    - Anything else falls back to repr()

    :type o: ``Any``
    :param o: Input data (str or bytes)
    :return: String representation of the input
    :rtype: ``str``
    """
    if o is None:
        return ''
    if isinstance(o, bytes):
        return o.hex()
    if isinstance(o, str):
        return o
    return repr(o)
5e334d639ef7e6bb7d373d2264db29683077b23b
122,758
def _remove_batch_rule(rules): """Removes the batch rule and returns the rest.""" return [(k, v) for (k, v) in rules if k != "batch"]
0bd19cb9a4be86c89571820244aeb37d030658b8
461,692
import pytz


def parse_timezone(tz):
    """Parse a timezone description into a tzinfo object.

    Spaces in the name are replaced with underscores, so both
    "America/Los Angeles" and "America/Los_Angeles" are accepted.
    """
    canonical_name = tz.replace(" ", "_")
    return pytz.timezone(canonical_name)
7fb4f4f6506afb3f2057fae5020cb904f88e16a6
106,920
def remove_group(api, assessment_id):
    """Remove all groups whose name starts with the assessment id.

    Returns True when done.
    """
    for group in api.groups.get():
        if group.name.startswith(assessment_id):
            api.groups.delete(group.id)
    return True
0513ecd4fe9ebfcb6e70cbe69dd540b40401dd7e
442,516
def _fortran_float_converter(in_string: bytes) -> bytes: """ This utility converts fortran double precision float strings to python float strings by replacing D with e :param in_string: The fortran string containing double precision floats :return: The string ready for ingest by python float utilities """ return in_string.replace(b'D', b'e')
da838699d1f26354970c3984fa2a47e7344c5e52
340,627
def get_p2p_scatter_pfold_over_mad(model):
    """Get ratio of median of period-folded data over median absolute
    deviation of observed values.
    """
    metric_key = 'scatter_pfold_over_mad'
    return model[metric_key]
b2cb60d8ea744059a1dbaecd0a280cb5a499e70a
343,669
def find_period(samples_second):
    """Find Period.

    Args:
        samples_second (int): number of samples per second

    Returns:
        float: samples per period divided by samples per second
    """
    samples_per_period = 4
    return samples_per_period / samples_second
c4a53e1d16be9e0724275034459639183d01eeb3
707,539
from typing import Union def _to_int(value: Union[str, int]) -> int: """Converts a vendor or product ID, specified either in hexadecimal notation as a string, or in decimal notation as an integer, to its integer representation. Parameters: value: the value to convert Returns: the vendor or product ID as an integer """ if isinstance(value, str): return int(value, 16) else: return int(value)
2b2153493a6e80564db32ac9735387853c9aa4f3
367,279
import logging def logger(name: str) -> logging.Logger: """ Obtain a logger which is configured with the default refinery format. """ logger = logging.getLogger(name) if not logger.hasHandlers(): stream = logging.StreamHandler() stream.setFormatter(logging.Formatter( '({asctime}) {levelname} in {name}: {message}', style='{', datefmt='%H:%M:%S' )) logger.addHandler(stream) logger.propagate = False return logger
e90451475d4587d4bf1e88ebf070db3df2405b46
181,509
def clean_venue_name(venue_name: str) -> str:
    """Clean the venue name, by removing or replacing symbols that are not
    allowed in a file name.

    Args:
        venue_name: Original venue name.

    Returns:
        Cleaned venue name.
    """
    replacements = {"*": "", "/": "_", " ": "_"}
    for symbol, substitute in replacements.items():
        venue_name = venue_name.replace(symbol, substitute)
    return venue_name
90b6f8b3787af17750c548bb816383bf8a5b07a4
695,990
def list_manipulation(lst, command, location, value=None):
    """Mutate lst to add/remove from beginning or end.

    - lst: list of values
    - command: command, either "remove" or "add"
    - location: location to remove/add, either "beginning" or "end"
    - value: when adding, value to add

    remove: returns the item removed; add: returns the list.
    Invalid commands or locations return None.

    >>> lst = [1, 2, 3]
    >>> list_manipulation(lst, 'remove', 'end')
    3
    >>> list_manipulation(lst, 'remove', 'beginning')
    1
    >>> lst
    [2]
    >>> list_manipulation(lst, 'add', 'end', 30)
    [2, 30]
    >>> list_manipulation(lst, 'foo', 'end') is None
    True
    """
    if command == "remove" and location == "end":
        return lst.pop()
    if command == "remove" and location == "beginning":
        return lst.pop(0)
    if command == "add" and location == "beginning":
        lst.insert(0, value)
        return lst
    if command == "add" and location == "end":
        lst.append(value)
        return lst
    return None
c847257ea5508f60b84282c3ac8237b43cd3825a
709,243
def public(view):
    """Decorator marking a view as visible to all users, including
    anonymous users.

    It's for purely semantic purposes and has no functionality.
    """
    return view
8f4160e40e57ca29f0feae63ae8220109384d44b
506,654
from functools import reduce


def get_average(lst):
    """Compute the average of a list of values.

    Arguments:
        lst: list of values; in this case these are timedelta objects

    Returns:
        average: Average value of the list elements
    """
    total = reduce(lambda acc, item: acc + item, lst)
    return total / len(lst)
fee0575808e5a91a12127ad7491f97507786e46c
284,653
def filter_out_non_passable_items(config: dict, depth: int):
    """Recursively filters out non-passable args started with '.' and '_'."""
    if depth <= 0 or not isinstance(config, dict):
        return config
    filtered = {}
    for key, val in config.items():
        if key.startswith(('.', '_')):
            continue
        filtered[key] = filter_out_non_passable_items(val, depth - 1)
    return filtered
fc586a4f2135bec7c91c7060485dd6fb92a55308
198,321
def argToInt(value):
    """Convert a size or address argument (e.g. "512KB" or "1MB") to bytes.

    When neither a KB nor MB style suffix is present, the value is parsed
    with base auto-detection, so a 0x prefix can be used.
    """
    suffixes = [("k", 1024), ("m", 1024 * 1024),
                ("kb", 1024), ("mb", 1024 * 1024)]
    lowered = value.lower()
    for suffix, multiplier in suffixes:
        if lowered.endswith(suffix):
            return int(value[:-len(suffix)], 0) * multiplier
    return int(value, 0)
089d1d674b249d8ddb24432cce8df013bb9c8abb
657,278
from typing import List
from typing import Tuple


def percent_edges_in_other_edges(edge_list: List[Tuple[str, str]],
                                 edge_list2: List[Tuple[str, str]]) -> float:
    """Calculate what proportion of the first list of edges can be found
    in the second list.

    Edges are checked in both node orders to account for directed edges.

    Parameters
    ----------
    edge_list: List[Tuple[str, str]]
        list of query edges
    edge_list2: List[Tuple[str, str]]
        list of edges to search in

    Returns
    -------
    proportion of first list that is in second list, accounting for order
    differences
    """
    matches = sum(
        1 for edge in edge_list
        if edge in edge_list2 or edge[::-1] in edge_list2
    )
    return matches / len(edge_list)
0d7d7c80c1d560034ec59a2575d88563e8d250d9
579,971
def predict_cluster(vectorizer, model, abstract):
    """Map the given abstract to its cluster label."""
    features = vectorizer.transform([abstract])
    return model.predict(features)[0]
18a1aa71960015e19073056e130ee58787c3deb0
457,454
def bits_to_number(bits):
    """Convert the binary representation (iterable of bits, most
    significant first) back to the original positive number.
    """
    value = 0
    for bit in bits:
        value = value * 2 + bit
    return value
9280170a3bfbad88363cd886384a2253e83d5db9
50,193
def jumpTime(player, jump=30):
    """jumpTime: jump in episode time.

    @param player (vlc.MediaPlayer):
    @param jump (int): a time jump, in seconds
    """
    if player.is_playing() != 1:
        print('The podcast is not playing.')
        return None
    current_ms = player.get_time()
    player.set_time(current_ms + jump * 1000)
    return None
3058df1b6f57d18c0c0428fef063f0186f2e288e
372,056
import string
import base64


def encode_base64_ondemand(s):
    """Encode a string to base64 if it isn't plain ASCII letters.

    A leading ': ' marks base64 values so that LDAP knows the string is
    b64 encoded; plain values get a leading ' '.

    Fixes: the original docstring said "Decode", and its second doctest
    used double quotes, which fails under doctest (repr emits single
    quotes).

    >>> encode_base64_ondemand("Hej")
    ' Hej'
    >>> encode_base64_ondemand("Höj")
    ': SMO2ag=='
    """
    if all(c in string.ascii_letters for c in s):
        return " " + s
    # String not ASCII letters only: base64-encode it.
    return ": " + base64.b64encode(s.encode()).decode()
90ae315600aa60688e3c4ee1088d11696f443bb9
450,521
def check_special_value(expected, value):
    """Check if value equals Null, Not null, empty or the expected value.

    The markers NULL / NOT_NULL / EMPTY / NOT_EMPTY are interpreted
    specially; any other expected string is compared for equality.
    """
    special_checks = {
        "NULL": lambda v: v is None,
        "NOT_NULL": lambda v: v is not None,
        "EMPTY": lambda v: v == "",
        "NOT_EMPTY": lambda v: v is not None and len(v) > 0,
    }
    check = special_checks.get(expected)
    if check is not None:
        return check(value)
    return value == expected
d4309ea27742fd38c8980d86314e00dc1ccf54a2
230,301
def to_datetime_string(value):
    """Get the datetime string representation of the input, including a
    utc offset when the value is timezone-aware.

    For example: `2015-12-24T23:40:15+03:30`

    :param datetime value: input object to be converted.
    :rtype: str
    """
    precision = 'seconds'
    return value.isoformat(timespec=precision)
12e053a5ea0ab582f0212a8b74591f6ae72310ea
188,896
def convert2Ddistance(dists, header=None, row_order=None):
    """Return a 2 dimensional list, header and row order.

    Parameters
    ----------
    dists : dict
        a 1D dict with {(a, b): dist, ..}
    header
        series with column headings. If not provided, the sorted top
        level dict keys are used.
    row_order
        a specified order to generate the rows; defaults to ``header``.
        (Bug fix: the original accepted this argument but always
        overwrote it with a copy of the header.)

    Returns
    -------
    2D list, header and row_order. If a dist is not present, it's set to
    0, or the symmetric value e.g. (a, b) -> (b, a).
    """
    if header is None:
        names = set()
        for pair in dists:
            names.update(set(pair))
        header = list(sorted(names))
    if row_order is None:
        row_order = list(header)
    rows = []
    for n1 in row_order:
        # Fall back to the symmetric key, then to 0.
        row = [dists.get((n1, n2), dists.get((n2, n1), 0)) for n2 in header]
        rows.append(row)
    return rows, row_order, header
26e99589bcfcb6aa319d991b216dd669e2b7bd8b
412,626
def generate_order_by_clause(params):
    """Generate an "order by" clause string from the given column names.

    :param list params: A list of column names to sort the result to,
        e.g. ['id', 'name', 'full_path']. Each name is mapped to its SQL
        column expression. Returns '' when params is empty.
    """
    column_dict = {
        'id': 'id',
        'parent_id': "parent_id",
        'name': "name",
        'path': "full_path",
        'full_path': "full_path",
        'entity_type': "entity_type",
        'task_type': "task_types.name",
        'project_id': 'project_id',
        'date_created': 'date_created',
        'date_updated': 'date_updated',
        'has_children': 'has_children',
        'link': 'link',
        'priority': 'priority',
        'depends_to': 'dep_info',
        'resource': "resource_info.resource_id",
        'responsible': 'responsible_id',
        'watcher': 'watcher_id',
        'bid_timing': 'bid_timing',
        'bid_unit': 'bid_unit',
        'schedule_timing': 'schedule_timing',
        'schedule_unit': 'schedule_unit',
        'schedule_model': 'schedule_model',
        'schedule_seconds': 'schedule_seconds',
        'total_logged_seconds': 'total_logged_seconds',
        'percent_complete': 'percent_complete',
        'start': 'start',
        'end': '"end"',
        'status': '"Statuses".code',
    }
    columns = [column_dict[column_name] for column_name in params]
    if not columns:
        return ''
    return 'order by %s' % ', '.join(columns)
9f9a74d6a16b53cd65542a000fe4215a9d16ced1
32,669
import torch


def batch_to_time(x: torch.Tensor, block_size: int) -> torch.Tensor:
    """Inverse of time_to_batch: concatenate a batched time-signal back to
    the correct time-domain.

    Args:
        x: The batched input, size [Batch * block_size, Channels, Length]
        block_size: size of the blocks used for encoding

    Returns:
        Tensor with size [Batch, Channels, Length * block_size]
    """
    if block_size == 1:
        return x
    assert x.ndimension() == 3
    total_batch, channels, length = x.shape
    batch = total_batch // block_size
    unfolded = x.reshape(batch, block_size, channels, length)
    reordered = unfolded.permute(0, 2, 3, 1)
    merged = reordered.reshape(batch, channels, length * block_size)
    return merged.contiguous()
38aa42fdb83bf9f326f371815e67b5b2396c090c
489,837
import json


def load_json(json_path):
    """Load a JSON file and parse it to a native object.

    Args:
        json_path: str
            File path from which to load the JSON file.

    Returns:
        object : Typically a nested structure of ``list`` and ``dict``
            objects.
    """
    with open(json_path, "r", encoding="utf-8") as handle:
        return json.load(handle)
2e5a8171d4489fb53fb940adbddcee408dc08e48
404,209
import base64


def base64_string_encode(data):
    """Encode a string into its base64 string representation.

    :param data: str: string to encode
    :return: str
    """
    raw_bytes = data.encode('utf-8')
    return base64.b64encode(raw_bytes).decode('utf-8')
542937dbf8050e0e2adb97c5f34b9852ca62af6a
351,103
def _get_rows(x): """ Return 2D signal rows. """ return [x[i, :] for i in range(x.shape[0])]
5de2275515b9b5df7139cb96c0a13f89e4bc9ccf
549,655
def sentence_starts(sentences, start_length):
    """Return a list of tuples containing the first start_length words of
    each sentence.

    Sentences whose length does not exceed start_length are skipped.
    """
    return [
        tuple(sentence[:start_length])
        for sentence in sentences
        if len(sentence) > start_length
    ]
92be32a9669e7ae2addf6e3c9fa23044564c9915
433,231
def ask(question, options, default):
    """Ask the user a question with a list of allowed answers (like yes or no).

    The user is presented with a question and asked to select an answer
    from the given options list. The default will be returned if the user
    enters nothing. The user is asked to repeat his answer if his answer
    does not match any of the allowed answers.

    :param question: Question to present to the user (without question mark)
    :type question: ``str``
    :param options: List of allowed answers
    :type options: ``list``
    :param default: Default answer (if the user enters no text)
    :type default: ``str``
    """
    assert default in options
    shown_options = "/".join(
        o.upper() if o == default else o for o in options
    )
    question += " ({})? ".format(shown_options)
    while True:
        selected = input(question).strip().lower()
        if selected == "":
            return default
        if selected in options:
            return selected
        # Invalid answer: re-prompt with the explicit option list.
        question = "Please type '{}'{comma} or '{}': ".format(
            "', '".join(options[:-1]),
            options[-1],
            comma=',' if len(options) > 2 else '',
        )
baf3e9e00b01573921e94bd7c508ee7fb0381392
581,268
def is_str_like(content):
    """Check if an instance is string-like.

    Parameters
    ----------
    content : str
        The content to check

    Returns
    -------
    bool
        True if is str like else False

    Note
    ----
    Function identical to :func:`numpy.is_str_like`, credits to the
    author; it simply attempts string concatenation.
    """
    try:
        content + ""
        return True
    except (TypeError, ValueError):
        return False
23453c67c05ea003d1f658f31958cf22726a3815
149,187
import pkg_resources


def get_renderer_name(name: str) -> str:
    """Return the name of the renderer used for a certain file extension.

    :param str name: The name of the extension to get the renderer name
        for. (.jpg, .docx, etc)
    :rtype : `str`
    """
    # The entry-point iterator can only be consumed once, so materialize it.
    matches = list(pkg_resources.iter_entry_points(
        group='mfr.renderers', name=name.lower()))
    if not matches:
        # Unsupported file type: return '' and let `make_renderer()` handle it.
        return ''
    # A supported file type must map to exactly one entry point.
    assert len(matches) == 1
    return matches[0].attrs[0]
26ea05431111b1d00afccf519606cf1864183a7b
531,076
def _get_dihedral_rb_torsion_key(dihedral, epsilon_conversion_factor): """Get the dihedral_type key for a Ryckaert-Bellemans (RB) dihedrals/torsions Parameters ---------- dihedral : parmed.topologyobjects.Dihedral The dihedral information from the parmed.topologyobjects.Angle epsilon_conversion_factor : float or int The epsilon conversion factor Returns ---------- tuple, (dihed_type_RB_c0, dihed_type_RB_c1, dihed_type_RB_c2, dihed_type_RB_c3, dihed_type_RB_c4, dihed_type_RB_c5, dihed_type_scee, dihed_type_scnb, dihed_atom_1_type, dihed_atom_2_type, dihed_atom_3_type, dihed_atom_4_type, dihed_atom_1_res_type, dihed_atom_2_res_type, dihed_atom_3_res_type, dihed_atom_4_res_type) dihed_type_RB_c0 : float Ryckaert-Bellemans (RB) dihedrals/torsions C0 constant. dihed_type_RB_c1 : float Ryckaert-Bellemans (RB) dihedrals/torsions C1 constant. dihed_type_RB_c2 : float Ryckaert-Bellemans (RB) dihedrals/torsions C2 constant. dihed_type_RB_c3 : float Ryckaert-Bellemans (RB) dihedrals/torsions C3 constant. dihed_type_RB_c4 : float Ryckaert-Bellemans (RB) dihedrals/torsions C4 constant. dihed_type_RB_c5 : float Ryckaert-Bellemans (RB) dihedrals/torsions C5 constant. dihed_type_scee : float, default = 1.0 The 1-4 electrostatic scaling factor dihed_type_scnb : float, default = 1.0 The 1-4 Lennard-Jones scaling factor. 
dihed_atom_1_type : str The atom type for atom number 1 in the dihedral dihed_atom_2_type : str The atom type for atom number 2 in the dihedral dihed_atom_3_type : str The atom type for atom number 3 in the dihedral dihed_atom_4_type : str The atom type for atom number 4 in the dihedral dihed_atom_1_res_type : str The residue name for atom number 1 in the dihedral dihed_atom_2_res_type : str The residue name for atom number 2 in the dihedral dihed_atom_3_res_type : str The residue name for atom number 3 in the dihedral dihed_atom_4_res_type : str The residue name for atom number 4 in the dihedral """ lj_unit = 1 / epsilon_conversion_factor dihed_type_RB_c0 = round(dihedral.type.c0 * lj_unit, 8) dihed_type_RB_c1 = round(dihedral.type.c1 * lj_unit, 8) dihed_type_RB_c2 = round(dihedral.type.c2 * lj_unit, 8) dihed_type_RB_c3 = round(dihedral.type.c3 * lj_unit, 8) dihed_type_RB_c4 = round(dihedral.type.c4 * lj_unit, 8) dihed_type_RB_c5 = round(dihedral.type.c5 * lj_unit, 8) dihed_type_scee = round(dihedral.type.scee, 4) dihed_type_scnb = round(dihedral.type.scnb, 4) dihed_atom_1_type = dihedral.atom1.type dihed_atom_2_type = dihedral.atom2.type dihed_atom_3_type = dihedral.atom3.type dihed_atom_4_type = dihedral.atom4.type dihed_atom_1_res_type = dihedral.atom1.residue.name dihed_atom_2_res_type = dihedral.atom2.residue.name dihed_atom_3_res_type = dihedral.atom3.residue.name dihed_atom_4_res_type = dihedral.atom4.residue.name return ( dihed_type_RB_c0, dihed_type_RB_c1, dihed_type_RB_c2, dihed_type_RB_c3, dihed_type_RB_c4, dihed_type_RB_c5, dihed_type_scee, dihed_type_scnb, dihed_atom_1_type, dihed_atom_2_type, dihed_atom_3_type, dihed_atom_4_type, dihed_atom_1_res_type, dihed_atom_2_res_type, dihed_atom_3_res_type, dihed_atom_4_res_type, )
6e706227adff4892be02a799720274c0419c0427
594,229
def clear_line(buf, linenum):
    """Clear a line.

    Args:
        buf (obj): Nvim buffer
        linenum (int): Line Number

    Returns:
        suc (bool): True if success
    """
    empty_line = []
    buf[linenum] = empty_line
    return True
1dc668d1b882286e7b9f44c2580d61a7447ec997
498,492
def standard_slices(problem_size, num_agents, overlap=0):
    """Create standard slices for a problem.

    We assume that the problem size is exactly divisible by the number of
    agents; hence all agents have exactly the same subproblem size.

    Parameters
    ----------
    problem_size : int
        problem size
    num_agents : int
        number of agents
    overlap : int (optional)
        how many rows overlap should the agents have. The overlap is
        cyclic, i.e. the last agent shares some rows with the first agent.

    Returns
    -------
    list of lists of ints
        a list of row indices corresponding to each agent

    Raises
    ------
    ValueError
        for invalid num_agents/overlap, a non-divisible problem size, or
        an overlap so large that rows would repeat within one slice.
    """
    if num_agents < 1:
        raise ValueError("Number of agents must be greater or equal one")
    if overlap < 0:
        raise ValueError("Overlap must be greater or equal zero")
    if problem_size % num_agents != 0:
        raise ValueError(
            "Problem size must be an exact multiple of number of agents"
        )
    stride = problem_size // num_agents
    if stride + overlap > problem_size:
        raise ValueError("Overlap is too large, repeating rows")
    slices = []
    for start in range(0, problem_size, stride):
        slices.append([(start + offset) % problem_size
                       for offset in range(stride + overlap)])
    return slices
d98e9b6b4be42352bcd9f1060074bcdb1809fb78
108,073
def values_of_series_of_invest(
    rates_between_periods,
    invest_amounts=None,
    final_only=True,
    invest_at_begining_of_period=False,
):
    """Compute the running value of a series of investments.

    Each amount in ``invest_amounts`` is invested in turn while the running
    total grows by the matching rate in ``rates_between_periods``.  By
    default amounts are invested at the END of their period, so the first
    rate never applies to the first amount; set
    ``invest_at_begining_of_period=True`` to invest at the start of each
    period instead, in which case the first amount grows during the first
    period.

    :param rates_between_periods: iterable of per-period growth rates
    :param invest_amounts: iterable of invested amounts; defaults to a
        single unit invested after the first period and nothing afterwards
    :param final_only: if True return only the final total, otherwise the
        list of totals after each investment
    :param invest_at_begining_of_period: whether amounts are invested at
        the beginning of each period

    >>> values_of_series_of_invest((0, 0), (1, 1))
    2
    >>> values_of_series_of_invest((0, 0), (1, 1), final_only=False)
    [1, 2]
    >>> values_of_series_of_invest((0.05, 0), (1, 1), final_only=False)
    [1.0, 2.0]
    """
    if invest_amounts is None:
        # One unit invested after the first period, nothing afterwards.
        invest_amounts = [1] + [0] * len(rates_between_periods)
    amounts = list(invest_amounts)
    rates = list(rates_between_periods)
    if invest_at_begining_of_period:
        # The first amount is exposed to the first period's growth.
        running = amounts.pop(0) * (1 + rates.pop(0))
        history = [running]
    else:
        running = 0
        history = []
    for amount, rate in zip(amounts, rates):
        running = running * (1 + rate) + amount
        history.append(running)
    return running if final_only else history
96df5c3a713ba4de97a46709d64c5d4a0b3904a4
300,287
def create_table(title: str, data: dict, headers: str, **kwargs):
    """Build a table context dict from a pipe-separated header string.

    Usage: `{% create_table 'carparks' carpark 'name|description' %}`
    Any extra keyword arguments are merged into the result (overriding the
    standard keys on collision).
    """
    table = {'title': title, 'headers': headers.split("|"), 'data': data}
    table.update(kwargs)
    return table
6ef4abb5859bd179fdf4064b5dbe6df075eb75d0
530,447
def bool_yes_no(process, longname, flag, value):
    """Phrase Boolean values as 'YES' or 'NO'.

    Raises ValueError when *value* is not (equal to) a boolean.
    """
    # Equality (not identity) comparison on purpose: mirrors truth-value
    # coercion for 0/1 while still rejecting other objects below.
    if value == True:
        return "YES"
    elif value == False:
        return "NO"
    # Anything else wasn't a bool!
    raise ValueError("Flag value '%s' wasn't boolean." % repr(value))
b6b5d63eb56284c3ccd475ad8b666727bda186ca
566,508
def find_an_even(L):
    """Return the first even number in *L*.

    Assumes L is a list of integers.
    Raises ValueError if L does not contain an even number.
    """
    for candidate in L:
        if candidate % 2 == 0:
            return candidate
    raise ValueError('L does not contain an even number.')
82a987adea07bc291c38a35d45c01a93f9d9d1bb
239,093
import sqlite3


def open_db(database):
    """Open a connection to an SQLite database.

    :param database: database file (or ':memory:')
    :return: connection object and cursor
    """
    connection = sqlite3.connect(database)
    return connection, connection.cursor()
5718a9c1b74382aa7412067ebd0e8c2387be1a09
53,708
def eulerC1(c2, rho, R):
    """Compute the optimal Euler consumption of the young generation.

    Args:
        c2 (float): consumption of the old generation
        rho (float): discount parameter
        R (float): gross return on saving

    Returns:
        (float): optimal Euler consumption implied by c2
    """
    return c2 * (1 + rho) / R
16b2c8aec5d05836383cd2256ccf67b7c218ef85
375,654
import requests


def fetch_output(input_prefix, input_value, output_prefix, enable_semantic_search=False):
    """Find APIs which can produce the output_prefix.

    :arg str input_prefix: prefix of the input, e.g. ncbigene, hgnc.symbol.
    :arg str input_value: the actual value of the input.
    :arg str output_prefix: prefix of the output, e.g. ncbigene, hgnc.symbol.
    :arg boolean enable_semantic_search: query the semantic-search endpoint
        instead of the direct input-to-output endpoint.
    :returns: list -- api endpoints which can produce the output prefix
    """
    endpoint = 'semanticquery' if enable_semantic_search else 'directinput2output'
    url = ('http://biothings.io/explorer/api/v2/' + endpoint
           + '?input_prefix=' + input_prefix
           + '&output_prefix=' + output_prefix
           + '&input_value=' + input_value)
    response = requests.get(url)
    if response.ok:
        doc = response.json()
        return doc['data'] if doc else []
    # Error path: surface the server-provided message when present.
    doc = response.json()
    if 'error message' in doc:
        print(doc['error message'])
    else:
        print("No results could be found for your query!")
    return []
1ed8f27923695ab7587f49083070aecb040987f6
17,839
def merge(lst1: list, lst2: list, into: list) -> list:
    """Merge two sorted lists into the pre-allocated list *into*.

    Complexity: O(n) time, O(1) extra space.
    """
    a = b = out = 0
    while a < len(lst1) and b < len(lst2):
        # '<=' keeps equal elements from lst1 first, making the merge
        # (and any sort built on it) stable.
        if lst1[a] <= lst2[b]:
            into[out] = lst1[a]
            a += 1
        else:
            into[out] = lst2[b]
            b += 1
        out += 1
    # Drain whichever input still has elements; at most one loop runs.
    while a < len(lst1):
        into[out] = lst1[a]
        a += 1
        out += 1
    while b < len(lst2):
        into[out] = lst2[b]
        b += 1
        out += 1
    return into
eeea39da20d3a3998e94a355f61f4f9cee6fad80
454,517
def has_annotations(doc):
    """Return True if any part of *doc* has at least one saved mutation mention."""
    return any(len(part['annotations']) > 0 for part in doc.values())
6b57893bc35af45950ec2eeb5008b663028d48bf
704,287
def is_palindrome(number):
    """Check if a number reads the same forwards and backwards.

    :param number: The int to check.
    :returns: True if the number is a palindrome, else False.
    """
    digits = str(number)
    return digits == digits[::-1]
a1842ec14f095adc6d5c3ea6c1e7e137e457adc7
102,100
import uuid


def is_valid_uuid(candidate):
    """Test if the provided string is a valid canonical UUID version-4 string.

    candidate (str): uuid to check (case-insensitive)

    Returns:
        bool: True if it is a valid uuid v4, False if not
    """
    try:
        parsed = uuid.UUID(candidate, version=4)
    except ValueError:
        return False
    # uuid.UUID(..., version=4) does NOT validate the version: it coerces
    # the version/variant bits, so any well-formed UUID string would be
    # accepted. Round-trip to ensure the input really carried v4 bits.
    return str(parsed) == candidate.lower()
0c0dbe1626d1ebac85782382678904ddb873b8e2
545,810
def generate_component_annotation_overview(elements, db):
    """
    Tabulate which MIRIAM databases the component's annotation match.

    Parameters
    ----------
    elements : list
        Elements of a model, either metabolites, reactions, or genes.
    db : str
        One of the MIRIAM database identifiers.

    Returns
    -------
    list
        The components that are not annotated with the specified MIRIAM
        database.
    """
    missing = []
    for element in elements:
        if db not in element.annotation:
            missing.append(element)
    return missing
848287af1078ae42c25fe52a8dd8f682a1749078
224,040
from typing import Union
from pathlib import Path


def find_table_file(root_project_dir: Union[str, Path]) -> Path:
    """Find the EUPS table file for a project.

    Parameters
    ----------
    root_project_dir
        Path to the root directory of the main documentation project
        (the directory containing ``conf.py`` and a ``ups`` directory).

    Returns
    -------
    table_path
        Path to the EUPS table file.

    Raises
    ------
    RuntimeError
        If no ``*.table`` file is present in the ``ups`` directory.
    """
    root_project_dir = Path(root_project_dir)
    table_path = None
    # Scan the ups directory; the last matching *.table file wins.
    for entry in (root_project_dir / "ups").iterdir():
        if entry.is_file() and entry.suffix == ".table":
            table_path = entry
    if table_path is None:
        raise RuntimeError(
            f"Could not find the EUPS table file for {root_project_dir}"
        )
    return table_path
59673a83e0ba9f0bb95e0b4d90f32fa48ac7fdef
178,234
import math


def std(lst):
    """Calculate the (population) standard deviation of a numeric sequence."""
    n = float(len(lst))
    mean = sum(lst) / n
    # Population variance: divide by n, not n - 1.
    variance = sum((x - mean) ** 2 for x in lst) / n
    return math.sqrt(variance)
060be5305eda97f2054862f2ca07c688498245f5
248,977
def test_datasource_connection(connection, body, error_msg=None):
    """Test a datasource connection.

    Either provide a connection id, or the connection parameters within the
    body object.

    Args:
        connection: MicroStrategy REST API connection object.
        body: Datasource Connection info.
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        Complete HTTP response object. HTTP STATUS 204/400
    """
    endpoint = "/api/datasources/connections/test"
    return connection.session.post(url=connection.base_url + endpoint, json=body)
96b1f08e7313583c12cb10d1b76bf3bdc17b2ab8
646,006
def get_N_RL(sym_list):
    """
    Compute the maximum "number of reachable locations" ($N_{r}$)
    over all possible locations.

    From the paper: for an empirical symbolic time series
    $\\mathcal{T} = \\{s_{1}, \\ldots, s_{m}\\}$ over locations $\\Omega$,
    $N_{r} = \\max_{x \\in \\Omega} | \\{ s_{i+1} : s_i = x \\} |$.

    :param sym_list: A list of location symbols
    :type sym_list: list
    """
    successors = {}
    # Pair each symbol with its successor and collect distinct successors.
    for current, following in zip(sym_list, sym_list[1:]):
        successors.setdefault(current, set()).add(following)
    return max((len(nexts) for nexts in successors.values()), default=0)
67b89b1d74dd2e24c60e5958ae6e8a7f09e1eb19
47,977
import asyncio


def run_async_method(loop, method, *args):
    """
    Run async method to completion and return its result.

    :param loop: the loop to run method until complete; if falsy, a fresh
        event loop is created (and cleaned up) via ``asyncio.run``.
    :param method: coroutine function to run.
    :param args: arguments of method.
    :return: result of "method".
    """
    if not loop:
        # asyncio.get_event_loop() is deprecated when no loop is running
        # (Python 3.10+); asyncio.run creates, runs and closes a loop.
        return asyncio.run(method(*args))
    return loop.run_until_complete(method(*args))
852ff87d9766a57227f466bd68b6825c17c840b6
606,966
def count(matches):
    """Count occurrences of taxa in a map.

    Parameters
    ----------
    matches : dict of str or dict
        Query-to-taxon(a) map.

    Returns
    -------
    dict
        Taxon-to-count map.
    """
    totals = {}
    for taxa in matches.values():
        try:
            # Unique match: the value is a hashable taxon, count it once.
            totals[taxa] = totals.get(taxa, 0) + 1
        except TypeError:
            # Multiple matches: a dict of subject -> count; distribute a
            # single unit of weight proportionally to the counts.
            weight = 1 / sum(taxa.values())
            for taxon, n in taxa.items():
                totals[taxon] = totals.get(taxon, 0) + n * weight
    return totals
de8c790c102b5f4da4c42a0ac5d1ec03b09d3bc0
188,093
def get_city(df, city_name=None, city_index=None):
    """
    Return an info dict for a city, selected by name or by row index.

    The record contains the simplemaps.com columns, e.g.
    {city_name, city_ascii, lat, lng, country, iso2, iso3, admin_name,
    capital, population, id}.

    :param df: pandas df containing cities data from simplemaps.com
    :param city_name: case-insensitive match against the ``city_ascii`` column
    :param city_index: positional row index; used only when no name is given
    :return: dict with the first matching record
    """
    assert any([city_name is not None, city_index is not None])
    if city_name is not None:
        mask = df['city_ascii'].str.lower() == city_name.lower()
        return df.loc[mask].to_dict('records')[0]
    return df.iloc[[city_index]].to_dict('records')[0]
2a18eb4f4dba2714db522acee58ddd2474915fd2
682,178
def comment(strng, indent=''):
    """Return the input string with every line commented out."""
    prefix = indent + '# '
    # splitlines(True) keeps each line's newline, so joining preserves
    # the original line structure exactly.
    return ''.join(prefix + line for line in strng.splitlines(True))
42386b7ed8de9127d7224481a5f5315d39b6ae97
707,661
def _index_keys(columns): """ Take all the key columns and build a list of possible index tuples that can arise from them. For example, given the following schema: "varId|dbSNP,gene" these are the possible index key lists: [['varId', 'gene'], ['dbSNP', 'gene']] """ keys = [k.split('|') for k in columns] # recursively go through the keys def build_keys(primary, secondary): for key in primary: if len(secondary) == 0: yield [key] else: for rest in build_keys(secondary[0], secondary[1:]): yield [key, *rest] # generate a list of all possible key tuples return list(build_keys(keys[0], keys[1:])) if len(keys) > 0 else []
ee7cb47b0abe6936ba7f5cfd720a4e92854a0d90
628,431
def merge_dicts(*dict_args):
    """Merge the given dicts into a new dict; later dicts win on key clashes.

    Examples::

        >>> merge_dicts({"a": 1, "b": 2}, {"c": 3, "b": 20}, {"d": 4})
        {'a': 1, 'b': 20, 'c': 3, 'd': 4}
    """
    return {key: value for mapping in dict_args for key, value in mapping.items()}
1e5bcc15f8d0af402658ce0910850199a1fb90ff
265,630
def insert_or_ignore(session, model, **kwargs):
    """
    Add a record to a table (model), or ignore it if it already exists.
    Existence is checked by 'wid' when present, otherwise by all kwargs.

    Args:
        session: db session
        model: datastore module table
        kwargs: record arguments

    Returns:
        The existing instance, or the newly added (uncommitted) one.
    """
    # Match on 'wid' alone when available; it is the natural key.
    lookup = {"wid": kwargs["wid"]} if "wid" in kwargs else kwargs
    instance = session.query(model).filter_by(**lookup).first()
    if not instance:
        instance = model(**kwargs)
        session.add(instance)
    return instance
065116b81b7ff7db648499f53db2af328991c8fe
253,944
def format_error_message(message, **kwargs):
    """
    Replace format tokens in *message* with values from *kwargs*.

    Strings are formatted; dicts are formatted recursively IN PLACE
    (value by value); any other type is returned unchanged.

    :param message: The message that contains the tokens.
    :param kwargs: The args used to replace the tokens.
    :return: The message formatted.
    """
    if isinstance(message, str):
        message = message.format(**kwargs)
    elif isinstance(message, dict):
        # Mutate the caller's dict on purpose, matching the historical
        # behaviour of this helper.
        for key in message:
            message[key] = format_error_message(message[key], **kwargs)
    return message
acb06712d6fd4221bf6a222c381553eefdf2a1c2
254,427
def remove_last_dim(arr):
    """
    Reshape *arr* to drop its last dimension (assumed to be of size 1),
    keeping only the first two dimensions.
    """
    rows, cols = arr.shape[0], arr.shape[1]
    return arr.reshape(rows, cols)
8a59c48b08ad5702deebd241e67c7e3379fa9765
496,113
import csv


def read_test_data(csv_path):
    """
    Read test data from a CSV file containing only numeric values.

    :param csv_path: the path to the CSV file
    :return: the read data as a two-dimensional list of floats
    """
    with open(csv_path, newline='') as handle:
        return [list(map(float, row)) for row in csv.reader(handle)]
403dfd98408ce35fa71bc5c52c8126fa4e597df3
141,430
def setup_walkers(cfg_emcee, params, level=0.1):
    """Initialize walkers for emcee.

    Parameters
    ----------
    cfg_emcee: dict
        Configuration parameters for emcee (uses 'ini_prior' and
        'burnin_n_walker').
    params: asap.Parameter object
        Object for model parameters.
    level: float, optional
        Perturbation level used when not sampling from the prior.

    Returns
    -------
    ini_positions: numpy array with (N_walker, N_param) shape
        Initial positions of all walkers.
    """
    if cfg_emcee['ini_prior']:
        # Draw initial walker positions from the prior distributions.
        return params.sample(nsamples=cfg_emcee['burnin_n_walker'])
    # Otherwise perturb around the current parameter values.
    return params.perturb(nsamples=cfg_emcee['burnin_n_walker'], level=level)
7a03d5f451a71f60acd64e7b22e852af99a9cefe
683,536
import struct


def read_4(stream):
    """Read a little-endian unsigned 32-bit integer (4 bytes) from *stream*."""
    (value,) = struct.unpack('<L', stream.read(4))
    return value
a663f14a3e71c4aed2aa94be5404d836d12035d4
611,643
def custom(K_0, D_0, L_S, D_S):
    """
    Define the material properties for a custom nonlinear material
    (parameters per [Steinwachs, 2015], for SAENO simulation).

    Args:
        K_0(float): Bulk modulus of the material in Pascal
        D_0(float): Buckling coefficient of the fibers
        L_S(float): Onset of strain stiffening of the fibers
        D_S(float): Strain stiffening coefficient of the fibers
    """
    return dict(K_0=K_0, D_0=D_0, L_S=L_S, D_S=D_S)
f3c39bd057ffb308a768e8427ed47eb065ad2fbc
473,941
def in_line(patterns, line):
    """Return True if any of the strings in *patterns* occurs in *line*."""
    return any(pattern in line for pattern in patterns)
e7715de654ac00ff39a1349767052a7a5df2a37d
221,686
def computeExtendedDependencies(dependencies):
    """
    Computes the extended dependencies for the given dependencies.

    The extended dependencies are defined as follows:
    In the following let E denote the set of existential variables
    (i.e. the keys of <dependencies>)
    ext_dep(e):=dep(e) union {v in E | dep(v) < dep(e) or (dep(e) == dep(v) and v < e)}

    Parameters
    ----------
    dependencies : dictionary
        The dependencies to consider

    Returns
    -------
    A dictionary representing the extended dependencies
    """
    # Set versions of the dependency lists, for fast subset comparisons.
    dependency_sets = {v:set(dep) for v,dep in dependencies.items()}
    extended_dependencies = {}
    for v1, dep1 in dependencies.items():
        extended_dependencies[v1]=dep1
        dependency_set=dependency_sets[v1]
        # Only variables inserted so far (plus v1 itself) are in
        # extended_dependencies, so each pair is examined exactly once.
        for v2 in extended_dependencies.keys():
            if v1 == v2:
                continue
            dep2=dependency_sets[v2]
            if dependency_set <= dep2:
                if len(dependency_set) == len(dep2):
                    # Equal dependency sets: break the tie by variable
                    # order so exactly one side is extended.
                    if v1 < v2:
                        extended_dependencies[v2] = extended_dependencies[v2] + [v1]
                    else:
                        extended_dependencies[v1] = extended_dependencies[v1] + [v2]
                else:
                    # dep(v1) is a strict subset of dep(v2): add v1 to v2.
                    extended_dependencies[v2] = extended_dependencies[v2] + [v1]
            elif dep2 < dependency_set:
                # dep(v2) is a strict subset of dep(v1): add v2 to v1.
                extended_dependencies[v1] = extended_dependencies[v1] + [v2]
    return extended_dependencies
47d0e7603c6bf1cc843b0f420287495ddedcfbb9
70,097
def checkIdenticalExistingVectors(newgraphs, vectors, v):
    """
    Check whether any vector from 'vectors' corresponding to one of the
    graphs in 'newgraphs' equals *v* (i.e. whether a vector equal to v has
    already been generated).

    Returns True if such a vector exists, and None otherwise.
    """
    for graph in newgraphs:
        if any(existing == v for existing in vectors[graph]):
            return True
    # Implicit None return on no match (kept for backwards compatibility).
66797a351bf1666e59630789aa2aefbda9a820d3
597,578
import hashlib


def file_get_md5_checksum(path, block_size=2 ** 20):
    """Return the MD5 checksum (hex digest) for the given file.

    Reads the file in ``block_size`` chunks so arbitrarily large files can
    be hashed. Returns None if the file cannot be read.
    (Function source originally from: https://gist.github.com/juusimaa/5846242.)
    """
    md5 = hashlib.md5()
    try:
        # 'with' guarantees the handle is closed even on read errors
        # (the original leaked the handle on every exit path).
        with open(path, 'rb') as file:
            while True:
                data = file.read(block_size)
                if not data:
                    break
                md5.update(data)
    except IOError:
        print('File \'' + path + '\' not found!')
        return None
    except Exception:
        # Preserve the historical best-effort contract: any other failure
        # yields None instead of propagating (narrowed from a bare except).
        return None
    return md5.hexdigest()
d1c51ffd25509d4fcfc36b4c037a67cd4cb22d3e
125,246
def get_changed_pipeline_structure(existing_pipeline, data, is_input=True):
    """
    Get pipeline input/output type and fields if they changed.

    :param ubiops.PipelineVersion existing_pipeline: the current pipeline version object
    :param dict data: the pipeline input or output data containing:
        str input_type/output_type: e.g. plain
        list(PipelineInputFieldCreate) input_fields/output_fields:
            e.g. [PipelineInputFieldCreate(name=input1, data_type=int)]
    :param bool is_input: whether to use input_ or output_ prefix
    :return: dict with only the keys that differ from the existing pipeline
    """
    prefix = 'input' if is_input else 'output'
    type_key = prefix + '_type'
    type_fields = prefix + '_fields'
    changed_data = dict()

    if type_key in data and getattr(existing_pipeline, type_key) != data[type_key]:
        # Type changed: both type and fields are considered changed.
        changed_data[type_key] = data[type_key]
        changed_data[type_fields] = data[type_fields]
    elif type_fields in data and isinstance(data[type_fields], list):
        # Compare fields as {name: data_type} maps, ignoring order.
        existing_fields = {
            field.name: field.data_type
            for field in getattr(existing_pipeline, type_fields)
        }
        new_fields = {field.name: field.data_type for field in data[type_fields]}
        if existing_fields != new_fields:
            changed_data[type_fields] = data[type_fields]

    return changed_data
801ba92efcbf07cc7c9eae26db7cc2b0085f18a1
515,556
def check_box(iou, difficult, crowd, order, matched_ind, iou_threshold, mpolicy="greedy"):
    """
    Check box for tp/fp/ignore.

    Arguments:
        iou (torch.tensor): iou between predicted box and gt boxes.
        difficult (torch.tensor): difficult of gt boxes.
        crowd (torch.tensor): crowd flags of gt boxes.
        order (torch.tensor): sorted order of iou's.
        matched_ind (list): matched gt indexes.
        iou_threshold (float): iou threshold.
        mpolicy (str): box matching policy.
            greedy - greedy matching like VOC PASCAL.
            soft - soft matching like COCO.

    Returns:
        tuple: (label, gt index) where label is 'tp', 'fp' or 'ignore' and
        the index is -1 unless a true positive was found.
    """
    assert mpolicy in ["greedy", "soft"]
    if len(order):
        result = ('fp', -1)
        # greedy: only the single best candidate is considered;
        # soft: walk all candidates in the given order.
        n_check = 1 if mpolicy == "greedy" else len(order)
        for i in range(n_check):
            idx = order[i]
            if iou[idx] > iou_threshold:
                if not difficult[idx]:
                    if idx not in matched_ind:
                        # First unmatched, non-difficult gt above the
                        # threshold: a true positive.
                        result = ('tp', idx)
                        break
                    elif crowd[idx]:
                        # Already matched but a crowd region: ignore.
                        result = ('ignore', -1)
                        break
                    else:
                        # Already matched to another prediction; try next.
                        continue
                else:
                    # Difficult gt boxes are excluded from evaluation.
                    result = ('ignore', -1)
                    break
            else:
                # 'order' is documented as iou-sorted, so once a candidate
                # falls below the threshold no later one can pass.
                result = ('fp', -1)
                break
    else:
        # No ground-truth candidates at all: a false positive.
        result = ('fp', -1)
    return result
af80d9ae0b3ab910b4231d536e6f35d5e5e6c3a4
441,423
import itertools


def get_chemical_gene_combinations(_dict_):
    """
    Build all CHEMICAL-GENE pairs per PMID.

    Parameters
    ----------
    _dict_ : dictionary
        Dictionary with annotated entities. Keys: PMID, Values: another
        dictionary with keys 'chemicals' and 'genes'.

    Returns
    -------
    combinations : dictionary
        PMIDs as keys, all possible CHEMICAL-GENE combinations as values.
    NCOMB : int
        Total number of combinations over all PMIDs.
    """
    combinations = {}
    total = 0
    for pmid, entities in _dict_.items():
        pairs = list(itertools.product(entities['chemicals'], entities['genes']))
        combinations[pmid] = pairs
        total += len(pairs)
    return combinations, total
c87b842709125d1c174e89ba56ad3766bc915bce
314,759
def calc_t_duration(n_group, n_int, n_reset, t_frame, n_frame=1):
    """Calculate the exposure duration (as reported by APT).

    Parameters
    ----------
    n_group : int
        Groups per integration.
    n_int : int
        Integrations per exposure.
    n_reset : int
        Reset frames per integration.
    t_frame : float
        Frame time (in seconds).
    n_frame : int, optional
        Frames per group -- always one except brown dwarves.

    Returns
    -------
    t_duration : float
        Duration time (in seconds).
    """
    frames_per_integration = n_group * n_frame + n_reset
    return t_frame * frames_per_integration * n_int
4cd01d8dd0dde19c09112a1fd4f328266fcacb03
134,233
import statistics


def fitness(population, goal):
    """Measure fitness as the ratio of the population's mean attribute to *goal*."""
    return statistics.mean(population) / goal
d1024b87f5b2da4fc555874496db6709a51e9ea4
526,650
def get_defaults(env_id):
    """Return the dict of default training arguments for *env_id*.

    Flagrun environments get larger-scale settings (more envs, steps and
    minibatches); every other environment uses the standard settings.
    """
    is_flagrun = "Flagrun" in env_id
    return {
        "num-train-steps": 100e6 if is_flagrun else 50e6,
        "nenvs": 128 if is_flagrun else 32,
        "num-runner-steps": 512,
        "num-epochs": 15,
        "num-minibatches": 16 if is_flagrun else 4,
        "lr": 3e-4,
        "entropy-coef": 0.,
    }
a9ae0461b0f2c135d811a03b068cca1c23078c79
316,796
import re


def camelize(name):
    """Convert *name* into CamelCase.

    >>> camelize('underscore_name')
    'UnderscoreName'
    >>> camelize('AlreadyCamelCase')
    'AlreadyCamelCase'
    >>> camelize('')
    ''
    """
    # Uppercase the first character and every character after an
    # underscore, dropping the underscores themselves.
    return re.sub(r'(?:^|_)(.)', lambda match: match.group(1).upper(), name)
8d3e5dc5669ebce33801edc6b9dd2d6ecf871c11
237,663