Dataset columns: content: string (39 to 14.9k characters); sha1: string (40 characters); id: int64 (0 to 710k). Each record below is listed as content, sha1, id.
import time


def get_time_iso2unix(isotime):
    """Convert ISO8601 to UNIX timestamp"""
    return int(time.mktime(time.strptime(
        isotime, '%Y-%m-%dT%H:%M:%SZ'))) - time.timezone
f87ae5a5ea1fae1501d54a1ff1e1f71a5f2532c7
74,390
from datetime import datetime

import pytz


def timestamp_to_uct(timestamp: int) -> datetime:
    """ Converts a timestamp to a datetime in UTC """
    return datetime.fromtimestamp(timestamp, pytz.utc)
c00473514d776ac966030a7a29ec85e47f93ff90
74,396
def removeQuotes(s):
    """
    Remove quotation marks from an input string

    Args:
        s (str): input string that might have the quote "" characters

    Returns:
        str: a string without the quote characters
    """
    return ''.join(i for i in s if i != '"')
36e985a8bef4ac3d81c4572a6dffe1ead48ae6e9
74,397
def transform_ports(ports):
    """
    Passed a ports specification and return a list of port numbers for
    easy searching.

    Example: Ports specification "1-3,5,66" becomes list [1,2,3,5,66]
    """
    result = []
    ports = str(ports)
    for part in ports.split(','):
        if '-' in part:
            part_a, part_b = part.split('-')
            part_a, part_b = int(part_a), int(part_b)
            result.extend(range(part_a, part_b + 1))
        else:
            part_a = int(part)
            result.append(part_a)
    return result
eb896c7f79f4647590d0dc8b8bdae9780c3290ef
74,398
def _get_QButtonGroup(self):
    """
    Get a list of (index, checked) tuples for the buttons in the group
    """
    return [(nr, btn.isChecked()) for nr, btn in enumerate(self.buttons())]
5a72b39927010d834e9f7302e47a090ee86cfca8
74,399
def str_slice(str_: str, indices: list):
    """
    Split the string `str_` by breaks in list `indices`.

    Example:
    ----------
    >>> str_slice("abcaa", [2,4])
    ["ab", "ca", "a"]
    """
    indices.insert(0, 0)
    indices.append(len(str_))
    return [str_[indices[i]:indices[i+1]] for i in range(len(indices) - 1)]
bbe59b68f6d57a358dd5d9d1b9a8809f4d7d22bb
74,404
import re


def FixIncludeGuards(text, file_name):
    """Change include guard according to the standard."""
    # Remove a possible webrtc/ from the path.
    file_name = re.sub(r'(webrtc\/)(.+)', r'\2', file_name)
    new_guard = 'WEBRTC_' + file_name
    new_guard = new_guard.upper()
    new_guard = re.sub(r'([/\.])', r'_', new_guard)
    new_guard += '_'
    text = re.sub(r'#ifndef WEBRTC_.+\n', r'#ifndef ' + new_guard + '\n',
                  text, 1)
    text = re.sub(r'#define WEBRTC_.+\n', r'#define ' + new_guard + '\n',
                  text, 1)
    text = re.sub(r'#endif *\/\/ *WEBRTC_.+\n',
                  r'#endif // ' + new_guard + '\n', text, 1)
    return text
9666232c851356b7a4ad001819ff9ca26a828833
74,408
def overlaps(when, spans):
    """
    Checks an overlap of a datetime with list of datetime spans.
    """
    for start, stop in spans:
        if start <= when <= stop:
            return True
    return False
58a5b09e093224ae1d67257f00986c43a3c1f63c
74,412
def to_int(string: str) -> int:
    """Convert a string to an integer."""
    return int(string)
ba2b12a027380c2be6cc8c0db71751dc4f58b67e
74,416
def GetAllXmlTags(root_element):
    """Gets a list of all tags (without XML namespaces) in an XML tree."""
    return [element.tag.split('}')[-1]
            for element in root_element.getiterator()]
fe6df143ca51deb2dd8a216ba23b8221c0d29c2e
74,420
def binary_search(sequence, item, start=0, end=None):
    """
    Returns the index of item in sequence if it exists, otherwise returns
    the index it _would_ take if inserted.

    Searches between start (inclusive) and end (exclusive), which default
    to 0 and len(sequence) respectively.
    """
    if end is None:
        end = len(sequence)
    while start < end:
        mid = (start + end) // 2
        if sequence[mid] > item:
            end = mid  # Remember, end is exclusive
        elif sequence[mid] < item:
            start = mid + 1
        else:
            return mid
    return start
f1c28351f215a14c34c656094f4bec899a928b04
74,422
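Illustrative usage for the binary_search row above (added check with hypothetical values, not part of the dataset record):

    data = [1, 3, 5, 7, 9]
    binary_search(data, 7)   # -> 3 (exact match)
    binary_search(data, 6)   # -> 3 (insertion index that keeps the list sorted)
    binary_search(data, 10)  # -> 5 (would append at the end)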
def car(obj):
    """
    Alias of ``obj[0]``.

    >>> car(loads('(a . b)'))
    Symbol('a')
    >>> car(loads('(a b)'))
    Symbol('a')
    """
    return obj[0]
baa77d555d9ea42cf450ece7a6d052a057774250
74,425
def gamma_get_shape_scale(mean, stdev):
    """
    Getting gamma distribution shape and scale

    Parameters
    ----------
    mean (float): mean of the gamma distribution
    stdev (float): standard deviation of the gamma distribution

    Returns
    ----------
    shape (float): shape of the gamma distribution
    scale (float): scale of the gamma distribution
    """
    shape = (mean**2)/(stdev**2)
    scale = (stdev**2)/mean
    return shape, scale
dd41653c56bd566450195f23725bab84016ffc20
74,426
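A quick consistency check for gamma_get_shape_scale above (added example with hypothetical numbers): with mean=4 and stdev=2, shape = 16/4 = 4 and scale = 4/4 = 1, so shape*scale recovers the mean and shape*scale**2 the variance.

    shape, scale = gamma_get_shape_scale(4.0, 2.0)  # -> (4.0, 1.0)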
def invert_indices(indices, size):
    """
    Returns the list of indices that are not in the given list, up to the given size.

    :param indices: The list of indices to invert.
    :param size: The size of the indexed list.
    :return: A list of indices.
    """
    index_set = set(indices)
    return [i for i in range(size) if i not in index_set]
92314dedf05064d1e983e27cf944ef885ab5d5b2
74,427
from typing import Tuple


def get_product_section_names() -> Tuple[str, ...]:
    """Get the list of product section names, in their correct order."""
    return (
        'manufacturer',
        'product_name',
        'part_number',
        'product_version',
        'serial_number',
        'asset_tag',
        'fru_file_id',
    )
f69d536134f212e95c18236670885fb07aef5242
74,429
def typeName(ty):
    """
    Return the name of a type, e.g.:
        typeName(int)       => 'int'
        typeName(Foo)       => 'Foo'
        typeName((int,str)) => 'int or str'

    @param ty [type|tuple of type]
    @return [str]
    """
    if isinstance(ty, tuple):
        return " or ".join(t.__name__ for t in ty)
    else:
        return ty.__name__
e1af991a1ae75847da8edee86eb688ccee525e4b
74,443
def ack(m, n):
    """Computes the Ackermann function A(m, n)

    See http://en.wikipedia.org/wiki/Ackermann_function

    n, m: non-negative integers
    """
    if m == 0:
        return n + 1
    if (m > 0) and (n == 0):
        return ack(m - 1, 1)
    if (m > 0) and (n > 0):
        return ack((m - 1), ack(m, (n - 1)))
    return None
ee30584356c7825c1a56b9821a9bef9a69e87b65
74,444
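Small sanity values for the Ackermann snippet above (added note; these are the well-known results, and the recursion depth grows explosively, so keep m small):

    ack(0, 0)  # -> 1
    ack(1, 2)  # -> 4
    ack(2, 2)  # -> 7
    ack(3, 3)  # -> 61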
def first_non_repeating_letter(string):
    """
    Finds and returns the first non repeating character inside a string.

    :param string: a string value.
    :return: the first character that is not repeated anywhere in the string.
    """
    return next((x for x in string if string.lower().count(x.lower()) == 1), "")
2ea5fa2197a5e492c360709ac2e281ef33d1efc3
74,445
import json


def is_valid_json(string):
    """
    Determines if a string is valid JSON

    :param string: The string to be tested
    :return: boolean representing if the string is valid JSON
    """
    try:
        json.loads(string)
    except ValueError:
        return False
    except TypeError:
        return False
    return True
ce97917e5c7407b68a1b6c7c9541242b664c3bef
74,450
def different_layers(x, nmon):
    """
    Test if a pair of monomers are in separate layers (based on initial
    configuration build procedure)

    :param x: length 2 list of monomer numbers (calculated using
        hbonds.System.number_residues)
    :param nmon: number of monomers per layer (int)

    :return: True / False (bool)
    """
    different = False
    layer = [a // nmon for a in x]
    if layer[0] != layer[1]:
        different = True
    return different
4b8e92c30d36218e25ae546c3e284f3783899fa7
74,452
def constantly(value):
    """
    Creates a function that returns the same value that is used as the
    argument of ``constantly``.

    >>> import function as f
    >>> std_gravity = f.constantly(9.81)  # m/s2
    >>> std_gravity(2)
    9.81
    """
    def f(x):
        return value
    return f
d6c201a722bc34b87506a8a0a38c0137e0ad556e
74,453
def citep(body: str):
    r""" Change \cite to \citep """
    body = body.replace(r'\cite', r'\citep')
    return body
b8b80d0ee225ff81096dab3e51d99afa8b63de03
74,454
import textwrap


def wrap_text(text_string):
    """Split a text string into multiple lines to improve legibility
    """
    # First, check to see if the text was already formatted. We do
    # this by trying to split the text string into multiple lines
    # based on newlines contained in the string.
    lines = text_string.splitlines()
    if len(lines) > 1:
        # Already formatted
        return lines

    # Not already formatted. Wrap it ourselves.
    return textwrap.wrap(text_string)
a2ed14a294e6e17b17b31e51ee6b42a217f6235c
74,455
def _get_queryset(klass):
    """Return a QuerySet or a Manager."""
    # If it is a model class or anything else with ._default_manager
    if hasattr(klass, '_default_manager'):
        return klass._default_manager.all()
    return klass
aac7b5f64aa1a0e7ca569c826e1203e4a241dcea
74,456
def get_percent_held(context, security, portfolio_value):
    """
    This calculates the percentage of each security that we currently
    hold in the portfolio.
    """
    if security in context.portfolio.positions:
        position = context.portfolio.positions[security]
        value_held = position.last_sale_price * position.amount
        percent_held = value_held / float(portfolio_value)
        return percent_held
    else:
        # If we don't hold any positions, return 0%
        return 0.0
7685952de6ed579deb527c6b24fa9e9577d687b7
74,458
import logging


def skip(f):
    """
    Decorator which can be used to skip unit tests
    """
    def g(self):
        logging.warning('skipping test %s' % f.__name__)
    return g
5b0a78407e9edc5a1eb3dde847763e3813dde564
74,459
def get_intersect_point(a1, b1, c1, d1, x0, y0):
    """Get the point on the line that passes through (a1,b1) and (c1,d1)
    and is closest to the point (x0,y0)."""
    a = 0
    if (a1 - c1) != 0:
        a = float(b1 - d1) / float(a1 - c1)
    c = b1 - a * a1
    # Compute the line perpendicular to the line of the OF vector that
    # passes through (x0,y0)
    a_aux = 0
    if a != 0:
        a_aux = -1 / a
    c_aux = y0 - a_aux * x0
    # Get intersection of the two lines
    x1 = (c_aux - c) / (a - a_aux)
    y1 = a_aux * x1 + c_aux
    return (x1, y1)
9fc6caa8719b14cd61ec70c03fff25d796eb2596
74,460
def calci(MVA, kV):
    """ Calculates three phase I in amps from MVA and kV"""
    return round(MVA / (kV * 3**0.5) * 1000, 2)
f0d5a35cb81ba97fa02eeabafbd96a63bd58cf55
74,461
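A worked example for the three-phase current helper above (added note with hypothetical inputs): I = S / (sqrt(3) * V_LL), so 100 MVA at 13.8 kV gives roughly 100 / (13.8 * 1.732) * 1000, about 4183.7 A.

    calci(100, 13.8)  # -> 4183.7 (amps, rounded to 2 decimals)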
def tc_url(api=''):
    """Return TagCollector URL for given API name"""
    return 'https://cmssdt.cern.ch/tc/%s' % api
d466803115375aa08507b70e65bdfd8101bc6e37
74,466
from typing import Union
from typing import Tuple
from typing import List


def shape_to_tuple(shape: Union[int, Tuple[int], List[int]]):
    """
    Returns a tuple representation of the input shape.

    :param shape: The shape input to convert to tuple representation.
    :return: The shape in tuple representation
    """
    if isinstance(shape, int):
        return (shape,)
    else:
        return tuple(shape)
1bd20cae8c1fc05be6a63e79dadbdb82aff1382b
74,467
def _calc_prandtl(coolant_obj):
    """Calculate the coolant Prandtl number"""
    return (coolant_obj.heat_capacity * coolant_obj.viscosity
            / coolant_obj.thermal_conductivity)
dddaaa015fc564e1b565cc97d6d24c41611539c3
74,471
def get_id(step_type, build):
    """Returns a unique step id based on |step_type| and |build|. Useful for
    parallelizing builds."""
    return (f'{step_type}-{build.fuzzing_engine}-{build.sanitizer}'
            f'-{build.architecture}')
5be3a5ba95755d125f15e637a855c3bfd32adb39
74,475
def split_gates(inputs: int) -> tuple[int, int]:
    """Splits the number of inputs across the left and right of the karnaugh map."""
    left = inputs // 2  # Larger for odd
    top = inputs - left
    return left, top
76e5eeae7e50b075eced8753da44c229e2b87578
74,478
from typing import Optional
from typing import List

from pathlib import Path


def to_pathlib_path(path: Optional[List[str]],
                    default: Optional[List[Path]] = None) -> List[Path]:
    """Normalize optional ``path`` argument of tasks into ``List[Path]``."""
    if not path:
        return default if default else []
    return list(map(Path, path))
d3c1b3a17ca10342be5df2305c009107a170ec34
74,489
import re


def get_pattern_from_attr_permissions_dict(attr_permissions):
    """
    Construct a compiled regular expression from a permissions dict containing
    a list of what to include and exclude.

    Will be used in ObjWrapper if permissible_attr_pattern is a dict.

    Note that the function enforces certain patterns (like inclusions ending
    with $ unless they end with *, etc.).
    What is not checked for is that the "." was meant, or if it was "\\." that
    was meant. This shouldn't be a problem in most cases, and hey! It's up to
    the user to know regular expressions!

    :param attr_permissions: A dict of the format
        {'include': INCLUSION_LIST, 'exclude': EXCLUSION_LIST}.
        Both 'include' and 'exclude' are optional, and their lists can be empty.
    :return: a re.compile object

    >>> attr_permissions = {
    ...     'include': ['i.want.this', 'he.wants.that'],
    ...     'exclude': ['i.want', 'he.wants', 'and.definitely.not.this']
    ... }
    >>> r = get_pattern_from_attr_permissions_dict(attr_permissions)
    >>> test = ['i.want.this', 'i.want.this.too', 'he.wants.that', 'he.wants.that.other.thing',
    ...         'i.want.ice.cream', 'he.wants.me'
    ...        ]
    >>> for t in test:
    ...     print("{}: {}".format(t, bool(r.match(t))))
    i.want.this: True
    i.want.this.too: False
    he.wants.that: True
    he.wants.that.other.thing: False
    i.want.ice.cream: False
    he.wants.me: False
    """
    s = ""

    # process inclusions
    corrected_list = []
    for include in attr_permissions.get('include', []):
        if not include.endswith('*'):
            if not include.endswith('$'):
                include += '$'
        else:  # ends with "*"
            if include.endswith(r'\.*'):
                # assume that's not what the user meant, so change
                include = include[:-3] + '.*'
            elif include[-2] != '.':
                # assume that's not what the user meant, so change
                include = include[:-1] + '.*'
        corrected_list.append(include)
    s += '|'.join(corrected_list)

    # process exclusions
    corrected_list = []
    for exclude in attr_permissions.get('exclude', []):
        if not exclude.endswith('$') and not exclude.endswith('*'):
            # add to exclude all subpaths if not explicitly ending with "$"
            exclude += '.*'
        else:  # ends with "*"
            if exclude.endswith(r'\.*'):
                # assume that's not what the user meant, so change
                exclude = exclude[:-3] + '.*'
            elif exclude[-2] != '.':
                # assume that's not what the user meant, so change
                exclude = exclude[:-1] + '.*'
        corrected_list.append(exclude)
    if corrected_list:
        s += '(?!' + '|'.join(corrected_list) + ')'

    return re.compile(s)
e44aa81d3fca49be24a50b7b81b375af22530c8d
74,491
import hashlib


def get_sign_key(exported_session_key, magic_constant):
    """
    3.4.5.2 SIGNKEY

    @param exported_session_key: A 128-bit session key used to derive signing
        and sealing keys
    @param magic_constant: A constant value set in the MS-NLMP documentation
        (constants.SignSealConstants)
    @return sign_key: Key used to sign messages
    """
    sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
    return sign_key
8d16a6ae96f7c07b150baca3b69a8414d46f6120
74,494
def get_rule_group(client, rule_group_name):
    """Returns GET response for AWS Networking rule group"""
    response = client.describe_rule_group(
        RuleGroupName=rule_group_name,
        Type="STATELESS"
    )
    return response
603daa4e5aef10f773211dfb13ff4f13000b3138
74,495
def is_non_zero_arabic_numeral(string):
    """
    True if string is a non zero natural Arabic Numeral less than or equal
    to 3899 (max Roman Numeral), False otherwise.

    PARAMETERS:
        string : str

    RETURNS:
        bool
    """
    # Is comprised only of digits (not a float) and is not only zero(es)
    return string.isdigit() and int(string) != 0 and int(string) <= 3899
bcb22704dbb1ddc0d49b3a6d227f6fb72df3a91e
74,497
def build_anti_testset_memory_managed(self, user_id, fill=None):
    """Return a list of ratings that can be used as a testset in the
    :meth:`test() <surprise.prediction_algorithms.algo_base.AlgoBase.test>`
    method.

    The ratings are all the ratings that are **not** in the trainset, i.e.
    all the ratings :math:`r_{ui}` where the user :math:`u` is known, the
    item :math:`i` is known, but the rating :math:`r_{ui}` is not in the
    trainset. As :math:`r_{ui}` is unknown, it is either replaced by the
    :code:`fill` value or assumed to be equal to the mean of all ratings
    :meth:`global_mean <surprise.Trainset.global_mean>`.

    Optimised to work on very large datasets (>2GB)

    Args:
        fill(float): The value to fill unknown ratings. If :code:`None` the
            global mean of all ratings :meth:`global_mean
            <surprise.Trainset.global_mean>` will be used.
        user_id(numeric): The desired user whose predictions are to be
            calculated.

    Returns:
        A list of tuples ``(uid, iid, fill)`` where ids are raw ids.
    """
    fill = self.global_mean if fill is None else float(fill)
    user_id = self.to_inner_uid(int(user_id))

    anti_testset = []
    user_items = set([j for (j, _) in self.ur[user_id]])
    anti_testset += [(self.to_raw_uid(user_id), self.to_raw_iid(i), fill)
                     for i in self.all_items()
                     if i not in user_items]
    return anti_testset
5efe9d7d697c7557ecb72a69acfc311ad4fff4cc
74,498
import binascii
import struct


def mac_string_to_ints(mac_string):
    """Returns a list of ints from standard mac address notation"""
    split_addr = mac_string.split(':')[::-1]  # split by :, then reverse byte order
    addr_bytes = [binascii.unhexlify(n) for n in split_addr]  # change to bytes
    addr_ints = [struct.unpack("B", n)[0] for n in addr_bytes]  # change to ints
    return addr_ints
3e28c176721a0cb995df35233abc0f77a6e0f192
74,499
def sort_wc(w_c, sort_key):
    """Sorts the dictionary and returns sorted dictionary

    Args:
        dictionary, 0 or 1
        0 - sort by key
        1 - sort by value

    Return sorted dictionary
    """
    sorted_w_c = {}
    # sorted is a built in function and returns a sorted list
    # if sort_key is 1 - sort on value in the dictionary
    # if sort_key is 0 - sort on key in the dictionary
    if sort_key == 1:
        sorted_list = sorted(w_c, key=w_c.get, reverse=True)
    else:
        sorted_list = sorted(w_c, reverse=True)

    # build the sorted dictionary
    for word in sorted_list:
        sorted_w_c[word] = w_c[word]

    return sorted_w_c
727714b0d32d894272037f98221d52d9bc180b53
74,500
def city_country(city, country):
    """Return a combined string to show city and country"""
    return f" {city.title()}, {country.title()}"
5f7a8a20ac5e40b789fb891afcda787b3e6ec2ee
74,505
def _expSum(x, iterations=1000):
    """Calculate e^x using power series.

    exp x := Sum_{k = 0}^{inf} x^k/k!
           = 1 + x + x^2/2! + x^3/3! + x^4/4! + ...

    Which can be rewritten as:
           = 1 + ((x/1)(1 + (x/2)(1 + (x/4)(...) ) ) )

    This second way of writing it is easier to calculate than the first one
    as it does not need to directly calculate the factorial of each term.

    Arguments:
        iterations: Times to iterate over the exponential power series.
            The minimum valid number is 1 (one), and it'll return the
            equivalent to perform 1 + x.
            Default is 1000 as it does not take much time to complete (even
            for big numbers such as e^500) and going beyond that does not
            make a significant difference. e^500, e^2, e^50, and some other
            tried examples get the same number up to 14 decimal places using
            1000 (the default) and 1000000 (the default value squared)
            iterations.

    Returns:
        Floating point number.

    Raises:
        ArithmeticError: When trying to iterate less than one time.
    """
    if not isinstance(x, (int, float)):
        raise ArithmeticError('Please provide an int or float.')

    if (iterations < 1):
        raise ArithmeticError('At least one iteration needed to calculate e^x')

    # e^0 = 1
    if (x == 0):
        return float(1.0)

    isNegative = False
    # The algorithm always calculates e^x (x > 0) and then divides 1 by the
    # result if x < 0. This avoids missing extra precision due to floating
    # point.
    if (x < 0):
        isNegative = True
        x *= -1

    result = float(1.0)
    for num in range(iterations, 0, -1):
        # In the power series: = 1 + ((x/1)(1 + (x/2) (1 + (x/4) (...) ) ) )
        # x/num is the same as (x/4), or (x/2), (x/1); result is the rightmost
        # part of the series, which has been already calculated.
        result = 1 + ((x * result)/num)

    if isNegative:
        result = float(1/result)

    return float(result)
42b51ed8ac9e2092553f9390b4035958b7e3c39d
74,507
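A brief check of the power-series exponential above against the standard library (added note; tolerance chosen as a hypothetical example):

    import math
    abs(_expSum(1.0) - math.e) < 1e-12         # -> True
    abs(_expSum(-2.0) - math.exp(-2)) < 1e-12  # -> True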
import functools
import threading


def thread_it(func):
    """A wrapper function to run func in a daemon thread.

    Args:
        func (function): The function to run in a thread

    Returns:
        function: the wrapped function.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.daemon = True
        thread.start()
        return thread  # returning to support unit testing
    return wrapper
d8760db04bb1afe4e7f9963efe64024dbd2e7752
74,509
def get_biggest_product(adjacent_digits=13):
    """Get the biggest product of n adjacent digits in the following number."""
    number = "\
73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450"
    number = number.replace(" ", "").replace("\n", "")

    biggest_product = 0
    # Slide a window over every run of `adjacent_digits` digits.
    for i in range(len(number) - adjacent_digits + 1):
        current_product = 1
        for j in range(i, i + adjacent_digits):
            current_digit = number[j]
            current_product *= int(current_digit)
        if current_product > biggest_product:
            biggest_product = current_product
    return biggest_product
d10c3485d20262246d1756936b94263f33df81b0
74,510
def _chk(resval):
    """Check result of command."""
    if resval[0] != 'OK':
        raise Exception(resval[1])
    return resval[1]
9ba56f760a4b25874c62f44301856d77b786c778
74,514
def clean(df, GPS=False, elevation=False, TEC=False, VTEC=False, locktime=False):
    """Clean

    Removes erroneous values of VTEC

    Args:
        df (dataframe): Master dataframe containing TEC measurements
        GPS (bool): (default: False) If True, only GPS satellite data is
            included and vice-versa
        elevation (bool): (default: False) If True, only data for which
            elevation is greater than 30 degrees is included
        TEC (bool): (default: False) If True, only data with positive TEC
            values are included and vice-versa
        VTEC (bool): (default: False) If True, only data with positive VTEC
            values are included and vice-versa
        locktime (bool): (default: False) If True, only data with locktime
            greater than 3 minutes are included and vice-versa
    """
    if elevation == True:
        df = df[df['elevation'] > 30]
    if TEC == True:
        df = df[df['TEC'] > 0]
    if VTEC == True:
        df = df[df['VTEC'] > 0]
    if locktime == True:
        df = df[df['locktime'] > 180]
    if GPS == True:
        df = df[(df['SVID'] >= 1) & (df['SVID'] <= 37)]
    return df
d00e3b7857c06e84d360e94c87f40e29a0ee7b71
74,516
def get_description(package):
    """Return a default package description."""
    return f"This is package '{package}' generated automatically by pyp2spec."
cca40e871092cad3bc7fdeac4718fbe82224d6b8
74,519
import torch


def to_alpha(x):
    """Return the alpha channel of an image."""
    return torch.clamp(x[..., 3:4], 0.0, 1.0)
c015e76e01d8e87d7582be96eca8b42cdc4f8c71
74,521
def airEntryPotential(theta_s):
    """
    Parameters
    ----------
    theta_s : float
        Soil water content at saturation (m3/m3)

    Returns
    -------
    float
        The air entry potential (cm)
        Is also called bubbling pressure of the soil
        Source: Saxton (1986)
        DOI: 10.2136/sssaj1986.03615995005000040039x
    """
    return 100.0 * (-0.108 + 0.341 * theta_s)
8a85e35cbcdc2034569def7ef535b0bd7dbbce4b
74,522
def generate_body(fields: dict = {}, custom_fields: dict = {}) -> dict:
    """Generates a body from fields and custom fields.

    Args:
        fields: fields data.
        custom_fields: custom fields data.

    Returns:
        body object for SNOW requests.
    """
    body = {}
    if fields:
        for field in fields:
            body[field] = fields[field]
    if custom_fields:
        for field in custom_fields:
            # custom fields begin with "u_"
            if field.startswith('u_'):
                body[field] = custom_fields[field]
            else:
                body['u_' + field] = custom_fields[field]
    return body
477fdb52ba7c60bfbbc2ba9c88b8bbd71e528437
74,524
def get_region_of_all_exchanges(scenario: dict) -> dict:
    """Returns {ID: region_name, ...} map for `EnergyExchange` in given `scenario`.
    If no region is found, the string `Region` is applied"""
    try:
        exchanges = [
            {exchange["Id"]: exchange["Attributes"]["Region"]}
            for exchange in scenario["Agents"]
            if exchange["Type"] == "EnergyExchange"
        ]
    except KeyError:
        exchanges = [
            {exchange["Id"]: "Region"}
            for exchange in scenario["Agents"]
            if exchange["Type"] == "EnergyExchange"
        ]
    output = {}
    for exchange in exchanges:
        output.update(exchange)
    return output
475e7ef769c28fa9ad8653460ece3f0150dda244
74,526
import hashlib


def md5sum(s):
    """MD5s the given string of data and returns the hexdigest.
    If you want the md5 of a file, call md5sum(open(fname).read())"""
    return hashlib.md5(s).hexdigest().lower()
2358b016901cf5cb9c27b1992344d5baba588660
74,527
def get_urls(status):
    """Return the expanded (non-media) URLs in a tweet"""
    # Check if the tweet has entities with expanded urls
    try:
        urls = [format(url['expanded_url']) for url in status.entities['urls']]
    except AttributeError:
        urls = []
    return urls
a555bf8ddd5f6fb9d3e9566e41e79aa12c9e2b21
74,535
def calc_el_eff_with_p_el(el_power):
    """
    Returns electric efficiency of CHP (input: electric power in W)

    Parameters
    ----------
    el_power : float
        Electric output power in W

    Returns
    ------
    el_eff : float
        Electric efficiency (without unit)
    """
    assert el_power >= 0

    # Asue classes of electric nominal power
    if el_power <= 10*1000:  # Factor 1000 to convert kW into W
        el_eff = 0.21794*(el_power/1000)**0.108
    elif el_power <= 100*1000:
        el_eff = 0.2256*(el_power/1000)**0.1032
    elif el_power <= 1000*1000:
        el_eff = 0.25416*(el_power/1000)**0.0732
    else:  # Larger than 1000 kW el. power
        el_eff = 0.29627*(el_power/1000)**0.0498

    assert el_eff >= 0
    assert el_eff <= 1

    return el_eff
ab0dfbc994612fd5968b836bd2ea9db57ab82a5d
74,536
def r_size(x):
    """Returns the difference between the largest and smallest value in a collection."""
    return max(x) - min(x)
a0f6e3d616490edf0a205fba2f0e239fea6688bc
74,537
def check_index_exists(indexname, es):
    """Check if index in Elasticsearch."""
    if es.indices.exists(index=indexname):
        return True
    return False
02b926ee3fd5e2348ab51e68ad8949996d7f0a15
74,538
def precipitable_water(temp, vap_press):
    """
    Calculate precipitable water after Prata (1996).

    Parameters
    ----------
    temp : numeric
        Air temperature (K).
    vap_press : numeric
        Vapor pressure (Pa).

    Returns
    -------
    precipitable_water : numeric
        Precipitable water (kg m-2).

    References
    ----------
    .. [1] Prata, A. J. (1996). A new long-wave formula for estimating
       downward clear-sky radiation at the surface. Quarterly Journal of the
       Royal Meteorological Society, 122(533), 1127–1151.
       doi:10.1002/qj.49712253306
    """
    vap_press_hpa = vap_press / 100
    u = 46.5 * vap_press_hpa / temp  # in g cm-2
    return u * (1e-3 * 1e4)
bc7a809dbc40e1dd71d2851e8ac8aee9a3b3113b
74,541
def translate_path(sysroot, path):
    """Remove the SYSROOT prefix from paths that have it"""
    if path.startswith(sysroot):
        return path[len(sysroot):]
    return path
8860490023b5b0bb60ff3d4715d0a4b340f1182f
74,555
import six


def convert_recursive_helper(converter, data):
    """
    Given JSON-like data (a nested collection of lists or arrays), which may
    include Action tuples, replaces all primitive values with converter(value).
    It should be used as follows:

        def my_convert(data):
            if data needs conversion:
                return converted_value
            return convert_recursive_helper(my_convert, data)
    """
    if isinstance(data, dict):
        return {converter(k): converter(v) for k, v in six.iteritems(data)}
    elif isinstance(data, list):
        return [converter(el) for el in data]
    elif isinstance(data, tuple):
        return type(data)(*[converter(el) for el in data])
    else:
        return data
eb8e759affa0125d8f0dde598992d6e0caf714d6
74,558
def construct_num_check(token_list):
    """Construct num_check list to classify each item in token_list.

    At matching indices of token_list, elements of num_check are given either
    True(numeric value), False(string) or one of shorthand_keys.

    shorthand_keys:
    - R: Repetition. Ex) '2 4R' = '2 2 2 2 2'
    - I: Linear interpolation. Ex) '1 4I 6' = '1 2 3 4 5 6'
    - ILOG: Logarithmic interpolation. Ex) '0.01 2ILOG 10' = '0.01 0.1 1 10'
    - M: Multiplication. Ex) '1 1 2M 2M 2M 2M 4M 2M' = '1 1 2 4 8 16 64 128'

    Example:
        token_list = ['63-', ' ', 'mat', '5500', '4i', '5505', 'omit', '-1']
        -> num_check = [False, False, False, True, 'i', True, False, True]

    Arguments:
        token_list (list)[-]: A list of tokens for which numeric values are
            checked.

    Returns:
        num_check (list)[-]: A list of classifications for items of
            token_list.
    """
    # Shorthands for MCNP input entry.
    shorthand_keys = ['ilog', 'r', 'i', 'm']
    num_check = []
    for item in token_list:
        try:
            # Check if an item is a number.
            float(item)
            num_check.append(True)
        except:
            # Check if an item includes one of shorthands (mutually exclusive).
            key_check = False
            for key in shorthand_keys:
                try:
                    float(item[:-len(key)])
                    # Check if an item is a shorthand.
                    if item[-len(key):] == key:
                        key_check = key
                except:
                    if item == key:
                        key_check = key
                    else:
                        continue
            # Map key_check to an element of num_check.
            num_check.append(key_check)
    return num_check
d956043c3714a8e91272f9f1228fc1f46f1d5ee8
74,561
def convert_prices(price):
    """Convert price string to int."""
    return int(price.replace("$", "").replace(",", ""))
d02036be9ffeb59145e0ae1004915ea624be95d1
74,568
def _extract_pipeline_of_pvalueish(pvalueish):
    """Extracts the pipeline that the given pvalueish belongs to."""
    if isinstance(pvalueish, tuple):
        pvalue = pvalueish[0]
    elif isinstance(pvalueish, dict):
        pvalue = next(iter(pvalueish.values()))
    else:
        pvalue = pvalueish
    if hasattr(pvalue, 'pipeline'):
        return pvalue.pipeline
    return None
9d9c7e1f007d9986feeb428047a6ae4a15fc4cae
74,569
def pad_dscrp(in_str, out_len=67, pad_char='-'):
    """Pad DESCRIP with dashes until required length is met.

    Parameters
    ----------
    in_str : string
        String to pad.

    out_len : int, optional
        The required length. CDBS default is 67 char.

    pad_char : char, optional
        Char to pad with. CDBS default is '-'.

    Returns
    -------
    out_str : string
        Padded string.
    """
    sz_str = len(in_str)

    if sz_str > out_len:  # truncate
        out_str = in_str[:out_len]
    elif sz_str < out_len:  # pad
        out_str = in_str + pad_char * (out_len - sz_str)
    else:  # no change
        out_str = in_str

    return out_str
20c7bf143e0b6d774cf7eb40cb201783325aaadf
74,570
import requests


def get_all_series(cfg):
    """
    Request and return all series in given Opencast organization.

    :param cfg: Opencast configuration
    :return: List of seriesId and title pairs
    """
    url = cfg['uri'] + "/api/series"
    params = {"sort": "title:ASC", "offset": 0, "limit": 10000}
    offset = 0
    result = []
    while True:
        params["offset"] = offset
        r = requests.get(url=url, params=params,
                         auth=(cfg['user'], cfg['password']))
        if not r.json():
            break
        offset += 10000
        x = r.json()
        for elem in x:
            result.append((elem['identifier'], elem['title']))
    return result
57966ade6feecf397e1ad31b3b96e0ef81ffee67
74,573
import re


def _ppddl_tokenize(ppddl_txt):
    """Break PPDDL into tokens (brackets, non-bracket chunks)"""
    # strip comments
    lines = ppddl_txt.splitlines()
    mod_lines = []
    for line in lines:
        try:
            semi_idx = line.index(';')
        except ValueError:
            pass
        else:
            line = line[:semi_idx]
        mod_lines.append(line)
    ppddl_txt = '\n'.join(mod_lines)
    # convert to lower case
    ppddl_txt = ppddl_txt.lower()
    matches = re.findall(r'\(|\)|[^\s\(\)]+', ppddl_txt)
    return matches
51b9feace02ac28cb65272ba86c481f7d0f75f92
74,581
from typing import Dict


def update_dict(dic: Dict, key: str, new_v: int) -> Dict:
    """update nested dictionary key value with new_value

    Args:
        dic ([dict]): a nested dictionary with variant length
        key ([str]): key at any depth
        new_v ([int]): new value for same key of dic

    Returns:
        [dict]: dictionary with new value
    """
    for k, val in dic.items():
        if isinstance(val, dict):
            update_dict(val, key, new_v)
        if k == key:
            dic[k] = new_v
    return dic
bdbaa81a2884c3c99f32994633c196cf010b824f
74,583
import uuid


def _multipartBody(*parts):
    """
    Builds a multipart/form-data (RFC 2388) document out of a list of
    constituent parts. Each part is either a 2-tuple (name, value) or a
    4-tuple (name, filename, contentType, value). Returns a tuple
    (document, boundary).
    """
    while True:
        boundary = "BOUNDARY_%s" % uuid.uuid1().hex
        collision = False
        for p in parts:
            for e in p:
                if boundary in e:
                    collision = True
        if not collision:
            break
    body = []
    for p in parts:
        body.append("--" + boundary)
        if len(p) == 2:
            body.append("Content-Disposition: form-data; name=\"%s\"" % p[0])
            body.append("")
            body.append(p[1])
        else:
            body.append(("Content-Disposition: form-data; name=\"%s\"; " +
                         "filename=\"%s\"") % (p[0], p[1]))
            body.append("Content-Type: " + p[2])
            body.append("")
            body.append(p[3])
    body.append("--%s--" % boundary)
    return ("\r\n".join(body), boundary)
acebcbd8de454317a894982259de8286fbf7b3f9
74,585
from typing import List

import re


def find_url(url_raw: List[str]) -> List[str]:
    """Function to find all URL matching a specific regex in a list of URL.

    Here, this function will fetch all URL beginning with:
    "https://geodatamine.fr/dump/" and continuing with
    "t3xt-4nd-numb3rs.text". This allows to fetch the direct URL of the
    files to download.

    Args:
        url_raw (str): The list of URL from which to find a corresponding URL.

    Returns:
        List[str]: The list of URL corresponding to the given format.
    """
    regex = re.compile(r"^https:\/\/geodatamine\.fr\/dump\/[\w-]+geojson\.[A-Za-z]+$")
    url = [str(link) for link in url_raw if regex.match(str(link))]
    return url
2f38f161ccb11c5cfde08dd11a3d3c08ae84c516
74,588
import torch


def transform_u(logits, driver_matrix, budget):
    """
    A function that transforms RL logits to valid controls.

    :param logits: RL action logits
    :param driver_matrix: driver matrix for control selection
    :param budget: total budget available
    """
    logits = torch.matmul(driver_matrix.unsqueeze(0),
                          logits.unsqueeze(-1)).squeeze(-1)
    u = torch.nn.functional.softmax(logits, dim=-1) \
        * budget \
        * driver_matrix.sum(-1)
    return u
c7792a2663f4e229fd358ef482a26f04be389d07
74,589
import itertools


def cycle_over_colors(range_zero_one=False):
    """Returns a generator that cycles over a list of nice colors, indefinitely."""
    colors = ((0.12156862745098039, 0.46666666666666667, 0.70588235294117652),
              (1.0, 0.49803921568627452, 0.054901960784313725),
              (0.17254901960784313, 0.62745098039215685, 0.17254901960784313),
              (0.83921568627450982, 0.15294117647058825, 0.15686274509803921),
              (0.58039215686274515, 0.40392156862745099, 0.74117647058823533),
              (0.5490196078431373, 0.33725490196078434, 0.29411764705882354),
              (0.8901960784313725, 0.46666666666666667, 0.76078431372549016),
              (0.49803921568627452, 0.49803921568627452, 0.49803921568627452),
              (0.73725490196078436, 0.74117647058823533, 0.13333333333333333),
              (0.090196078431372548, 0.74509803921568629, 0.81176470588235294))

    if not range_zero_one:
        colors = [[c * 255 for c in color] for color in colors]

    return itertools.cycle(colors)
7adf7445e6b34a50942f78557200c225a57f4081
74,591
def adapt_hbond(hbond, res_list):
    """
    Transforms hbond to a list of tuple (#residue, neighbor)
    """
    list = []
    for pos, val in enumerate(hbond):
        if val != -1:
            list.append((res_list[pos], res_list[val]))
    return list
eb880afa908cc7657e07b0d48d242203c45d13e4
74,594
def torch_mean_sum(x, n_mean=1):
    """
    Average over batch dim, sum over all other dims.
    """
    sum_dims = tuple(range(n_mean, x.ndim))
    if sum_dims:
        x = x.sum(sum_dims)
    return x.mean()
0b6018bbcb8b8bfeec355e70145f7d2e462837d8
74,597
def development_predictionfile_to_predictionlist(filepath):
    """
    Parse prediction csv file.

    Parse prediction csv file to list.

    Parameters
    ----------
    filepath : path
        Path to prediction.

    Returns
    -------
    list_results_energies : list
        List of tuple (Energy, Prediction)
    """
    # Open, read and close file
    file = open(filepath)
    list_results = file.read().splitlines()
    file.close()
    # Get collision energies in prediction file
    list_energies = [item for item in list_results if 'energy' in item] + ['']
    # Preallocate list
    list_results_energies = []
    # Cycle collision energies
    for index, energy in enumerate([item for item in list_energies if item]):
        start = list_results.index(list_energies[index])
        end = list_results.index(list_energies[index+1])
        list_hold = list_results[start+1:end]
        list_results_energies.append((energy, list_hold))
    return list_results_energies
d3998a17e2a007620e392d5845c1a778fe9efd4d
74,600
def binary_search(val, grid):
    """
    Binary search that returns the bin index of a value `val` given grid `grid`

    Some special cases:
        `val` < min(`grid`) --> -1
        `val` > max(`grid`) --> size of bins
        `val` = a grid point --> bin location whose upper bound is `val`
                                 (-1 if val = min(grid))
    """
    left = 0
    right = len(grid) - 1
    mid = -1
    while left <= right:
        mid = (int((left + right)/2))
        if grid[mid] < val:
            left = mid + 1
        else:
            right = mid - 1
    return int(right)
0b26653c30ccf94eb062aa5fda685a220a30de8c
74,601
import torch


def flip_feature_maps(feature_maps, flip_index=None):
    """Flip the feature maps and swap the channels.

    Args:
        feature_maps (list(torch.Tensor)): Feature maps.
        flip_index (list(int) | None): Channel-flip indexes.
            If None, do not flip channels.

    Returns:
        flipped_feature_maps (list(torch.Tensor)): Flipped feature_maps.
    """
    flipped_feature_maps = []
    for feature_map in feature_maps:
        feature_map = torch.flip(feature_map, [3])
        if flip_index is not None:
            flipped_feature_maps.append(feature_map[:, flip_index, :, :])
        else:
            flipped_feature_maps.append(feature_map)

    return flipped_feature_maps
2e11c0ae7f4c97fac58fca5e55a69e3211879a7e
74,608
def colored_pixels(img, thresh):
    """
    Find the positions of all pixels in the image that are considered dark
    enough for text. The image is assumed to be grayscale.

    Args:
        img (numpy.ndarray): Image to check.
        thresh (int): Background threshold up to which a pixel is considered
            colored.

    Returns:
        list[tuple[int, int]]: List of coordinates for all colored pixels.
    """
    assert len(img.shape) == 2  # 2D matrix
    return [(x, y) for y, row in enumerate(img)
            for x, pixel in enumerate(row) if pixel <= thresh]
c8a045245972e574df27f15219d5642b3e5cd02b
74,612
def regions_overlap(region_a, region_b):
    """Test if regions are identical"""
    if region_a is None or region_b is None:
        return False
    return (region_a["base"] == region_b["base"]
            and region_a["length"] == region_b["length"])
7d34e009c9d516891e96002d3c257b0a3c8f622b
74,615
def parse_join_code(join_code):
    """
    takes the join code and makes sure it's at least 6 digits long

    Args:
        join_code (int): the number of join codes sent out so far

    Returns:
        string: the actual join code
    """
    if join_code < 10:
        return f"00000{join_code}"
    elif join_code < 100:
        return f"0000{join_code}"
    elif join_code < 1000:
        return f"000{join_code}"
    elif join_code < 10000:
        return f"00{join_code}"
    elif join_code < 100000:
        return f"0{join_code}"
    else:
        return f"{join_code}"
a83ddcd31ecbe701ede66118691b5a0cba9fae19
74,617
def render_js_script(inner_code):
    """
    This wraps ``inner_code`` string inside the following code block::

        <script type="text/javascript">
            jQuery(function ($) {
                // inner_code here
            });
        </script>

    :rtype: :py:obj:`unicode`
    """
    return u"""
    <script type="text/javascript">
        jQuery(function ($) {
            %s
        });
    </script>""" % inner_code
e4e7dd2693c5f30493a962b9a7a90c8f2a439bfd
74,618
def cells_different(cell_a, cell_b, compare_outputs=True):
    """
    Return true/false depending on whether two cells differ

    cell_a: (obj) JSON representation of first cell
    cell_b: (obj) JSON representation of second cell
    compare_outputs: (bool) whether to compare cell outputs, or just inputs
    """
    # check if cell type or source is different
    if (cell_a["cell_type"] != cell_b["cell_type"]
            or cell_a["source"] != cell_b["source"]):
        return True
    # otherwise compare outputs if it is a code cell
    elif compare_outputs and cell_b["cell_type"] == "code":
        # get the outputs
        cell_a_outs = cell_a['outputs']
        cell_b_outs = cell_b['outputs']
        # if different number of outputs, the cell has changed
        if len(cell_a_outs) != len(cell_b_outs):
            return True
        # compare the outputs one by one
        for j in range(len(cell_b_outs)):
            # check that the output type matches
            if cell_b_outs[j]['output_type'] != cell_a_outs[j]['output_type']:
                return True
            # and that the relevant data matches
            elif ((cell_a_outs[j]['output_type'] in ["display_data", "execute_result"]
                   and cell_a_outs[j]['data'] != cell_b_outs[j]['data'])
                  or (cell_a_outs[j]['output_type'] == "stream"
                      and cell_a_outs[j]['text'] != cell_b_outs[j]['text'])
                  or (cell_a_outs[j]['output_type'] == "error"
                      and cell_a_outs[j]['evalue'] != cell_b_outs[j]['evalue'])):
                return True
    return False
c720a346550ca47b8841f0f60f8c566339b5e4aa
74,626
def MaxASA(scale):
    """Returns the maximum accessible surface area for amino acids.

    This function returns the maximum accessible surface area (ASA) for the
    amino acids in units of square angstroms. These ASAs are necessary for
    calculating the relative solvent accessibility of a residue.

    There are a large variety of estimates for the exact ASA for each amino
    acid. The calling variable *scale* specifies which scale to use. Allowed
    values are the following strings:

        * 'Tien2013' : The values provided by Tien et al (Maximum allowed
          solvent accessibilities of residues in proteins), as defined in
          Table 1 in the column labeled "Theoretical" at
          http://www.plosone.org/article/info:doi/10.1371/journal.pone.0080635

    The returned variable is a dictionary *asa* keyed by each upper-case
    one-letter amino-acid code, with the values being the ASA for that
    residue.

    Example:

    >>> asa = MaxASA('Tien2013')
    >>> len(asa) == 20
    True
    >>> asa == {'A':129.0, 'R':274.0, 'N':195.0, 'D':193.0, 'C':167.0,
    ...         'E':223.0, 'Q':225.0, 'G':104.0, 'H':224.0, 'I':197.0,
    ...         'L':201.0, 'K':236.0, 'M':224.0, 'F':240.0, 'P':159.0,
    ...         'S':155.0, 'T':172.0, 'W':285.0, 'Y':263.0, 'V':174.0}
    True
    """
    if scale == 'Tien2013':
        return {'A':129.0, 'R':274.0, 'N':195.0, 'D':193.0, 'C':167.0,
                'E':223.0, 'Q':225.0, 'G':104.0, 'H':224.0, 'I':197.0,
                'L':201.0, 'K':236.0, 'M':224.0, 'F':240.0, 'P':159.0,
                'S':155.0, 'T':172.0, 'W':285.0, 'Y':263.0, 'V':174.0}
    else:
        raise ValueError("Invalid value for scale")
0c0b47d1a38a00e2286e5067a8d089f14b1eefad
74,628
def assign_road_surface(x):
    """Assign road surface to roads

    Parameters
        x - Pandas DataFrame of values
            - road_type - String name for type of road: national, province or rural
            - material_code - String code for road materials:
              [('A','Asfalto'),('H','Hormigon'), ('R','Ripio'), ('T','Tierra'), ('B','Tratamiento')]
            - surface - String name of already assigned road surface

    Returns
        String value of road surface as one or more of:
        ['Asfalto','Hormigon', 'Ripio', 'Tierra','Tratamiento']
    """
    asset_type = str(x.road_type).lower().strip()

    # These are national and provincial roads with paved surfaces
    '''A - Asphalt, H - Hormigon, R - Ripio, T - Tierra, B - Tratamiento'''
    material_surfaces = [('A', 'Asfalto'), ('H', 'Hormigon'), ('R', 'Ripio'),
                         ('T', 'Tierra'), ('B', 'Tratamiento')]

    if asset_type == 'national':
        if str(x.material_code) != '0':
            ml = x.material_code.split(',')
            s = []
            for ms in material_surfaces:
                if ms[0] in ml:
                    s.append(ms[1])
            return ','.join(s)
        else:
            return 'Asfalto'
    elif str(x.surface).lower().strip() != '0':
        # Anything else not included above
        return x.surface.title()
    else:
        return 'Tierra'
b7c99cd338d4a0d1d729cfcb6548c42b9c788271
74,630
from typing import Any


def _get_dot_path(elem_path: str, data_map: dict) -> Any:
    """
    Return the value of an attribute; for a dotted attribute, tries to
    search nested keys.

    Parameters
    ----------
    elem_path : str
        The attribute name or prefix.name
    data_map : dict
        The dictionary/map to search through.

    Returns
    -------
    Any
        The attribute value

    Raises
    ------
    KeyError
        If the key/subkey is not found
    """
    # if this is directly in the map return it
    if elem_path in data_map:
        return data_map[elem_path]
    # otherwise partition into prefix and name
    prefix, _, name = elem_path.rpartition(".")
    attrib = data_map.get(prefix)
    if isinstance(attrib, dict) and name in attrib:
        return attrib[name]
    raise KeyError(f"'{elem_path}' not found")
2cf44d92d27e5733ed855915b97c44f27c5d19ea
74,631
def format_udm_open(db_name, sequence_num, timestamp):
    """Return formatted opening <UDM ...> entity."""
    return '<UDM DATABASE="{}" SEQUENCE="{}" TIMESTAMP="{}">\n'.format(
        db_name, sequence_num, timestamp)
8338b66e824a4f5651fb2afb9659cd170196f882
74,633
def flattenDict(d):
    """Reduces the depth of a dictionary to 1, parent keys are ignored.

    >>> d = {'a': 1, 'c': {'e': 5, 'd': 4}, 'b': 2, 'f': {'g': {'h': 8}}}
    >>> flattenDict(d)
    {'a': 1, 'e': 5, 'd': 4, 'b': 2, 'h': 8}
    """
    result = {}
    for k, v in d.items():
        if isinstance(v, dict):
            result.update(flattenDict(v))
        else:
            result.update({k: v})
    return result
49369e32cb0b76f6783384905fc3d80e3460cac2
74,636
def make_comparison_dataframe(historical, forecast):
    """Join the history with the forecast.

    The resulting dataset will contain columns 'yhat', 'yhat_lower',
    'yhat_upper' and 'y'.
    """
    return forecast.set_index('ds')[['yhat', 'yhat_lower', 'yhat_upper']].join(
        historical.set_index('ds'))
984a42da5d655b4d8df1055030ad096dae210603
74,639
def is_pseudogene(gene, feature):
    """
    Check if the given feature is a pseudogene. This is determined by checking
    if the feature is labeled as a pseudogene or if the gene it is a transcript
    of is a pseudogene.
    """
    raw_notes = feature.qualifiers.get("note", [])
    raw_notes.extend(gene.qualifiers.get("note", []))
    return any("pseudogene" in n for n in raw_notes)
75f746c35528054d4e389399c2a1a35e7dda7c12
74,644
def int_str(val, keyspace="suTaWp6Z7P2zFYn5IMQUfqr4wAgeldRDVyvkS801HGjNJCbBKotE9x3hmiXcOL"):
    """
    Turn a positive integer into a string.

    Keyspace can be anything you like - this was just shuffled letters and
    numbers, but... each character must occur only once.
    """
    assert len(set(keyspace)) == len(keyspace)
    assert val >= 0
    out = ""
    while val > 0:
        val, digit = divmod(val, len(keyspace))
        out += keyspace[digit]
    return out[::-1]
9292dcafa61f55bf6effff3bad01f3d0096e7caf
74,646
import random


def random_number_generator(l):
    """
    Generate a list of random numbers of length l.
    """
    output = []
    for i in range(l):
        output.append(random.randint(0, 1000))
    return output
9d4ee658e37f755afe28351c7b1661001bdbbfb6
74,649
def split_interface(interface):
    """Split an interface name based on first digit, slash, or space match.

    Args:
        interface (str): The interface you are attempting to split.

    Returns:
        tuple: The split between the name of the interface and the value.

    Example:
        >>> from netutils.interface import split_interface
        >>> split_interface("GigabitEthernet1/0/1")
        ('GigabitEthernet', '1/0/1')
        >>> split_interface("Eth1")
        ('Eth', '1')
        >>>
    """
    head = interface.rstrip(r"/\0123456789. ")
    tail = interface[len(head) :].lstrip()  # noqa: E203
    return (head, tail)
059e1f8ff1819ead64010865f1343d6100bed6c8
74,651
import pyarrow


def _to_pyarrow(value):
    """Convert Python value to pyarrow value."""
    return pyarrow.array([value])[0]
a050c047b8ea7e4510c34993e5ad9a436920abf9
74,657
def gen_dlog_gamma(shape, scale):  # gamma density: x^(a-1) * exp(-x/b) / (G(a) * b^a)
    """Returns a function that gives the derivative of the gamma log-density
    with respect to X."""
    def fn(x):
        ret = ((shape-1.0)/x) - (1.0/scale)
        return ret
    return fn
3597fb2647e6e0fedc6dddc3934197ad092803fc
74,666
def right_tokens(token):
    """ Get all tokens to the right of a given token """
    return [tok for right_tok in token.rights for tok in right_tok.subtree]
50b422564b73ca244e8241d4b53bcf98f22f0e4e
74,668
def publish_sync_ack_to_ralph3(obj, ralph3_id):
    """
    Publish ACK to Ralph3 that some object was updated.
    """
    return {
        'model': obj._meta.object_name,
        'id': obj.id,
        'ralph3_id': ralph3_id,
    }
ebd8781ccfd1714a15870a9b64f71ad642f7a31d
74,671
def round_masses(mass):
    """
    Round all the masses for the output to 8 digits after comma
    """
    if mass == '-':
        return '-'
    else:
        return round(mass, 8)
bb7f3fbbda7fbc2453ab8dfd3060e9c92e1e54cc
74,673
def getBaseClasses(obj, includeSelf=False, includeObject=True):
    """
    Get all base classes from an object's class.

    :param any obj: Generic obj or class
    :param includeSelf: Whether to include own class or not
    :param includeObject: Whether to include object class or not (Every object has object as base)
    :return: List of classes
    :rtype: list[type]
    """
    if isinstance(obj, type):
        cls = obj
    else:
        cls = obj.__class__

    classes = list(cls.__bases__)
    for base in classes:
        for baseClassBase in getBaseClasses(base):
            if baseClassBase not in classes:
                classes.append(baseClassBase)
    if includeSelf:
        classes.insert(0, cls)
    if not includeObject:
        classes.remove(object)
    return classes
d06f23e66994f5879d1a3673a8297d876830868d
74,684
def contains(iterable, value, from_index=None):
    """
    Returns true if the value is present in the iterable.
    Use from_index to start your search at a given index.

    Params: iterable, value
        iterable -> list, sequence, set, dictionary, generator etc
        value -> Any element that is to be searched in the iterable

    IMP: This method is not lazy

    Examples:
        >>> _.contains([1, 2, 3], 3);
        >>> True
    """
    if from_index:
        for index, item in enumerate(iterable):
            if index > from_index and item == value:
                return True
        return False

    return value in iterable
fb866061b9e4612ccdce330a3bd1d4b5e657383d
74,686
import ast


def _mcpyrate_attr(dotted_name, *, force_import=False):
    """Create an AST that, when compiled and run, looks up an attribute of `mcpyrate`.

    `dotted_name` is an `str`. Examples::

        _mcpyrate_attr("dump")                 # -> mcpyrate.dump
        _mcpyrate_attr("quotes.lookup_value")  # -> mcpyrate.quotes.lookup_value

    If `force_import` is `True`, use the builtin `__import__` function to
    first import the `mcpyrate` module whose attribute will be accessed.
    This is useful when the eventual use site might not import any
    `mcpyrate` modules.
    """
    if not isinstance(dotted_name, str):
        raise TypeError(f"dotted_name name must be str; got {type(dotted_name)} with value {repr(dotted_name)}")

    if dotted_name.find(".") != -1:
        submodule_dotted_name, _ = dotted_name.rsplit(".", maxsplit=1)
    else:
        submodule_dotted_name = None

    # Issue #21: `mcpyrate` might not be in scope at the use site. Fortunately,
    # `__import__` is a builtin, so we are guaranteed to always have that available.
    if not force_import:
        mcpyrate_module = ast.Name(id="mcpyrate")
    else:
        globals_call = ast.Call(ast.Name(id="globals"), [], [])
        if submodule_dotted_name:
            modulename_to_import = f"mcpyrate.{submodule_dotted_name}"
        else:
            modulename_to_import = "mcpyrate"

        import_call = ast.Call(ast.Name(id="__import__"),
                               [ast.Constant(value=modulename_to_import),
                                globals_call,  # globals (used for determining context)
                                ast.Constant(value=None),  # locals (unused)
                                ast.Tuple(elts=[]),  # fromlist
                                ast.Constant(value=0)],  # level
                               [])
        # When compiled and run, the import call will evaluate to a reference
        # to the top-level `mcpyrate` module.
        mcpyrate_module = import_call

    value = mcpyrate_module
    for name in dotted_name.split("."):
        value = ast.Attribute(value=value, attr=name)

    return value
16ab9893075caccf5808a1ce7926bc266394a979
74,687
def GetRemoveLabelsListFromArgs(args):
    """Returns the remove labels list from the parsed args.

    Args:
        args: The parsed args.

    Returns:
        The remove labels list from the parsed args.
    """
    return args.remove_labels
d8b2209f289180c500198be73f4ef9469cbd0c06
74,698