content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def nick_that_sent_message(tags, prefix):
    """Return the nick that sent a message, given WeeChat callback data.

    A tag of the form ``nick_XYZ`` in ``tags`` identifies the sender.
    When no such tag exists, fall back to ``prefix``, which usually holds
    the nick with an optional leading mode character (e.g. IRC's ``@``
    for operators, ``+`` for voice) or a leading space (used by some
    protocols, e.g. Matrix, when the nick has no mode).
    """
    NICK_TAG = 'nick_'
    for tag in tags:
        if tag.startswith(NICK_TAG):
            return tag[len(NICK_TAG):]
    # No nick tag; strip a single mode/space character from the prefix.
    MODE_CHARS = ('~', '&', '@', '%', '+', '-', ' ')
    return prefix[1:] if prefix.startswith(MODE_CHARS) else prefix
973121bdf85d4a1c3df8f388f3564c8df5b4fb07
12,832
import torch


def get_topk_logits_selector(logits, k=3):
    """Build a one-hot mask over the k largest logits.

    Args:
        logits: tensor of shape [batch, nout, voc_size].
        k: number of top entries to mark per [batch, nout] position.

    Returns:
        A tensor with the same shape, dtype and device as ``logits``,
        containing 1 at the positions of the k largest values along the
        last dimension and 0 elsewhere.
    """
    # Indices of the k largest values along the vocabulary dimension.
    topk_indices = torch.topk(logits, k=k).indices
    # zeros_like keeps dtype/device consistent with the input.  The
    # original forced the indices to CPU and built the mask with
    # torch.zeros (CPU, float32), which fails for GPU inputs; the dead
    # TF-port scaffolding has also been removed.
    return torch.zeros_like(logits).scatter_(2, topk_indices, 1)
d9b2b83da1856c5d355c22257b3acc4c445f5960
12,834
def fp_mask_to_list(fp_mask):
    """Parse a fp_mask, eg 1-4,2,3 or 0x202

    Returns fp cpus based on given mask
    """
    if '0x' in fp_mask:
        # Hex bitmask form: each set bit selects a cpu (up to 64 cpus).
        mask = int(fp_mask, 16)
        return [bit for bit in range(0, 64) if mask & (1 << bit)]
    cpus = []
    for part in fp_mask.split(','):
        if '-' in part:
            # A range of cpus, inclusive at both ends.
            lo, hi = part.split('-', 1)
            try:
                cpus += range(int(lo), int(hi) + 1)
            except ValueError:
                raise Exception("Invalid range expression %s" % part)
        else:
            # A single cpu number.
            try:
                cpus.append(int(part))
            except ValueError:
                raise Exception("Invalid cpu number %s" % part)
    return cpus
6819495ac608d9e94845581da7567f56aebe51c9
12,835
def color_check(origin_color: list, target_color: list) -> bool:
    """Return True when origin_color matches target_color within a fixed
    confidence interval on each of the first three channels.

    Tune CI to trade accuracy against robustness.
    """
    CI = 20  # Confidence interval per channel
    return all(
        target - CI <= origin <= target + CI
        for origin, target in zip(origin_color[:3], target_color[:3])
    )
4a9e72f10779174eaa409ffb1aa2fe657382ba4a
12,836
from typing import Counter


def getRepeatedList(mapping_argmat, score_mat_size):
    """Count the numbers in the mapping dictionary and create a list of
    repeat counts, one per index, used to build the repeated affinity
    matrix when fusing affinity values.
    """
    occurrences = Counter(mapping_argmat)
    # Indices never seen in the mapping get a repeat count of zero.
    return [occurrences.get(idx, 0) for idx in range(score_mat_size)]
fa2449379bf8bf2051c2492d56de11c2ee0191e4
12,837
import os def _read_last_line(filename): """Efficiently read the last line from a file Modified from https://stackoverflow.com/a/54278929 (CC BY-SA 4.0) 24 hours of log at 1 line per sec takes ~1e-3 sec to read """ if not os.path.exists(filename): return None try: with open(filename, 'rb') as f: f.seek(-2, os.SEEK_END) while f.read(1) != b'\n': f.seek(-2, os.SEEK_CUR) last_line = f.readline().decode() return last_line except OSError: return None
ddfbd541da2329583371ce294f7738d3d828064a
12,838
def _get_text_alignment(point1, point2): """Get the horizontal and vertical text alignment keywords for text placed at the end of a line segment from point1 to point2 args: point1 - x,y pair point2 - x,y pair returns: ha - horizontal alignment string va - vertical alignment string""" x1, x2, y1, y2 = point1[0], point2[0], point1[1], point2[1] if(x1 < x2): ha = 'left' else: ha = 'right' if(y1 < y2): va = 'bottom' else: va = 'top' return(ha, va)
317860030bf86750207bc891c236ad2618c686b1
12,839
def normalize_rectangle(rect):
    """Normalizes a rectangle so that it is at the origin and 1.0 units
    long on its longest axis.  Input should be of the format (x0, y0, x1, y1).
    (x0, y0) and (x1, y1) define the lower left and upper right corners of
    the rectangle, respectively.

    Raises AssertionError on malformed input.
    """
    assert len(rect) == 4, 'Rectangles must contain 4 coordinates'
    x0, y0, x1, y1 = rect
    assert x0 < x1, 'Invalid X coordinates'
    assert y0 < y1, 'Invalid Y coordinates'

    dx = x1 - x0
    dy = y1 - y0
    if dx > dy:
        # X is the long axis: scale Y by the aspect ratio dy/dx.  (The
        # original computed dx/dy here, producing a value > 1 and
        # tripping the upper_y assertion below.)
        scaled = float(dy) / dx
        upper_x, upper_y = 1.0, scaled
    else:
        scaled = float(dx) / dy
        upper_x, upper_y = scaled, 1.0

    assert 0 < upper_x <= 1.0, 'Calculated upper X coordinate invalid'
    assert 0 < upper_y <= 1.0, 'Calculated upper Y coordinate invalid'

    return (0, 0, upper_x, upper_y)
b1a011948adb52bb6dea068116ab3a564ab2a1f8
12,840
import inspect
import warnings


def valid_func_args(func, *args):
    """Check that *func*'s parameter names match the desired signature.

    Returns True when they match; otherwise emits a DeprecationWarning
    describing the expected override and returns False.
    """
    actual = inspect.signature(func).parameters.keys()
    if set(args) == set(actual):
        return True
    name = getattr(func, '__name__', str(func))
    warnings.warn(
        f'{name}({", ".join(actual)}) is deprecated, please override '
        f'with {name}({", ".join(args)})',
        DeprecationWarning,
    )
    return False
4872817033ea55881442e69711b620b2853407f1
12,843
import shutil


def binary_available() -> bool:
    """Report whether the GitHub CLI binary (gh) is available in $PATH."""
    gh_path = shutil.which("gh")
    return gh_path is not None
e959757ccce6d162b225fd6ef986a7e248c606fa
12,845
def ring_float_to_class_int(rings: float, step=0.1):
    """Ring value rounded to classifier value; rounded to nearest step size"""
    scaled = rings / step
    return round(scaled)
563f155481592c9924ed61535e07a4d1ff7a1e8e
12,847
def weekday_list_to_hexweek(weekday_list):
    """
    Helper to convert list of integers representing weekdays into the
    speaker's hex representation of weekdays.

    :param weekday_list: List of weekday integers e.g. [0, 1, 2, 3, 4]
    :returns: Hex string .e.g. 0x3E
    """
    # Bit values indexed by weekday: Mon, Tue, Wed, Thu, Fri, Sat, Sun
    weekday_bits = [32, 16, 8, 4, 2, 1, 64]
    # Deduplicate so a repeated weekday does not double its bit.
    total = sum(weekday_bits[day] for day in set(weekday_list))
    return hex(total)
c729dbc134c67c4c71911cbb5c817276583e1503
12,848
def extract_indices(idx_to_check, *args):
    """Look if a given index j_gt is present in all the other series of
    indices (_, j) and return the corresponding one for each argument.

    idx_to_check --> gt index to check for correspondences in each method
    args --> per-method lists of (idx_pred, idx_gt) pairs

    Returns (all_found, indices): whether every method contained the gt
    index, and the list of matching predicted indices across methods.
    """
    found_flags = []
    matched_preds = []
    for method_pairs in args:
        found = False
        for idx_pred, idx_gt in method_pairs:
            if idx_gt == idx_to_check:
                found = True
                matched_preds.append(idx_pred)
        found_flags.append(found)
    return all(found_flags), matched_preds
c2de117417689d30d5b9a72f5bae07f12baf72ed
12,849
def get_file_date_part(now, hour) -> str:
    """Construct the part of the filename that contains the model run date.

    If now (e.g. 10h00) is before the model run hour (e.g. 12), the most
    recent run available is yesterday's, so yesterday's date is used.

    Args:
        now: datetime-like value (supports .hour and timedelta subtraction).
        hour: the model run hour (int).

    Returns:
        The date formatted as YYYYMMDD.
    """
    from datetime import timedelta

    if now.hour < hour:
        # Subtracting a timedelta handles month and year boundaries
        # correctly; the original's ``now.day - 1`` produced day 0 on
        # the first of a month.
        target = now - timedelta(days=1)
    else:
        target = now
    return '{year}{month:02d}{day:02d}'.format(
        year=target.year, month=target.month, day=target.day)
42c2beddccba755f66061364463f8ad759d3c020
12,851
from pathlib import Path
import tempfile


def get_basetemp() -> Path:
    """Return base temporary directory for tests artifacts."""
    basetemp = Path(tempfile.gettempdir()) / "cardano-node-tests"
    # Owner-only permissions; creating an existing directory is fine.
    basetemp.mkdir(mode=0o700, exist_ok=True)
    return basetemp
e34ffc5cae1373977f1b46ab4b68106e1bef3313
12,852
import os
import shutil


def remove_file(path):
    """
    Removes a path from the file system.
    """
    if not os.path.isdir(path):
        return os.unlink(path)
    # Directories are removed recursively, ignoring errors.
    return shutil.rmtree(path=path, ignore_errors=True)
6fd5f62988849373204300a42990ab558fc14b89
12,853
def mutate_string(string, pos, change_to):
    """
    Fastest way I've found to mutate a string @ 736 ns

    >>> mutate_string('anthony', 0, 'A')
    'Anthony'

    :param string:
    :param pos:
    :param change_to:
    :return:
    """
    # Strings are immutable, so round-trip through a list.
    chars = list(string)
    chars[pos] = change_to
    return ''.join(chars)
c6119076411b57f9ded2e899d40b6b82bb5f7836
12,855
def count_unique(list_to_count):
    """Count the number of entries for each unique value in a list.

    (The original docstring was garbled: "Counnunmber of entries".)

    Args:
        list_to_count: iterable of hashable values.

    Returns:
        dict mapping each unique value to its number of occurrences.
    """
    uniques = {}
    for element in list_to_count:
        # dict.get avoids a separate membership test per element.
        uniques[element] = uniques.get(element, 0) + 1
    return uniques
085ecdd0e960ac0de05e60c05e5417ca6dba8aef
12,856
def _get_field_type(column_info): """ Get the Django field type from the column's information. @param IN column_info Xylinq column information object @return A 2-element tuple: the name of the Django type class, and its attributes as a string """ xy_type = column_info.type xy_type_precision = column_info.type_precision # Integer if xy_type in ("big_integer", "integer", "small_integer"): return "IntegerField", "" # Decimal if xy_type == "decimal": return "DecimalField", "decimal_places=%s, max_digits=%s, " % (xy_type_precision[1], xy_type_precision[0]) # Date if xy_type == "date": return "DateField", "" # Datetime if xy_type == "date_time": return "DateTimeField", "" # String if xy_type in ("char", "var_char"): return "CharField", "max_length=%s, " % xy_type_precision[0] # Blob if xy_type == "blob": return "TextField", "" raise Exception("Column type not supported: %s" % xy_type)
74c61b70e07fcef033895d153f67524909c258e2
12,858
def with_last_degree(template_layers_config, degree):
    """
    Change the degree of the last -- or actually penultimate --
    layer in a layered micro-service application, while keeping
    the average service time for a user request constant.
    """
    assert len(template_layers_config) >= 2
    configs = list(template_layers_config)
    # Dividing the last layer's work by the new degree keeps the average
    # service time constant.
    last = configs[-1]
    configs[-1] = last._replace(average_work=last.average_work / degree)
    configs[-2] = configs[-2]._replace(degree=degree)
    return configs
f09bec9e27586349c679a66070bc4bac0dcba5d1
12,859
def bench(n):
    """Just a benchmarking function for relative expected performance"""
    values = [i for i in range(n)]
    squares = [v ** 2 for v in values]
    return sum(squares)
5d91d787aead273d264d6cbc7e3e1edfd17e342f
12,860
def GetErrorOutput(error, new_error=False):
    """Get a output line for an error in regular format."""
    line = ''
    if error.token:
        line = 'Line %d, ' % error.token.line_number

    code = 'E:%04d' % error.code

    error_message = error.message
    if new_error:
        error_message = 'New Error ' + error_message

    # Return the (possibly prefixed) error_message; the original returned
    # error.message, silently dropping the 'New Error ' prefix.
    return '%s%s: %s' % (line, code, error_message)
4661c74fcef9f13c0aad3d74e827d9eea20f86ef
12,861
def deunicode(s):
    """Returns a UTF-8 compatible string, ignoring any characters that will not convert."""
    if not s:
        return s
    # The original was Python 2 code: str.decode() no longer exists on
    # Python 3 text strings.  Only bytes need decoding; invalid UTF-8
    # sequences are dropped, matching the original 'ignore' behavior.
    if isinstance(s, bytes):
        return s.decode('utf-8', 'ignore')
    return str(s)
baaf99acec746c266059c07e03a1ee4a7e76f46a
12,862
def constructUniformAllelicDistribution(numalleles):
    """Constructs a uniform distribution of N alleles in the form of a frequency list.

    Args:
        numalleles (int): Number of alleles present in the initial population.

    Returns:
        (list):  Array of floats, giving the initial frequency of N alleles.
    """
    # Percentage share per allele, converted back to a fraction.
    frac = (100.0 / numalleles) / 100.0
    return [frac for _ in range(numalleles)]
45e834d2129586cb6ff182e1a8fe6ecb1ae582ae
12,864
def get_cpu(board, addr):
    """ Retrieve CPU. """
    # The d2000 dev board always maps to its x86 core; otherwise the
    # address selects between the ARC and x86 sensor subsystem cores.
    if board == 'd2000_dev':
        return 'd2000_x86'
    if addr == 'addr_arc':
        return 'se_arc'
    return 'se_x86'
02dcb057d7c1ed178090c84c2baa8d7ca732b98b
12,865
def sma(serie, look_back_delay):
    """Simple Moving Average"""
    window = int(look_back_delay)
    rolling = serie.rolling(window=window, center=False)
    return rolling.mean()
d584577e315827ec97c092feb54fbbda8cc43952
12,866
import sys


def format_sys_argv():
    """
    From the system arguments, get the positional and keyword arguments
    supported for launchpanel.  Arguments can be provided as positional
    (in order, when not containing an "=" sign), or keyword, when an "="
    sign is included.

    :return: tuple of args, kwargs
    :rtype: (list, dict)
    """
    # The first argument is always the filepath being executed by
    # python, so ignore that.
    all_args = sys.argv[1:]

    args = []
    kwargs = {}
    for arg in all_args:
        if '=' in arg:
            # Split only on the FIRST '=' so values containing '='
            # (e.g. title=a=b) are preserved; the original split on
            # every '=' and truncated such values.
            key, value = arg.split('=', 1)
            kwargs[key] = value
        else:
            args.append(arg)

    return args, kwargs
33901287a1807c960703851c9ac5f4ec45f99c61
12,867
def id_record(rec):
    """Converts a record's id to a blank node id and returns the record."""
    rec['id'] = '_:f{}'.format(rec['id'])
    return rec
1ee5a9e9600299b56543c92a77cadb1826bb9bb7
12,868
def obj_ext(value):
    """
    Returns extention of an object.
    e.g. For an object with name 'somecode.py' it returns 'py'.
    """
    # rsplit keeps only the text after the last dot; a name without a
    # dot is returned unchanged.
    return value.rsplit('.', 1)[-1]
7ef6f1009145a0acc543130c41d2082a14412b6d
12,869
import torch


def binary_hyperplane_margin(X, Y, w, b, weight=1.0):
    """ A potential function based on margin separation according to a (given
    and fixed) hyperplane:

        v(x,y) = max(0, 1 - y(x'w - b) ), so that V(ρ) = ∫ max(0, y(x'w - b) ) dρ(x,y)

    Returns 0 if all points are at least 1 away from margin.

    Note that y is expected to be {0,1}

    Needs separation hyperplane be determined by (w, b) parameters.
    """
    # Map labels from {0,1} to {-1,1} as the SVM-type margin requires.
    signs = 2 * Y - 1
    scores = torch.matmul(X, w) - b
    hinge = torch.relu(1 - signs * scores)
    return weight * hinge.mean()
0f038dc2ae9def9823b3f440a087ede52dcee717
12,870
def odd_parity(bits):
    """
    Determines if the array has even or odd parity.  Returns True if odd,
    False if even.

    The original returned the raw 0/1 remainder despite documenting a
    boolean; bool is a subclass of int, so this stays backward-compatible
    for arithmetic users.
    """
    ones = sum(1 for bit in bits if bit == 1)
    return ones % 2 == 1
5705df9b35761c57fa4160761198e2804fb23159
12,871
def _ensure_connection(fn): """Decorator that wraps MonitorSocket external methods""" def wrapper(*args, **kwargs): """Ensure proper connect/close and exception propagation""" mon = args[0] already_connected = mon.is_connected() mon.connect() try: ret = fn(*args, **kwargs) finally: # In general this decorator wraps external methods. # Here we close the connection only if we initiated it before, # to protect us from using the socket after closing it # in case we invoke a decorated method internally by accident. if not already_connected: mon.close() return ret return wrapper
802e96a03c07e2629c69cd97680ff5a04517401c
12,872
def getMapSpkrs(lstOracleSpkrs, dfSpkrTimes):
    """Map ground-truth speakers to diarization-system speakers, assuming
    the maximum time overlap in dfSpkrTimes reflects the intended mapping.

    Inputs:
    - lstOracleSpkrs: list of ground truth speaker labels.
    - dfSpkrTimes: Pandas dataframe (oracle speakers x diarized speakers)
      of overlap times.

    Outputs:
    - mapSpkrs: dict keyed by ground-truth speaker; each value is
      [mapped speaker, mapped time (rounded to 3), percentage of that
      speaker's total time correctly mapped (rounded to 1)].
    """
    mapSpkrs = {}
    for oracle in lstOracleSpkrs:
        row = dfSpkrTimes.loc[oracle]
        best = row.idxmax()
        best_time = row.max()
        total_time = sum(row)
        mapSpkrs[oracle] = [best,
                            round(best_time, 3),
                            round(100 * best_time / total_time, 1)]
    return mapSpkrs
2152ee61f5be4706314762253576aab78953a18c
12,873
def get_homologs(homologs_fname):
    """Extract the list of homolog structures from a list file

    :param homologs_fname: file name with the list of homologs
    :type homologs_fname: str
    :returns homologs_list containing the pdb codes of the homolog
        structures in the input file
    :rtype tuple
    """
    codes = []
    with open(homologs_fname, 'r') as fhandle:
        for raw_line in fhandle:
            # Strip trailing whitespace, drop the last two characters
            # (chain suffix), and lowercase the pdb code.
            codes.append(raw_line.rstrip()[:-2].lower())
    return tuple(codes)
1db108f30a3ef274cba918c5e1c056c6797a5bca
12,874
def _generate_summary(sentences, sentenceValue, threshold): """get the summary: if value above the threshold Args: sentences (list): all sentences sentenceValue (dict): the dict storing its value threshold (int): threshold to select sentences Returns: (str): summary """ sentence_count = 0 summary = '' # check if qualify for sentence in sentences: if sentence[:15] in sentenceValue and sentenceValue[sentence[:15]] >= (threshold): summary += " " + sentence sentence_count += 1 return summary
58f432dcc419288b9977d827abf1684d80e3a404
12,876
def _GetFields(trace=None): """Returns the field names to include in the help text for a component.""" del trace # Unused. return [ 'type_name', 'string_form', 'file', 'line', 'docstring', 'init_docstring', 'class_docstring', 'call_docstring', 'length', 'usage', ]
acf8a1c62853f7648082689002b3ced2689892fe
12,877
import builtins


def iscallable(t):
    """True if type ``t`` is a code object that can be called"""
    # builtins.callable already checks that the type defines __call__;
    # the extra hasattr(t, '__call__') test in the original was redundant.
    return builtins.callable(t)
9dc806eff1ebce90d9df8a3f3c53256ef73f0f66
12,878
def normalize(value, minimum, maximum):
    """min-max normalization"""
    # Clamp into [minimum, maximum] before scaling to [0, 1].
    clamped = min(max(value, minimum), maximum)
    return (clamped - minimum) / (maximum - minimum)
0c1318d71db43eeb27ff59ac743f8909279451b5
12,879
def get_valid_values(value, min_value, max_value):
    """Assumes value a string, min_value and max_value integers.
    If value is in the range returns True. Otherwise returns False."""
    try:
        value = int(value)
    except ValueError:
        return False
    # Direct range comparison replaces the original's materialisation of
    # every integer in [min_value, max_value] into a list per call.
    return min_value <= value <= max_value
4385f8328cfbe6497f7a723be37e54f0a86f9fbf
12,883
import torch def _spherical_harmonics_l0(xyz): """Compute the l=0 Spherical Harmonics Args: xyz : array (Nbatch,Nelec,Nrbf,Ndim) x,y,z, of (Point - Center) Returns Y00 = 1/2 \sqrt(1 / \pi) """ return 0.2820948 * torch.ones_like(xyz[..., 0])
83831d0a140d85dc356ae04bcbaeeda74c5a9fee
12,884
def index_all(elm, lst):
    """return list[int] all positions where elm appears in lst. Empty list if not found"""
    # The original tested ``type(list) != list`` — the *builtin*, which is
    # always True — so every input was copied.  Test the argument instead
    # and only convert non-list iterables.
    if not isinstance(lst, list):
        lst = list(lst)
    return [i for i, e in enumerate(lst) if e == elm]
0db325714d6c8de5b1a1fbb516287c56931cf0a9
12,886
def get_lines(file_path):
    """Return all lines of the file, stripped of surrounding whitespace.

    Returns a list.  (The original returned a lazy ``map`` object despite
    promising "all lines"; it could only be iterated once, which is a
    surprising contract for callers.)
    """
    with open(file_path, encoding='utf-8') as data_file:
        return [line.strip() for line in data_file]
caf42286bc985c609076cb31432242552483a661
12,887
def version(server, component=None):
    """ Returns the version of the specified component.  If *component* is
    None, then a list of all of the components available will be returned.

    :param CasparServer server: The :py:class:`~caspartalk.CasparServer`
        that the *amcp_command* will be sent to.
    :param str component: The component to query the version of.
    :rtype: List
    :return: A list containing either the version of the component queried,
        or of all of the components available.
    """
    # VERSION {[component:string]}
    # Returns the version of specified component.
    amcp_string = "VERSION {0}".format(component) if component else "VERSION"
    response = server.send_amcp_command(amcp_string)
    return response[0] if response else None
642d80ede6afee88484095f631b1b194e32fac4b
12,888
import base64 def __decode(encoded, n): """ decode the string """ token = encoded.split(":")[0] for _ in range(0, n): token = base64.b64decode(token) return token
9ac3ba6da27ae255117b25c5f1c61f35d06c2c2e
12,891
import numpy
def smooth_waterfall(arr,fwhm=4.0,unsharp=False):
    """
    Smooth a waterfall plot.

    If unsharp set, remove the smoothed component

    Input array should have dimensions [timelen, nbolos]
    """
    timelen,nbolos = arr.shape
    # Gaussian-shaped kernel over the full time axis, normalized to unit
    # sum.  NOTE(review): the denominator is 2*fwhm/sqrt(8 ln 2), i.e. the
    # sigma term is not squared as in a standard Gaussian — confirm this
    # width is intentional before changing it.
    kernel = numpy.exp(-numpy.linspace(-timelen/2,timelen/2,timelen)**2/ (2.0*fwhm/numpy.sqrt(8*numpy.log(2))))
    kernel /= kernel.sum()
    # Convolve each bolometer column with the kernel via FFT: multiply in
    # the frequency domain (kernel broadcast across columns with outer),
    # then shift so the smoothed data lines up with the input in time.
    kernelfft = numpy.fft.fft(kernel)
    arrfft = numpy.fft.fft(arr,axis=0)
    arrconv = numpy.fft.fftshift( numpy.fft.ifft(arrfft* numpy.outer(kernelfft,numpy.ones(nbolos)), axis=0).real,axes=(0,))
    if unsharp:
        # Unsharp mask: return only the high-frequency residual.
        return arr-arrconv
    else:
        return arrconv
849b144a63b1209523e588724531e5cd24269e76
12,892
def find_geom(geom, geoms):
    """
    Returns the index of a geometry in a list of geometries avoiding
    expensive equality checks of `in` operator.

    Returns None when the identical object is not present.
    """
    for index, candidate in enumerate(geoms):
        # Identity check, not equality: geometry comparison is costly.
        if candidate is geom:
            return index
2c2c9c2230ae745b6a781337192c694ba4ba4c59
12,893
def summary_table(params, proteins):
    """
    Returns a string representing a simple summary table of
    protein classifcations.
    """
    counts = {}
    for seqid in proteins:
        category = proteins[seqid]['category']
        counts[category] = counts.get(category, 0) + 1

    out = "\n\n# Number of proteins in each class:\n"
    # Total of all PSE-* classes, reported as its own pseudo-class.
    pse_total = sum(n for c, n in counts.items() if "PSE-" in c)
    counts["PSE(total)"] = pse_total

    for category in sorted(counts):
        # PSE sub-classes are indented one space under the total.
        spacer = " " if "PSE-" in category else ""
        out += "# %s%-15s\t%i\n" % (spacer, category, counts[category])

    return out
1fa3f16f799964bf9f1c0f1c593020902ab2219f
12,894
def random_password():
    """Generate a random 8-character numeric password string.

    :return: string of 8 random decimal digits.
    """
    # secrets provides a CSPRNG; the original used random.randint, which
    # is predictable and unsuitable for passwords.
    import secrets

    return ''.join(secrets.choice('0123456789') for _ in range(8))
d7f83d4e01c79166d6b116c40622c82148252c30
12,895
def on_board(i, j):
    """Return True if the location is on board

    >>> on_board(0,0)
    True
    >>> on_board(-1,17)
    False
    """
    def in_range(coord):
        return 0 <= coord < 8
    return in_range(i) and in_range(j)
107f9e614c965b29c0c872738fd4ea5407971a29
12,896
def multiplicative_inverse(e, phi):
    """
    Euclid's extended algorithm for finding the multiplicative
    inverse of two numbers
    """
    # Track the Bezout coefficient for e alongside the remainders.
    prev_coeff, coeff = 0, 1
    prev_remainder, remainder = phi, e
    while remainder > 0:
        quotient = prev_remainder // remainder
        prev_coeff, coeff = coeff, prev_coeff - quotient * coeff
        prev_remainder, remainder = remainder, prev_remainder - quotient * remainder
    if prev_remainder > 1:
        # gcd(e, phi) != 1, so no inverse exists.
        raise ValueError('e is not invertible by modulo phi.')
    if prev_coeff < 0:
        prev_coeff += phi
    return prev_coeff
9934f6e2f86ff0ef4165728d59f11ba0d1cad928
12,897
def check_contradiction(statement1, statement2):
    """
    Check if two statements are contradictory ignoring the time dimension.

    :param statement1: the first statement
    :param statement2: the second statement
    :return: True when both statements have an event-like first argument,
        the same predicate and same remaining arguments, but opposite
        negation flags; False otherwise.
    """
    # Each statement's first argument must look like an event reference:
    # either a token starting with "e" and ending in a digit (e.g. "e12"),
    # or the literal "E".  NOTE(review): assumes Statement objects expose
    # .arguments (sequence of str), .predicate and .is_negative — confirm
    # against the statement class definition.
    if not (statement1.arguments[0][0].startswith("e") and statement1.arguments[0][-1].isnumeric()) \
            and statement1.arguments[0] != "E":
        return False
    if not (statement2.arguments[0][0].startswith("e") and statement2.arguments[0][-1].isnumeric()) \
            and statement2.arguments[0] != "E":
        return False
    # Contradiction: same predicate and same non-event arguments, but one
    # statement is negated and the other is not.
    ret = statement1.predicate == statement2.predicate \
        and statement1.arguments[1:] == statement2.arguments[1:] \
        and statement1.is_negative != statement2.is_negative
    return ret
fa0a485d68ece3a6ead3c41a017b58926273f42d
12,898
import os


def xonshconfig(env):
    """Ensures and returns the $XONSHCONFIG"""
    config_dir = env.get('XONSH_CONFIG_DIR')
    return os.path.join(config_dir, 'config.json')
19ad736e9128f22836151e84b55803e7d8dd940e
12,899
def raw_func():
    """
    foo bar
    baz
    """
    result = 12345
    return result
471716eed6860b8abd6a0e9129fe20e7eaadbdb4
12,900
import random


def generate_token(length=64):
    """Generate a random token.

    Args:
        length (int): Number of characters in the token.

    Returns:
        Random character sequence.
    """
    _allowed = 'abcdefghijklmnoprstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-_$'
    # SystemRandom draws from the OS entropy source; instantiate once
    # instead of per character.
    rng = random.SystemRandom()
    return ''.join(rng.choice(_allowed) for _ in range(length))
b1cc37216ed43e8d1d3c86e9817e87ab9833bfdd
12,903
from datetime import datetime


def calc_max_uptime(reboots):
    """Parse the passed in reboots output, extracting the datetimes.

    Calculate the highest uptime between reboots =
    highest diff between extracted reboot datetimes.

    Return a tuple of this max uptime in days (int) and the date (str)
    this record was hit.

    For the output above it would be (30, '2019-02-17'),
    but we use different outputs in the tests as well ...
    """
    # Skip the header line; the timestamp is the last 16 characters.
    timestamps = [
        datetime.strptime(line[-16:], "%a %b %d %H:%M")
        for line in reboots.splitlines()[1:]
    ]

    # strptime without a year defaults to 1900; pin every stamp to the
    # same leap year so Feb 29 parses and the diffs are consistent.
    timestamps = sorted(t.replace(year=2020) for t in timestamps)

    # Pair each uptime span with the reboot datetime that ended it.
    spans = [
        (timestamps[i] - timestamps[i - 1], timestamps[i])
        for i in range(1, len(timestamps))
    ]

    # max() on (timedelta, datetime) tuples picks the longest span,
    # breaking ties on the later date.  (The original also built an
    # unused sorted copy and ended with an unreachable ``pass``; both
    # removed.)
    longest, when = max(spans)
    return (int(longest.days), str(when.date()))
6f7d02bc17c16d4185e566646a70abe840934af3
12,904
import string


def get_number(s, cast=int):
    """
    Try to get a number out of a string, and cast it.
    """
    # Keep only the decimal digit characters; fall back to the original
    # value when none are present.
    digits = "".join(ch for ch in str(s) if ch in string.digits)
    return cast(digits) if digits else s
f788bfd13cc7234ca5f290ed047ec9b8bf8acc9b
12,905
def _sp_print_stderr(m: str) -> str: """Return the subprocess cmd to print `m` to stderr.""" return 'python -c "import sys; print(\'{}\', file=sys.stderr)"'.format(m)
7a99aa77e3f54ede37a71df6a1432c82c79352aa
12,907
import json
import re
def sd_datatype_mapping(XNAT, project):
    """
    Method to get the Datatype mapping from Project level

    :param XNAT: XNAT interface
    :param project: XNAT Project ID
    :return: Dictonary with scan_type/series_description and datatype mapping
    """
    sd_dict = {}
    # Prefer an explicit BIDS_datatype mapping stored as a project
    # resource; the mapping file is keyed by project ID.
    if XNAT.select('/data/projects/' + project + '/resources/BIDS_datatype').exists():
        for res in XNAT.select('/data/projects/' + project + '/resources/BIDS_datatype/files').get():
            if res.endswith('.json'):
                with open(XNAT.select('/data/projects/' + project + '/resources/BIDS_datatype/files/' + res).get(), "r+") as f:
                    datatype_mapping = json.load(f)
                    sd_dict = datatype_mapping[project]
                    print('\t\t>Using BIDS datatype mapping in project level %s' % (project))
    else:
        # Fall back to a default mapping inferred from scan types.
        # NOTE(review): the scan list is always pulled from the hard-coded
        # 'LANDMAN' project, not ``project`` — confirm this is intended.
        print('\t\t>WARNING: No BIDS datatype mapping in project %s - using default mapping' % (project))
        scans_list_global = XNAT.get_project_scans('LANDMAN')
        # Anatomical scans: T1/T2 variants.
        for sd in scans_list_global:
            c = re.search('T1|T2|T1W', sd['scan_type'])
            if not c == None:
                sd_anat = sd['scan_type'].strip().replace('/', '_').replace(" ", "").replace(":", '_')
                sd_dict[sd_anat] = "anat"
        # Functional scans: resting state.
        for sd in scans_list_global:
            c = re.search('rest|Resting state|Rest', sd['scan_type'], flags=re.IGNORECASE)
            if not c == None:
                sd_func = sd['scan_type'].strip().replace('/', '_').replace(" ", "").replace(":", '_')
                sd_dict[sd_func] = "func"
        # Diffusion scans.
        for sd in scans_list_global:
            c = re.search('dwi|dti', sd['scan_type'], flags=re.IGNORECASE)
            if not c == None:
                sd_dwi = sd['scan_type'].strip().replace('/', '_').replace(" ", "").replace(":", '_')
                sd_dict[sd_dwi] = "dwi"
        # Field maps.
        for sd in scans_list_global:
            c = re.search('Field|B0', sd['scan_type'], flags=re.IGNORECASE)
            if not c == None:
                sd_fmap = sd['scan_type'].strip().replace('/', '_').replace(" ", "").replace(":", '_')
                sd_dict[sd_fmap] = "fmap"
        # Persist the derived default mapping for later inspection.
        with open("global_mapping.json", "w+") as f:
            json.dump(sd_dict, f, indent=2)
    return sd_dict
b8a4c4e15408b2d6c83a467dac1d7dfc21f9059f
12,909
def remove_whitespace(tokens):
    """Remove any top-level whitespace in a token list.

    Whitespace tokens inside recursive
    :class:`~.token_data.ContainerToken` are preserved.

    :param tokens:
        A list of :class:`~.token_data.Token`
        or :class:`~.token_data.ContainerToken`.
    :return:
        A new sub-sequence of the list.
    """
    return [tok for tok in tokens if tok.type != 'S']
05a0c2d745544e1e60e86539f38e84e8c03d7c0c
12,910
def create_avg_stoplines_df(stoplines_df_name):
    """Create an aggregated stoplines_avg_df with the average stopline X, Y for each link and number of lanes"""
    # One row per Link_ID, seeded with the mean stopline X position.
    stopline_avg_df = stoplines_df_name.groupby('Link_ID')['stopline_X'].mean().reset_index(name='mean_X')
    # Attach the mean Y per link.  iloc[:, 1] takes the aggregated value
    # column; this relies on both groupbys producing rows in the same
    # Link_ID order, which holds because groupby sorts keys by default.
    stopline_avg_df['mean_Y'] = stoplines_df_name.groupby('Link_ID')['stopline_Y'].mean().reset_index(name='mean_Y').iloc[:,1]
    # Number of lanes = count of stopline rows per link.
    stopline_avg_df['n_lanes'] = stoplines_df_name.groupby('Link_ID')['Lane'].count().reset_index().iloc[:,1]
    # Direction taken from the first row of each link.
    stopline_avg_df['link_direction'] = stoplines_df_name.groupby('Link_ID')['Link_direction'].first().reset_index().iloc[:,1]
    #print(stopline_avg_df.head())
    return stopline_avg_df
284d22d121cb495b95e61da4c45c107b9cfd7a24
12,911
def create_grid(dataset: list) -> str:
    """
    Create a table grid of the github users

    Args:
        dataset (list): The dataset to create the table from, in this
            case, the list of github users.

    Returns (str): table grid of github users
    """
    per_row = 7
    rows = []
    for start in range(0, len(dataset), per_row):
        cells = ''.join(
            f'\n<td>{item} </td>\n'
            for item in dataset[start:start + per_row]
        )
        rows.append('<tr>' + cells + '</tr>\n')
    return ''.join(rows)
8cd9efaa01ece44ae06400e1e14456f80b2137fa
12,912
def caesar_1(char_in: str) -> str:
    """Character offset by 1 position.

    Parameters:
        char_in : function
            Character of the source text.

    Returns:
        char_out : str
            Character of the encrypted text.
    """
    code = ord(char_in[0])
    # 'z' (122) and 'Z' (90) wrap around to 'a'/'A'.
    if code in (122, 90):
        return chr(code - 25)
    # a..y or A..Y shift forward by one.
    if 97 <= code <= 121 or 65 <= code <= 89:
        return chr(code + 1)
    # Anything else is passed through unchanged.
    return char_in
0a77b94ea2f2b1bf75334d633da0a8299f8c7960
12,913
def get_runtime(job):
    """Returns the runtime in milliseconds or None if job is still running"""
    finished_time = job.metadata.get("finished_time")
    if finished_time is None:
        # No finish timestamp means the job is still running.
        return None
    start_time = job.metadata.get("scrapystats")["start_time"]
    return float(finished_time - start_time)
710d9d135ae7097bb85b0c3e8ea88d142da593e2
12,914
import torch
def bmtm(mat1, mat2):
    """batch matrix transpose matrix product

    Computes mat1ᵀ @ mat2 per batch element via einsum: for inputs of
    shape (b, j, i) and (b, j, k) the result has shape (b, i, k).
    """
    return torch.einsum("bji, bjk -> bik", mat1, mat2)
9c00e371f4180da67b09be8e2cf35c3bab0785e3
12,915
def get_residue_ranges(numbers):
    """Given a list of integers, creates a list of ranges with the
    consecutive numbers found in the list.

    Parameters
    ----------
    numbers: list
        A list of integers

    Returns
    ------
    list
        A list with the ranges of consecutive numbers found in the list
    """
    ordered = sorted(set(numbers))
    # A break occurs wherever two neighbours differ by more than 3; each
    # break contributes an [end-of-range, start-of-next-range] pair.
    breaks = [[left, right]
              for left, right in zip(ordered, ordered[1:])
              if left + 3 < right]
    # Flatten first element + break pairs + last element, then pair up
    # consecutive boundary values into (start, end) tuples.
    boundary_iter = iter(ordered[:1] + sum(breaks, []) + ordered[-1:])
    return list(zip(boundary_iter, boundary_iter))
0f263ee265574e64bec655ec2b5f35fdef2cf8af
12,916
def instance_get_host(instance):
    """
    Retrieve the host the instance is located on
    """
    host_key = 'OS-EXT-SRV-ATTR:host'
    return instance[host_key]
0fef8cdf9e2ba7ac26e8a1ef2b2a1786cbe73d9d
12,917
import csv


def loadcsv(filename):
    """
    Reads an input CSV file.

    Args:
        filename (str): input file path.

    Returns:
        List containing all rows from the CSV file without headers.
    """
    with open(filename, "r", encoding="utf-8") as f:
        rows = list(csv.reader(f))[1:]
    # Drop empty rows (blank lines parse to []).
    return [row for row in rows if row]
442d0fdf7bcc160e98c83d7c848ec9477cf757fe
12,919
from typing import Dict
import re


def apply_list_replace(input_str: str, replacements: Dict[str, str]) -> str:
    """
    Apply a series of replacement on the input.

    :param input_str: the string to be modified
    :param replacements: a Dict regex -> replacement. Each item will be
        passed to re.sub()
    :return: the modified string
    """
    result = input_str
    # Non-dict replacements are tolerated and leave the input unchanged.
    if isinstance(replacements, dict):
        for pattern, repl in replacements.items():
            result = re.sub(pattern, repl, result)
    return result
287e1a7763e7f56719adf566358c62156bcf668c
12,920
def overlappingDates(date_set1, date_set2):
    """Return the overlap of two date intervals.

    date_set1: (tuple) start date and end date
    date_set2: (tuple) start date and end date

    The overlap starts at the later of the two starts and ends at the
    earlier of the two ends.
    """
    start_date = max(date_set1[0], date_set2[0])
    end_date = min(date_set1[-1], date_set2[-1])
    return (start_date, end_date)
01e0a6f28b8e8780a1d347f5a8864887965d4a56
12,922
from typing import Union


def serialize_attribute_value(value: Union[str, int, float, bool, None]):
    """Serialize a value to the string form stored in a Magento attribute.

    None -> "", True/False -> "1"/"0", everything else -> str(value).
    """
    if value is None:
        return ""
    if isinstance(value, bool):
        # bool must be special-cased: str(True) would give "True", not "1".
        return "1" if value else "0"
    return str(value)
a9d5b4f6d507672b594eb1c88d4d86c9bfc6bc11
12,925
def mk_uni_port_num(intf_id, onu_id):
    """Create a unique virtual UNI port number from PON and ONU IDs.

    :param intf_id: PON interface ID
    :param onu_id: (int) ONU ID (0..max)
    :return: (int) UNI port number
    """
    # Layout: intf_id in bits 11+, onu_id in bits 4..10 (parens for clarity;
    # << binds tighter than | anyway).
    return (intf_id << 11) | (onu_id << 4)
3d348b9d6dc40f54d6f3667a8a5310a3d52b3d5d
12,926
from pathlib import Path


def find_files(base_root: Path):
    """Search the given base directory for the actual dataset root.

    A valid root is a directory containing rgb.txt, groundtruth.txt and
    depth.txt. Subdirectories named 'depth', 'rgb' or '__MACOSX' are never
    descended into (they lie below a dataset root).

    :param base_root: directory to start searching from
    :return: (root, rgb_path, depth_path, trajectory_path)
    :raises FileNotFoundError: if no valid root is found
    """
    skip_dirs = {'depth', 'rgb', '__MACOSX'}
    pending = {Path(base_root)}
    while pending:
        candidate = pending.pop()
        rgb_file = candidate / 'rgb.txt'
        trajectory_file = candidate / 'groundtruth.txt'
        depth_file = candidate / 'depth.txt'
        if rgb_file.is_file() and trajectory_file.is_file() and depth_file.is_file():
            return candidate, rgb_file, depth_file, trajectory_file
        # Not the root we want: queue the eligible subdirectories.
        pending.update(
            child for child in candidate.iterdir()
            if child.is_dir() and child.name not in skip_dirs
        )
    raise FileNotFoundError("Could not find a valid root directory within '{0}'".format(base_root))
d54ab18fb1bc7b49a7bf486b11d020a5d17e3cdb
12,927
def summult(list1, list2):
    """Return the sum of the element-wise products of list1 and list2.

    Both lists must have equal length.

    Usage:   lsummult(list1,list2)
    """
    if len(list1) != len(list2):
        raise ValueError("Lists not equal length in summult.")
    return sum(a * b for a, b in zip(list1, list2))
2b1c4543867998c8edf372c2388df13ce07df910
12,928
def CRPS_compute(CRPS):
    """Compute the averaged CRPS from the given accumulator object.

    Parameters
    ----------
    CRPS : dict
        A CRPS object created with CRPS_init (keys "CRPS_sum" and "n").

    Returns
    -------
    out : float
        The mean CRPS.
    """
    total = CRPS["CRPS_sum"]
    count = CRPS["n"]
    # Multiply by 1.0 (rather than casting) so array-valued sums keep their type.
    return 1.0 * total / count
378dee32c7a9c7a88784438c228f21ae3b6f24f3
12,930
def cim_to_glm_name(prefix, cim_name):
    """Build a GridLAB-D object name from a prefix and a CIM name.

    GridLAB-D models from the platform quote their 'name' attributes, while
    names returned by the CIM triple-store are unquoted — so any embedded
    quotes are stripped and the combined name is re-quoted.
    """
    clean_prefix = prefix.replace('"', '')
    clean_name = cim_name.replace('"', '')
    return '"{}_{}"'.format(clean_prefix, clean_name)
aedf44eccff2237b99458a59f2de3b1f2775c80b
12,931
import os
from urllib import request as url


def getPDBFile(path):
    """
    Obtain a PDB file. First check the path given on the command line - if
    that file is not available, obtain the file from the PDB webserver at
    http://www.rcsb.org/pdb/ .

    Parameters
        path: Name of PDB file to obtain (string)

    Returns
        handle: File-like object containing the PDB file
    """
    if not os.path.isfile(path):
        URLpath = "http://www.rcsb.org/pdb/cgi/export.cgi/" + path + \
            ".pdb?format=PDB&pdbId=" + path + "&compression=None"
        return url.urlopen(URLpath)
    # Bug fix: the 'rU' (universal newlines) mode was deprecated since
    # Python 3.4 and removed in 3.11, where it raises ValueError. Plain
    # text mode performs the same newline translation.
    return open(path, 'r')
44d70aa7330be8256a89d0e1183111bcf59f3e91
12,932
def tz_syntax(string):
    """Validate a time-zone adjustment reply.

    Returns ('good', cleaned_reply) when the reply is either the literal '0'
    or starts with '+'/'-' followed by dot/comma-separated groups of at most
    two characters; otherwise returns (None, None). Commas are normalised to
    dots in the cleaned reply.
    """
    normalized = string.replace(',', '.')
    groups_ok = all(len(group) <= 2 for group in normalized[1:].split('.'))
    if (string[0] in ('+', '-') and groups_ok) or string == '0':
        return 'good', normalized.strip()
    return None, None
b727cc1459999410c91adc8dce5683e2fb583db3
12,933
def fun_dfun(obj, space, d):
    """
    Computes the posterior predictive and posterior predictive gradients of
    the provided GPyOpt object.

    Parameters
    ----------
    obj: GPyOpt object
        The GPyOpt object with a surrogate probabilistic model.
    space: GPyOpt space
        A GPyOpt object that contains information about the design domain.
    d: np.ndarray
        Proposed design.

    Returns
    -------
    tuple
        (masked posterior mean as float, masked posterior mean gradient).
    """
    mask = space.indicator_constraints(d)
    # Perf fix: call the surrogate once instead of twice — index 0 holds the
    # posterior mean, index 2 its gradient (predict is deterministic).
    prediction = obj.model.predict_withGradients(d)
    pred = prediction[0][0][0]
    d_pred = prediction[2][0]
    return float(pred * mask), d_pred * mask
14c39d96dc810d8078ff2dd602fc6216214e8d3f
12,935
def partition_opm_license_list(df):
    """Partition the OPM Granted Licenses list by license type.

    Parameters
    ----------
    df (DataFrame): OPM Granted Licenses list

    Returns
    -------
    license_list_dict (dict): rows of df partitioned by license type, keyed
        'PBI', 'P1', 'P3', 'P5' — a row belongs to a partition when the
        corresponding license column is non-null.
    """
    column_by_type = {
        'PBI': 'Power BI',
        'P1': 'Essentials License (Project Plan Essential)',
        'P3': 'Professional (Project Plan 3)',
        'P5': 'Premium (Project Plan 5)',
    }
    return {key: df[df[column].notna()] for key, column in column_by_type.items()}
4f9bab49385e23732e5e56e3f0dbf8762eb05438
12,937
def get_upload_commands(system, release, package):
    """Return the package_cloud commands required to upload this package,
    one per target repository."""
    repos = ("datawireio/stable", "datawireio/telepresence")
    return [
        "package_cloud push {}/{}/{} {}".format(repo, system, release, package)
        for repo in repos
    ]
db3fccb3302657367be121f4bb64affcd4879180
12,939
def get_attached_policy(client, policy_arn):
    """
    Get the policy document of an attached policy.

    Returns the default version's document, or False if either API call fails.
    """
    try:
        policy = client.get_policy(PolicyArn=policy_arn)['Policy']
        version = policy['DefaultVersionId']
    except Exception as error:
        print('Get policy failed: {}'.format(error))
        return False
    # NOTE: version IDs are not reused — if v1..v3 exist and v2 is deleted,
    # the next version is still v4 — so enumerating versions by guessing IDs
    # ('v1'..'v5') is unreliable; only the known default version is fetched.
    try:
        document = client.get_policy_version(
            PolicyArn=policy_arn,
            VersionId=version
        )['PolicyVersion']['Document']
        return document
    except Exception as error:
        print('Get policy version failed: {}'.format(error))
        return False
9a3f6e3a24727e203acc06b0b7022f695989c886
12,940
def fix_whitespace_for_phantom(text: str):
    """Transform output for proper display in a phantom.

    Replaces spaces with non-breaking-space entities and newlines with <br>
    tags so aligned text (e.g. pandas DataFrames) renders correctly.
    """
    escaped = text.replace(' ', r'&nbsp;')
    return '<br>'.join(escaped.splitlines())
7a0e3cddd778263977da74096f7bd30f44a0ce45
12,941
def normalize_assignments(assignments):
    """Canonicalise a clustering state so essentially-equal states compare equal.

    Cluster ids are relabelled 0, 1, 2, ... in order of first appearance.
    The result is a tuple so it can be used as a dict key.
    """
    relabel = {}
    normalized = []
    for cluster_id in assignments:
        if cluster_id not in relabel:
            # Next fresh label is simply the number of labels assigned so far.
            relabel[cluster_id] = len(relabel)
        normalized.append(relabel[cluster_id])
    return tuple(normalized)
1574a62db1ca65c1051bfac6fce9b30918892404
12,942
def get_temperature_reading_as_json(temperature_reading):
    """Render a single temperature reading as a small JSON document string.

    All field values are serialised as strings; the exact formatting
    (newlines and spacing) of the original output is preserved.
    """
    r = temperature_reading
    return (
        '{\n "probe_id" : "' + r.probe_id
        + '",\n "temperature_C" : "' + str(r.temperature_C)
        + '",\n "temperature_F" : "' + str(r.temperature_F)
        + '",\n "timestamp" : "' + str(r.timestamp)
        + '"\n}'
    )
4836532602e4b0c704c6eab439ac873d8e53c237
12,943
import math


def step_decay(epoch: int):
    """
    Learning rate scheduler: halve the initial rate every 50 epochs.

    Parameters
    -----------
    epoch: int
        Current epoch number

    Returns
    --------
    float
        Learning rate for this epoch
    """
    base_rate = 1e-3
    decay_factor = 0.5
    epochs_per_drop = 50.0
    drops = math.floor((1 + epoch) / epochs_per_drop)
    return base_rate * math.pow(decay_factor, drops)
80af442b6ce0c7b454969896b7ac41efdf63eaf6
12,945
def _internal_is_knet_ascii(buf): """ Checks if the file is a valid K-NET/KiK-net ASCII file. :param buf: File to read. :type buf: Open file or open file like object. """ first_string = buf.read(11).decode() # File has less than 11 characters if len(first_string) != 11: return False if first_string == 'Origin Time': return True return False
9ca251ef9c5eab64a97f69e1acd8b6aa746ea09b
12,946
import pathlib
import os


def get_program_email_list(program):
    """
    Return the list of email template filenames for a given program.

    Resolves ../resources/views/programs/<program> relative to the current
    working directory and lists its files, excluding home.blade.php.
    """
    programs_dir = pathlib.Path(os.getcwd()) / ".." / "resources" / "views" / "programs"
    entries = os.listdir(programs_dir / program)
    entries.remove("home.blade.php")
    return entries
44e03cd5fa018480044441fbd92ace771e063073
12,949
def getInviteAcceptedRedirect(entity, _):
    """Returns the redirect URL for accepting an invite.

    Built from the entity's role, scope path and link id; the second
    argument is unused.
    """
    return '/{0}/accept_invite/{1}/{2}'.format(
        entity.role, entity.scope_path, entity.link_id)
39affb109481cb96bb4ca992911e6a6a1957962a
12,950
def format_month(month):
    """Format a month input to its 3-character abbreviation.

    Args:
        month: user input month — either a 1-2 digit number ('1'..'12') or a
            month name/abbreviation in consistent casing.

    Returns:
        The 3-character month abbreviation, or raises ValueError for
        anything that is not a valid month.
    """
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    if month.isdigit():
        if len(month) > 2:
            raise ValueError
        index = int(month)
        if index > 12 or index <= 0:
            raise ValueError
        return months[index - 1]
    # Mixed casings like 'jAn' are rejected outright.
    if not (month.istitle() or month.islower() or month.isupper()):
        raise ValueError
    normalized = month.capitalize()
    if normalized in months:
        return normalized
    raise ValueError
da7ffd8bc801377ecebcc76e972219633ae21566
12,951
def get_data(fn, split=False, split_char=None, filter_blanks=False):
    """
    Read a text file, optionally splitting and cleaning its contents.

    :param fn: filename to open
    :param split: if you want to split the data read
    :param split_char: character you want to split the data on
    :param filter_blanks: remove empty strings if split=True

    Example:
    >>>data = get_data('file.txt', split=True, split_char=",")
    >>>print(data)
    [1, 2, 3, 4]
    """
    with open(fn, encoding='utf-8') as fh:
        data = fh.read()
    if split:
        if split_char:
            data = data.split(split_char)
        if filter_blanks:
            data = [piece.strip() for piece in data if piece.strip() != '']
    return data
6ff198281d41f96fb48c45c51525245cffe8e7ec
12,952
def add_cors(resp):
    """
    Ensure all responses have the CORS headers. This ensures any failures
    are also accessible by the client.

    A pre-set Max-Age of '0' is respected; otherwise it defaults to one hour.
    """
    headers = resp.headers
    headers['Access-Control-Allow-Origin'] = '*'
    headers['Access-Control-Allow-Credentials'] = 'true'
    headers['Access-Control-Allow-Methods'] = 'POST, OPTIONS, GET, PUT, DELETE'
    headers['Allow'] = 'POST, GET, PUT, DELETE'
    if headers.get('Access-Control-Max-Age') != '0':
        headers['Access-Control-Max-Age'] = '3600'
    return resp
0e8044bcdbbfebad19b41b1323ba0935e178999c
12,953
def maybe_add(d, exclude_false=False, **kws):
    """
    Adds keyword arguments to a dict if their values are not None.

    Parameters
    ----------
    d: dict
        The dictionary to add to (mutated in place).
    exclude_false: bool
        Also exclude keys whose values are exactly ``False``
        (0 and other falsy values are still added).
    kws: dict
        The keys to maybe add.
    """
    for k, v in kws.items():
        if v is None:
            continue
        # Bug fix: exclude_false was documented but previously never checked.
        if exclude_false and v is False:
            continue
        d[k] = v
    return d
bc7bc09261f37fe052d30d6b0c2452937f33c563
12,955
import collections


def flatten_feature_dict(metapath_to_metric_dict):
    """
    Process the nested dictionaries returned by features_for_metapaths()
    into a single-level OrderedDict whose keys combine metric and metapath
    as '<metric>_<metapath>'.
    """
    feature_dict = collections.OrderedDict()
    # Bug fix: .iteritems() is Python 2 only and raises AttributeError on
    # Python 3; .items() is the equivalent.
    for metapath, metric_dict in metapath_to_metric_dict.items():
        for metric, value in metric_dict.items():
            key = metric + '_' + str(metapath)
            feature_dict[key] = value
    return feature_dict
5f92c9bb1b51fa130e9306b0740490d9fcc93e14
12,956
import os
import shutil


def del_repo(base_path):
    """
    Delete a repository at base_path. Always returns True.

    Security fix: the original `os.system("rm -R -f " + base_path)` allowed
    shell injection through the path (spaces, quotes, `;`...) and was
    Unix-only. This version removes the tree directly, preserving the
    `rm -R -f` semantics of silently ignoring missing paths and errors.
    """
    if os.path.isdir(base_path) and not os.path.islink(base_path):
        shutil.rmtree(base_path, ignore_errors=True)
    elif os.path.lexists(base_path):
        try:
            os.remove(base_path)
        except OSError:
            pass  # mirror `rm -f`: best-effort, never raise
    return True
c46d87f2d38a52f70e2fcbb90cb3b07f3ee824a5
12,957
def decode_text(encoded_text: str, reverse_mapping: dict):
    """
    Decode a (Huffman-style) bit string using the given character->code
    mapping, printing and returning the decoded text.

    Bits are accumulated until they match a known code, at which point the
    corresponding character is emitted and the accumulator reset.
    """
    # Invert the mapping so lookups go code -> character.
    code_to_char = {code: char for char, code in reverse_mapping.items()}
    decoded_text = ""
    pending_bits = ""
    for bit in encoded_text:
        pending_bits += bit
        if pending_bits in code_to_char:
            decoded_text += code_to_char[pending_bits]
            pending_bits = ""
    print("Text file was decoded successfully, the following displays the decoded text: ", decoded_text)
    return decoded_text
58fbff9e00b691919537b40a765f437a566fc730
12,958
import functools  # kept: other definitions in this module may rely on it


def compose(*fns):
    """Compose functions left to right: compose(f, g)(x) == g(f(x)).

    The first function receives the original arguments; each subsequent
    function receives the previous result.
    """
    def composed(*args, **kwargs):
        value = fns[0](*args, **kwargs)
        for fn in fns[1:]:
            value = fn(value)
        return value
    return composed
118c05955584cb7dd494e0615cccb82c3ccd8142
12,959
import shutil


def cut_paste(src_path, dst_path):
    """Cut-and-paste a file or directory (thin wrapper over shutil.move).

    Args:
        src_path (string): Source path to be cut
        dst_path (string): Destination path to paste to

    Returns:
        True once the move completes.
    """
    shutil.move(src_path, dst_path)
    return True
295eee15f8e31f510333967a45c683ff99c25efb
12,960
def getBase(path):
    """Return the final segment of a '|'-delimited path.

    :param str path:
    :return: Base
    :rtype: str
    """
    return path.rsplit("|", 1)[-1]
a7dd2364531c5e1bea528dae738f60e2c2985d4c
12,961
def posterior(kalman, problem):
    """Kalman smoothing posterior for the given problem tuple.

    The problem's last three elements are (observations, times, states);
    the trailing states are not needed for filtering/smoothing.
    """
    *_, observations, times, _states = problem
    return kalman.filtsmooth(observations, times)
724a80cd69efa499cbc62f79ab611de30c10ac26
12,963
def get_cache_key_counter(bound_method, *args, **kwargs):
    """
    Return the cache, key and stat counter for the given call.

    NOTE(review): this follows what looks like Odoo's ormcache protocol —
    confirm against the ormcache implementation in use.
    """
    # The method is bound to a model/record; __self__ recovers that object.
    model = bound_method.__self__
    # clear_cache is itself a bound method whose __self__ is the ormcache
    # decorator instance wrapping bound_method.
    ormcache = bound_method.clear_cache.__self__
    # lru(model) yields the LRU cache, the base key prefix, and the stats counter.
    cache, key0, counter = ormcache.lru(model)
    # The full cache key is the prefix plus the call-specific key part.
    key = key0 + ormcache.key(model, *args, **kwargs)
    return cache, key, counter
241b13b29b3dce0888f2eeb40361dfa03d5f8389
12,964