content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def compute_mean_rt(df):
    """Compute the mean reaction time for every subject.

    Parameters
    ----------
    df : pandas.DataFrame
        Aggregate response data with 'subject' and 'rt' columns.

    Returns
    -------
    numpy.ndarray
        Subject-wise mean RTs, one value per subject group.
    """
    per_subject_means = df.groupby('subject').rt.mean()
    return per_subject_means.values
04b51953a8a5fa37fc746e32416bd0b598cfb138
698,188
import re


def add_iteration_suffix(name):
    """Append or advance an iteration suffix on *name*.

    If *name* already ends in an integer, that integer is incremented
    (keeping its zero-padding width); otherwise an ``_01`` style suffix
    is appended.

    examples:
        'col'    -> 'col_01'
        'col1'   -> 'col2'
        'col_02' -> 'col_03'
    """
    trailing_digits = re.search(r"\d+$", name)
    if trailing_digits is None:
        # No numeric tail: start a fresh counter, adding "_" only when
        # the name does not already end with one.
        separator = "" if name.endswith("_") else "_"
        return f"{name}{separator}01"
    digits = trailing_digits.group()
    stem = name[: -len(digits)]
    return f"{stem}{int(digits) + 1:0{len(digits)}}"
8021f1bc131f0a303bfaa06caa39e77f1de0dff9
698,189
def min_max(tr):
    """Return the ratio of the minimum to the maximum of a trace.

    Parameters
    ----------
    tr : 1D array of float
        The input profile.

    Returns
    -------
    float
        ``tr.min() / tr.max()`` computed after casting to float.

    Examples
    --------
    >>> tr = np.array([0.8, 0.9, 1.4, 2.0, 1.1])
    >>> min_max(tr)  # doctest: +ELLIPSIS
    0.4...
    """
    as_float = tr.astype(float)
    return as_float.min() / as_float.max()
8fcb533f22addf95ccf6e75e3d116dfe928aa6ca
698,190
def f(x):
    """Simple recursive function: counts down to 0, yielding x + 1
    for non-negative integer input."""
    return 1 if x == 0 else 1 + f(x - 1)
ad71b050af9f3e634b8afab7565bc4df1c3f1222
698,191
def binary_to_string(num):
    """Return the binary representation of a fraction in (0, 1).

    This solution is a straight python implementation of Gayle's
    solution in CtCI.

    Parameters
    ----------
    num : float
        Value strictly between 0 and 1.

    Returns
    -------
    str
        Binary expansion, e.g. ``0.5 -> '.1'``, or ``'ERROR'`` when the
        input is out of range or needs more than 32 characters.
    """
    if num >= 1 or num <= 0:
        return 'ERROR'
    binary = []
    binary.append('.')
    while num > 0:
        # Setting a limit on length: 32 characters
        if len(binary) >= 32:
            return 'ERROR'
        r = num * 2
        if r >= 1:
            # BUG FIX: append string digits, not ints — the original
            # appended 1/0, making ''.join() raise TypeError.
            binary.append('1')
            num = r - 1
        else:
            binary.append('0')
            num = r
    return ''.join(binary)
0752ddbf8ce76a3779fc6fba3cccf9b6ec66bca6
698,192
def _assert_float_dtype(dtype): """Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if `dtype` is not a floating point type. """ if not dtype.is_floating: raise ValueError("Expected floating point type, got %s." % dtype) return dtype
7aee4b4bc4b389b718b3e7a8cb9a77c37fd4ff1e
698,193
def get_value(str_val):
    """Convert a string into float or int, if possible.

    Parameters
    ----------
    str_val : str or None
        Raw value; falsy input (None, "") yields "".

    Returns
    -------
    int, float, or str
        int when the text parses and has no decimal point, float when
        it parses with a '.', otherwise the original string.
    """
    if not str_val:
        # Covers both None and the empty string (the original had a
        # separate, unreachable `is None` branch after this check).
        return ""
    try:
        val = float(str_val)
        if "." not in str_val:
            # BUG FIX: int(float('inf')) raises OverflowError (and
            # int(float('nan')) raises ValueError) — fall back to the
            # original string in either case instead of crashing.
            val = int(val)
    except (ValueError, OverflowError):
        val = str_val
    return val
a3c7deee4110ea25a88f8568139015be20fef1d0
698,194
def relation_bet_point_and_line( point, line ):
    """Judge the relation between a point and a line segment.

    Three situations for the foot of the perpendicular from the point:
    1) on the segment: the value is in [0, 1];
    2) on the extension beyond the starting point A: the value < 0;
    3) on the extension beyond the ending point B: the value > 1.

    Args:
        point ([double, double]): point coordinates
        line ([x0, y0, x1, y1]): line coordinates

    Returns:
        [float]: the relation between point and line
            (start < 0 <= on segment <= 1 < end)
    """
    seg_dx = line[2] - line[0]
    seg_dy = line[3] - line[1]
    pt_dx = point[0] - line[0]
    pt_dy = point[1] - line[1]
    # Squared length of the segment AB.
    seg_len_sq = seg_dx ** 2 + seg_dy ** 2
    # Dot product of AB and AP.
    dot = seg_dx * pt_dx + seg_dy * pt_dy
    if seg_len_sq > 0:
        return dot / seg_len_sq
    # Degenerate (zero-length) segment: report 1, as the original did.
    return 1
92a1ed906ee4d7fbb97fa46668fedbbd5f704fba
698,195
def escape_quotes(value):
    """Escape quote characters for use in DTD files.

    DTD files can use single or double quotes for identifying strings,
    so &quot; and &apos; are the safe bet that will work in both cases.
    """
    replacements = (('"', "\\&quot;"), ("'", "\\&apos;"))
    for raw, escaped in replacements:
        value = value.replace(raw, escaped)
    return value
1aa6e5f2325bfc293dff6e34b77117284d5bd018
698,196
import json


def init_population(ind_init, filename):
    """Create an initial population from a JSON file.

    ind_init: [class] class that an individual will be assigned to
    filename: [string] name of the file from which the population is read

    returns: [list] list of Individual objects
    """
    with open(filename, "r") as pop_file:
        entries = json.load(pop_file)
    return [ind_init(entry) for entry in entries]
015b4d6b81dc44e325393535fc7abd65ca26db55
698,197
import random


def shuffle(l):
    """Return a shuffled copy of ``l``; the input is left untouched."""
    shuffled = list(l)
    random.shuffle(shuffled)
    return shuffled
4945d5d57ecaf3c9b8340f3f20c5e960938f3914
698,198
import subprocess


def svnversion():
    """Produce a compact version identifier for the working copy.

    @return SVN version identifier (from @c stdout of the subprocess)
    """
    proc = subprocess.Popen(
        'svnversion',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, _ = proc.communicate()
    return stdout
cd0bc4b34bc474085bf7ec6f15d536a8ef3b6b1c
698,199
def init_layout():
    """Initialize a layout dict for Plotly figures.

    :return: dict with axis labels/formats, hover mode and legend position.
    """
    tick_format = ".,"
    return {
        "xaxis": {"title": "Frequency (MHz)", "tickformat": tick_format},
        "yaxis": {"title": "Flux (Jy/beam)", "tickformat": tick_format},
        "hovermode": "closest",
        "legend": {"x": 1., "y": 1.},
    }
545520e0059f5bd61c743cc604adcb28d6f3a6e4
698,201
def _star(inner: str) -> str: """Convenience function Takes a string representations of a regular expression, where the empty string is considered as matching the empty langage, and returns its star regular expression. """ if len(inner) > 0: return f'({inner})*' return 'ε'
cd3636cf0d39e4f9a74ef397dc47446320c3f0ea
698,202
import json


def get_user_from_term(input):
    """Build a user dict from a JSON string supplied on the command line.

    example: jccli create-user
        --json "{'email': 'jc.tester1@sagebase.org', 'username': 'jctester1'}"

    :param input: JSON text describing the user; single quotes are
        normalised to double quotes before parsing, and an empty string
        yields an empty dict.
    :return: a SystemUser dict
    """
    if input == "":
        return {}
    # NOTE(review): replacing every "'" breaks values that legitimately
    # contain apostrophes — acceptable for CLI convenience input.
    # The original wrapped this in `except Exception as error: raise
    # error`, a no-op re-raise, removed here.
    return json.loads(input.replace("'", '"'))
1f7bdaa637453f429a78f140c741b797e0f79877
698,204
def vec2num(vec):
    """Collapse a list of decimal digits into the number they spell."""
    total = 0
    for digit in vec:
        total = total * 10 + digit
    return total
ffce787a02bc9f2bc5669dd8fb622bdff1fc941b
698,205
def _time_to_range(hours): """ Converts a time from format '11:00AM - 1:00PM' to (660, 780) """ first_colon = hours.find(':') if first_colon != 1 and first_colon != 2: return first_half = hours[first_colon + 3:first_colon + 5] if first_half != 'AM' and first_half != 'PM': return hyphen = hours.find('-') if hyphen == -1: return second_colon = hours.find(':', first_colon + 1) if second_colon != hyphen + 3 and second_colon != hyphen + 4: return second_half = hours[second_colon + 3:second_colon + 5] if second_half != 'AM' and second_half != 'PM': return start_hours = int(hours[0:first_colon]) start_minutes = int(hours[first_colon + 1:first_colon + 3]) end_hours = int(hours[hyphen + 2:second_colon]) end_minutes = int(hours[second_colon + 1:second_colon + 3]) start_pm_offset = 12 if first_half == 'PM' else 0 end_pm_offset = 12 if second_half == 'PM' else 0 start_time = start_hours * 60 + start_pm_offset * 60 + start_minutes end_time = end_hours * 60 + end_pm_offset * 60 + end_minutes return (start_time, end_time)
035199ca17b474db003535dd1e3887d0797e607c
698,206
from datetime import datetime def _iardict_to_fil_header(iardict: dict) -> dict: """Build dict header from dict iar.""" source_name = iardict["Source Name"] source_ra = iardict["Source RA (hhmmss.s)"] source_dec = iardict["Source DEC (ddmmss.s)"] # ref_dm = iardict["Reference DM"] # pul_period = iardict["Pulsar Period"] # high_freq = iardict["Highest Observation Frequency (MHz)"] telescope_id = int(iardict["Telescope ID"]) machine_id = int(iardict["Machine ID"]) data_type = int(iardict["Data Type"]) # observing_time = int(iardict["Observing Time (minutes)"]) # gain = iardict["Gain (dB)"] # bandwidth = int(iardict["Total Bandwith (MHz)"]) avg_data = int(iardict["Average Data"]) sub_bands = int(iardict["Sub Bands"]) # ---- ROACH ---- # values fft_pts = 128 adc_clk = 200e6 # parameters tsamp = avg_data * fft_pts / adc_clk f_off = adc_clk / fft_pts * 1e-6 time_now = datetime.now().strftime("_%Y%m%d_%H%M%S") # tsamp = 1e6 / float(bandwidth) * avg_data rawdatafile = f"ds{avg_data}_{source_name}{time_now}.fil" return { "telescope_id": telescope_id, "machine_id": machine_id, "data_type": data_type, "rawdatafile": rawdatafile, "source_name": source_name, "az_start": 0.0, "za_start": 0.0, "src_raj": source_ra, "src_dej": source_dec, "tstart": 0.0, "tsamp": tsamp, "fch1": 0.0, "foff": f_off, "nchans": sub_bands, "nifs": 1, "ibeam": 1, "nbeams": 1, }
920ce832d0ab9b7fa219b14572cbfa04c4c0dfc2
698,207
def forwardPath(path):
    """Replace backslashes with slashes in a given path.

    :Parameter path: the path being transformed
    """
    # str.replace substitutes every occurrence in a single pass; the
    # original while-loop around it was redundant.
    return path.replace('\\', '/')
4f2d2dfae06ba79f59e7fd2d938332fe5bbae9ac
698,208
def _trim_site(read_and_slice, read_type): """Remove DpnII or MboI site from the left read, remove NalIII site from the right read""" read, slice = read_and_slice start = slice.start stop = slice.stop sequence = read.sequence if read_type == 'R1': if sequence[-3:] == 'ATC': # If 3' is DpnII or MboI site, clip it (from the left read) read = read[:-4] stop -= 4 if sequence[:3] == 'CAT': # If 5' is NalIII site, clip it (from the right read) read = read[4:] start += 4 else: if sequence[-4:-1] == 'GAT': # If 3' is DpnII or MboI site, clip it (from the left read) read = read[:-4] stop -= 4 if sequence[1:4] == 'ATG': # If 5' is NalIII site, clip it (from the right read) read = read[4:] start += 4 read.name += f':{start}:{stop}' return read, (start, stop)
d5abb679a88a94176a0dac6aaf2f5996d34fcb94
698,209
from collections import OrderedDict


def make_per_dataset_plot(delta_cchalf_i):
    """Make a line plot of delta cc half per group.

    :param delta_cchalf_i: mapping of group number -> delta CC-1/2
        (fractional values; plotted as percentages).
    :return: OrderedDict holding a single Plotly figure definition.
    """
    # FIX: OrderedDict must come from collections; typing.OrderedDict is
    # an annotation alias, deprecated for runtime use since Python 3.9.
    d = OrderedDict()
    d["per_dataset_plot"] = {
        "data": [
            {
                "y": [value * 100 for value in delta_cchalf_i.values()],
                "x": list(delta_cchalf_i.keys()),
                "type": "scatter",
                "mode": "lines",
            }
        ],
        "layout": {
            "title": "Delta CC-Half vs group",
            "xaxis": {"title": "Group number"},
            "yaxis": {"title": "Delta CC-Half"},
        },
    }
    return d
219b7a981f896c7723e8f6ccde16819703503e54
698,210
def merge_sort(array):
    """Sort ``array`` ascending with recursive merge sort.

    Lists of length <= 2 are handled directly (a two-element list is
    swapped in place and returned); longer lists are split in half,
    each half sorted recursively, and the results merged by repeatedly
    taking the smaller head element. Ties take from the left half,
    keeping the sort stable.
    """
    size = len(array)
    # Base cases: nothing to sort, or a single in-place comparison.
    if size < 2:
        return array
    if size == 2:
        if array[0] > array[1]:
            array[0], array[1] = array[1], array[0]
        return array

    half = size // 2
    left = merge_sort(array[:half].copy())
    right = merge_sort(array[half:].copy())

    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; the other holds the already-sorted tail.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
659260d8a9b9160d0576c62d17dadd91925fb05c
698,211
def remove_duplicates(nums) -> int:
    """Return the number of distinct elements in ``nums``.

    :param nums: iterable of hashable values
    :return: count of unique values
    """
    unique_values = set(nums)
    return len(unique_values)
1605c594d18ae39749fa9da17700504747b98024
698,212
def vectorize_cst(value, cst_precision):
    """Expand ``value`` to match ``cst_precision``.

    If ``cst_precision`` is a vector format, return a list of ``value``
    repeated once per vector lane; otherwise return the scalar
    ``value`` unchanged.
    """
    if not cst_precision.is_vector_format():
        return value
    return [value for _ in range(cst_precision.get_vector_size())]
9a5ba282309f8134007084e32a4a58876ec05d73
698,213
def writeEndfINTG( row, col, vals, ndigit ):
    """Special INTG format, only used for sparse correlation matrices.

    Formats one INTG record: row/col indices in two 5-character fields,
    then ``vals`` as fixed-width integers of ``ndigit+1`` characters
    each, with per-ndigit padding so the integer section fills the line.

    :param row: row index of the record
    :param col: column index of the record
    :param vals: list of integers; NOTE mutated in place — padded with
        trailing zeros up to the per-line capacity
    :param ndigit: digits per value (2-6); selects field width, values
        per line and padding
    :return: the formatted record string
    :raises ValueError: if a value does not fit in ``ndigit`` digits
    """
    def toStr( val ) :
        # Zero renders as blanks rather than '0' in INTG records.
        if( val == 0 ): return ' ' * ( ndigit + 1 )
        if( abs( val ) > 10**ndigit ) :
            raise ValueError( "%d too large for INTG format with ndigit=%d!" % ( val, ndigit ) )
        # Right-justify in a field of ndigit+1 characters (sign included).
        return( ("%%%dd" % ( ndigit + 1 ) ) % val )

    linelength = 56 # space available for writing integers
    nints = linelength // (ndigit+1)
    if ndigit==3: nints = 13 # special case
    # Pad the caller's list in place with zeros to fill the line.
    while len(vals) < nints:
        vals.append(0)
    rets = "%5d%5d" % (row,col)
    # number of padding spaces depends on ndigit:
    padleft, padright = {2: (' ',' '), 3: (' ',' '), 4: (' ',''), 5: (' ',' '), 6: ('','')}[ndigit]
    rets += padleft
    for a in vals:
        rets += toStr(a)
    rets += padright
    return rets
89c3fae44fead6b8e670bbc69ed4b4083bdd406e
698,214
def ring_int_label_to_str(ring_int_label):
    """Translate a decoded ring label int into a human-readable string.

    Binary encoding only works when you can only have at most one of
    each type e.g. one inner 2^2 = 4 = 2^1 + 2^1 = two nuclear.
    There are many examples of e.g. '36' which could be four outer
    rings, three outer and two inner, etc. This seems a silly way to
    encode data - maybe I have misunderstood? Especially, some seem
    insanely high - having at least 8 rings is implausible.
    See e.g. row 48: ring type 76 i.e. 64 (8 * outer ring) + 8 (outer
    ring) + 4 (inner ring)...?
    http://skyserver.sdss.org/dr12/SkyserverWS/ImgCutout/getjpeg?ra=211.21558&dec=-00.641614&width=512&height=512

    Args:
        ring_int_label (int): decoded int e.g. 2

    Returns:
        (str) human-readable label, paraphrased from Nair 2010 definitions

    Raises:
        KeyError: if the label is outside 1-6.
    """
    labels = {
        1: 'nuclear',
        2: 'inner',   # or two nuclear...
        3: 'outer',   # or two inner...
        4: 'min_two',
        5: 'min_four',
        6: 'min_eight',
    }
    return labels[ring_int_label]
2f8ce0476bf300a803f13cf5603e909f0b31045f
698,215
import json


def prettify_dict(rule):
    """Prettify printout of a dictionary as a string.

    rule: rule

    Return value: rule string
    """
    return json.dumps(
        rule,
        sort_keys=False,
        indent=4,
        separators=(',', ': '),
    )
a4ff4e4b088246d8f36851b5fc7d69828dd99a83
698,216
import asyncio


def event_loop():
    """Create and return a fresh asyncio event loop (module-scoped fixture)."""
    loop = asyncio.new_event_loop()
    return loop
59f006ee32a6319cfeecb74d87c781d8a2887b20
698,217
def split_me(strings):
    """Split a comma-separated string into a list, for looping over
    later from the template."""
    return strings.split(",")
2b62cccac7602054b0e5cde34d0dbfc9964c0970
698,218
def hamming_distance(Subspace1, Subspace2):
    """Return the Hamming distance between two subspaces.

    Variables that are free in either subspace are ignored.

    **arguments**:
        * *Subspace1, Subspace2* (dict): subspaces in dictionary representation

    **returns**:
        * *Distance* (int): the distance between *Subspace1* and *Subspace2*

    **example**::

        >>> hamming_distance({"v1":0,"v2":0}, {"v1":1,"v2":1})
        2
        >>> hamming_distance({"v1":1}, {"v2":0})
        0
    """
    return sum(
        1
        for name, value in Subspace1.items()
        if name in Subspace2 and Subspace2[name] != value
    )
f42d13bd1d980b092235aa472a24a1cebe078f44
698,219
def get_last_root_page(pages):
    """Return the last page on the root level.

    :param pages: The requested page tree
    :type pages: list [ ~cms.models.pages.page.Page ]

    :return: The last root page of the given page list, or None if the
        list contains no root pages
    :rtype: ~cms.models.pages.page.Page
    """
    last_root = None
    for page in pages:
        # Root pages are those without a parent.
        if not page.parent:
            last_root = page
    return last_root
0fc938ac35dadc7da6c2215c17a68fbb184ee40f
698,221
import re


def get_clean_doi(doi: str) -> str:
    """Strip URL-encoding and PDF-extraction artifacts from a DOI string.

    :param doi: String containing a DOI.
    :returns: The extracted DOI.
    """
    cleanups = [
        (r'%2F', '/'),       # decode URL-encoded slashes
        # Artifacts from text extracted out of PDFs:
        (r'\)>', ' '),
        (r'\)/S/URI', ' '),
        (r'(/abstract)', ''),
        (r'\)$', ''),        # trailing parenthesis
    ]
    for pattern, replacement in cleanups:
        doi = re.sub(pattern, replacement, doi)
    return doi
5dd823b0ff69f3a51557f4fe992bb5108f86bf75
698,222
def generate_canonical_request(method, parsed_url, headers, signed_headers, content_sha256):
    """Generate a canonical request string for request signing.

    :param method: HTTP method.
    :param parsed_url: Parsed url is input from :func:`urlsplit`
    :param headers: HTTP header dictionary (title-cased keys).
    :param signed_headers: ordered list of lower-cased header names to sign.
    :param content_sha256: Content sha256 hexdigest string.
    """
    # '~' should not stay percent-encoded in the canonical form; decode
    # it back if present.
    path = parsed_url.path.replace("%7E", "~")
    query = parsed_url.query.replace("%7E", "~")

    canonical = [method, path, query]
    for name in signed_headers:
        raw = headers[name.title()]
        canonical.append('{}:{}'.format(name, str(raw).strip()))
    canonical.append('')
    canonical.append(';'.join(signed_headers))
    canonical.append(content_sha256)
    return '\n'.join(canonical)
2074a58f850893fe91fccb592ee9db238d5da7c6
698,223
from pathlib import Path


def find_path(paths):
    """Given a search path of files or directories with absolute paths,
    find the first existing path.

    Args:
        paths (list): A list of strings with absolute paths.

    Returns:
        str: The first path in the list `paths` that exists (resolved),
        or `None` if none of the paths exist.

    Example:
        The following example works if the file system has a file
        /usr/local/etc/snips.toml (e.g. on macOS with Snips installed):

        >>> find_path(['/etc/snips.toml', '/usr/local/etc/snips.toml'])
        '/usr/local/etc/snips.toml'
    """
    for candidate in (Path(name) for name in paths):
        if candidate.exists():
            return str(candidate.resolve())
    # None of the paths in the search path exist in the file system.
    return None
53dda06e55c26d0fce43bca5eea0cdc3fca53b11
698,224
def embeddings2ranks(embeddings):
    """Converts embeddings to ranks.

    Parameters
    ----------
    embeddings : pandas.DataFrame
        Long-format table with columns 'embed_type' ('microbe' or
        'metabolite'), 'feature_id', 'axis' and 'values'.
        # NOTE(review): column schema inferred from the accesses below —
        # confirm against the producer of this frame.

    Returns
    -------
    pandas.DataFrame
        Row-centered rank matrix (microbe features x metabolite features).
    """
    microbes = embeddings.loc[embeddings.embed_type == 'microbe']
    metabolites = embeddings.loc[embeddings.embed_type == 'metabolite']
    # Wide matrices: one row per feature, one column per latent axis.
    U = microbes.pivot(index='feature_id', columns='axis', values='values')
    V = metabolites.pivot(index='feature_id', columns='axis', values='values')
    pc_ids = sorted(list(set(U.columns) - {'bias'}))
    # Constant column so each side's bias term multiplies the other
    # side's ones column in the product below.
    U['ones'] = 1
    V['ones'] = 1
    # Note the swapped ['ones','bias'] / ['bias','ones'] ordering: it
    # pairs U's ones with V's bias and U's bias with V's ones.
    ranks = U[pc_ids + ['ones', 'bias']] @ V[pc_ids + ['bias', 'ones']].T
    # center each row
    ranks = ranks - ranks.mean(axis=1).values.reshape(-1, 1)
    return ranks
24bb2e9e06d20e025c7f9ee4054d9479987d7685
698,225
def s2b(s):
    """Pack an ASCII string into a single integer, big-endian.

    Args:
        s: A string of ASCII characters

    Returns:
        An int holding the concatenated 8-bit character codes
    """
    packed = 0
    for ch in s:
        # Make room for 8 bits, then add this character's code.
        packed = packed * 256 + ord(ch)
    return packed
edf3ca83dd7339b509edd6820da1026dc20c2918
698,226
def time_to_str(t):
    """Format a time object as a 'HH:MM' string, e.g. '08:30'."""
    return t.strftime('%H:%M')
6427b9267b63dc6d75dce2afae24bbbd58b0b0dd
698,227
def aggregate_min(results, _):
    """aggregate_score is the minimum score.

    Keeps only results whose trailing element is active, picks the one
    with the smallest leading score, and returns it without that
    trailing element.
    """
    active = [entry for entry in results if entry[-1].active]
    best = min(active, key=lambda entry: entry[0])
    return best[:-1]
07b5caffcbbe0bad87c4f18bb6feda3780e54ec9
698,228
import os


def write_to_file(topological_order):
    """Write the topological order to a file for inspection.

    Entries are written one per line sorted by key, key and value
    separated by three tabs. Returns the current working directory,
    i.e. where the file landed.
    """
    with open("topological_order.txt", "w") as out:
        for key, value in sorted(topological_order.items()):
            out.write(f"{key}\t\t\t{value}\n")
    return os.getcwd()
6f76c6d25423109a5d690fe47dfd673f650621ad
698,229
def test(one, two, three, **kwargs) -> int: """Return the sum of the arguments.""" return one + two + three + kwargs["four"] + kwargs["five"] + kwargs["six"]
c48a6968b6d4f1c673f7749e810d8d360e8d8d20
698,231
import subprocess


def fetch_release(url, tag, output_file):
    """Fetch the release asset advertised by a GitHub API endpoint.

    Args:
        url: string that is the GitHub API URL
        tag: string of release tag
        output_file: string containing the release output file to fetch

    Returns:
        stderr bytes of the subprocess used to fetch the file.
    """
    # NOTE(review): url/tag/output_file are interpolated into a shell
    # pipeline — callers must never pass untrusted values here.
    command = (
        'curl -A "curl" -s {}{} | grep "{}" | cut -d : -f 2,3 '
        '|tr -d \\" | wget -qi -'.format(url, tag, output_file)
    )
    proc = subprocess.Popen(
        [command],
        shell=True,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    _, err = proc.communicate()
    return err
71062090a80e9ba7b0ad71348baa65b155eb135d
698,232
def _hierarch_keywords(names): """ Prepend the 'HIERARCH ' string to all keywords > 8 characters Avoids FITS VerifyWarning. Parameters ---------- names : list keywords Returns ------- new_names : list keywords with HIERARCH prepended as apprpriate """ new_names = [] for cname in names: if len(cname) >= 8: new_names.append(f"HIERARCH {cname}") else: new_names.append(cname) return new_names
73fc3f1594bf12a7c4a2c79d320aa490eb3d39e9
698,235
def get_point_uuid(msg):
    """Return the point uuid: the string form of the passed message."""
    return str(msg)
60939e6f81e3cb076b7b67306f83e1d48dbb50b6
698,236
import math


def gps_to_dms(gps_data):
    """Convert a decimal coordinate (e.g. 116.397451) into EXIF-style
    degrees / minutes / seconds rational pairs.

    :param gps_data: coordinate in decimal degrees
    :return: ((degrees, 1), (minutes, 1), (seconds*100, 100)) — the
        seconds are rounded to 2 decimal places and stored as an
        integer numerator over 100.
    """
    # Degrees: round down.
    degrees = math.floor(gps_data)
    minutes_float = (gps_data - degrees) * 60
    # Minutes: round down again.
    minutes = math.floor(minutes_float)
    # Seconds, kept to 2 decimal places; must be stored as an integer.
    seconds = round((minutes_float - minutes) * 60, 2)
    return ((degrees, 1), (minutes, 1), (int(seconds * 100), 100))
568cbf65e5bf479c52c2591511af0c2d85bded9a
698,237
def testfunc():
    """this is version 0.6"""
    return 1
78b41057859d0dcecdf707dcab414d08a1cff27b
698,238
def get_size_of_corpus(filepaths):
    """Given a list of filepaths, return the total number of lines.

    Parameters
    ----------
    filepaths : [ str ]
        A list of filepaths

    Returns
    -------
    num_lines : int
        The total number of newline characters across all files
    """
    def blocks(handle, size=65536):
        # Stream the file in fixed-size chunks so huge files are never
        # loaded fully into memory.
        while True:
            chunk = handle.read(size)
            if not chunk:
                break
            yield chunk

    total = 0
    for filepath in filepaths:
        with open(filepath, encoding="utf-8") as handle:
            total += sum(chunk.count("\n") for chunk in blocks(handle))
    return total
46d166f7ae21ed345c45c38b782c7405790a8de2
698,239
def getSpanVectors(normal, c, d): """ getSpanVectors(normal, prevA, prevB) -> (a,b) Given a normal, return two orthogonal vectors which are both orthogonal to the normal. The vectors are calculated so they match as much as possible the previous vectors. """ # Calculate a from previous b a1 = d.cross(normal) if a1.norm() < 0.001: # The normal and d point in same or reverse direction # -> Calculate b from previous a b1 = c.cross(normal) a1 = b1.cross(normal) # Consider the opposite direction a2 = -1 * a1 if c.distance(a1) > c.distance(a2): a1 = a2 # Ok, calculate b b1 = a1.cross(normal) # # Consider the opposite (don't: this would make backfacing faces) # b2 = -1 * b1 # if d.distance(b1) > d.distance(b2): # b1 = b2 # Done return a1.normalize(), b1.normalize()
5a0a99b8920ceb55f61edcf440a4af12f4fb2343
698,240
import numpy


def create_dataset(dataset, look_back):
    """Build sliding-window features and labels for sequence learning.

    Each sample is ``look_back`` consecutive values taken from column 0
    of ``dataset``; its label is the value that immediately follows.

    :param dataset: 2-D array whose first column holds the series
    :param look_back: window length
    :return: (features, labels) as numpy arrays
    """
    windows = []
    targets = []
    for start in range(len(dataset) - look_back):
        windows.append(dataset[start:(start + look_back), 0])
        targets.append(dataset[start + look_back, 0])
    return numpy.array(windows), numpy.array(targets)
a0b6a17d72b435f2dae79f0c4e2aad98e9f19178
698,241
from struct import unpack
from typing import Union
from pathlib import Path


def _tiff2xml(path: Union[Path, str]) -> bytes:
    """Extract OME XML from OME-TIFF path.

    This will use the first ImageDescription tag found in the TIFF header.

    Parameters
    ----------
    path : Union[Path, str]
        Path to OME TIFF.

    Returns
    -------
    xml : bytes
        OME XML (any trailing NUL byte stripped)

    Raises
    ------
    ValueError
        If the TIFF file has no OME metadata.
    """
    with Path(path).open(mode="rb") as fh:
        try:
            # Map the 4-byte magic to the sizes/formats needed to walk the
            # IFD: classic TIFF (II*/MM*) vs BigTIFF (II+/MM+), little vs
            # big endian.
            offsetsize, offsetformat, tagnosize, tagnoformat, tagsize, codeformat = {
                b"II*\0": (4, "<I", 2, "<H", 12, "<H"),
                b"MM\0*": (4, ">I", 2, ">H", 12, ">H"),
                b"II+\0": (8, "<Q", 8, "<Q", 20, "<H"),
                b"MM\0+": (8, ">Q", 8, ">Q", 20, ">H"),
            }[fh.read(4)]
        except KeyError as e:
            raise ValueError(f"{path!r} does not have a recognized TIFF header") from e
        # BigTIFF headers carry 4 extra bytes before the first IFD offset.
        fh.read(4 if offsetsize == 8 else 0)
        fh.seek(unpack(offsetformat, fh.read(offsetsize))[0])
        for _ in range(unpack(tagnoformat, fh.read(tagnosize))[0]):
            tagstruct = fh.read(tagsize)
            # Tag code 270 is ImageDescription, where OME-TIFF stores XML.
            if unpack(codeformat, tagstruct[:2])[0] == 270:
                size = unpack(offsetformat, tagstruct[4 : 4 + offsetsize])[0]
                if size <= offsetsize:
                    # Small values are stored inline in the tag itself.
                    desc = tagstruct[4 + offsetsize : 4 + offsetsize + size]
                    break
                # Otherwise the tag's last field is an offset to the data.
                fh.seek(unpack(offsetformat, tagstruct[-offsetsize:])[0])
                desc = fh.read(size)
                break
        else:
            raise ValueError(f"No OME metadata found in file: {path}")
    if desc[-1] == 0:
        # Strip a trailing NUL if present.
        desc = desc[:-1]
    return desc
6bd35d09c5edfd6362379ae3375e888e99f4609a
698,242
import shlex


def run_cli(module, cli):
    """Execute the cli command on the target node(s) and return the output.

    :param module: The Ansible module to fetch input parameters.
    :param cli: The complete cli string to be executed on the target node(s).
    :return: Output/Error or Success msg depending upon the response from cli.
    """
    argv = shlex.split(cli)
    rc, out, err = module.run_command(argv)

    if out:
        return out

    if err:
        # Report the failure through the module and mark it failed.
        summary = [{
            'switch': '',
            'output': u'Operation Failed: {}'.format(str(argv)),
        }]
        module.exit_json(
            unreachable=False,
            failed=True,
            exception='',
            summary=summary,
            task='CLI command to configure L2 zero touch provisioning',
            stderr=err.strip(),
            msg='L2 ZTP configuration failed',
            changed=False
        )
    else:
        return 'Success'
fca5119a0f337952760922ee89c06a1c8578273f
698,243
def get_depth(count):
    """Return the depth of the tree for a node count.

    - count -- 32 bit value

    eg: any number between 9 and 16 yields 4 (i.e. ceil(log2(count))
    for positive counts).
    """
    depth = 0
    shifted = count >> 1
    while shifted != 0:
        depth += 1
        shifted >>= 1
    # More than one set bit means count is not a power of two, which
    # needs one extra level.
    if count & (count - 1) != 0:
        depth += 1
    return depth
705790d8353e65dd1fa16e36f5c6ea79d4bbbe27
698,244
def substituted_mol(self, mol, checkI):
    """
    Returns a molecule object in which all metal atoms specified in
    args.metal_atoms are replaced by Iodine and the charge is set
    depending on the number of neighbors.

    Side effects: updates self.args.metal_sym, self.args.metal_idx and
    self.args.complex_coord in place, indexed by the metal's position
    in args.metal_atoms.

    # NOTE(review): `mol` appears to be an RDKit Mol (GetAtoms /
    # SetAtomicNum / SetFormalCharge) — mutated in place when
    # checkI == "I"; confirm callers expect the tuple return rather
    # than the molecule itself.
    """
    # Map neighbor count 2..8 -> formal charge -3..+3.
    Neighbors2FormalCharge = dict()
    for i, j in zip(range(2, 9), range(-3, 4)):
        Neighbors2FormalCharge[i] = j

    for atom in mol.GetAtoms():
        symbol = atom.GetSymbol()
        if symbol in self.args.metal_atoms:
            # Record this metal's symbol, atom index and coordination
            # number at the slot matching its position in metal_atoms.
            self.args.metal_sym[self.args.metal_atoms.index(symbol)] = symbol
            self.args.metal_idx[self.args.metal_atoms.index(symbol)] = atom.GetIdx()
            self.args.complex_coord[self.args.metal_atoms.index(symbol)] = len(
                atom.GetNeighbors()
            )
            if checkI == "I":
                # Substitute the metal with iodine (atomic number 53).
                atom.SetAtomicNum(53)
                n_neighbors = len(atom.GetNeighbors())
                if n_neighbors > 1:
                    formal_charge = Neighbors2FormalCharge[n_neighbors]
                    atom.SetFormalCharge(formal_charge)
    return self.args.metal_idx, self.args.complex_coord, self.args.metal_sym
e6b122aad37f2e11543231b6fbe1834039d65f8f
698,245
import os def _get_pic_map(pic_files): """ pic_files: [pic_path, ] return: {pic_id : [file_path,]} """ file_dict = {} for f in pic_files: pic_basename = os.path.basename(f) file_dict[pic_basename] = f return file_dict
6baf2101518161788217d9b6e5fc835676947c48
698,246
import re


def is_url(filePath):
    """Check whether the file path is a valid url.

    :param str filePath: file path
    :return: True if the path starts with http:// or https://
        (case-insensitive)
    :rtype: boolean
    """
    return re.match(r'^https?:\/\/', filePath, re.IGNORECASE) is not None
8d86e07c8585f30bb8927ba9cfa7f1a8d128b598
698,247
def dump_identifier(value):
    """Dump an identifier: 'y' for the literal True, else 'n'."""
    # Identity check on purpose: only the bool True maps to "y";
    # truthy values like 1 or "yes" still dump as "n".
    return "y" if value is True else "n"
45d5fded4a5a6f12488385c35321eb94e10a06e1
698,248
def DistanceFromMatrix(matrix):
    """Return function(i,j) that looks up matrix[i][j].

    Useful for maintaining flexibility about whether a function is
    computed or looked up.

    Matrix can be a 2D dict (arbitrary keys) or list (integer keys).
    """
    def lookup(i, j):
        return matrix[i][j]
    return lookup
f3cb95aace0cfe70bfeef9d8778947df16cdd4b1
698,249
from pathlib import Path


def homedir() -> str:
    """Return the user's home directory as a string."""
    home = Path.home()
    return str(home)
dbd65e7db4cdbf2bd06c1ab42ed1ea3503e14ac2
698,250
import os


def append_str_to_filename(path_or_file_name, str_to_append):
    """Append a given string to a filename, before its extension.

    **Parameters**:
        path_or_file_name : str
            The path or filename to append to.
        str_to_append : str
            The str to append.

    **Returns**:
        `str` path or filename with the given str appended.
    """
    stem, extension = os.path.splitext(path_or_file_name)
    return f"{stem}{str_to_append}{extension}"
fed41face6e8aa66650d2ff8edc2aed395b5b8eb
698,251
import re


def natural_keys(text):
    """Sort key for human ("natural") ordering: alist.sort(key=natural_keys).

    Digit runs compare numerically, everything else lexically.
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
    """
    return [
        int(chunk) if chunk.isdigit() else chunk
        for chunk in re.split(r'(\d+)', text)
    ]
d3a017c9f0426175bfa8fe53ab779a501a3ec24c
698,252
def make_attention_mask_3d(source_block, target_block):
    """Return a 3-dimensional (3-D) attention mask.

    :param source_block: id array of shape (batch, source_length);
        entries >= 1 count as real tokens. (The indexing below needs a
        batch dimension, so the inputs are 2-D per example.)
    :param target_block: id array of shape (batch, target_length)
    :return: boolean mask of shape (batch, source_length, target_length)
    """
    target_valid = target_block[:, None, :] >= 1
    source_valid = source_block[:, :, None] >= 1
    return target_valid * source_valid
d06e44da4dfdcf2312583d4601642a34094399db
698,255
def quote_aware_split(string, delim=',', quote='"'):
    """Split outside of quotes (i.e. ignore delimiters within quotes).

    Quote characters are kept in the output fields.
    """
    fields = []
    current = []
    inside_quotes = False
    for ch in string:
        if ch == quote:
            inside_quotes = not inside_quotes
        if ch == delim and not inside_quotes:
            fields.append(''.join(current))
            current = []
        else:
            current.append(ch)
    fields.append(''.join(current))
    return fields
0ef5f7040eee2a041fa1b6e6d8bf3a773a80f5f9
698,256
def NormalizarSerie(SR):
    """Min-max normalize a series to the [0, 1] range.

    Useful e.g. to normalize a DPO indicator and trade at 0.2 (buy) /
    0.5 (sell) in a long-only strategy.

    Parameters
    ----------
    SR : pandas Series, required
        Series to normalize.

    Usage
    -----
    from DataframeUtils import NormalizarSerie
    df_stocks["GGAL"]["DPO_NORMALIZADO"] = NormalizarSerie(df_stocks["GGAL"]["DPO"])

    Returns
    -------
    Normalized series
    """
    serie = SR.copy()
    span = serie.max() - serie.min()
    return (serie - serie.min()) / span
6e7a9f0b2dd9be0fd82712ab5c9176e245e34549
698,257
def getcwd():
    """Return a string representing the current working directory.

    Stub implementation: always returns the empty string.

    :rtype: string
    """
    return ''
31ba3c781dad707ef5feaa15a841d8d528ded3e3
698,258
def _tess_ffi_file(file_name): """ TESS FFI File """ # tess2018229142941-s0001-4-3-0120-s_ffic.fits # yyyyddd sssss ccc # s0001/2018/229/4-3 # 18-23 4-8 8-11 24-27 sector = file_name[18:23] year = file_name[4:8] day_number = file_name[8:11] camera_chip = file_name[24:27] parts = [ "tess", "public", "ffi", sector, year, day_number, camera_chip, file_name ] return ["/".join(parts)]
8a0381c1c6566ecd518e8e7c0033453a1a9e66fe
698,259
import torch


def fft_shift(input: torch.Tensor) -> torch.Tensor:
    """PyTorch version of np.fftshift.

    Args
    - input: (Bx)CxHxWx2
    Return
    - ret: (Bx)CxHxWx2
    """
    # Spatial dims are H and W: dims 1-2 for a 4-D tensor, 2-3 for 5-D.
    first_spatial = 1 if input.dim() == 4 else 2
    spatial_dims = list(range(first_spatial, input.dim() - 1))
    shifts = [input.size(d) // 2 for d in spatial_dims]
    return torch.roll(input, shifts, spatial_dims)
9a08c3e33516378b80a7a53cf17915d9baa5d290
698,260
from typing import Mapping
from typing import Any
import json


def get_config(config_path: str = "secrets/config_oauth.json") -> Mapping[str, Any]:
    """Get the config from /test_input."""
    with open(config_path, "r") as handle:
        raw = handle.read()
    return json.loads(raw)
9ad1726bc5b1017f47872ce422d1c7a6fbf5d59c
698,261
def LMpM_total_size(ell_min, ell_max):
    """Total array size of Wigner D matrix

    Assuming an array (e.g., Wigner D matrices) in the order

        [[ell,mp,m] for ell in range(ell_min, ell_max+1)
         for mp in range(-ell,ell+1)
         for m in range(-ell,ell+1)]

    this function returns the total size of that array: the closed form
    of sum((2*ell+1)**2 for ell in range(ell_min, ell_max+1)), derived
    in sympy as 4*ell_max**3/3 + 4*ell_max**2 + 11*ell_max/3
    - 4*ell_min**3/3 + ell_min/3 + 1.
    """
    # Horner-form numerators kept as integers; the single final //3 is
    # exact because the whole numerator is divisible by 3.
    upper = ((4 * ell_max + 12) * ell_max + 11) * ell_max
    lower = (-4 * ell_min ** 2 + 1) * ell_min
    return (upper + lower + 3) // 3
7a6175640236ec3bc905d3b458f99eedbc792e09
698,262
def get_fcurve_data_path_property(fcurve):
    """Get the fcurve's data path property name.

    Example data_path:
    'sequence_editor.sequences_all["Transform"].scale_start_x'
    -> returns 'scale_start_x'.

    :param fcurve: Fcurve instance to get data path from
    :return: The last dot-separated component of the data path (the
        whole path when it contains no dot)
    """
    _, _, property_name = fcurve.data_path.rpartition(".")
    return property_name
0a7ce5fecdaa5cb1fe0024a18f6b6f057b5fe6cb
698,263
import subprocess def _ReadFileTailInShell(file_path, line): """Tails the file in the last several lines.""" return subprocess.check_output(['tail', '-%d' % line, file_path])
614e55daf67c9e3ec996ce730df608fc40e25aef
698,264
import ctypes
import numpy


def ctypes2numpy(cptr, length, dtype):
    """Copy ``length`` elements from a ctypes float pointer into a new
    numpy array of the given dtype."""
    assert isinstance(cptr, ctypes.POINTER(ctypes.c_float))
    out = numpy.zeros(length, dtype=dtype)
    # strides[0] is the element size in bytes, so this copies the whole buffer.
    byte_count = length * out.strides[0]
    assert ctypes.memmove(out.ctypes.data, cptr, byte_count)
    return out
d4f85de74946e5477f2f29bfa5cce119d3fe66a3
698,265
def align(offset, alignment):
    """Round ``offset`` up to the nearest multiple of ``alignment``.

    Arguments:
    - `offset`: An integer
    - `alignment`: An integer
    """
    remainder = offset % alignment
    if remainder:
        return offset + alignment - remainder
    return offset
1f9d8fd4d4ac7798e14ee92d83510bb4b0ba09aa
698,266
def parse_property_string(prop_str):
    """Generate a valid property string for extended xyz files.

    (ref. https://libatoms.github.io/QUIP/io.html#extendedxyz)

    Args:
        prop_str (str): A complete property string, or an appendix to one.

    Returns:
        str: A valid property string.
    """
    prefix = "Properties=species:S:1:pos:R:3:"
    # Already a full property string: hand it back untouched.
    if prop_str.startswith("Properties="):
        return prop_str
    return prefix + prop_str
24a865dcf2cba5b5f840e3e682fd58486b658355
698,267
def getPacketKey(opCode):
    """Get the packet key pair for a given instruction opcode.

    Input:
    - opCode: The opcode of the instruction for which the packet key has to
      be generated
    Output:
    - The (primary, secondary) packet keys for the opcode, where the second
      element is None for single-key instructions; None for unknown opcodes.
    """
    key_table = {}
    for code in ("00000", "00001", "00010", "00011"):
        key_table[code] = ("add0", "add1")
    key_table["00100"] = ("mul", None)
    for code in ("00101", "00110", "00111", "01000"):
        key_table[code] = ("fadd0", "fadd1")
    key_table["01001"] = ("fmul", None)
    for code in ("01010", "01011", "01100", "01101",
                 "01110", "01111", "10000", "10001"):
        key_table[code] = ("logic", None)
    key_table["10010"] = ("ldr", None)
    key_table["10011"] = ("str", None)
    key_table["10100"] = ("mov", None)
    return key_table.get(opCode)
c7730a444a4e8dacc20ae83038ddfc04ca5ba721
698,268
def map_to_legacy_object_map(m):
    """Converts a Soy map to a Soy legacy_object_map.

    legacy_object_maps must have string keys, but maps do not have this
    restriction.

    Args:
        m: Map to convert.

    Returns:
        An equivalent legacy_object_map, with keys coerced to strings.
    """
    legacy = {}
    for key in m:
        legacy[str(key)] = m[key]
    return legacy
17423488a85011c7be5429af58a5baf26ab8e30a
698,269
import signal
import subprocess
from warnings import warn


def end_process(proc):
    """
    Makes absolutely sure that a process is definitely well and truly dead.

    Escalates through SIGINT -> SIGTERM -> SIGKILL, waiting up to 10 seconds
    after each signal. Every failure is reported with a warning rather than an
    exception, except the last: if the process survives SIGKILL, the final
    TimeoutExpired is re-raised.

    Args:
        proc (subprocess.Popen): Popen object for the process

    Returns:
        int: ``proc.returncode`` once the process has exited.

    Raises:
        subprocess.TimeoutExpired: if the process is still alive 10 seconds
            after SIGKILL.
    """
    # A child terminated by signal N reports returncode == -N, so track which
    # return code the escalation step we reached should produce.
    expected_return = 0
    if proc.poll() is None:  # I'm not dead!
        expected_return = -signal.SIGINT
        proc.send_signal(signal.SIGINT)
        try:
            proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            warn("Timeout waiting for {} to exit!".format(proc.pid))
        if proc.poll() is None:  # I'm getting better!
            warn("Sending SIGTERM to {}...".format(proc.pid))
            expected_return = -signal.SIGTERM
            proc.terminate()
            try:
                proc.wait(timeout=10)
            except subprocess.TimeoutExpired:
                warn("Timeout waiting for {} to terminate!".format(proc.pid))
            if proc.poll() is None:  # I feel fine!
                warn("Sending SIGKILL to {}...".format(proc.pid))
                expected_return = -signal.SIGKILL
                proc.kill()
                try:
                    proc.wait(timeout=10)
                except subprocess.TimeoutExpired as err:
                    # SIGKILL cannot be caught or ignored; if the process is
                    # still here it is likely stuck in the kernel — give up.
                    warn("Timeout waiting for {} to die! Giving up".format(proc.pid))
                    raise err
    else:
        warn("Process {} already exited!".format(proc.pid))

    # A mismatch means the process exited on its own (or from another signal)
    # during the escalation; warn but still return the real code.
    if proc.returncode != expected_return:
        warn("Expected return code {} from {}, got {}!".format(expected_return, proc.pid, proc.returncode))
    return proc.returncode
5a42b02afc50bf9c46020e9b52f7b2e4bdf290a7
698,270
from typing import List def _format_plugin_names_and_versions(plugininfo) -> List[str]: """Format name and version of loaded plugins.""" values: List[str] = [] for _, dist in plugininfo: # Gets us name and version! name = f"{dist.project_name}-{dist.version}" # Questionable convenience, but it keeps things short. if name.startswith("pytask-"): name = name[7:] # We decided to print python package names they can have more than one plugin. if name not in values: values.append(name) return values
2313f2f60e71be2209ebb201297d44e8dcad513a
698,272
def star_sub_sections(body: str):
    r"""Convert \subsection and \subsubsection commands in ``body`` to their
    starred (unnumbered) variants, \subsection* and \subsubsection*."""
    # \subsection is replaced first; \subsubsection is unaffected by that pass
    # because its 'subsection' substring is not preceded by a backslash.
    for command in (r'\subsection', r'\subsubsection'):
        body = body.replace(command, command + '*')
    return body
06583d16c76393edb614955c4e3786b292c5fa51
698,273
import subprocess


def clear_terminal() -> None:
    """Clear the terminal window by running the ``clear`` command.

    Returns None.
    """
    subprocess.run(["clear"])
be15e4cdb4d0a28c4718b1fe28955a47fd3c672c
698,274
def rjd_to_mjd(rjd):
    """Convert a Reduced Julian Date to a Modified Julian Date.

    RJD (Reduced Julian Date): days elapsed since 1858-11-16T12Z (JD 2400000.0)
    MJD (Modified Julian Date): days elapsed since 1858-11-17T00Z (JD 2400000.5)

    The two epochs differ by exactly half a day.
    """
    HALF_DAY = 0.5
    return rjd - HALF_DAY
2b1f4d830670f754fbbc2259face4d261877d335
698,275
def arg_xor_dict(args_array, opt_xor_dict):
    """Function: arg_xor_dict

    Xor check between each key in opt_xor_dict and its values, using
    args_array: a key may appear in args_array, or one or more of its values
    may, but not both at once.

    Arguments:
        (input) args_array -> Array of command line options and values.
        (input) opt_xor_dict -> Dictionary with key and values that will be
            xor'd with each other.
        (output) status -> True|False - False when a key and one of its
            values both appear in args_array.
    """
    args_array = dict(args_array)
    opt_xor_dict = dict(opt_xor_dict)
    status = True
    present_opts = set(opt_xor_dict.keys()) & set(args_array.keys())
    for opt in present_opts:
        conflicts = set(opt_xor_dict[opt]) & set(args_array.keys())
        for item in conflicts:
            print("Option {0} or {1}, but not both.".format(opt, item))
            status = False
            # One reported conflict per option is enough.
            break
    return status
08001050d5e542bcd09343920c49e8bb59c9c7a6
698,276
def example_function(a: int) -> int:
    """Return the square of the given integer.

    Parameters
    ----------
    a : int
        input number

    Returns
    -------
    int
        square of a
    """
    return a * a
fd519e72ec385aa905868facee198b3eaf57f778
698,277
def prepare_pre_actual_for_prestg(unnested_df):
    """Prepare preprocessed data to be pushed into the prestg table.

    Selects the five id columns plus the five ``preprocessed_output_*``
    columns, renames the outputs positionally to their measure names, then
    unpivots (melts) those measures into long format with one
    (measure, val) pair per row.

    NOTE(review): ``unnested_df`` appears to be an Optimus/Spark-style
    dataframe (``.cols.select`` / ``.toDF`` / ``.melt``) — confirm against
    the caller.
    """
    # Id columns + the 5 raw preprocessed output columns, in positional order.
    pre_actual_old_columns=['nodeid','section','module','ts','nodename']+[f"preprocessed_output_{i}" for i in range(0,5)]
    pre_actual_df=unnested_df.cols.select(pre_actual_old_columns)
    # Positional rename: preprocessed_output_0..4 map onto the five measures.
    pre_actual_new_columns=['nodeid','section','module','ts','nodename','BerPreFecMax','PhaseCorrectionAve', 'PmdMin','Qmin', 'SoPmdAve']
    pre_actual_df=pre_actual_df.toDF(*pre_actual_new_columns)
    # Unpivot: keep the id columns as the index, turn each measure column
    # into a (measure, val) row.
    cols=['BerPreFecMax','PhaseCorrectionAve', 'PmdMin','Qmin', 'SoPmdAve']
    index=['nodeid','section','module','ts','nodename']
    pre_actual_df=pre_actual_df.melt(index,cols,var_name='measure',value_name='val')
    return pre_actual_df
10ff80f213df117b14ab8fb25dc2067bfcd83d72
698,278
def merge_dicts(*args):
    """Create a new dictionary merging all the given ones.

    Later arguments win on key collisions. With no arguments an empty dict
    is returned.
    """
    if not args:
        return {}
    # Start from a copy of the first mapping so the inputs stay untouched
    # (and a dict subclass input yields a result of the same type).
    merged = args[0].copy()
    for extra in args[1:]:
        merged.update(extra)
    return merged
f23aeac179bd3ae16c0b1c141f8c918d87075396
698,279
import doctest def _quiet_testmod(module): """ Run all of a modules doctests, not producing any output to stdout. Return a tuple with the number of failures and the number of tries. """ finder = doctest.DocTestFinder(exclude_empty=False) runner = doctest.DocTestRunner(verbose=False) for test in finder.find(module, module.__name__): runner.run(test, out=lambda x: True) return (runner.failures, runner.tries)
2c788dd128214e7c230124636aad3b0630060dd1
698,280
import os def _RecursiveProbe(path, read_method): """Recursively probes in path and all the subdirectory using read_method. Args: path: Root path of the recursive probing. read_method: The method used to probe device information. This method accepts an input path and returns a string. e.g. _ReadSysfsUsbFields, _ReadSysfsPciFields, or _ReadSysfsDeviceId. Returns: A list of strings which contains probed results under path and all the subdirectory of path. Duplicated data will be omitted. """ def _InternalRecursiveProbe(path, visited_path, results, read_method): """Recursively probes in path and all the subdirectory using read_method. Args: path: Root path of the recursive probing. visited_path: A set containing visited paths. These paths will not be visited again. results: A list of string which contains probed results. This list will be appended through the recursive probing. read_method: The method used to probe device information. This method accepts an input path and returns a string. Returns: No return value. results in the input will be appended with probed information. Duplicated data will be omitted. """ path = os.path.realpath(path) if path in visited_path: return if os.path.isdir(path): data = read_method(path) # Only append new data for result in data: if result not in results: results.append(result) entries_list = os.listdir(path) visited_path.add(path) else: return for filename in entries_list: # Do not search directory upward if filename == 'subsystem': continue sub_path = os.path.join(path, filename) _InternalRecursiveProbe(sub_path, visited_path, results, read_method) return visited_path = set() results = [] _InternalRecursiveProbe(path, visited_path, results, read_method) return results
f8cf6198f086f7e902f30b1fd20dc286c80c3c19
698,281
def _pango_attr_list_types(attributes): """Returns the types of all attributes in the given Pango.AttrList.""" # Pango.AttrList does not appear to have any normal ways to access its # contents, so this is a bit of a hack. types = [] attributes.filter(lambda attribute: types.append(attribute.klass.type)) return types
d14a28fd88eb60cbe8190efaf09e1b5081c437c5
698,282
def is_street_name(elem):
    """Return True when the element's 'k' attribute equals 'addr:street'.

    Used to pick out street-address tag elements while iterating a file.
    """
    return elem.attrib['k'] == "addr:street"
72c751c903cf346548be59dc35c4d0b7345aea7e
698,283
import colorsys


def hue_sat_to_cmap(hue, sat):
    """Make a 256-entry RGB color map from a hue and saturation value.

    ``hue`` is given in degrees (0-360) and ``sat`` as a percentage (0-100);
    both are normalized to [0, 1] before conversion.
    """
    hue = float(hue) / 360.0
    sat = float(sat) / 100.0
    # Sweep value (brightness) from 0 to 1 across the 256 entries.
    return [colorsys.hsv_to_rgb(hue, sat, v / 255.0) for v in range(256)]
816cee4bbf69ee466cdade149b4f4b4547e9b29a
698,284
import sys


def get_cal(service, name):
    """Look up a calendar ID by its display name (summary).

    Exits the process with status 1 when no calendar matches.
    """
    response = service.calendarList().list(
        showHidden=True, minAccessRole="writer").execute()
    for entry in response['items']:
        if entry['summary'] == name:
            return entry['id']
    print("Calendar not found")
    sys.exit(1)
dc81eedd88bbe49ad8c61de9e3ba624cc5361957
698,285
import random
import time
import json


def fetch_job_states():
    """Simulate a long-running task for testing purposes.

    Randomly picks one of two canned job-state payloads, sleeps 1 or 2
    seconds, and returns the payload as a JSON string.
    """
    payload_one = [{'job_name': 'job1', 'job_state': 'running'},
                   {'job_name': 'job2', 'job_state': 'pending'},
                   {'job_name': 'job3', 'job_state': 'finished'}]
    payload_two = [{'job_name': 'job1', 'job_state': 'pending'},
                   {'job_name': 'job2', 'job_state': 'failed'},
                   {'job_name': 'job3', 'job_state': 'finished'}]
    # One-in-three chance of the "fast" branch, matching the original odds.
    if random.randint(0, 2) == 1:
        time.sleep(1)
        return json.dumps(payload_one)
    time.sleep(2)
    return json.dumps(payload_two)
d872edb4ada47a59b0137ba45e3a0611ff99b0a8
698,286
import jinja2


def render_template(template_file, template_vars, searchpath="./templates/"):
    """Render a jinja2 template found under ``searchpath`` with the given
    variables and return the resulting string."""
    loader = jinja2.FileSystemLoader(searchpath=searchpath)
    env = jinja2.Environment(loader=loader)
    return env.get_template(template_file).render(template_vars)
d15f5d6e120a22ee5735dbf5b1f8c324a5cae861
698,287
def subtract(x, y):
    """Return the difference ``x - y``.

    >>> from methods import subtract
    >>> subtract(2, 1)
    1
    """
    return x - y
00fd9bff041879b372ab7b0d8aa536e3aecb8378
698,288
def dict_sort(dictionary: dict) -> list:
    """Return the keys of ``dictionary`` (integer-valued) sorted by their
    associated values in descending order."""
    # Sort ascending, then flip; keeps the original's tie ordering
    # (equal-valued keys come out in reverse insertion order).
    ascending = sorted(dictionary, key=dictionary.__getitem__)
    return ascending[::-1]
330e6033a9e511341e5d9216a385b366a09eed9c
698,289
import re


def get_citation_form(attributes):
    """Compute the citation form of a pronominal mention.

    Args:
        attributes (dict(str, object)): Attributes of the mention, must
            contain the key "tokens".

    Returns:
        str: The citation form of the pronoun, one of "i", "you", "he",
        "she", "it", "we", "they" — or None when the token is not a
        recognized pronoun.
    """
    pronoun = attributes["tokens"][0].lower()
    # (citation form, surface-form pattern) pairs; the patterns are disjoint
    # so the scan order does not affect the result.
    forms = [
        ("he", "^(he|him|himself|his)$"),
        ("she", "^(she|her|herself|hers|her)$"),
        ("it", "^(it|itself|its)$"),
        ("they", "^(they|them|themselves|theirs|their)$"),
        ("i", "^(i|me|myself|mine|my)$"),
        ("you", "^(you|yourself|yourselves|yours|your)$"),
        ("we", "^(we|us|ourselves|ours|our)$"),
    ]
    for form, pattern in forms:
        if re.match(pattern, pronoun):
            return form
    return None
2e02063ae0694e7ce76e3be8ce111d9550de7697
698,290
def parse_int(value) -> int:
    """Best-effort conversion of a DB cell value to an int.

    Many cells can be empty, which arrives here as None; this returns 0 for
    None, empty, or otherwise unconvertible values instead of raising.

    !!! IMPORTANT !!!
    Use this only where a missing number is acceptable (e.g. a character's
    gold or an item's armor bonus). For values like foreign-key IDs, a
    missing number makes the row invalid and SHOULD raise — do not funnel
    those through this function.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0
066e0a93436697bb31b91743faf851a752dd0e87
698,291
import os
import requests


def download_save_html_data(url, filename, save_html_content=True):
    """Download html data from url and optionally save it at filename.

    Args:
        url (str): url of data to fetch
        filename (str): file path where to save downloaded data
        save_html_content (bool, optional): If True save data, else just
            return it. Defaults to True.

    Returns:
        bytes or None: the downloaded html content, or None when
        ``filename`` already exists — note the cached file is NOT read back
        in that case, a message is printed instead.
    """
    # Only hit the network when no cached copy exists on disk.
    if not os.path.isfile(filename):
        response = requests.get(url)
        # save html data in file
        if save_html_content:
            with open(filename, "wb") as html_file:
                html_file.write(response.content)
    else:
        print("{} already exists".format(filename))
        return None
    return response.content
a7adaca61abf6c9866c5347ad9592d8ec4027288
698,292
def bitpos_from_mask(mask, lsb_pos=0, increment=1):
    """Turn a decimal value (bitmask) into a list of indices where each index
    value corresponds to the bit position of a bit that was set (1) in the
    mask.

    :param mask: a decimal value used as a bit mask
    :param lsb_pos: The decimal value associated with the LSB bit
    :param increment: If this is +i, then the bit next to LSB takes the
        decimal value lsb_pos + i, and so on up the mask.
    :return: Sorted list of position values for the set bits.
    """
    positions = []
    current = lsb_pos
    while mask:
        if mask & 1:
            positions.append(current)
        # Advance the position for every bit, set or not.
        current += increment
        mask >>= 1
    return sorted(positions)
d7938258c0f2dc523720bc82a48ecba1c2342223
698,293
def moving_average(list):
    """Return the arithmetic mean of the given sequence.

    Args:
        list: Sequence of numbers. (The name shadows the builtin ``list``;
            kept unchanged for backward compatibility with keyword callers.)

    Returns:
        float: The mean value.

    Raises:
        ZeroDivisionError: If the sequence is empty (matches the previous
            behavior of dividing by ``len(list) == 0``).
    """
    # Builtin sum() replaces the original manual index loop.
    return sum(list) / len(list)
d5602113e74a7ef0f0027ec5f6964b72680c3499
698,294
def url_compose(camera):
    """Compose the RTSP URL string for the given camera.

    Example output:
    rtsp://88.204.57.242:5533/user=admin&password=******&channel=1&stream=1.sdp?
    """
    return (
        f'rtsp://{camera.ip_addr}:{camera.port}/'
        f'user={camera.login}&password={camera.password}&channel=1&stream=0.sdp?'
    )
62bf81b993e95a4c5ed7ca8362220cf16102c877
698,295
def filter_constant(vars):
    """Build a True/False index over the variances ``vars``.

    Entries are False where the variance is zero (constant data) and True
    otherwise.
    """
    nonconstant = vars != 0
    return nonconstant
57da83f60c42445903e8fd1442c60f7fd4e16bc6
698,296