content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def calc_overlap(row):
    """Compute mutual overlap ratios between prediction and ground truth.

    Returns [inter/len(gt), inter/len(pred)] — the overlap percentages used
    for determining true positives.
    """
    predicted = set(row.predictionstring_pred.split(' '))
    truth = set(row.predictionstring_gt.split(' '))
    # Size of the shared token-id set.
    shared = len(truth & predicted)
    return [shared / len(truth), shared / len(predicted)]
98e65250f82ab13b23de049fd80a59dea30ccce2
3,635,000
def delete_all():
    """Remove every unhidden (or missing-on-disk) folder entry from the config."""

    def check_path(folder=None):
        """Return whether the folder's stored path still exists on disk."""
        return isdir(folder._fullpath)

    # Candidates for removal: entries that are not hidden, or whose path is gone.
    doomed = [entry for entry in config['Elements']
              if entry._hide == 0 or check_path(entry) == 0]

    if not doomed:
        # Nothing to remove — report it via the status bar.
        return refresh_status_bar(text=lang[1]['There are no folders to delete'],
                                  color=red2)

    for entry in doomed:
        config['Elements'].remove(entry)

    # Report success, persist the config, and refresh the UI listing.
    refresh_status_bar(text=lang[1]['All the unhide folders has been deleted'],
                       color=green)
    save_data(db=True)
    refresh_listbox()
a4c1359ee272a3f5fd7edd58913baf90ad15bd84
3,635,001
def get_range(l_list, l_position):
    """Obtain the range of points in a list (optionally at a position inside each item).

    :param l_list: list of iterables of numbers; or, when ``l_position`` is
        given, a list of indexable items whose ``l_position``-th element is
        such an iterable
    :param l_position: optional index selecting the iterable inside each item;
        ``None`` means each item of ``l_list`` is used directly
    :return: ``(l_range, l_abs_range)`` — span between the largest maximum and
        the smallest minimum observed (both bounds start at 0, matching the
        original behavior)
    """
    l_max = 0
    l_min = 0
    # Select the sequence of point collections to scan.  `is None` replaces
    # the non-idiomatic `== None`, and a comprehension replaces the manual
    # while/counter loop; behavior is unchanged.
    if l_position is None:
        ll_list = l_list
    else:
        ll_list = [item[l_position] for item in l_list]
    for l_object in ll_list:
        l_max = max(l_max, int(max(l_object)))
        l_min = min(l_min, int(min(l_object)))
    # Because l_min <= 0 <= l_max, the span is already non-negative; both
    # return values are kept for interface parity with the original.
    l_range = l_max - l_min
    l_abs_range = abs(l_max - l_min)
    return l_range, l_abs_range
a98b1d12cd37545b5cb1932cfe273222d9c5e4c0
3,635,002
def equalise_paragraphs(a_para, b_para, sentence_ratio=DEFAULT_SENTENCE_RATIO,
                        lowercase_glued=DEFAULT_LOWERCASE_GLUED,
                        stop_chars=DEFAULT_STOP_CHARS):
    """Glue two collections of sentences together so that corresponding
    entries are of similar word-length.  Sentences that cannot be made
    parallel are discarded.
    """
    out_a = []
    out_b = []
    i = 0
    j = 0
    # Keep merging while both sides still have sentences to draw from.
    while i < len(a_para) and j < len(b_para):
        sent_a = a_para[i]
        sent_b = b_para[j]
        verdict = compare_sentences(sent_a, sent_b, sentence_ratio=sentence_ratio)
        try:
            if verdict == B_IS_LONGER:
                glued = glue_sentences(sent_a, a_para[i + 1],
                                       lowercase_glued=lowercase_glued,
                                       stop_chars=stop_chars)
                if compare_sentences(glued, sent_b,
                                     sentence_ratio=sentence_ratio) != A_IS_LONGER:
                    # Grow the next sentence in place and step onto it.
                    a_para[i + 1] = glued
                    i += 1
                else:
                    # Force a push.
                    verdict = BOTH_EQUIVALENT
            elif verdict == A_IS_LONGER:
                glued = glue_sentences(sent_b, b_para[j + 1],
                                       lowercase_glued=lowercase_glued,
                                       stop_chars=stop_chars)
                if compare_sentences(sent_a, glued,
                                     sentence_ratio=sentence_ratio) != B_IS_LONGER:
                    # Grow the next sentence in place and step onto it.
                    b_para[j + 1] = glued
                    j += 1
                else:
                    # Force a push.
                    verdict = BOTH_EQUIVALENT
            if verdict == BOTH_EQUIVALENT:
                # Sentences are close enough to being equal; keep both.
                out_a.append(sent_a)
                out_b.append(sent_b)
                i += 1
                j += 1
        except IndexError:
            # Raised when a required next sentence doesn't exist — we're done.
            break
    return out_a, out_b
c77a0c066dc0e7a6fad978563d2eb7caad15825a
3,635,003
def build_summary_rendering_context(schema_json, answer_store, metadata):
    """Build questionnaire summary context from metadata and answers.

    :param schema_json: schema of the current questionnaire
    :param answer_store: all of the answers to the questionnaire
    :param metadata: all of the metadata
    :return: questionnaire summary context (list of Section objects)
    """
    navigator = PathFinder(schema_json, answer_store, metadata)
    # Block ids that are actually on the routing path.
    routed_ids = [location.block_id for location in navigator.get_routing_path()]

    sections = []
    for group in schema_json['groups']:
        for block in group['blocks']:
            # Skip blocks that the routing path never visits.
            if block['id'] not in routed_ids:
                continue
            # Interstitial blocks carry no summary content.
            if "type" in block and block['type'] == "interstitial":
                continue
            link = url_for('questionnaire.get_block',
                           eq_id=metadata['eq_id'],
                           form_type=metadata['form_type'],
                           collection_id=metadata['collection_exercise_sid'],
                           group_id=group['id'],
                           group_instance=0,
                           block_id=block['id'])
            sections.extend(Section(section, answer_store.map(), link)
                            for section in block['sections'])
    return sections
b59b6ffb10a7383d7168b127bf8931f16ca7dc5a
3,635,004
def _bucket_from_workspace_name(wname): """Try to assert the bucket name from the workspace name. E.g. it will answer www.bazel.build if the workspace name is build_bazel_www. Args: wname: workspace name Returns: the guessed name of the bucket for this workspace. """ revlist = [] for part in wname.split("_"): revlist.insert(0, part) return ".".join(revlist)
4cf3f4505a894f63258846abbe41b3b787485d40
3,635,005
def load_results_with_table_definition(result_files, table_definition,
                                       table_definition_file, options):
    """Load results from the given files, with column definitions taken from
    a table-definition file.

    @return: a list of RunSetResult objects
    """
    cols = extract_columns_from_table_definition_file(table_definition,
                                                      table_definition_file)
    diff_cols = _get_columns_relevant_for_diff(cols)
    return load_results(result_files,
                        options=options,
                        columns=cols,
                        columns_relevant_for_diff=diff_cols)
4bbe743774b74abbe8c9d4d6af6669cd819c9cba
3,635,006
def CIFAR10(flatten=True, split=None):
    """Returns the CIFAR10 dataset.

    Parameters
    ----------
    flatten : bool, optional
        Convert the 3 x 32 x 32 pixels to a single vector.
    split : list, optional
        Train/validation/test split fractions; defaults to [1.0, 0.0, 0.0]
        (everything in the training split), matching the historical default.

    Returns
    -------
    cifar : Dataset
        The CIFAR10 images and labels wrapped in a Dataset.
    """
    # None-sentinel avoids the mutable-default-argument pitfall while keeping
    # the historical default behavior for existing callers.
    if split is None:
        split = [1.0, 0.0, 0.0]
    Xs, ys = cifar10_load()
    if flatten:
        # Collapse each 3x32x32 image into a flat 3072-element vector.
        Xs = Xs.reshape((Xs.shape[0], -1))
    return Dataset(Xs, ys, split=split)
8af9997f5c530ac2aae9d680235007b3de289d96
3,635,007
def _cut_daytime(visi, tmstp):
    """Returns visibilities with night time only.

    Returns an array if a single night is present.
    Returns a list of arrays if multiple nights are present.
    """
    tstp = tmstp[1] - tmstp[0]  # Time step
    # Number of time bins corresponding to a 20-minute margin.
    pad = int(20.0 * 60.0 / tstp)

    risings = ch_eph.solar_rising(tmstp[0], tmstp[-1])
    settings = ch_eph.solar_setting(tmstp[0], tmstp[-1])

    if len(risings) == 0 and len(settings) == 0:
        # No solar transition within the span: all day or all night.
        next_rising = ch_eph.solar_rising(tmstp[-1])
        next_setting = ch_eph.solar_setting(tmstp[-1])
        if next_setting < next_rising:
            # All data is in daylight time.
            cut_vis = None
            cut_tmstp = None
        else:
            # All data is in night time.
            cut_vis = np.copy(visi)
            cut_tmstp = tmstp
    elif len(settings) == 0:
        # Only one rising: keep everything before it (with the 20-min margin).
        idx = np.argmin(np.abs(tmstp - risings[0]))
        cut_low = max(0, idx - pad)  # lower limit of time cut
        cut_vis = np.copy(visi[:, :cut_low])
        cut_tmstp = tmstp[:cut_low]
    elif len(risings) == 0:
        # Only one setting: keep everything after it (with the 20-min margin).
        idx = np.argmin(np.abs(tmstp - settings[0]))
        cut_up = min(len(tmstp), idx + pad)  # upper limit of time to cut
        cut_vis = np.copy(visi[:, cut_up:])
        cut_tmstp = tmstp[cut_up:]
    else:
        # Multiple day/night transitions: build [rise, set] daylight intervals.
        cut_pairs = []
        if risings[0] > settings[0]:
            # Data starts during daylight.
            cut_pairs.append([tmstp[0], settings[0]])
            for k in range(1, len(settings)):
                cut_pairs.append([risings[k - 1], settings[k]])
            if len(risings) == len(settings):
                cut_pairs.append([risings[-1], tmstp[-1]])
        else:
            for k in range(len(settings)):
                cut_pairs.append([risings[k], settings[k]])
            if len(risings) > len(settings):
                cut_pairs.append([risings[-1], tmstp[-1]])

        cut_tmstp = []
        cut_vis = []
        tmstp_remain = tmstp
        vis_remain = np.copy(visi)
        for cp in cut_pairs:
            # Time bin indices closest to the cut boundaries.
            idx_low = np.argmin(np.abs(tmstp_remain - cp[0]))
            idx_up = np.argmin(np.abs(tmstp_remain - cp[1]))
            # 20-min margin after setting and before rising, if within range.
            cut_low = max(0, idx_low - pad)
            cut_up = min(len(tmstp_remain), idx_up + pad)
            if len(tmstp_remain[:cut_low]) > 0:
                # Keep the night-time chunk before this daylight interval.
                cut_vis.append(vis_remain[:, :cut_low])
                cut_tmstp.append(tmstp_remain[:cut_low])
            # Continue cutting from just after this daylight interval.
            vis_remain = vis_remain[:, cut_up:]
            tmstp_remain = tmstp_remain[cut_up:]
        if len(tmstp_remain) > 0:
            # Trailing night data after the last daylight interval.
            cut_tmstp.append(tmstp_remain)
            cut_vis.append(vis_remain)

    return cut_vis, cut_tmstp
f6a3164af732949807f5f3f8c810f5072ab19a6d
3,635,008
def decay(epoch):
    """Return the learning-rate alpha for the given epoch.

    The schedule is intentionally a very small constant (0.001 / 31);
    the epoch argument is accepted for scheduler-callback compatibility.
    """
    return 0.001 / (1 + 30)
b3311fe38557ee18d0e72ce794a3123b04b92c7a
3,635,009
import functools
import time


def timer_function(function):
    """Decorator that prints the time taken to execute *function*.

    Fixes two defects of the original wrapper: it only accepted a single
    positional argument, and it silently discarded the wrapped function's
    return value.  Arguments and the return value now pass through unchanged.
    """
    @functools.wraps(function)
    def inner_function(*args, **kwargs):
        start = time.perf_counter()
        result = function(*args, **kwargs)
        end = time.perf_counter()
        total = end - start
        print(start, end)
        print(f"The function finished in {total:.4f}")
        return result
    return inner_function
82981c28e9401581d38c1eed6b4efab30679cec8
3,635,010
def blob_delete(cache, key, namespace):
    # type: (Any, str, Optional[str]) -> bool
    """Delete stored values from memcache."""
    chunk_keys = blob_get_chunk_keys(cache, key, namespace=namespace)
    if not chunk_keys:
        # Keys are not set, so there is nothing to remove.
        return True
    # Remove every chunk plus the master key in a single multi-delete.
    doomed = list(chunk_keys) + [key]
    return cache.delete_multi(doomed, namespace=namespace)
d2eaa38ead9e89461341c4a5df7062d723f5e62e
3,635,011
def shape_equality_robust_statistic(𝐗, args):
    """GLRT test for testing a change in the shape of a deterministic SIRV model.

    Inputs:
        * 𝐗 = a (p, N, T) numpy array with:
            * p = dimension of vectors
            * N = number of Samples at each date
            * T = length of time series
        * args = (tol, iter_max, scale) — Tyler estimator tolerance and
          iteration cap, plus the output scale ('linear' or logarithmic)
    Outputs:
        * the statistic given the observations in input
    """
    tol, iter_max, scale = args
    p, N, T = 𝐗.shape

    # Global shape estimate 𝚺_0 over all observations pooled together.
    𝚺_0, δ, niter = tyler_estimator_covariance(𝐗.reshape((p, T * N)), tol, iter_max)
    i𝚺_0 = np.linalg.inv(𝚺_0)

    # Accumulators for log-determinant and texture terms.
    log_num_det = T * N * np.log(np.abs(np.linalg.det(𝚺_0)))
    log_den_det = 0
    log𝛕_0 = 0
    log𝛕_t = 0

    # One per-date shape estimate plus its texture contribution.
    for t in range(0, T):
        𝚺_t, δ, iteration = tyler_estimator_covariance(𝐗[:, :, t], tol, iter_max)
        log_den_det = log_den_det + N * np.log(np.abs(np.linalg.det(𝚺_t)))
        log𝛕_0 = log𝛕_0 + np.log(np.diagonal(𝐗[:, :, t].conj().T @ i𝚺_0 @ 𝐗[:, :, t]))
        log𝛕_t = log𝛕_t + np.log(np.diagonal(𝐗[:, :, t].conj().T @ np.linalg.inv(𝚺_t) @ 𝐗[:, :, t]))

    log_num_quad = p * np.sum(log𝛕_0)
    log_den_quad = p * np.sum(log𝛕_t)

    # Final expression of the statistic (log scale), optionally exponentiated.
    stat = np.real(log_num_det - log_den_det + log_num_quad - log_den_quad)
    if scale == 'linear':
        λ = np.exp(stat)
    else:
        λ = stat
    return λ
c5f9f967e9f9bdbf314dde3d90dbe812b7dad565
3,635,012
def get_attrib_uri(json_dict, attrib):
    """Get the URI for an attribute.

    :param json_dict: mapping whose *attrib* entry is either a URI string or
        a dict carrying the URI under 'id' or '@id'
    :param attrib: key to look up in *json_dict*
    :return: the URI string, or None when it cannot be determined
    """
    url = None
    value = json_dict[attrib]
    # isinstance is the idiomatic type check (also accepts subclasses),
    # replacing the non-idiomatic `type(x) == T` comparisons.
    if isinstance(value, str):
        url = value
    elif isinstance(value, dict):
        if value.get('id', False):
            url = value['id']
        elif value.get('@id', False):
            url = value['@id']
    return url
838b698e3475ebdc877b29de6f3fd446d2be1cdf
3,635,013
def set_model_params(module, params_list, start_param_idx=0):
    """Set params list into model recursively.

    :param module: module-like object exposing `_parameters` (name -> param)
        and `_modules` (name -> child module or None) mappings
    :param params_list: flat list of parameters to assign, in traversal order
    :param start_param_idx: index into params_list where this module starts
    :return: index of the first unconsumed entry in params_list
    """
    param_idx = start_param_idx
    for name in module._parameters:
        module._parameters[name] = params_list[param_idx]
        param_idx += 1
    for name, child in module._modules.items():
        if child is not None:
            # BUG FIX: the recursive call returns an *absolute* index, so it
            # must replace param_idx rather than be added to it.  The original
            # `param_idx += set_model_params(...)` produced wrong indices (and
            # skipped parameters) whenever a submodule was present.
            param_idx = set_model_params(child, params_list, param_idx)
    return param_idx
7ce6edb0c1b83020280cf0b586623d66839b4b0a
3,635,014
import os
import logging
import stat


def push(local_path, remote_path):
    """Upload a file to the device.

    Arguments:
        local_path(str): Path to the local file to push.
        remote_path(str): Path or directory to store the file on the device.

    Returns:
        Remote path of the file.

    Example:
        >>> write('./filename', 'contents')
        >>> adb.push('./filename', '/data/local/tmp')
        '/data/local/tmp/filename'
        >>> adb.read('/data/local/tmp/filename')
        'contents'
        >>> adb.push('./filename', '/does/not/exist')
        Traceback (most recent call last):
        ...
        PwnlibException: Could not stat '/does/not/exist'
    """
    msg = "Pushing %r to %r" % (local_path, remote_path)
    remote_filename = os.path.basename(local_path)
    if log.isEnabledFor(logging.DEBUG):
        msg += ' (%s)' % context.device

    with log.waitfor(msg) as w:
        with AdbClient() as c:
            # Discover whether remote_path is a directory or not.  If the full
            # path cannot be stat'ed, assume it is path-plus-filename where
            # the filename does not yet exist.
            stat_ = c.stat(remote_path)
            if not stat_:
                remote_filename = os.path.basename(remote_path)
                remote_path = os.path.dirname(remote_path)
                stat_ = c.stat(remote_path)

            # Neither the exact path nor its parent directory exists: bail!
            if not stat_:
                log.error('Could not stat %r' % remote_path)

            # When the target is a directory, append the filename to it.
            mode = stat_['mode']
            if stat.S_ISDIR(mode):
                remote_path = os.path.join(remote_path, remote_filename)

            c.write(remote_path,
                    misc.read(local_path),
                    callback=_create_adb_push_pull_callback(w))

    return remote_path
b9f7980a78fdc0d4a68e652212ab1dfb6a2b01be
3,635,015
def concatIDF(idfObjectList, param):
    """Create tab separated strings from input yaml parameter objects."""
    # join() builds the string in one pass instead of repeated concatenation;
    # the output (a leading tab before every value) is identical.
    return "".join("\t" + str(noneClean(obj.params[param]))
                   for obj in idfObjectList)
0cd6fe6dea85b2d7365a1e225d75386629dab98e
3,635,016
import inspect


def super_class_property(*args, **kwargs):
    """
    A class decorator that adds the class' name in lowercase as a property of
    it's superclass with a value constructed using the subclass' constructor
    with the given arguments. So for example:

        class A:
            pass

        @super_class_property(foo=5)
        class B(A):
            def __init__(self, foo=3):
                self.foo = foo

    Effectively results in the following, after the definition of B:

        A.b = B(foo=5)

    Can be used multiple times with different arguments if desired.
    """
    def add_superclass_property(cls):
        mro = inspect.getmro(cls)
        # mro of length 2 means the only ancestor is `object` itself.
        if len(mro) <= 2:
            raise TypeError(
                (
                    "Class {} can't be a super_class_property because it has no super "
                    "class."
                ).format(cls)
            )
        # Attach an instance of cls to its immediate parent, keyed by name.
        setattr(mro[1], cls.__name__.lower(), cls(*args, **kwargs))
        return cls

    return add_superclass_property
ecfd38ba3d7ea96266278ed6be6cf0ba87263d7d
3,635,017
def run_deferred_and_advance_now_until(ctx, u, until):
    """
    Find all rows in the deferred table where the run_at time has passed for
    the given user and run the deferred action. gametime.now is set to the
    deferred action's run_at value to simulate the production environment when
    run from a cronjob.
    NOTE: gametime.now will be set to until at the end of this function,
    regardless of what happens.
    :param ctx: The database context.
    :param until: datetime Run deferred actions with run_at times older than this.
    Returns a list of DeferredRow instances that were run or an empty list.
    """
    with db.conn(ctx) as ctx:
        # The SQL handles returning these in sorted order, oldest first.
        rows = db.rows(ctx, 'debug/select_deferred_since_by_user_id',
                       user_id=u.user_id, since=until)
        processed = []
        for row in rows:
            deferred_row = deferred.DeferredRow(**row)
            # Pretend the cronjob just ran and saw this deferred for the
            # first time: pin gametime to the action's scheduled run_at.
            gametime.set_now(deferred_row.run_at)
            deferred.process_row(ctx, u, deferred_row)
            # No exception occurred: remove the row and record it as done.
            deferred_row.delete(ctx)
            processed.append(deferred_row)
        # Advance gametime to the end of the processed window.
        gametime.set_now(until)
        return processed
8e741cd7cc06d2883b8879af554f300d5a305625
3,635,018
def get_group_type_by_name(context, name):
    """Retrieves single group type by name."""
    if name is None:
        # Fail fast on a missing name instead of passing None to the DB layer.
        raise exception.InvalidGroupType(reason=_("name cannot be None"))
    return db.group_type_get_by_name(context, name)
ceff65a621fece573cec1ff60291b80bdd784bb7
3,635,019
import os


def hdfs_to_local(hdfs_path, local_path, is_txt=True):
    """copy hdfs file to local

    param:
        * hdfs_path: hdfs file or dir
        * local_path: local file or dir
    return:
        * res: result message ('ok' when the hadoop command printed nothing)
    """
    # Choose the hadoop invocation: -text decodes to text, -get copies raw.
    if is_txt:
        cmd = "hadoop dfs -text {} > {}".format(hdfs_path, local_path)
    else:
        cmd = "hadoop dfs -get {} {}".format(hdfs_path, local_path)
    res = os.popen(cmd).read()
    # Empty output from the command is treated as success.
    if '' == res:
        res = 'ok'
    return res
46ee67069c7c43c1fb23a62a1c1d8fadcf058121
3,635,020
def register_tortoise_exception(
    app: FastAPI,
    add_exception_handlers: bool = False,
) -> None:
    """rewrite from tortoise.contrib.fastapi import register_tortoise"""
    # Guard clause: nothing to install unless handlers were requested.
    if not add_exception_handlers:
        return

    @app.exception_handler(DoesNotExist)
    async def doesnotexist_exception_handler(request: Request, exc: DoesNotExist):
        # 404 payload mirroring the tortoise contrib handler.
        return JSONResponse(status_code=404, content={
            "error": "STATUS_404_NOT_FOUND",
            "code": 404,
            "message": str(exc)
        })

    @app.exception_handler(IntegrityError)
    async def integrityerror_exception_handler(request: Request, exc: IntegrityError):
        # 422 payload in FastAPI's validation-error shape.
        return JSONResponse(
            status_code=422,
            content={"detail": {"loc": [], "msg": str(exc), "type": "IntegrityError"}},
        )
e9c140f61b32475cd2a396fb709aba1b578cf6bf
3,635,021
import numpy


def norm_m(matrix, norm):
    """Normalize the given matrix (cast to float64) with the requested norm.

    :param matrix: {array-like, sparse matrix}, shape [n_samples, n_features]
        The data to normalize, element by element. scipy.sparse matrices
        should be in CSR format to avoid an un-necessary copy.
    :param norm: normalization to apply (e.g. 'l1' — the sum of all values).
    :return: {array-like, sparse matrix}, shape [n_samples, n_features]
        Normalized input X.
    """
    # Cast once to float64 before delegating to the normalizer.
    as_float = matrix.astype(numpy.float64)
    return normalize(as_float, norm)
022f977f3eb937aa9a27deec229a19281eac281b
3,635,022
def user_from_dict(user_dictionary: dict):
    """
    The function converts a dictionary of User to a User object.

    :param user_dictionary: A dictionary that contains the keys of a User.
    :type user_dictionary: dict
    :rtype: ibmpairs.query.User
    :raises Exception: if not a dict.
    """
    # Docstring fix only: the original documented a parameter named
    # `user_dict`, but the actual parameter is `user_dictionary`.
    return User.from_dict(user_dictionary)
fb2380d316a3c939afd9b6a7d6399ad198c881c7
3,635,023
def lind_safe_fs_getdents(args):
    """Safely wrap the getdents call.

    See dispatcher.repy for details. Check the handle and count for
    consistancy, then call the real getdents dispatcher.
    """
    handle, count = args[0], args[1]

    check_valid_fd_handle(handle)
    assert isinstance(count, int)

    result = lind_fs_getdents(args)

    # Only validate the payload on success (comparison kept as-is to match
    # the original's exact semantics for non-bool is_error values).
    if result.is_error == False:
        assert (len(result.data) <= TX_BUF_MAX), \
            "returning data larger than transmission buffer."
        assert (len(result.data) <= count), \
            "not observing byte count parameter."

    return result
4ec1b3ba52f0d24ebafc03829b4b520a93f460ee
3,635,024
from datetime import datetime


def parser():
    # parsing the whole circuit into lists of objects
    """ Start of Parse .nodes """
    file = open("{}.nodes".format(fileName))
    lines = file.readlines()
    saved = 0
    node_list = []  # List of all nodes for the current circuit

    # Locate NumNodes + NumTerminals
    for i in range(len(lines)):
        # .upper everything cause of insensitive chars
        temp_parsing = lines[i].strip(" ,.\n#:").upper()
        # Locate NumNodes
        if temp_parsing.find("NUMNODES") != -1:
            point = temp_parsing.find("NUMNODES")
            length = len("NUMNODES")
            number_of_nodes = temp_parsing[point + length:]
            number_of_nodes = number_of_nodes.strip(": ")
            number_of_nodes = int(number_of_nodes)
        # Locate NumTerminals
        if temp_parsing.find("NUMTERMINALS") != -1:
            point = temp_parsing.find("NUMTERMINALS")
            length = len("NUMTERMINALS")
            number_of_terminals = temp_parsing[point + length:]
            number_of_terminals = number_of_terminals.strip(": ")
            number_of_terminals = int(number_of_terminals)
            # Starting point for the 2nd for, +1 for the next line.
            saved = i + 1
            break

    # Parsing the Nodes
    for j in range(saved, len(lines)):
        temp = lines[j].strip("\t,.\n#: ")
        temp = temp.split()
        node_name = temp[0]
        node_width = int(temp[1])
        node_height = int(temp[2])
        if len(temp) == 3:  # len == 3 -> Non_Terminal
            node_type = "Non_Terminal"
        elif len(temp) == 4:  # len == 4 -> Terminal
            node_type = "Terminal"
        else:  # Length is not 3 or 4 - Modified file
            print("Error. File is modified!")
            node_type = "Error. File is modified!"
        new_node = Node(node_name, node_width, node_height, node_type)
        node_list.append(new_node)  # node_x,node_y not found yet
    file.close()  # Close .nodes file
    """ End of Parse .nodes """

    """ Start of Parse .pl """
    file = open("{}.pl".format(fileName))
    lines = file.readlines()
    # Skip first 4 lines - comments
    for i in range(4, len(lines)):
        temp_parsing = lines[i].strip()
        temp_parsing = temp_parsing.split()  # temp_parsing type = list
        node_name = temp_parsing[0]
        node_x = int(temp_parsing[1])  # Lower Left Corner x Coordinate
        node_y = int(temp_parsing[2])  # Lower Left Corner y Coordinate
        # match the node_names and update the node_x,node_y according to
        # their coordinates
        for node in node_list:
            if node.node_name == node_name:
                node.set_x_y(node_x, node_y)
    file.close()  # Close .pl file
    """ End of Parse .pl """

    """ Start of Parse .nets """
    file = open("{}.nets".format(fileName))
    lines = file.readlines()
    saved = 0  # saving pointers that are used for parsing
    net_list = []  # List of all nets for the current circuit

    # Locate NumNets
    for i in range(len(lines)):
        temp_parsing = lines[i].strip(" ,.\n#:").upper()
        # Parse NumNets
        if temp_parsing.find("NUMNETS") != -1:
            point = temp_parsing.find("NUMNETS")
            length = len("NUMNETS")
            nets_number = temp_parsing[point + length:]
            nets_number = nets_number.strip(": ")
            nets_number = int(nets_number)
            saved = i
            break

    # Locating all NetDegree's
    name_counter = -1  # counter for names of the Nets
    for i in range(saved, len(lines)):
        temp_parsing = lines[i].strip(" ,.\n#:").upper()
        # Locate NetDegree
        if temp_parsing.find("NETDEGREE") != -1:
            name_counter += 1  # +1 for the next Net Name
            temp_parsing = temp_parsing.replace(":", " ")
            temp_parsing = temp_parsing.split()
            net_degree = int(temp_parsing[1])
            net_name = "net{}".format(name_counter)
            # Read the "netDegree" number of lines of each Net.
            # netDegree+1 because "range" stops at (max - 1);
            # starting from 1, to skip the " NetDegree : x " line.
            new_net = Net(net_name, net_degree)
            for j in range(1, net_degree + 1):
                next_line = lines[i + j].split()  # contains node name & more
                current_node = str(next_line[0])  # parse only the node name
                # match the node name, to the node object
                for node in node_list:
                    if node.node_name == current_node:
                        new_net.append_node(node)
                # find on which nets the current_node belongs to, and update
                # the net_list of the current_node according to the matches
                for node in node_list:
                    if node.node_name == current_node:
                        node.append_net(new_net.net_name)
            new_net.find_coordinates_of_net()
            new_net.calculate_net_wirelength()
            new_net.calculate_net_size()
            net_list.append(new_net)  # add every net on the list of nets
    file.close()  # Close .nets file
    """ End of Parse .nets """

    """ Start of Parse .scl """
    file = open("{}.scl".format(fileName))
    lines = file.readlines()
    row_coordinate = None
    row_sub = None
    row_numsites = None
    row_height = None
    row_list = []  # List of all rows for the current circuit
    name_counter = -1  # counter for name of the Rows

    for i in range(len(lines)):
        # .upper everything cause of insensitive chars
        temp_parsing = lines[i].strip(" ,.\n#:").upper()
        if temp_parsing.find("COREROW HORIZONTAL") != -1:
            name_counter += 1  # +1 for the next Row Name
            row_name = "row{}".format(name_counter)

            # Parse Row's Coordinate; expected at line (i+1).
            temp = lines[i + 1].strip(" ,.\n#:").upper()
            if temp.find("COORDINATE") != -1:
                point = temp.find("COORDINATE")
                length = len("COORDINATE")
                row_coordinate = temp[point + length:]
                row_coordinate = row_coordinate.strip(": ")
                # Lower Left Corner y coordinate of the row
                row_coordinate = int(row_coordinate)
            else:
                print("Error: File is modified.")

            # Parse Row's Height; expected at line (i+2).
            temp = lines[i + 2].strip(" ,.\n#:").upper()
            if temp.find("HEIGHT") != -1:
                point = temp.find("HEIGHT")
                length = len("HEIGHT")
                row_height = temp[point + length:]
                row_height = row_height.strip(": ")
                row_height = int(row_height)
            else:
                print("Error: File is modified.")

            # Parse SubrowOrigin & Numsites; expected together at line (i+7).
            temp = lines[i + 7].strip(" ,.\n#:").upper()
            if temp.find("SUBROWORIGIN") != -1:
                point = temp.find("SUBROWORIGIN")
                length = len("SUBROWORIGIN")
                row_sub = temp[point + length:]
                row_sub = row_sub.strip(": ")
                row_sub = row_sub.strip(" ,.\n#:").upper()
                if row_sub.find("NUMSITES") != -1:
                    point2 = row_sub.find("NUMSITES")
                    # filter and locate Numsites
                    # BUG FIX: the original sliced with `point2 + length`,
                    # reusing len("SUBROWORIGIN") == 12, which cuts into the
                    # number itself whenever fewer than 4 separator characters
                    # follow "NUMSITES".  Slice right after the keyword and
                    # strip the separators instead.
                    row_numsites = row_sub[point2 + len("NUMSITES"):]
                    row_numsites = row_numsites.strip(": ")
                    # Lower Right Corner x Coordinate
                    row_numsites = int(row_numsites)
                    # filter and locate SubrowOrigin
                    row_sub = row_sub[:point2]
                    row_sub = int(row_sub)  # Lower Left Corner x Coordinate
            else:
                print("Error: File is modified.")

            # row_height + row_coordinate = y_max of each row
            new_row = Row(row_name, row_coordinate,
                          (row_height + row_coordinate), row_sub, row_numsites)
            row_list.append(new_row)  # add every row on the list of rows
    file.close()  # Close .scl file
    """ End of Parse .scl """

    # Find the row, each node is placed in.
    # BUG FIX: the module does `from datetime import datetime`, so the
    # original `datetime.datetime.now()` raised AttributeError; use
    # `datetime.now()` throughout.
    begin1_time = datetime.now()
    for row in row_list:
        for node in node_list:
            # check for both lower_y and upper_y to avoid Terminal nodes
            if (node.node_y == row.y_min
                    and (node.node_y + node.node_height) == row.y_max):
                node.set_row(row)
                row.append_node(node)
    begin2_time = datetime.now() - begin1_time

    # Find the row(s), each Net belongs to and the opposite
    for net in net_list:
        for node in net.net_nodes:
            if node.node_type == "Non_Terminal":
                net.append_row(node.node_row)
                node.node_row.append_net(net)
        net.net_rows = list(dict.fromkeys(net.net_rows))  # remove duplicates

    # Update each row, with its density
    begin3_time = datetime.now()
    for row in row_list:
        row.calculate_row_density()
    begin4_time = datetime.now() - begin3_time
    print("\nRow Density list time: ", begin4_time + begin2_time)

    # Create Design
    current_design = Design(number_of_nodes, number_of_terminals, nets_number)
    current_design.calculate_design_half_perimeter_wirelength(net_list)
    print("***\n\nCurrentDesign: ", current_design)

    return node_list, net_list, row_list
98e828248ec02ecd68928165791d549b04962992
3,635,025
def header(columns):
    """Create html for column headers."""
    bar_cells = add_bars(columns)
    return div(docfilter, div(*bar_cells, cls='noselect'), id='header')
1d54b6f60085e0234aa7055c9ef17d684793b6d4
3,635,026
def set_playbook_config(ctx, **kwargs):
    """
    Set all playbook node instance configuration as runtime properties
    :param ctx: Cloudify node instance which is instance of CloudifyContext
    :param kwargs: Playbook node configurations
    """
    def _get_secure_values(data, sensitive_keys, parent_hide=False):
        """
        ::param data : dict to check against sensitive_keys
        ::param sensitive_keys : a list of keys we want to hide the values for
        ::param parent_hide : boolean flag to pass if the parent key is in
                              sensitive_keys
        """
        for key in data:
            # Hide when the key itself — or any ancestor key — is sensitive.
            hide = parent_hide or (key in sensitive_keys)
            value = data[key]
            if isinstance(value, dict):
                # Recurse so nested values under a sensitive key are masked too.
                data[key] = _get_secure_values(value, sensitive_keys, hide)
            else:
                data[key] = '*' * len(value) if hide else value
        return data

    if kwargs and isinstance(kwargs, dict):
        kwargs = _get_secure_values(kwargs, kwargs.get("sensitive_keys", {}))

    for key, value in kwargs.items():
        ctx.instance.runtime_properties[key] = value
    ctx.instance.update()
241642acdcd3b3b37c4b3736b375a03e5bc4cbec
3,635,027
def get_services_accounting_flow(device, field, output=None):
    """Get value of field from show services accounting flow.

    Args:
        device (`obj`): Device object
        field (`str`): field name in show output
        output (`str`): optional pre-captured output of the show command
    Returns:
        value (`str`): value of field, or None when unavailable
    """
    try:
        parsed = device.parse('show services accounting flow', output=output)
    except SchemaEmptyParserError:
        return None

    # Parsed output shape (abridged):
    # {"services-accounting-information": {"flow-information": [
    #     {"active-flows": "9382", "flow-bytes": "...",
    #      "interface-name": "ms-9/2/0", ...}]}}
    value = parsed.q.get_values(field, 0)
    return value if value else None
d2cc47826073d47a163edc9e12079705563baac1
3,635,028
import math


def moving_window_stride(array, window, step):
    """Return a strided view of *array* for moving-window calculation.

    :param array: numpy.ndarray - input array
    :param window: int - window size
    :param step: int - step lenght
    :return: strided: numpy.ndarray - zero-copy view of shape
        (win_count, window); index: numpy.ndarray - last original index
        covered by each window
    """
    elem_stride = array.strides[0]
    # Number of full windows that fit when advancing by `step`.
    win_count = math.floor((len(array) - window + step) / step)
    strided = as_strided(array,
                         shape=(win_count, window),
                         strides=(elem_stride * step, elem_stride))
    index = np.arange(window - 1, window + (win_count - 1) * step, step)
    return strided, index
50217f9830864375f801ef5412c99756fb9982ac
3,635,029
import os
import shutil


def _run_purple(paired, het_file, depth_file, vrn_files, work_dir):
    """Run PURPLE with pre-calculated AMBER and COBALT compatible inputs.

    Args:
        paired: tumor/normal pair; sample names and resources are read from
            ``paired.tumor_data`` / ``paired.normal_data``.
        het_file: AMBER heterozygous-site BAF file; its directory is passed
            to PURPLE as the AMBER input.
        depth_file: COBALT depth file; its directory is passed to PURPLE as
            the COBALT input.
        vrn_files: optional list of somatic variant calls; the first entry's
            ``vrn_file`` is forwarded to PURPLE as ``-somatic_vcf``.
        work_dir: working directory; outputs go to ``<work_dir>/purple``.

    Returns:
        dict with keys ``variantcaller``, ``call_file``, ``vrn_file`` (VCF
        export), ``plot`` (name -> png path) and ``metrics`` (purity stats).
    """
    purple_dir = utils.safe_makedir(os.path.join(work_dir, "purple"))
    out_file = os.path.join(purple_dir, "%s.purple.cnv" % dd.get_sample_name(paired.tumor_data))
    if not utils.file_exists(out_file):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            cmd = ["PURPLE"] + _get_jvm_opts(tx_out_file, paired.tumor_data) + \
                  ["-amber", os.path.dirname(het_file), "-baf", het_file,
                   "-cobalt", os.path.dirname(depth_file),
                   "-gc_profile", dd.get_variation_resources(paired.tumor_data)["gc_profile"],
                   "-output_dir", os.path.dirname(tx_out_file),
                   "-ref_genome", "hg38" if dd.get_genome_build(paired.tumor_data) == "hg38" else "hg19",
                   "-run_dir", work_dir,
                   "-threads", dd.get_num_cores(paired.tumor_data),
                   "-tumor_sample", dd.get_sample_name(paired.tumor_data),
                   "-ref_sample", dd.get_sample_name(paired.normal_data)]
            if vrn_files:
                cmd += ["-somatic_vcf", vrn_files[0]["vrn_file"]]
            # Avoid X11 display errors when writing plots
            cmd = "unset DISPLAY && %s" % " ".join([str(x) for x in cmd])
            do.run(cmd, "PURPLE: purity and ploidy estimation")
            # PURPLE writes many auxiliary outputs next to the main CNV file;
            # move everything except the transactional main file into place.
            for f in os.listdir(os.path.dirname(tx_out_file)):
                if f != os.path.basename(tx_out_file):
                    shutil.move(os.path.join(os.path.dirname(tx_out_file), f),
                                os.path.join(purple_dir, f))
    out_file_export = os.path.join(purple_dir, "%s-purple-cnv.tsv" % (dd.get_sample_name(paired.tumor_data)))
    if not utils.file_exists(out_file_export):
        utils.symlink_plus(out_file, out_file_export)
    out = {"variantcaller": "purple", "call_file": out_file_export,
           "vrn_file": titancna.to_vcf(out_file_export, "PURPLE", _get_header, _export_to_vcf,
                                       paired.tumor_data),
           "plot": {}, "metrics": {}}
    # Collect whichever of the known plots PURPLE produced; absent plots
    # are simply skipped.
    for name, ext in [("copy_number", "copyNumber"), ("minor_allele", "minor_allele"),
                      ("variant", "variant")]:
        plot_file = os.path.join(purple_dir, "plot", "%s.%s.png" % (dd.get_sample_name(paired.tumor_data), ext))
        if os.path.exists(plot_file):
            out["plot"][name] = plot_file
    # Parse the two-line purity summary (header line starting with '#',
    # then one row of values) into metrics, converting numbers to float
    # where possible.
    purity_file = os.path.join(purple_dir, "%s.purple.purity" % dd.get_sample_name(paired.tumor_data))
    with open(purity_file) as in_handle:
        header = in_handle.readline().replace("#", "").split("\t")
        vals = in_handle.readline().split("\t")
        for h, v in zip(header, vals):
            try:
                v = float(v)
            except ValueError:
                pass
            out["metrics"][h] = v
    return out
4206ccca514c9728a1f0146146caba081862fbef
3,635,030
def ecef2geodetic(ecef, radians=False): """ Convert ECEF coordinates to geodetic using ferrari's method """ # Save shape and export column ecef = np.atleast_1d(ecef) input_shape = ecef.shape ecef = np.atleast_2d(ecef) x, y, z = ecef[:, 0], ecef[:, 1], ecef[:, 2] ratio = 1.0 if radians else (180.0 / np.pi) # Conver from ECEF to geodetic using Ferrari's methods # https://en.wikipedia.org/wiki/Geographic_coordinate_conversion#Ferrari.27s_solution r = np.sqrt(x * x + y * y) Esq = a * a - b * b F = 54 * b * b * z * z G = r * r + (1 - esq) * z * z - esq * Esq C = (esq * esq * F * r * r) / (pow(G, 3)) S = np.cbrt(1 + C + np.sqrt(C * C + 2 * C)) P = F / (3 * pow((S + 1 / S + 1), 2) * G * G) Q = np.sqrt(1 + 2 * esq * esq * P) r_0 = -(P * esq * r) / (1 + Q) + np.sqrt(0.5 * a * a*(1 + 1.0 / Q) - \ P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r) U = np.sqrt(pow((r - esq * r_0), 2) + z * z) V = np.sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z) Z_0 = b * b * z / (a * V) h = U * (1 - b * b / (a * V)) lat = ratio*np.arctan((z + e1sq * Z_0) / r) lon = ratio*np.arctan2(y, x) # stack the new columns and return to the original shape geodetic = np.column_stack((lat, lon, h)) return geodetic.reshape(input_shape)
a4ef47c2f7284066e2d97b744dd144d75ccff768
3,635,031
def merge(sorted1, sorted2):
    """Merge two sorted lists into a single sorted list.

    Lists are represented as nested ``(head, tail)`` tuples terminated by
    ``()``.  This version is iterative, so -- unlike the previous recursive
    implementation -- it does not hit Python's recursion limit on long
    lists.  Ties are broken in favor of ``sorted1`` (stable merge).

    :param sorted1: sorted ``(head, tail)`` list
    :param sorted2: sorted ``(head, tail)`` list
    :return: merged sorted ``(head, tail)`` list
    """
    taken = []
    # Repeatedly take the smaller head until one list is exhausted.
    while sorted1 != () and sorted2 != ():
        h1, t1 = sorted1
        h2, t2 = sorted2
        if h1 <= h2:
            taken.append(h1)
            sorted1 = t1
        else:
            taken.append(h2)
            sorted2 = t2
    # Whatever remains is already sorted; rebuild the taken prefix on top.
    result = sorted1 if sorted2 == () else sorted2
    for item in reversed(taken):
        result = (item, result)
    return result
7c02b345b3d1e7c67e363e1535c608575a313f75
3,635,032
import os


def write_gpt_fieldmesh(fm, outfile, asci2gdf_bin=None, verbose=False):
    """
    Writes a GPT fieldmap file from a FieldMesh object.

    Requires cylindrical geometry for now.

    :param fm: FieldMesh object; must have cylindrical geometry with a
        single azimuthal slice (shape[1] == 1)
    :param outfile: path of the ASCII field file to write
    :param asci2gdf_bin: optional path to the ``asci2gdf`` binary; when
        given, the ASCII output is converted to GDF in place
    :param verbose: print progress / hints
    :return: ``outfile``
    """
    assert fm.geometry == 'cylindrical', f'Geometry: {fm.geometry} not implemented'
    assert fm.shape[1] == 1, 'Cylindrical symmetry required'
    dat = {}
    # 2D (r, z) grids matching the field component arrays below.
    dat['R'], dat['Z'] = np.meshgrid(fm.coord_vec('r'), fm.coord_vec('z'), indexing='ij')
    keys = ['R', 'Z']
    if fm.is_static:
        # Static maps are written as pure-magnetic or pure-electric columns.
        if fm.is_pure_magnetic:
            keys = ['R', 'Z', 'Br', 'Bz']
            dat['Br'] = np.real(fm['Br'][:,0,:])
            dat['Bz'] = np.real(fm['Bz'][:,0,:])
        elif fm.is_pure_electric:
            keys = ['R', 'Z', 'Er', 'Ez']
            dat['Er'] = np.real(fm['Er'][:,0,:])
            dat['Ez'] = np.real(fm['Ez'][:,0,:])
        else:
            raise ValueError('Mixed static field TODO')
    else:
        # Use internal Superfish routine
        keys = ['R', 'Z', 'Er', 'Ez', 'Bphi']
        dat['Er'], dat['Ez'], dat['Bphi'], _ = fish_complex_to_real_fields(fm, verbose=verbose)

    # Flatten dat into column order expected by GPT: one row per grid point.
    gptdata = np.array([dat[k].flatten() for k in keys]).T

    # Write file.
    # Hack to delete final newline
    # https://stackoverflow.com/questions/28492954/numpy-savetxt-stop-newline-on-final-line
    with open(outfile, 'w') as fout:
        NEWLINE_SIZE_IN_BYTES = 1  # 2 on Windows?
        np.savetxt(fout, gptdata, header=' '.join(keys), comments='')
        fout.seek(0, os.SEEK_END)  # Go to the end of the file.
        # Go backwards one byte from the end of the file.
        fout.seek(fout.tell() - NEWLINE_SIZE_IN_BYTES, os.SEEK_SET)
        fout.truncate()  # Truncate the file to this point.

    if asci2gdf_bin:
        run_asci2gdf(outfile, asci2gdf_bin, verbose=verbose)
    elif verbose:
        # NOTE(review): "asci2df" below looks like a typo for "asci2gdf",
        # and the hard-coded "field.gdf"/"{outfile}" argument order should
        # be double-checked against the asci2gdf usage.
        print(f'ASCII field data written. Convert to GDF using: asci2df -o field.gdf {outfile}')
    return outfile
dc8cd050efa4fe60cea334dbf30e060679c5cef6
3,635,033
from datetime import datetime import os def _build_bundleitems_update_request(player_id, bundle_name, bundle_item_key, bundle_item_value): """ Build the Bundle Items update request. """ player_id_bundle = f'{player_id}_{bundle_name}' timestamp = datetime.utcnow().replace(tzinfo=timezone.utc).isoformat() return { 'Key': { 'player_id_bundle': {'S': player_id_bundle}, 'bundle_item_key': {'S': bundle_item_key} }, 'ReturnValues': 'ALL_NEW', 'TableName': os.environ['BUNDLE_ITEMS_TABLE_NAME'], 'ExpressionAttributeNames': { '#bundle_item_value': 'bundle_item_value', '#created_at': 'created_at', '#updated_at': 'updated_at' }, 'ExpressionAttributeValues': { ':bundle_item_value': {'S': bundle_item_value}, ':created_at': {'S': timestamp}, ':updated_at': {'S': timestamp} }, 'UpdateExpression': 'SET #bundle_item_value = :bundle_item_value, ' '#created_at = if_not_exists(created_at, :created_at), #updated_at = :updated_at' }
0dec931485faaeeab66cf045c4cc192cfa594c89
3,635,034
from dask import delayed, compute
from dask.bytes.core import open_files, read_bytes
from dask.dataframe import from_delayed
import copy


def dask_read_avro(urlpath, blocksize=100000000, storage_options=None):
    """Read set of avro files into dask dataframes

    Use this only with avro schema that make sense as tabular data, i.e.,
    not deeply nested with arrays and maps.

    Parameters
    ----------
    urlpath: string or list
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    blocksize: int or None
        Size of chunks in bytes. If None, there will be no chunking and each
        file will become one partition.
    storage_options: dict or None
        passed to backend file-system
    """
    # Cleanup: the original normalized storage_options and called
    # open_files three times; do each exactly once.
    storage_options = storage_options or {}
    files = open_files(urlpath, **storage_options)
    with copy.copy(files[0]) as f:
        # we assume the same header for all files
        head = read_header(f)

    if blocksize is not None:
        # One delayed header read per file, then one delayed chunk read per
        # sync-delimited block within each file.
        dhead = delayed(open_head)
        heads = compute(*[dhead(f) for f in files])
        dread = delayed(dask_read_chunk)
        bits = []
        for head, f in zip(heads, files):
            _, chunks = read_bytes(f.path, sample=False, blocksize=blocksize,
                                   delimiter=head['sync'], include_path=False,
                                   **storage_options)
            bits.extend([dread(ch, head) for ch in chunks[0]])
        return from_delayed(bits)
    else:
        # No chunking: one partition per file.
        dread = delayed(dask_read_file)
        chunks = [dread(fo) for fo in files]
        return from_delayed(chunks)
a6559fbdc7a90149984f51f6d632663b36b5106e
3,635,035
import csv


def msgs_csv(messages, header):
    """Return messages in .csv format.

    Python 2 code: field values are unicode and are encoded to UTF-8 bytes
    before writing (the py2 ``csv`` module cannot handle unicode), and
    ``cStringIO`` -- imported elsewhere in this file -- is the in-memory
    buffer.

    :param messages: iterable of dicts with 'date', 'from', 'to' and 'text'
    :param header: if truthy, prepend a column-header row
    :return: the CSV document as a byte string
    """
    queue = cStringIO.StringIO()
    writer = csv.writer(queue, dialect=csv.excel, quoting=csv.QUOTE_ALL)
    if header:
        writer.writerow(['Date', 'From', 'To', 'Text'])
    for m in messages:
        # Encode each field to UTF-8 for the py2 csv writer.
        writer.writerow([m['date'].encode('utf-8'),
                         m['from'].encode('utf-8'),
                         m['to'].encode('utf-8'),
                         m['text'].encode('utf-8')])
    output = queue.getvalue()
    queue.close()
    return output
f7397af4f19cd7b94bd6d56766608c607dbe6950
3,635,036
def alpha_s_plot_parameters(
    alpha_curve: "list[float]",
    loading: "list[float]",
    section: "list[float]",
    alpha_s_point: float,
    reference_area: float,
    molar_mass: float,
    liquid_density: float,
):
    """Get the parameters for the linear region of the alpha-s plot.

    Fits a line through the selected section of the curve and, when the
    slope is plausible, derives the adsorbed volume and surface area from
    the fit.  Returns None when the slope check fails.
    """
    fit = stats.linregress(alpha_curve[section], loading[section])
    slope, intercept, corr_coef = fit[0], fit[1], fit[2]

    # Check if slope is good: reject fits whose slope is too steep relative
    # to the overall data range.
    if slope * (max(alpha_curve) / max(loading)) < 3:
        adsorbed_volume = intercept * molar_mass / liquid_density
        area = (reference_area / alpha_s_point * slope).item()
        return {
            'section': section,
            'slope': slope,
            'intercept': intercept,
            'corr_coef': corr_coef,
            'adsorbed_volume': adsorbed_volume,
            'area': area,
        }
    return None
a06a65ec2f7e13535ac96855f2ddb8b985268d26
3,635,037
def _MutualInformationTransformAccumulate(pcol):  # pylint: disable=invalid-name
    """Accumulates information needed for mutual information computation."""
    # Combine per key so each (label, token) pair accumulates its counts
    # and weighted means.
    combine_fn = _CountAndWeightsMeansCombineFn()
    return (pcol
            | 'VocabCountPerLabelPerTokenAccumulate' >> beam.CombinePerKey(combine_fn))
e3fb8c16dc025f8cb7ef4f0196d2b9341cbc0855
3,635,038
import torch


def GTLRU(input_a, input_b, n_channels: int):
    """Gated[?] Tanh Leaky ReLU Unit (GTLRU)

    The first ``n_channels`` channels of ``input_a + input_b`` pass through
    a tanh gate and the remaining channels through a leaky ReLU; the two
    halves are multiplied elementwise.
    """
    combined = input_a + input_b
    gate = torch.tanh(combined[:, :n_channels, :])
    activation = torch.nn.functional.leaky_relu(
        combined[:, n_channels:, :], negative_slope=0.01, inplace=True)
    return gate * activation
62f36cda5329e3b1889abcab2f1c97d6d1448ea8
3,635,039
def get_items_by_category(category_id, limit, offset=None):
    """
    Return items from catalog by category with limit and offset

    Relies on the module-level SQLAlchemy ``session`` and ``Catalog`` model.

    :param category_id: id of the category to filter on
    :param limit: maximum number of rows to return
    :param offset: number of rows to skip before returning results
        (None means no offset)
    :return object: a SQLAlchemy query (evaluated lazily by the caller)
    """
    # offset()/limit() may be chained in either order; both are applied
    # to the emitted SQL.
    return session.query(Catalog).filter_by(
        category=category_id).offset(offset).limit(limit)
63e82b3f67cf2ff8e5a863dfe05d7a4acb3e5543
3,635,040
def cosine(u, v, w=None): """ Compute the Cosine distance between 1-D arrays. The Cosine distance between `u` and `v`, is defined as .. math:: 1 - \\frac{u \\cdot v} {||u||_2 ||v||_2}. where :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`. Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in `u` and `v`. Default is None, which gives each value a weight of 1.0 Returns ------- cosine : double The Cosine distance between vectors `u` and `v`. Examples -------- >>> from scipy.spatial import distance >>> distance.cosine([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.cosine([100, 0, 0], [0, 1, 0]) 1.0 >>> distance.cosine([1, 1, 0], [0, 1, 0]) 0.29289321881345254 """ # cosine distance is also referred to as 'uncentered correlation', # or 'reflective correlation' return correlation(u, v, w=w, centered=False)
ad655f35963e64301686f9df440c59886984335b
3,635,041
import json


def adjust_site_parameters(site):
    """Updates extra parameters with applicable datastreams from
    `arm_reference_sites.json`

    Parameters
    ----------
    site: dict

    Returns
    -------
    dict
        Copy of input with updated extra parameters.
    """
    with open(DEFAULT_SITEFILE) as fp:
        reference_sites = json.load(fp)['sites']

    # ARM "sites" (e.g. SGP) contain many "locations" (e.g. E11, Byron OK),
    # and a location code is only unique within its site.  Data lives in
    # datastreams named like `sgp<product>e11.<level>`.  In the Solar
    # Forecast Arbiter each location is stored as an SFA site, so both the
    # location id (`network_api_id`) and the ARM site code
    # (`network_api_abbreviation`) are required to match a reference entry.
    location_id = site['extra_parameters']['network_api_id']
    site_code = site['extra_parameters']['network_api_abbreviation']

    for reference in reference_sites:
        reference_extra = json.loads(reference['extra_parameters'])
        same_location = reference_extra['network_api_id'] == location_id
        same_site = reference_extra['network_api_abbreviation'] == site_code
        if same_location and same_site:
            adjusted = site.copy()
            adjusted['extra_parameters'] = reference_extra
            return adjusted
    return site
f58d10f69c95f4f3db5ef71f772e97cb4c08e8a2
3,635,042
def slice_repr(slice_obj):
    """
    Get the best guess of a minimal representation of a slice, as it would
    be created by indexing.
    """
    parts = [slice_obj.start, slice_obj.stop, slice_obj.step]
    # A trailing None step can always be omitted.
    if parts[-1] is None:
        del parts[-1]
        # With the step gone, a None stop can be omitted too; only the
        # start (or "all") remains.
        if parts[-1] is None:
            return "all" if parts[0] is None else repr(parts[0]) + ":"
    return ":".join("" if p is None else repr(p) for p in parts)
c894f66478ec830a4968d0cfc5d9e146457012b6
3,635,043
async def isAtLeastInstructor(context: commands.Context) -> bool:
    """
    Returns true if context.author is either an admin or an instructor and
    False otherwise

    :param context: command invocation context whose author is checked
    :return: True when either `isInstructor` or `isAdmin` reports True
    """
    # Short-circuits: isAdmin is only awaited when isInstructor is falsy.
    return await isInstructor(context) or await isAdmin(context)
074d3726e42288ccfc3c6f5674679bd5e3510d2a
3,635,044
import kwimage


def _prob_to_dets(probs, diameter=None, offset=None, class_probs=None,
                  keypoints=None, min_score=0.01, num_min=10,
                  max_dims=None, min_dims=None):
    """
    Directly convert a one-channel probability map into a Detections object.

    Helper for Heatmap.detect

    It does this by converting each pixel above a threshold in a probability
    map to a detection with a specified diameter.

    Args:
        probs (ArrayLike[H, W]) a one-channel probability map indicating the
            liklihood that each particular pixel should be detected as an
            object.

        diameter (ArrayLike[2, H, W] | Tuple):
            H, W sizes for the bounding box at each pixel location.
            If passed as a tuple, then all boxes receive that diameter.

        offset (Tuple | ArrayLike[2, H, W], default=0):
            Y, X offsets from the pixel location to the bounding box center.
            If passed as a tuple, then all boxes receive that offset.

        class_probs (ArrayLike[C, H, W], optional):
            probabilities for each class at each pixel location.
            If specified, this will populate the `probs` attribute of the
            returned Detections object.

        keypoints (ArrayLike[2, K, H, W], optional):
            Keypoint predictions for all keypoint classes

        min_score (float, default=0.1): probability threshold required
            for a pixel to be converted into a detection.

        num_min (int, default=10):
            always return at least `nmin` of the highest scoring detections
            even if they aren't above the `min_score` threshold.

    Returns:
        kwimage.Detections: raw detections. It is the users responsbility to
            run non-max suppression on these results to remove duplicate
            detections.

    Example:
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> rng = np.random.RandomState(0)
        >>> probs = rng.rand(3, 3).astype(np.float32)
        >>> min_score = .5
        >>> diameter = [10, 10]
        >>> dets = _prob_to_dets(probs, diameter, min_score=min_score)
        >>> assert dets.boxes.data.dtype.kind == 'f'
        >>> assert len(dets) == 9
        >>> dets = _prob_to_dets(torch.FloatTensor(probs), diameter, min_score=min_score)
        >>> assert dets.boxes.data.dtype.is_floating_point
        >>> assert len(dets) == 9

    Example:
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> import kwimage
        >>> from kwimage.structs.heatmap import *
        >>> from kwimage.structs.heatmap import _prob_to_dets
        >>> heatmap = kwimage.Heatmap.random(rng=0, dims=(3, 3), keypoints=True)
        >>> # Try with numpy
        >>> min_score = .5
        >>> dets = _prob_to_dets(heatmap.class_probs[0], heatmap.diameter,
        >>>                      heatmap.offset, heatmap.class_probs,
        >>>                      heatmap.data['keypoints'],
        >>>                      min_score)
        >>> assert dets.boxes.data.dtype.kind == 'f'
        >>> assert 'keypoints' in dets.data
        >>> dets_np = dets
        >>> # Try with torch
        >>> heatmap = heatmap.tensor()
        >>> dets = _prob_to_dets(heatmap.class_probs[0], heatmap.diameter,
        >>>                      heatmap.offset, heatmap.class_probs,
        >>>                      heatmap.data['keypoints'],
        >>>                      min_score)
        >>> assert dets.boxes.data.dtype.is_floating_point
        >>> assert len(dets) == len(dets_np)
        >>> dets_torch = dets
        >>> assert np.all(dets_torch.numpy().boxes.data == dets_np.boxes.data)

    Ignore:
        import kwil
        kwil.autompl()
        dets.draw(setlim=True, radius=.1)

    Example:
        >>> heatmap = Heatmap.random(rng=0, dims=(3, 3), diameter=1)
        >>> probs = heatmap.class_probs[0]
        >>> diameter = heatmap.diameter
        >>> offset = heatmap.offset
        >>> class_probs = heatmap.class_probs
        >>> min_score = 0.5
        >>> dets = _prob_to_dets(probs, diameter, offset, class_probs, None, min_score)
    """
    # The ArrayAPI abstraction lets the same code operate on numpy arrays
    # and torch tensors.
    impl = kwarray.ArrayAPI.impl(probs)
    if diameter is None:
        diameter = 1
    if offset is None:
        offset = 0
    # "Uniform" means a single (h, w) / (y, x) value applies to every pixel,
    # as opposed to a dense per-pixel map matching probs' shape.
    diameter_is_uniform = tuple(getattr(diameter, 'shape', []))[1:] != tuple(probs.shape)
    offset_is_uniform = tuple(getattr(offset, 'shape', []))[1:] != tuple(probs.shape)
    if diameter_is_uniform:
        if hasattr(diameter, 'shape'):
            if len(diameter.shape) > 2:
                raise Exception('Trailing diameter shape={} does not agree with probs.shape={}'.format(
                    diameter.shape, probs.shape))
        if not ub.iterable(diameter):
            diameter = [diameter, diameter]
    if offset_is_uniform:
        if not ub.iterable(offset):
            offset = impl.asarray([offset, offset])
    # Candidate pixels: everything above the probability threshold.
    flags = probs > min_score
    # With per-pixel diameters, additionally filter candidates by the
    # requested min/max box dimensions.
    if not diameter_is_uniform:
        if max_dims is not None:
            max_dims = max_dims if ub.iterable(max_dims) else (max_dims, max_dims)
            max_height, max_width = max_dims
            if max_height is not None:
                flags &= diameter[0] <= max_height
            if max_width is not None:
                flags &= diameter[1] <= max_width
        if min_dims is not None:
            min_dims = min_dims if ub.iterable(min_dims) else (min_dims, min_dims)
            min_height, min_width = min_dims
            if min_height is not None:
                flags &= diameter[0] >= min_height
            if min_width is not None:
                flags &= diameter[1] >= min_width
    # Ensure that some detections are returned even if none are above the
    # threshold.
    if num_min is not None:
        numel = impl.numel(flags)
        if flags.sum() < num_min:
            if impl.is_tensor:
                topxs = probs.view(-1).argsort()[max(0, numel - num_min):numel]
                flags.view(-1)[topxs] = 1
            else:
                idxs = kwarray.argmaxima(probs, num=num_min, ordered=False)
                # idxs = probs.argsort(axis=None)[-num_min:]
                flags.ravel()[idxs] = True
    # Pixel coordinates of the surviving candidates (row=y, col=x).
    yc, xc = impl.nonzero(flags)
    yc_ = impl.astype(yc, np.float32)
    xc_ = impl.astype(xc, np.float32)
    if diameter_is_uniform:
        h = impl.full_like(yc_, fill_value=diameter[0])
        w = impl.full_like(xc_, fill_value=diameter[1])
    else:
        h = impl.astype(diameter[0][flags], np.float32)
        w = impl.astype(diameter[1][flags], np.float32)
    # Assemble center-x, center-y, width, height columns into boxes.
    cxywh = impl.cat([xc_[:, None], yc_[:, None], w[:, None], h[:, None]], axis=1)
    ltrb = kwimage.Boxes(cxywh, 'cxywh').toformat('ltrb')
    scores = probs[flags]
    # TODO:
    # Can we extract the detected segmentation mask/poly here as well?
    dets = kwimage.Detections(boxes=ltrb, scores=scores)
    # Get per-class probs for each detection
    if class_probs is not None:
        det_probs = impl.T(class_probs[:, yc, xc])
        dets.data['probs'] = det_probs
    if offset is not None:
        # Offsets are stored (y, x); boxes translate expects (x, y), hence
        # the [[1, 0]] reordering.
        if offset_is_uniform:
            det_dxdy = offset[[1, 0]]
        else:
            det_dxdy = impl.T(offset[:, yc, xc][[1, 0]])
        dets.boxes.translate(det_dxdy, inplace=True)
    if keypoints is not None:
        # Take keypoint predictions for each remaining detection
        det_kpts_xy = impl.contiguous(impl.T(keypoints[:, :, yc, xc][[1, 0]]))
        # Translate keypoints to absolute coordinates
        det_kpts_xy[..., 0] += xc_[:, None]
        det_kpts_xy[..., 1] += yc_[:, None]
        # The shape of det_kpts_xy is [N, K, 2]
        # TODO: need to package kp_classes as well
        # TODO: can we make this faster? It is bottlenecking, in this instance
        # the points list wont be jagged, so perhaps we can use a denser data
        # structure?
        if 1:
            # Try using a dense homogenous data structure
            det_coords = kwimage.Coords(det_kpts_xy)
            det_kpts = kwimage.Points({'xy': det_coords})
        else:
            # Using a jagged non-homogenous data structure is slow
            det_coords = [
                kwimage.Coords(xys) for xys in det_kpts_xy
            ]
            det_kpts = kwimage.PointsList([
                kwimage.Points({'xy': xy}) for xy in det_coords
            ])
        dets.data['keypoints'] = det_kpts
    assert len(dets.scores.shape) == 1
    return dets
3142495ac14122c1d42b68c527c9ec21cf3b5c68
3,635,045
def pad_sequences(sequences, pad_symbol, max_length=None,
                  mask_present_symbol=None, padding_mode='both'):
    """
    Pads a collection of sequences. Will work only for two dimensional data.

    :param sequences: list or np array, which has sequences (lists or np
                      arrays) that should be padded with respect to the
                      max_length.
    :param pad_symbol: a symbol that should be used for padding.
    :param max_length: length of the desired padded sequences. If it's not
                       provided the length of the longest sequence will be
                       used.
    :param mask_present_symbol: a symbol(token) that should be masked in
                                sequences. E.g. Can be used to mask <UNK>
                                tokens.
    :param padding_mode: left, right, or both. Defines the side to which
                         padding symbols should be appended.
    :return: 2D numpy array with padded sequences, and 2D binary mask
             (numpy float).
    """
    if not isinstance(sequences, (list, np.ndarray)):
        raise TypeError("Please provide a valid collection of sequences."
                        " It must be np array or list.")

    # Fall back to the longest sequence's length when no target is given.
    if max_length is None:
        max_length = max((len(seq) for seq in sequences), default=0)
    if max_length == 0:
        raise ValueError("Can't pad empty sequences.")

    padded_sentences = []
    masks = []
    for seq in sequences:
        if not isinstance(seq, (list, np.ndarray)):
            raise ValueError("All sequences must be lists or 1D numpy arrays.")
        padded, mask = pad_sequence(seq, pad_symbol, max_length=max_length,
                                    mask_present_symbol=mask_present_symbol,
                                    padding_mode=padding_mode)
        padded_sentences.append(padded)
        masks.append(mask)
    return np.array(padded_sentences), np.array(masks, dtype="float32")
6ccdc43b63e04526a8376c878493323075f14808
3,635,046
from typing import Sequence
from typing import Tuple


def cast_cal_range(cal_range: Sequence[raw_mz_type]) -> Tuple[float, float]:
    """
    Coerce a two-element calibration range into a pair of floats.

    :param cal_range: sequence holding the (min, max) calibration bounds
    :return: ``(min, max)`` converted to ``float``
    """
    lower, upper = cal_range
    return float(lower), float(upper)
22bfb7461209e4a31d871aa2237a1add190abaaf
3,635,047
import tarfile
from io import StringIO
import re


def get_warc_identifiers(sip):
    """Parses the SIP in HDFS and retrieves WARC/ARK tuples.

    Downloads ``<SIP_ROOT>/<sip>.tar.gz`` from HDFS, reads every ``.xml``
    METS file inside it and pairs each WARC file entry's heritrix path with
    its PREMIS object identifier.

    NOTE(review): ``StringIO(t)`` over the raw tar payload indicates this is
    Python 2 code (py3 would require BytesIO) -- confirm before porting.

    :param sip: SIP name (basename of the tarball, without .tar.gz)
    :return: list of (warc_path, object_identifier) tuples; empty when the
        SIP tarball does not exist
    """
    w = webhdfs.API(prefix=WEBHDFS)
    identifiers = []
    tar = "%s/%s.tar.gz" % (SIP_ROOT, sip)
    if w.exists(tar):
        logger.debug("Found %s" % tar)
        t = w.open(tar)
        tar = tarfile.open(mode="r:gz", fileobj=StringIO(t))
        for i in tar.getmembers():
            if i.name.endswith(".xml"):
                xml = tar.extractfile(i).read()
                tree = etree.fromstring(xml)
                # Each METS <file> of type application/warc links (via ADMID)
                # to an <amdSec> carrying the PREMIS object identifier.
                for warc in tree.xpath("//mets:file[@MIMETYPE='application/warc']", namespaces=NS):
                    try:
                        admid = warc.attrib["ADMID"]
                        amdsec = tree.xpath("//mets:amdSec[@ID='%s']" % admid, namespaces=NS)[0]
                        oiv = amdsec.xpath("mets:digiprovMD/mets:mdWrap/mets:xmlData/premis:object/premis:objectIdentifier/premis:objectIdentifierValue", namespaces=NS)[0]
                        # Strip the query string from the FLocat href, keeping
                        # only the /heritrix...warc.gz path portion.
                        path = re.findall("^.+(/heritrix.+\.warc\.gz)\?.+$", warc.xpath("mets:FLocat", namespaces=NS)[0].attrib["%shref" % XLINK])[0]
                        identifiers.append((path, oiv.text))
                    # NOTE(review): `as i` shadows the tar-member loop
                    # variable; harmless here (it is reassigned at the next
                    # iteration) but worth renaming.
                    except IndexError as i:
                        logger.error("Problem parsing METS for SIP: %s" % sip)
    else:
        logger.warning("Could not find SIP: hdfs://%s" % tar)
    return identifiers
1df6eab92c8d553b3a3e1d30752b7338482819a5
3,635,048
def request_get(url):
    """
    Performs a 'GET' request on the provided url; on success returns the
    response content, and on any error returns None.

    :param url: address of the web site to fetch
    :return: ``requests`` response content (bytes), or None on failure
    """
    try:
        # stream=True + closing() ensures the connection is released even
        # when the body is not fully consumed.
        with closing(get(url, stream=True)) as resp:
            if is_good_response(resp):
                return resp.content
            else:
                return None

    except RequestException as e:
        # Network-level failures are logged and reported as None.
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
e13de348651c9254c1b17fa26c49faa420a670f9
3,635,049
import sys


def in_virtualenv_currently():
    """Check whether currently running inside of a virtualenv or not"""
    # Outside a virtualenv the (compat) base prefix equals sys.prefix.
    return sys.prefix != get_base_prefix_compat()
867bc0d73d8fca95297e7830abe6b2c896785019
3,635,050
def make_tensor(tensor):
    """
    Convert a numpy array to protobuf format.

    :param tensor: numpy array to serialize
    :return: a ``projector_pb2.Tensor`` message carrying the array's dtype
        name, shape and raw bytes
    """
    # Describe every dimension of the array in the protobuf shape message.
    shape = projector_pb2.Tensor.TensorShape(dim=[projector_pb2.Tensor.TensorShape.Dim(size=d) for d in tensor.shape])
    return projector_pb2.Tensor(dtype=str(tensor.dtype), tensor_shape=shape, tensor_content=tensor.tobytes())
9bfe06f21f5286b1604f3dde13d8740dc1c5a5df
3,635,051
def pad(sequences, max_length, pad_value=0):
    """Pads a list of sequences.

    Args:
      sequences: A list of sequences to be padded.
      max_length: The length to pad to.
      pad_value: The value used for padding.

    Returns:
      A list of padded sequences.
    """
    out = []
    for sequence in sequences:
        # Bug fix: the original appended the literal 0 regardless of the
        # requested pad_value; use pad_value as documented.
        padded = sequence + [pad_value] * (max_length - len(sequence))
        out.append(padded)
    return out
68d0a8a19352e3e724ef012a396b51c28005ff02
3,635,052
def dict_blocks_decoder(nB, v, step, lookup: dict, dec_fmt: str):
    """ Decodes a single block from a NORB pooling design

    :param nB: number of blocks to decode
    :param v: array-like of pooled measurements, consumed in consecutive
        slices of ``step`` values per block
    :param step: number of measurements per block
    :param lookup: lookup table handed to ``DictBlockDecoder``
    :param dec_fmt: decoder output format, 'GP' or 'GT'
    :return: squeezed numpy array of the decoded genotypes

    NOTE(review): if ``decoder.fmt`` is neither 'GP' nor 'GT', ``q`` is
    never assigned and the append raises NameError -- confirm the format is
    validated upstream.
    """
    ds = Design()
    dm = ds.matrix
    decoder = DictBlockDecoder(dm, lookup, format=dec_fmt)
    res = []
    for b in range(nB):
        # Slice out this block's `step` pooled measurements.
        p = v.squeeze()[step*b:step*b + step]
        if decoder.fmt == 'GP':
            q = decoder.decode_genotypes_gp(p)
        elif decoder.fmt == 'GT':
            q = decoder.decode_genotypes_gt(p)
        res.append(q)
    return np.asarray(res).squeeze()
10297e9b09919cca73962cfca54f4b6de7738f30
3,635,053
def _build_circuit_layers_and_connectivity_nearest_neighbors(n_qubits):
    """Function to generate circuit layers for processors with nearest-neighbor connectivity

    Args:
        n_qubits (int): number of qubits in the qubit array

    Returns:
        (zquantum.core.circuit.CircuitConnectivity, zquantum.core.circuit.CircuitLayers)
    """
    # Nearest-neighbor pairs starting on even / odd qubit indices.
    even_layer = [(qubit, qubit + 1) for qubit in range(0, n_qubits - 1, 2)]
    odd_layer = [(qubit, qubit + 1) for qubit in range(1, n_qubits - 1, 2)]
    connectivity = even_layer + odd_layer
    return CircuitConnectivity(connectivity), CircuitLayers([even_layer, odd_layer])
62e0a9b5f1ca9e6ddb5713ee8389126983d44793
3,635,054
def band_colormap( cmap, nband=10 ):
    """ -> a colormap with e.g. 10 bands

    Samples `cmap` at `nband` evenly spaced points -- offset half a band in
    from each end -- and builds a discrete colormap from those colors.

    :param cmap: colormap name or instance accepted by `get_cmap`
    :param nband: number of discrete color bands
    :return: result of `array_cmap` built from the sampled colors
    """
    cmap = get_cmap( cmap )
    # Sample at band centers: half a band-width in from each end.
    h = .5 / nband
    A = cmap( np.linspace( h, 1 - h, nband ))
    name = "%s-band-%d" % (cmap.name, nband)
    return array_cmap( A, name, n=nband )
dca6fa0afcefc25e288f8eb24599554286de7c05
3,635,055
def rolling_optimal_combo_stats(ret1, ret2, window_len, window_step, nsteps=20, period='monthly', rebal_period=3, downside_vol=True): """Find the optimal (volatility-minimizing) combination of two return series over a rolling window. Args: - ret1: a sequence of period returns (.01 = 1% return), indexed by date - ret2: a sequence of period returns (.01 = 1% return), indexed by date - window_len: length of window of return series to use for computation of stats, repeatedly - window_step: number of periods to advance window on each iteration (`window_step` == `window_len` => contiguous nonoverlapping windows) - nsteps: divide toe [0.0, 1.0] interval into this many steps in the search for the min vol combination - period: return interval in `ret1`, `ret2`: {'daily', 'weekly', 'monthly'} - rebal_period: interval for rebalancing the allocation; expressed as number of periods of type `period` - downside_vol: If true, use downside deviation as volatility metric, else standard deviation Return: - DataFrame containing portfolio performance stats for the optimal portfolio over each window. """ # column name of chosen volatility metric vol_column = 'downside_dev' if downside_vol else 'standard_dev' # find the inner-joined indices of the two return series joined_index = pd.concat([ret1, ret2], axis=1, join='inner').index #print(joined_index) data = [] dates = [] for start_date, end_date in window_gen(joined_index, window_len, window_step): df, _ = ret_vol_combos(ret1[start_date:end_date], ret2[start_date:end_date], nsteps, period, rebal_period) # pick "optimal" portfolio -- choose one with minimum volatility (could use std or downside) # index of row with lowest value for downside_dev best_row_idx = sorted(enumerate(df[vol_column]), key=lambda x: x[1])[0][0] data.append(df.iloc[best_row_idx]) dates.append(end_date) return pd.DataFrame(data, index=dates)
4796faf706fc8ce49588e64ffcf98222432ff031
3,635,056
def forward_backward_prop(data, labels, params, dimensions): """ Forward and backward propagation for a two-layer sigmoidal network Compute the forward propagation and for the cross entropy cost, and backward propagation for the gradients for all parameters. Arguments: data -- M x Dx matrix, where each row is a training example. labels -- M x Dy matrix, where each row is a one-hot vector. params -- Model parameters, these are unpacked for you. dimensions -- A tuple of input dimension, number of hidden units and output dimension """ ### Unpack network parameters (do not modify) ofs = 0 Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2]) W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H)) ofs += Dx * H b1 = np.reshape(params[ofs:ofs + H], (1, H)) ofs += H W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy)) ofs += H * Dy b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy)) ### YOUR CODE HERE: forward propagation l1 = sigmoid(np.dot(data, W1) + b1) l2 = softmax(np.dot(l1, W2) + b2) M, _ = data.shape cost = -np.sum(np.log(l2[labels == 1])) / M ### END YOUR CODE ### YOUR CODE HERE: backward propagation # gradient = y - y_head gradl2 = (l2 - labels) / M # input = gradl2, wight = l1.T gradW2 = np.dot(l1.T, gradl2) # bias is caused by input from the neuron by fixed activation 1 gradb2 = np.sum(gradl2, axis=0, keepdims=True) # gradient = d(sigmoid(x)) / dx gradl1 = np.dot(gradl2, W2.T) * sigmoid_grad(l1) gradW1 = np.dot(data.T, gradl1) gradb1 = np.sum(gradl1, axis=0, keepdims=True) ### END YOUR CODE ### Stack gradients (do not modify) grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), gradW2.flatten(), gradb2.flatten())) return cost, grad
c3a774117aced21c117f2dea53f1b1891f7562db
3,635,057
import copy


def generate_output_descriptors(filename_out_base, max_block_size_voxels,
                                overlap_size_voxels, dim_order, header,
                                output_type, num_dims, output_file_format,
                                image_size, msb, compression, voxel_size):
    """Creates descriptors representing file output"""
    block_size = convert_to_array(max_block_size_voxels, "block size", num_dims)
    overlap_size = convert_to_array(overlap_size_voxels, "overlap size", num_dims)
    ranges = ranges_for_max_block_size(image_size, block_size, overlap_size)
    extension = FormatFactory.get_extension_for_format(output_file_format)
    single_file = len(ranges) <= 1

    descriptors_out = []
    for index, subimage_range in enumerate(ranges):
        # Multi-file output gets a zero-padded numeric suffix per subimage.
        suffix = "" if single_file else "_" + '{0:04d}'.format(index)
        descriptors_out.append(SubImageDescriptor(
            filename=filename_out_base + suffix + extension,
            file_format=output_file_format,
            ranges=subimage_range,
            suffix=suffix,
            index=index,
            dim_order_condensed=dim_order,
            data_type=output_type,
            # Each descriptor gets its own copy of the header template.
            template=copy.deepcopy(header),
            msb=msb,
            compression=compression,
            voxel_size=voxel_size))
    return descriptors_out
ebb7ecbc3f3105ee995033fbccccd8f745a6a12d
3,635,058
def clean_data(df):
    """
    Clean the loaded dataset: expand the semicolon-delimited ``categories``
    column into one numeric column per category and drop duplicate rows.

    Parameters:
        df (pandas dataframe): loaded data from load_data function.

    Returns:
        df (pandas dataframe): cleaned version of the data.
    """
    # Expand "cat_a-1;cat_b-0;..." into one column per category.
    categories = df['categories'].str.split(";", expand=True)

    # Derive column names from the first row: the part before the "-".
    first_row = categories.iloc[0]
    categories.columns = [entry.split("-")[0] for entry in first_row]

    # Keep only the trailing character of each cell and make it numeric.
    for name in categories:
        categories[name] = categories[name].str[-1:].astype(float)

    merged = pd.merge(df, categories,
                      left_index=True, right_index=True, how='inner')
    return merged.drop_duplicates()
12706ed7889482b709f33e085d10dc92f0d1bf9e
3,635,059
import math


def squeezenet1_0_fpn_feature_shape_fn(img_shape):
    """Takes an image_shape as an input to calculate the FPN output sizes

    Ensure that img_shape is of the format (..., H, W)

    Args
        img_shape : image shape as torch.Tensor not torch.Size
                    should have H, W as last 2 axis

    Returns
        P3_shape, P4_shape, P5_shape, P6_shape, P7_shape : as 5 (2,) Tensors
    """
    # Every stage halves the spatial size; `margin` models the un-padded
    # kernel extent and `rounding` the layer's rounding mode.
    def shrink(shape, margin, rounding):
        return (rounding((shape[0] - margin) / 2),
                rounding((shape[1] - margin) / 2))

    c0 = img_shape[-2:]
    c1 = shrink(c0, 5, math.floor)  # stem stage uses floor rounding
    c2 = shrink(c1, 1, math.ceil)
    p3 = shrink(c2, 1, math.ceil)
    p4 = shrink(p3, 1, math.ceil)
    p5 = shrink(p4, 1, math.ceil)
    p6 = shrink(p5, 0, math.ceil)
    p7 = shrink(p6, 0, math.ceil)
    return c2, p3, p4, p5, p6, p7
d56fe3d834bcd9633727defe3ad9a27ea756ed40
3,635,060
from pathlib import Path


def temp_paths_2(tmp_path_factory):
    """
    Makes temporary directories, for testing bak_to_git_2, and populates
    them with test files.
    Returns pathlib.Path objects for each.
    """
    temp_path: Path = tmp_path_factory.mktemp("baktogit2")
    bak_path = temp_path / "_0_bak"
    bak_path.mkdir()

    # Three timestamped .bak snapshots of the same file, oldest first.
    t1 = "20211001_083010"
    p1 = bak_path / f"test.txt.{t1}.bak"
    p1.write_text("One\n")

    t2 = "20211101_093011"
    p2 = bak_path / f"test.txt.{t2}.bak"
    p2.write_text("Tahoo\n")

    t3 = "20211201_103012"
    p3 = bak_path / f"test.txt.{t3}.bak"
    p3.write_text("Tharee\n")

    csv_path = temp_path / "step-1-files-changed.csv"
    csv_lines = []
    csv_lines.append(csv_header_row())
    # Data rows reference the current snapshot and the previous one; the
    # interleaved "N,,,,,,,,," rows are filler rows with only a row number.
    csv_lines.append(
        csv_data_row(
            "1",
            f"{t1}:{bak_base_name(p1.name)}",
            str(p1),
            "",  # first snapshot has no predecessor
            t1,
            bak_base_name(p1.name),
            "",
            "",
            "",
            "",
        )
    )
    csv_lines.append("2,,,,,,,,,")
    csv_lines.append(
        csv_data_row(
            "3",
            f"{t2}:{bak_base_name(p2.name)}",
            str(p2),
            str(p1),
            t2,
            bak_base_name(p2.name),
            "",
            "",
            "",
            "",
        )
    )
    csv_lines.append("4,,,,,,,,,")
    csv_lines.append(
        csv_data_row(
            "5",
            f"{t3}:{bak_base_name(p3.name)}",
            str(p3),
            str(p2),
            t3,
            bak_base_name(p3.name),
            "",
            "",
            "",
            "",
        )
    )
    csv_lines.append("6,,,,,,,,,")

    csv_path.write_text("\n".join(csv_lines))

    return temp_path, bak_path, csv_path
da3b9ef6af7dc04bbc1aadfb3147223564f71458
3,635,061
def clip_alpha(aj, H, L):
    """Clamp ``aj`` into the closed interval [L, H] and return it."""
    return max(L, min(aj, H))
d272e2703c1b6008fc4840e887ce842005dfad62
3,635,062
def nextfig():
    """Return one greater than the largest-numbered figure currently open.

    If no figures are open, return unity.

    No inputs or options."""
    # 2010-03-01 14:28 IJC: Created
    # Fix: the original had an unreachable `return figlist` after both
    # branches had already returned; it has been removed.
    figlist = getfigs()
    if not figlist:
        return 1
    return max(figlist) + 1
d8a4ec57880f247d243f80e662e1172456551984
3,635,063
from pathlib import Path


def load_laurent2016():
    """Model dataset for refolded fold

    Returns
    -------
    tuple
        pandas data frame with loopstructural dataset and numpy array for
        bounding box
    """
    # Data files ship alongside this module.
    base = Path(dirname(__file__))
    data = pd.read_csv(base / 'data' / 'refolded_fold.csv')
    bb = np.loadtxt(base / 'data' / 'refolded_bb.txt')
    return data, bb
01a048b4e8748e9cc4e7ef9ea1c1385bfd0faaed
3,635,064
def get_user_best(key: str, user: int, mode: int = 0, limit: int = 10,
                  type_: str = None, type_return: str = 'dict'):
    """Get the top scores for the specified user."""
    response = req.get(urls['user_best'], params={
        'k': key,
        'u': user,
        'm': mode,
        'limit': limit,
        'type': type_,
    })
    return from_json(response.text, type_return)
b42357c0ca3553c2cf01624869931748c2df897c
3,635,065
import types def _copy_fn(fn): """Create a deep copy of fn. Args: fn: a callable Returns: A `FunctionType`: a deep copy of fn. Raises: TypeError: if `fn` is not a callable. """ if not callable(fn): raise TypeError("fn is not callable: %s" % fn) # The blessed way to copy a function. copy.deepcopy fails to create a # non-reference copy. Since: # types.FunctionType == type(lambda: None), # and the docstring for the function type states: # # function(code, globals[, name[, argdefs[, closure]]]) # # Create a function object from a code object and a dictionary. # ... # # Here we can use this to create a new function with the old function's # code, globals, closure, etc. return types.FunctionType( code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__)
37fca64ddaadfc8a6a24dce012af2143038cacd2
3,635,066
from re import T


def ireport():
    """ Incident Reports, RESTful controller """
    # NOTE(review): `from re import T` binds re's TEMPLATE flag, yet T is
    # used below as what looks like the web2py translator — confirm the
    # intended file-level import.
    resource = request.function
    tablename = "%s_%s" % (module, resource)
    table = db[tablename]

    # Don't send the locations list to client (pulled by AJAX instead)
    table.location_id.requires = IS_NULL_OR(IS_ONE_OF_EMPTY(db, "gis_location.id"))

    # Non-Editors should only see a limited set of options
    if not shn_has_role("Editor"):
        allowed_opts = [irs_incident_type_opts.get(opt.code, opt.code)
                        for opt in db().select(db.irs_icategory.code)]
        table.category.requires = IS_NULL_OR(IS_IN_SET(allowed_opts))

    # Pre-processor: adjusts table defaults/visibility per request method.
    def prep(r):
        if r.method == "ushahidi":
            auth.settings.on_failed_authorization = r.other(method="", vars=None)
        elif r.method == "update":
            # Disable legacy fields, unless updating, so the data can be
            # manually transferred to new fields
            table.source.readable = table.source.writable = False
            table.source_id.readable = table.source_id.writable = False
        elif r.representation in ("html", "popup") and r.method == "create":
            # Default the report time to now and the reporter to the
            # logged-in user's person record (if any).
            table.datetime.default = request.utcnow
            person = session.auth.user.id if auth.is_logged_in() else None
            if person:
                person_uuid = db(db.auth_user.id == person).select(
                    db.auth_user.person_uuid, limitby=(0, 1)).first().person_uuid
                person = db(db.pr_person.uuid == person_uuid).select(
                    db.pr_person.id, limitby=(0, 1)).first().id
                table.person_id.default = person
        return True
    response.s3.prep = prep

    # Non-Editors cannot link reports to incidents or assessments/reports.
    if not shn_has_role("Editor"):
        table.incident_id.readable = table.incident_id.writable = False
        db.irs_iimage.assessment_id.readable = \
            db.irs_iimage.assessment_id.writable = False
        db.irs_iimage.report_id.readable = \
            db.irs_iimage.report_id.writable = False

    # Post-processor: attach the standard action buttons to list views.
    def user_postp(jr, output):
        shn_action_buttons(jr, deletable=False, copyable=True)
        return output
    response.s3.postp = user_postp

    rheader = lambda r: shn_irs_rheader(r,
                                        tabs = [(T("Report Details"), None),
                                                (T("Images"), "iimage")
                                                ])

    response.s3.pagination = True
    output = shn_rest_controller(module, resource,
                                 listadd=False,
                                 rheader=rheader)
    return output
02cc630ce76336ff3e94021c0378daf48fe6a3bd
3,635,067
import torch


def makenetbn(dims, softmax=True, single=True):
    """A batch-normalizing version of makenet. Experimental."""
    n_layers = len(dims) - 1

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            # Layers must be set as individual attributes (fc0/bn0, fc1/bn1,
            # ...) so nn.Module registers their parameters.
            for idx in range(n_layers):
                fc = nn.Linear(dims[idx], dims[idx + 1])
                bn = nn.BatchNorm1d(num_features=dims[idx + 1])
                if not single:
                    fc, bn = fc.double(), bn.double()
                if torch.cuda.is_available():
                    fc, bn = fc.cuda(), bn.cuda()
                setattr(self, f'fc{idx}', fc)
                setattr(self, f'bn{idx}', bn)

        def forward(self, x):
            # Leaky ReLU + batch-norm on every layer except the last.
            for idx in range(n_layers - 1):
                fc = getattr(self, f'fc{idx}')
                bn = getattr(self, f'bn{idx}')
                x = bn(F.leaky_relu(fc(x), negative_slope=0.2))
            x = getattr(self, f'fc{n_layers - 1}')(x)
            return F.softmax(x, dim=1) if softmax else x

    return Net
b9978c610992bbd0566cb8d855613761446a050f
3,635,068
def _ccsd_t_energy(output_str):
    """ Reads the CCSD(T)/UCCSD(T) energy from the output file string.
        Returns the energy in Hartrees.

        :param output_str: string of the program's output file
        :type output_str: str
        :rtype: float
    """
    # Match whichever of the known energy banner lines appears in the output.
    ene = ar.energy.read(
        output_str,
        app.one_of_these([
            app.escape('!CCSD(T) total energy') + app.maybe(':'),
            app.escape('!RHF-UCCSD(T) energy'),
            app.LINESPACES.join([
                app.escape('!CCSD(T) STATE'),
                app.FLOAT,
                app.escape('Energy')]),
        ]))

    return ene
74352ad0f717de6b6c1125508a0556635e6446a3
3,635,069
from miner_globals import getCurrentScriptPath


def getMyPath():
    """returns path of current script"""
    # Thin wrapper so callers need not import miner_globals directly.
    return getCurrentScriptPath()
7ba447d8a7b34a9e0ada1ae11cdefa81ec5a89e9
3,635,070
import random


def get_codename():
    """Helper for generating a random codename to represent a voter in the
    admin interface.

    To protect voting privacy of our voters, we are using hashes to make it
    slightly more difficult to reveal/infer who voted for who. On the admin
    interface, however, instead of using a less human-friendly hash, we are
    representing voters with randomly generated codenames, which still
    obscures who they are but looks a bit better. The codename is always
    randomly generated and does not stay the same like the hash, since we
    don't need to use it to track anything.
    """
    colors = ['Black', 'Blue', 'Red', 'Brown', 'Gray', 'Green', 'Yellow',
              'Purple', 'White', 'Orange', 'Pink']
    animals = ['Mamba', 'Raptor', 'Eagle', 'Hawk', 'Sparrow', 'Snake',
               'Mosquito', 'Turkey', 'Opossum', 'Narwhal', 'Seabass',
               'Octopus', 'Jellyfish', 'Armadillo', 'Lemur', 'Tiger',
               'Whale', 'Elephant', 'Turtle', 'Dragon', 'Horse', 'Donkey',
               'Coyote', 'Penguin', 'Fox', 'Mouse', 'Albatross', 'Mammoth',
               'Tiger', 'Bear', 'Weasel']
    # Two draws in the same order as before, so seeded output is unchanged.
    return "{} {}".format(random.choice(colors), random.choice(animals))
6baf9cc8dd774f0d5541980d7a48be98cb4c66a0
3,635,071
def load_paste_config(app_name, options, args):
    """
    Looks for a config file to use for an app and returns the
    config file path and a configuration mapping from a paste config file.

    We search for the paste config file in the following order:
    * If --config-file option is used, use that
    * If args[0] is a file, use that
    * Search for keystone.conf in standard directories:
        * .
        * ~.keystone/
        * ~
        * /etc/keystone
        * /etc

    :param app_name: Name of the application to load config for, or None.
                     None signifies to only load the [DEFAULT] section of
                     the config file.
    :param options: Set of typed options returned from parse_options()
    :param args: Command line arguments from argv[1:]
    :returns: Tuple of (conf_file, conf)
    :raises: RuntimeError when config file cannot be located or there was a
             problem loading the configuration file.
    """
    conf_file = find_config_file(options, args)
    if not conf_file:
        raise RuntimeError("Unable to locate any configuration file. "
                           "Cannot load application %s" % app_name)
    try:
        conf = deploy.appconfig("config:%s" % conf_file, name=app_name)
        # Paste only reads its own sections; merge in non-paste settings too.
        conf.global_conf.update(get_non_paste_configs(conf_file))
        return conf_file, conf
    # Fix: the original used the Python-2-only `except Exception, e` form,
    # which is a SyntaxError on Python 3; `as` works on 2.6+ and 3.x.
    except Exception as e:
        raise RuntimeError("Error loading config %s: %s" % (conf_file, e))
26524003ac407433eb82b196aed8836aad7ccf92
3,635,072
import numpy


def get_nearest_to_layers_mean_indicators(layers):
    """
    Return indicators of weights in layers nearest to layer weight mean.

    For every given layer, computes the layer's weight mean and returns
    importance indicators for every weight based on how close to that mean
    it is. Weights closest to their layer mean are marked as more likely
    to delete.

    :param iterable layers: layers to get indicators for
    :return: indicators
    """
    per_layer = list(map(_get_nearest_to_layer_mean_indicators, layers))
    return numpy.array(per_layer)
5fe27d680566097743b708c07fdeb44b1f68ce0f
3,635,073
def mach_wave_angle(mach: float):
    """Return the angle of the Mach wave given the Mach number after a turn

    Parameters
    ----------
    mach : float
        The mach number after a turn

    Returns
    -------
    float
        The angle of the mach wave in degrees
    """
    # tan(mu) = 1 / sqrt(M^2 - 1)
    beta = (mach ** 2 - 1) ** .5
    return arctand(1 / beta)
a5dd1d2021dbdf87a4c255a207ad0d8b9627a407
3,635,074
def download_to_file(url, file, quiet=False):
    """Downloads a URL to file. Returns the file size.

    Returns -1 if the downloaded file size does not match the expected
    file size
    Returns -2 if the download is skipped due to the file at the URL not
    being newer than the local copy (identified by matching timestamp and
    size)
    """
    # Create directory structure if necessary
    if os.path.dirname(file):
        try:
            os.makedirs(os.path.dirname(file))
        except Exception:
            pass

    localcopy = os.path.isfile(file)

    # Fix: the ssl import belongs inside the try block (the original left an
    # empty `try:` body, a SyntaxError). On builds without ssl, SSLError is
    # None and the matching except clause below simply never applies.
    try:
        from ssl import SSLError
    except ImportError:
        SSLError = None

    # Open connection to remote server
    try:
        url_request = Request(url)
        if localcopy:
            # Shorten timeout to 7 seconds if a copy of the file is already present
            socket = urlopen(url_request, None, 7)
        else:
            socket = urlopen(url_request)
    except SSLError as e:
        # This could be a timeout
        if localcopy:
            # Download failed for some reason, but a valid local copy of
            # the file exists, so use that one instead.
            if not quiet:
                print(str(e))
            return -2
        # otherwise pass on the error message
        raise
    except (pysocket.timeout, HTTPError) as e:
        if localcopy:
            # Download failed for some reason, but a valid local copy of
            # the file exists, so use that one instead.
            if not quiet:
                print(str(e))
            return -2
        # otherwise pass on the error message
        raise
    except URLError as e:
        if localcopy:
            # Download failed for some reason, but a valid local copy of
            # the file exists, so use that one instead.
            if not quiet:
                print(str(e))
            return -2
        # if url fails to open, try using curl
        # temporary fix for old OpenSSL in system Python on macOS
        # https://github.com/cctbx/cctbx_project/issues/33
        command = ["/usr/bin/curl", "--http1.0", "-fLo", file,
                   "--retry", "5", url]
        subprocess.call(command)
        socket = None  # prevent later socket code from being run
        try:
            received = os.path.getsize(file)
        except OSError:
            raise RuntimeError("Download failed")

    if socket is not None:
        try:
            file_size = int(socket.info().get("Content-Length"))
        except Exception:
            file_size = 0

        remote_mtime = 0
        try:
            remote_mtime = time.mktime(socket.info().getdate("last-modified"))
        except Exception:
            pass

        if file_size > 0:
            if remote_mtime > 0:
                # check if existing file matches remote size and timestamp
                try:
                    (
                        mode,
                        ino,
                        dev,
                        nlink,
                        uid,
                        gid,
                        size,
                        atime,
                        mtime,
                        ctime,
                    ) = os.stat(file)
                    if (size == file_size) and (remote_mtime == mtime):
                        if not quiet:
                            print("local copy is current")
                        socket.close()
                        return -2
                except Exception:
                    # proceed with download if timestamp/size check fails
                    # for any reason
                    pass

            hr_size = (file_size, "B")
            if hr_size[0] > 500:
                hr_size = (hr_size[0] / 1024, "kB")
            if hr_size[0] > 500:
                hr_size = (hr_size[0] / 1024, "MB")
            if not quiet:
                print("%.1f %s" % hr_size)
                print("    [0%", end="")
                sys.stdout.flush()  # becomes print(flush=True) when we move to 3.3+

        received = 0
        block_size = 8192
        progress = 1
        # Write to the file immediately so we can empty the buffer
        tmpfile = file + ".tmp"
        with open(tmpfile, "wb") as fh:
            while True:
                block = socket.read(block_size)
                received += len(block)
                fh.write(block)
                if file_size > 0 and not quiet:
                    while (100 * received / file_size) > progress:
                        progress += 1
                        if (progress % 20) == 0:
                            print(progress, end="%")
                            sys.stdout.flush()  # becomes print(flush=True) when we move to 3.3+
                        elif (progress % 2) == 0:
                            print(".", end="")
                            sys.stdout.flush()  # becomes print(flush=True) when we move to 3.3+
                if not block:
                    break
        socket.close()

        if not quiet:
            if file_size > 0:
                print("]")
            else:
                print("%d kB" % (received / 1024))
            sys.stdout.flush()  # becomes print(flush=True) when we move to 3.3+

        # Do not overwrite file during the download. If a download temporarily
        # fails we may still have a clean, working (yet older) copy of the file.
        # (Kept inside the socket branch: the curl fallback above already
        # wrote directly to `file` and defines no tmpfile/file_size.)
        shutil.move(tmpfile, file)

        if (file_size > 0) and (file_size != received):
            return -1

        if remote_mtime > 0:
            # set file timestamp if timestamp information is available
            st = os.stat(file)
            atime = st[stat.ST_ATIME]  # current access time
            os.utime(file, (atime, remote_mtime))

    return received
723d5e733c623b6b770f71b31085861433d7ad3d
3,635,075
def peakfit(xvals, yvals, yerrors=None, model='Voight', background='slope',
            initial_parameters=None, fix_parameters=None, method='leastsq',
            print_result=False, plot_result=False):
    """
    Fit x,y data to a peak model using lmfit
    E.G.:
      res = peakfit(x, y, model='Gauss')
      print(res.fit_report())
      res.plot()
      val = res.params['amplitude'].value
      err = res.params['amplitude'].stderr

    Peak Models:
        Choice of peak model: 'Gaussian', 'Lorentzian', 'Voight',' PseudoVoight'
    Background Models:
        Choice of background model: 'slope', 'exponential'

    Peak Parameters:
        'amplitude', 'center', 'sigma', pvoight only: 'fraction'
        output only: 'fwhm', 'height'
    Background parameters:
        'bkg_slope', 'bkg_intercept',
        or for exponential: 'bkg_amplitude', 'bkg_decay'

    Provide initial guess:
        res = peakfit(x, y, model='Voight', initial_parameters={'center':1.23})

    Fix parameter:
        res = peakfit(x, y, model='gauss', fix_parameters={'sigma': fwhm/2.3548200})

    :param xvals: array(n) position data
    :param yvals: array(n) intensity data
    :param yerrors: None or array(n) - error data to pass to fitting function
        as weights: 1/errors^2
    :param model: str, specify the peak model: 'Gaussian','Lorentzian','Voight'
    :param background: str, specify the background model: 'slope', 'exponential'
    :param initial_parameters: None or dict of initial values for parameters
    :param fix_parameters: None or dict of parameters to fix at positions
    :param method: str method name, from lmfit fitting methods
    :param print_result: if True, prints the fit results using fit.fit_report()
    :param plot_result: if True, plots the results using fit.plot()
    :return: lmfit.model.ModelResult < fit results object
    """
    xvals = np.asarray(xvals, dtype=float).reshape(-1)
    yvals = np.asarray(yvals, dtype=float).reshape(-1)
    weights = gen_weights(yerrors)

    if initial_parameters is None:
        initial_parameters = {}
    if fix_parameters is None:
        fix_parameters = {}

    # Resolve the user-supplied names against the model alias tables.
    peak_mod = None
    bkg_mod = None
    for model_name, names in PEAK_MODELS.items():
        if model.lower() in names:
            peak_mod = MODELS[model_name]()
    for model_name, names in BACKGROUND_MODELS.items():
        if background.lower() in names:
            bkg_mod = MODELS[model_name](prefix='bkg_')

    # Initial guess from the peak model plus default background parameters.
    pars = peak_mod.guess(yvals, x=xvals)
    pars += bkg_mod.make_params()
    # pars += bkg_mod.make_params(intercept=np.min(yvals), slope=0)
    # pars['gamma'].set(value=0.7, vary=True, expr='')  # don't fix gamma

    # user input parameters
    for ipar, ival in initial_parameters.items():
        if ipar in pars:
            pars[ipar].set(value=ival, vary=True)
    for ipar, ival in fix_parameters.items():
        if ipar in pars:
            pars[ipar].set(value=ival, vary=False)

    mod = peak_mod + bkg_mod
    res = mod.fit(yvals, pars, x=xvals, weights=weights, method=method)

    if print_result:
        print(res.fit_report())
    if plot_result:
        res.plot()
    return res
2f5aab6bb2eff7eb72217924d1487e5a2f87ec43
3,635,076
import re


def error_027_mnemonic_codes(text):
    """Fix some cases and return (new_text, replacements_count) tuple."""
    # Mask URLs so the substitutions below cannot touch them.
    (text, masked) = ignore(text, r"https?://\S+")
    (text, n_dash) = re.subn(r"&#8211;", "–", text)
    (text, n_space) = re.subn(r"&#x20;", " ", text)
    text = deignore(text, masked)
    return (text, n_dash + n_space)
4716e567db007ab49182ccc9fa82f556f56d55b3
3,635,077
import functools


def debug(_func=None, *, write=False):
    """Prints/writes debugging info for the decorated function.

    Fix: ``_func`` now defaults to None so the decorator works both bare
    (``@debug``) and with arguments (``@debug(write=True)`` / ``debug()``);
    previously the parameterised form raised TypeError despite the
    ``if _func is None`` check.

    NOTE(review): ``write`` is currently unused — wire up file output or
    drop the parameter.
    """
    def debug_decorator(func):
        @functools.wraps(func)
        def debug_decorator_wrapper(*args, **kwargs):
            args_repr = [repr(arg) for arg in args]
            kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
            signature = ", ".join(args_repr + kwargs_repr)
            print(f"Calling {func.__name__}({signature})")
            value = func(*args, **kwargs)
            print(f"{func.__name__!r} returned {value!r}")
            return value
        return debug_decorator_wrapper

    if _func is None:
        return debug_decorator
    return debug_decorator(_func)
f55a8c9620d863292dce16e05289a0cd3a2114fb
3,635,078
def memodict(f):
    """Memoization decorator for a function taking a single argument

    http://code.activestate.com/recipes/578231-probably-the-fastest-memoization-decorator-in-the-/
    """
    class _Memo(dict):
        # Plain dict lookup is the fast path; __missing__ runs only on a miss.
        def __missing__(self, key):
            value = f(key)
            self[key] = value
            return value

    return _Memo().__getitem__
e49da93343320a86d07394c0015589d4d34aab97
3,635,079
def xval(v):
    """Return the scalar x value of a single vector.

    >>> xval(make(1, 2, 3))
    1
    """
    # Guards: argument must be a vec3 with exactly one row.
    assert is_vec3(v)
    assert v.shape[0] == 1
    return v[0, 0]
f4637f54e7350d7c24e40db687cf1ee8b5467a31
3,635,080
import datetime


def payload_full():
    """Build a full JWT payload with all standard timing claims.

    Fix: the original imported ``from datetime import datetime`` but then
    referenced ``datetime.datetime`` / ``datetime.timedelta``, which raises
    AttributeError; the module itself must be imported.
    """
    # One timestamp so iat/nbf/exp are mutually consistent.
    now = datetime.datetime.utcnow()
    return {
        "iss": "https://www.myapplication.com",
        "aud": "https://www.myapplication.com",
        "exp": now + datetime.timedelta(seconds=10),  # expires 10s from issuance
        "iat": now,
        "nbf": now,
        "jid": "user@domain.ext",
    }
c459fecc3b6de6960be7b2eabb44388e09153ca4
3,635,081
def ensure_package(
    requirement_str, error_level=None, error_msg=None, log_success=False
):
    """Verifies that the given package is installed.

    This function uses ``pkg_resources.get_distribution`` to locate the
    package by its pip name and does not actually import the module.
    Therefore, unlike :meth:`ensure_import`, ``requirement_str`` should refer
    to the package name (e.g., "tensorflow-gpu"), not the module name (e.g.,
    "tensorflow").

    Args:
        requirement_str: a PEP 440 compliant package requirement, like
            "tensorflow", "tensorflow<2", "tensorflow==2.3.0", or
            "tensorflow>=1.13,<1.15". This can also be an iterable of
            multiple requirements, all of which must be installed, or this
            can be a single "|"-delimited string specifying multiple
            requirements, at least one of which must be installed
        error_level (None): the error level to use, defined as:

        -   0: raise error if requirement is not satisfied
        -   1: log warning if requirement is not satisifed
        -   2: ignore unsatisifed requirements

        By default, ``fiftyone.config.requirement_error_level`` is used
        error_msg (None): an optional custom error message to use
        log_success (False): whether to generate a log message if the
            requirement is satisifed

    Returns:
        True/False whether the requirement is satisifed
    """
    # Fall back to the globally-configured strictness when not specified.
    if error_level is None:
        error_level = fo.config.requirement_error_level

    # Delegate the actual package/version check to the eta utility.
    return etau.ensure_package(
        requirement_str,
        error_level=error_level,
        error_msg=error_msg,
        error_suffix=_REQUIREMENT_ERROR_SUFFIX,
        log_success=log_success,
    )
1006358dff21366424d2b4085599956d67542ab5
3,635,082
def deliver_image_gif():
    # type: () -> bytes
    """Return a minimal GIF image as raw bytes.

    Fix: the type comment previously claimed ``str``, but ``b64decode``
    returns ``bytes``.
    """
    # 1x1 GIF89a image; b64decode ignores the embedded whitespace.
    return b64decode("""
R0lGODlhAQABAIABAP///wAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==
""")
9dadcba602aeae9ae64f789d594ef42ede5562f3
3,635,083
import logging

# Fix: the original `from re import L` bound re's LOCALE flag to `L`, which
# crashes at `L.info(...)` below; bind a module logger instead.
L = logging.getLogger(__name__)


def build_bootstrap_likelihood(lex, sentence, ontology, alpha=0.25,
                               meaning_prior_smooth=1e-3):
    """
    Prepare a likelihood function `p(meaning | syntax, sentence)` based on
    syntactic bootstrapping.

    Args:
      lex:
      sentence:
      ontology:
      alpha: Mixing parameter for bootstrapping distributions. See `alpha`
        parameter of `Lexicon.lf_ngrams_mixed`.

    Returns:
      likelihood_fn: A likelihood function to be used with
        `predict_zero_shot`.
    """
    # Prepare for syntactic bootstrap: pre-calculate distributions over
    # semantic form elements conditioned on syntactic category.
    lf_ngrams = lex.lf_ngrams_mixed(alpha=alpha, order=1,
                                    smooth=meaning_prior_smooth)
    for category in lf_ngrams:
        # Redistribute UNK probability uniformly across predicates not
        # observed for this category.
        unk_lf_prob = lf_ngrams[category].pop(None)
        unobserved_preds = set(f.name for f in ontology.functions) \
            - set(lf_ngrams[category].keys())
        lf_ngrams[category].update({pred: unk_lf_prob / len(unobserved_preds)
                                    for pred in unobserved_preds})

        L.info("% 20s %s", category,
               ", ".join("%.03f %s" % (prob, pred)
                         for pred, prob in sorted(lf_ngrams[category].items(),
                                                  key=lambda x: x[1],
                                                  reverse=True)))

    def likelihood_fn(tokens, categories, exprs, sentence_parse, model):
        likelihood = 0.0
        for token, category, expr in zip(tokens, categories, exprs):
            # Retrieve relevant bootstrap distribution p(meaning | syntax).
            cat_lf_ngrams = lf_ngrams[category]
            for predicate in expr.predicates():
                if predicate.name in cat_lf_ngrams:
                    likelihood += np.log(cat_lf_ngrams[predicate.name])
        return likelihood

    return likelihood_fn
19274987b8ef5ace9f31638c81933bcab122d55f
3,635,084
def getTJstr(text, glyphs, simple, ordering):
    """ Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
        operator.

        Notes:
            The input string is converted to either 2 or 4 hex digits per
            character.
        Args:
            simple: no glyphs: 2-chars, use char codes as the glyph
                    glyphs: 2-chars, use glyphs instead of char codes
                            (Symbol, ZapfDingbats)
            not simple:  ordering < 0: 4-chars, use glyphs not char codes
                         ordering >=0: a CJK font! 4 chars, use char codes
                                       as glyphs
    """
    # Already in TJ form: pass through unchanged.
    if text.startswith("[<") and text.endswith(">]"):
        return text

    if not bool(text):
        return "[<>]"

    if simple:
        # Simple fonts: 2 hex digits per char; chars >= 256 degrade to 0xb7.
        if glyphs is None:
            # not Symbol, not ZapfDingbats: use char code
            codes = ("%02x" % ord(c) if ord(c) < 256 else "b7" for c in text)
        else:
            # Symbol or ZapfDingbats: use glyphs
            codes = ("%02x" % glyphs[ord(c)][0] if ord(c) < 256 else "b7"
                     for c in text)
    elif ordering < 0:
        # not a CJK font: use the glyphs, 4 hex digits each
        codes = ("%04x" % glyphs[ord(c)][0] for c in text)
    else:
        # CJK: use the char codes directly
        codes = ("%04x" % ord(c) for c in text)

    return "[<" + "".join(codes) + ">]"
bd5b7abd1b5ceb0b273e99e30ecc248482ed7476
3,635,085
import logging
import sys


def create_dest_group(glab, dest, src_group_tree):
    """Create destination group structure.

    Creates `dest` as a new top-level GitLab group, then recreates the
    source group hierarchy underneath it, recording the newly created group
    ids back onto the source tree nodes.
    """
    # Sub-groups cannot be used as the destination root.
    if '/' in dest:
        logging.error('SubGroup as destination not supported "%s"', dest)
        sys.exit(1)

    dest_group_tree = Tree()
    logging.info('Attempting to create destination group at %s/%s',
                 glab.url, dest)
    try:
        top_level_group = glab.groups.create({'name': dest, 'path': dest})
        logging.info('Group Created at %s/%s',
                     glab.url, top_level_group.full_path)
        dest_group_tree = add_new_group(top_level_group)
        # For root node
        src_group_tree.update_node(src_group_tree.root,
                                   data=GitLabInfo(new_id=top_level_group.id))
    except gitlab.exceptions.GitlabCreateError as err:
        logging.error('Group Cannot be created: %s', err)
        sys.exit(1)
    except:
        # Anything unexpected is logged and re-raised, not swallowed.
        logging.debug('An error occurred')
        raise

    # Walk the source tree top-down; parents are guaranteed to be created
    # (and to carry their new_id) before their children are visited.
    for grp in src_group_tree.expand_tree():
        if src_group_tree.level(grp) == 0:
            continue
        new_parent = src_group_tree.get_node(
            src_group_tree.parent(grp).identifier).data.new_id
        logging.debug('Creating Group "%s" with Path "%s" and Parent ID "%s"',
                      src_group_tree.get_node(grp).data.name,
                      src_group_tree.get_node(grp).data.path,
                      new_parent)
        new_group = glab.groups.create(
            {'name': src_group_tree.get_node(grp).data.name,
             'path': src_group_tree.get_node(grp).data.path,
             'parent_id': new_parent,
             'description': src_group_tree.get_node(grp).data.description})
        # Record the freshly created id on the source node for later lookups.
        src_group_tree.update_node(grp, data=GitLabInfo(new_id=new_group.id))
        dest_group_tree.paste(new_parent, add_new_group(new_group))

    logging.info('Created %s sub-groups', len(dest_group_tree)-1)
    print("Destination Groups[group_id]:")
    dest_group_tree.show(idhidden=False)
    return dest_group_tree
20f2839de1c86eed999a3feabc6618a61fc3c843
3,635,086
def datetime_to_jd(date):
    """
    Convert a `datetime.datetime` object to Julian Day.

    Parameters
    ----------
    date : `datetime.datetime` instance

    Returns
    -------
    jd : float
        Julian day.

    Examples
    --------
    >>> d = datetime.datetime(1985,2,17,6)
    >>> d
    datetime.datetime(1985, 2, 17, 6, 0)
    >>> jdutil.datetime_to_jd(d)
    2446113.75
    """
    # Fold the time-of-day into a fractional day before converting.
    fractional_day = date.day + hmsm_to_days(date.hour, date.minute,
                                             date.second, date.microsecond)
    return date_to_jd(date.year, date.month, fractional_day)
6a149aba3719eaf4e0a81e2372192b9d17676b1f
3,635,087
def parse_channel_mention(part, message):
    """
    If the message's given part is a channel mention, returns the respective
    channel.

    Parameters
    ----------
    part : `str`
        A part of a message's content.
    message : ``Message``
        The respective message of the given content part.

    Returns
    -------
    channel : `None` or ``ChannelBase`` instance
    """
    mentioned_channels = message.channel_mentions
    if mentioned_channels is None:
        return None

    match = CHANNEL_MENTION_RP.fullmatch(part)
    if match is None:
        return None

    wanted_id = int(match.group(1))
    for channel in mentioned_channels:
        if channel.id == wanted_id:
            return channel
    return None
38b3bbe5f7a918210a4ddc4005d61c651687ab55
3,635,088
def keep_alive(headers, version, method):
    """ return True if the connection should be kept alive"""
    # Lower-cased tokens of every Connection header value.
    conn = set((v.lower() for v in headers.get_all('connection', ())))
    if "close" in conn:
        return False
    elif 'upgrade' in conn:
        # Normalise the header for upgraded connections.
        headers['connection'] = 'upgrade'
        return True
    elif "keep-alive" in conn:
        # NOTE(review): source formatting was ambiguous here; this nesting
        # (drop the redundant header on HTTP/1.1, keep alive either way)
        # is the reading consistent with a total True/False result —
        # confirm against the original.
        if version == HTTP_1_1:
            headers.pop('connection')
        return True
    elif version == HTTP_1_1:
        # HTTP/1.1 defaults to persistent connections.
        return True
    elif method == 'CONNECT':
        return True
    else:
        return False
da8e9a5908d19a5bdb5ba2915abc5f85e1e3c553
3,635,089
def apply_dies_factory(have_dies, jones_type): """ Factory function returning a function that applies Direction Independent Effects """ # We always "have visibilities", (the output array) jones_mul = jones_mul_factory(have_dies, True, jones_type, False) if have_dies: def apply_dies(time, ant1, ant2, die1_jones, die2_jones, tmin, dies_out): # Iterate over rows for r in range(time.shape[0]): ti = time[r] - tmin a1 = ant1[r] a2 = ant2[r] # Iterate over channels for c in range(dies_out.shape[1]): jones_mul(die1_jones[ti, a1, c], dies_out[r, c], die2_jones[ti, a2, c], dies_out[r, c]) else: # noop def apply_dies(time, ant1, ant2, die1_jones, die2_jones, tmin, dies_out): pass return njit(nogil=True, inline="always")(apply_dies)
460477cc5cd6b195f331c8db2dfd0d0ec9080750
3,635,090
def check_skip(timestamp, filename):
    """
    Checks if a timestamp has been given and whether the timestamp
    corresponds to the given filename. Returns True if this condition is
    met and False otherwise.
    """
    # Fix: the original had no branch for an empty timestamp and implicitly
    # returned None there, despite documenting a boolean result.
    return bool(timestamp) and timestamp not in filename
738043fb554f20b79fa3ac8861f9e60d0d697e5e
3,635,091
from typing import Optional
from typing import Union
import re


def extract_emoji(string: str, bot: Bot) -> Optional[Union[str, Emoji]]:
    """
    Extracts a single emoji or custom emote from the input string.

    :param string: Input string
    :param bot: Discord bot object
    :return: Either a string containing a unicode emoji, or a Discord emoji
        object representing a custom emote, or None if no emojis are found
    """
    emote_repr = string.strip()
    # Plain unicode emoji: return it unchanged.
    if emote_repr in emoji.UNICODE_EMOJI:
        return emote_repr

    # Custom emote: either a full mention matching EMOTE_REGEX (id in
    # group 1) or a bare numeric emote id.
    matches = re.search(EMOTE_REGEX, emote_repr)
    if matches:
        emote_id = int(matches.group(1))
    elif emote_repr.isnumeric():
        emote_id = int(emote_repr)
    else:
        return None

    # This is not a coroutine
    emote = bot.get_emoji(emote_id)
    return emote
00d2c3614b8f4e9b8292fec781aefa231d9d4e83
3,635,092
def get_bridge(driver):
    """Call this method to get a Bridge instead of a standalone accessory."""
    bridge = Bridge(driver, 'Bridge')

    # Two GPIO-backed light accessories exposed through the one bridge.
    light_1 = LightBulb(driver, 'Red Light', pin=LedPin1)
    light_2 = LightBulb(driver, 'Blue Light', pin=LedPin2)
    bridge.add_accessory(light_1)
    bridge.add_accessory(light_2)

    #temp = TemperatureSensor(driver, 'Thermometer', pin=TempPin)
    #bridge.add_accessory(temp)

    return bridge
e3ee071661e4cc19da8ec5f7b7b7b5017c2f76d4
3,635,093
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    batch_size = real_samples.size(0)

    # Random per-sample mixing coefficients in [0, 1)
    mix = Tensor(np.random.random((batch_size, 3, 1, 1)))

    # Points interpolated between real and fake samples; gradients needed
    mixed = (mix * real_samples + (1 - mix) * fake_samples).requires_grad_(True)
    critic_out = D(mixed)

    # Target gradient of ones for every critic output
    grad_target = Variable(
        Tensor(real_samples.shape[0], critic_out.shape[1]).fill_(1.0),
        requires_grad=False,
    )

    # Gradient of the critic output w.r.t. the interpolated samples
    grads = autograd.grad(
        outputs=critic_out,
        inputs=mixed,
        grad_outputs=grad_target,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    flat = grads.view(grads.size(0), -1)

    # Penalise deviation of the gradient L2-norm from 1
    return ((flat.norm(2, dim=1) - 1) ** 2).mean()
724da4c0d18996e0813e4cea6cbb33d1f3a316fc
3,635,094
def is_tabledap(url):
    """
    Identify a dataset as an ERDDAP TableDAP dataset.

    Parameters
    ----------
    url (str) : URL to dataset

    Returns
    -------
    bool
    """
    # TableDAP dataset URLs contain the "tabledap" path segment
    return url.find("tabledap") != -1
9f4650bc3a3bc0794637b042c1779a84d7c02779
3,635,095
import argparse
import logging


def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(description="compare results from benchmarks")
    parser.add_argument(
        "--version",
        action="version",
        version="touchstone {ver}".format(ver=__version__),
    )
    # Positional arguments: what to compare, where the data lives, and
    # which harness produced it.
    parser.add_argument(
        dest="benchmark",
        help="which type of benchmark to compare",
        type=str,
        choices=["uperf", "ycsb", "pgbench", "vegeta", "mb", "kubeburner", "scaledata"],
        metavar="benchmark",
    )
    parser.add_argument(
        dest="database",
        help="the type of database data is stored in",
        type=str,
        choices=["elasticsearch"],
        metavar="database",
    )
    parser.add_argument(
        dest="harness",
        help="the test harness that was used to run the benchmark",
        type=str,
        choices=["ripsaw"],
        metavar="harness",
    )
    parser.add_argument(
        "--id",
        "--identifier-key",
        dest="identifier",
        help="identifier key name(default: uuid)",
        type=str,
        metavar="identifier",
        default="uuid",
    )
    parser.add_argument(
        "-u",
        "--uuid",
        dest="uuid",
        help="identifier values to fetch results and compare",
        type=str,
        nargs="+",
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output",
        help="How should touchstone output the result",
        type=str,
        choices=["json", "yaml", "csv"],
    )
    parser.add_argument(
        "--metadata-config",
        dest="metadata_config",
        help="Metadata configuration file",
        type=argparse.FileType("r", encoding="utf-8"),
    )
    parser.add_argument(
        "--config",
        dest="config",
        help="Touchstone configuration file",
        type=argparse.FileType("r", encoding="utf-8"),
    )
    parser.add_argument(
        "--output-file",
        dest="output_file",
        help="Redirect output of json/csv/yaml to file",
        type=argparse.FileType("w"),
    )
    parser.add_argument(
        "-url",
        "--connection-url",
        dest="conn_url",
        help="the database connection strings in the same order as the uuids",
        type=str,
        nargs="+",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="loglevel",
        # Bug fix: this help string literal was broken across two source
        # lines (unterminated string); rejoined into a single literal.
        help="set loglevel to INFO",
        action="store_const",
        const=logging.INFO,
    )
    parser.add_argument(
        "-vv",
        "--very-verbose",
        dest="loglevel",
        help="set loglevel to DEBUG",
        action="store_const",
        const=logging.DEBUG,
    )
    return parser.parse_args(args)
4a579ca5b25ae85ad836d76a7dc293ba53945801
3,635,096
def generate_timestamp(time_to_use, stamp_type="default"):
    """Generate a text timestamp in ``YYYYMMDD-HHMMSS`` form.

    ``stamp_type`` is accepted for interface compatibility but is
    currently unused by the implementation.
    """
    return time_to_use.strftime("%Y%m%d-%H%M%S")
1b386ed7375b3158867d980796c764a627c68338
3,635,097
def scr2idb(*args):
    """scr2idb(char name) -> char

    SWIG-generated wrapper that forwards directly to the underlying
    ``_idaapi.scr2idb`` C function (IDA Pro API); presumably converts a
    screen/script character encoding to the IDB encoding — confirm
    against the IDA SDK documentation.
    """
    return _idaapi.scr2idb(*args)
0ec28ae35176b4f28c755723a063c8ddadb583df
3,635,098
import torch


def compute_local_nre_maps(
    source_descriptors: torch.Tensor,
    target_features: torch.Tensor,
    prior_target_keypoints: torch.Tensor,
    norm_coarse: torch.Tensor,
    window_size: int,
):
    """Compute dense local correspondence maps.

    Args:
        * source_descriptors: The interpolated source keypoint descriptors.
        * target_features: The dense feature map of the target image;
          its last two dimensions are treated as spatial (H, W).
        * prior_target_keypoints: The prior keypoint locations, in target
          feature space.
        * norm_coarse: The norm_coarse normalizing factors.
        * window_size: The size of the window around the prior keypoint
          locations, in target feature space.

    Returns:
        * maps: The sparse correspondence maps
        * num_non_zero: The number of non-zero values per maps
    """
    # One descriptor and one normalizing factor per prior keypoint.
    assert len(source_descriptors) == len(prior_target_keypoints)
    assert len(norm_coarse) == len(prior_target_keypoints)

    # Crop a window_size patch of target features around each prior
    # keypoint; also returns where each patch starts in the full map
    # (only the first two of the returned values are used here).
    local_target_features, patch_origins = extract_local_features(
        target_features, prior_target_keypoints, window_size
    )[:2]

    # Correlate each source descriptor against its local patch.
    maps = correlate_local(source_descriptors, local_target_features)

    # Normalize to a local distribution, then rescale by the per-keypoint
    # coarse norm and the window size — presumably the NRE (negative
    # relative entropy / neg-log-likelihood) weighting; confirm against
    # the paper/model this implements.
    maps = softmax(maps) * norm_coarse[..., None, None] / window_size

    # Negative log converts probabilities to costs (neg_() is in-place,
    # applied after log); truncate drops values beyond the cutoff.
    maps = truncate(log(maps).neg_(), target_features)

    # Re-embed the local windows into full-map coordinates as sparse maps.
    maps, num_non_zero = offset_and_sparsify(
        maps, patch_origins, list(target_features.shape[-2:])
    )

    return maps, num_non_zero
281eb31981b81e9a9d4053786a22256ee182c7b4
3,635,099