content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
from typing import List
import sys


def read_zones() -> List[str]:
    """Read the list of zone_names from the sys.stdin."""
    zones: List[str] = []
    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        if line.startswith('#'):
            continue
        zones.append(line)
    return zones
b13c39e87167d54ca731f7b1c19b01cdca6f2943
29,500
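A hedged usage sketch for the read_zones record above: it assumes the function is in scope and swaps sys.stdin for an in-memory io.StringIO stream so the blank-line and comment filtering can be exercised without a real pipe.

import io
import sys

# Feed a fake stdin; comments and blank lines should be dropped.
sys.stdin = io.StringIO("# zones file\n\nexample.com.\nexample.org.\n")
print(read_zones())  # expected: ['example.com.', 'example.org.']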
def get_username(strategy, details, backend, user=None, *args, **kwargs):
    """Resolve valid username for use in new account"""
    if user:
        return None

    settings = strategy.request.settings

    username = perpare_username(details.get("username", ""))
    full_name = perpare_username(details.get("full_name", ""))
    first_name = perpare_username(details.get("first_name", ""))
    last_name = perpare_username(details.get("last_name", ""))

    names_to_try = [username, first_name]

    if username:
        names_to_try.append(username)

    if first_name:
        names_to_try.append(first_name)

    if last_name:
        # if first name is taken, try first name + first char of last name
        names_to_try.append(first_name + last_name[0])

    if full_name:
        names_to_try.append(full_name)

    username_length_max = settings.username_length_max
    for name in names_to_try:
        if len(name) > username_length_max:
            names_to_try.append(name[:username_length_max])

    for name in filter(bool, names_to_try):
        try:
            validate_username(settings, name)
            return {"clean_username": name}
        except ValidationError:
            pass
728a0aadf9aa58369fcf791d8cccb0d9214a4583
29,501
def not_at_max_message_length():
    """ Indicates if we have room left in message """
    global message
    return message.count(SPACE) < WORD_LIMIT
0bd7d758c80ed272de0571b4f651fe6a24c39b58
29,502
def _create_jwt(
    user,
    scopes=None,
    expires_in=None,
    is_restricted=False,
    filters=None,
    aud=None,
    additional_claims=None,
    use_asymmetric_key=None,
    secret=None,
):
    """
    Returns an encoded JWT (string).

    Arguments:
        user (User): User for which to generate the JWT.
        scopes (list): Optional. Scopes that limit access to the token bearer
            and controls which optional claims are included in the token.
            Defaults to ['email', 'profile'].
        expires_in (int): Optional. Overrides time to token expiry, specified
            in seconds.
        filters (list): Optional. Filters to include in the JWT.
        is_restricted (Boolean): Whether the client to whom the JWT is issued
            is restricted.

    Deprecated Arguments (to be removed):
        aud (string): Optional. Overrides configured JWT audience claim.
        additional_claims (dict): Optional. Additional claims to include in
            the token.
        use_asymmetric_key (Boolean): Optional. Whether the JWT should be
            signed with this app's private key. If not provided, defaults to
            whether the OAuth client is restricted.
        secret (string): Overrides configured JWT secret (signing) key.
    """
    use_asymmetric_key = _get_use_asymmetric_key_value(is_restricted, use_asymmetric_key)

    # Default scopes should only contain non-privileged data.
    # Do not be misled by the fact that `email` and `profile` are default scopes. They
    # were included for legacy compatibility, even though they contain privileged data.
    scopes = scopes or ['email', 'profile']
    iat, exp = _compute_time_fields(expires_in)

    payload = {
        # TODO (ARCH-204) Consider getting rid of the 'aud' claim since we don't use it.
        'aud': aud if aud else settings.JWT_AUTH['JWT_AUDIENCE'],
        'exp': exp,
        'iat': iat,
        'iss': settings.JWT_AUTH['JWT_ISSUER'],
        'preferred_username': user.username,
        'scopes': scopes,
        'version': settings.JWT_AUTH['JWT_SUPPORTED_VERSION'],
        'sub': anonymous_id_for_user(user, None),
        'filters': filters or [],
        'is_restricted': is_restricted,
        'email_verified': user.is_active,
    }
    payload.update(additional_claims or {})
    _update_from_additional_handlers(payload, user, scopes)
    role_claims = create_role_auth_claim_for_user(user)
    if role_claims:
        payload['roles'] = role_claims
    return _encode_and_sign(payload, use_asymmetric_key, secret)
5a7e48630f54471be041e42c5666e511b3e445b3
29,503
from typing import Union
import os
import logging


def make_log_dir(
    _run,
    log_dir: str,
    log_level: Union[int, str],
) -> str:
    """Creates log directory and sets up symlink to Sacred logs.

    Args:
        log_dir: The directory to log to.
        log_level: The threshold of the logger. Either an integer level
            (10, 20, ...), a string of digits ('10', '20'), or a string of
            the designated level ('DEBUG', 'INFO', ...).

    Returns:
        The `log_dir`. This avoids the caller needing to capture this
        argument.
    """
    os.makedirs(log_dir, exist_ok=True)
    # convert strings of digits to numbers; but leave levels like 'INFO' unmodified
    try:
        log_level = int(log_level)
    except ValueError:
        pass
    logging.basicConfig(level=log_level)
    logger.info("Logging to %s", log_dir)
    sacred_util.build_sacred_symlink(log_dir, _run)
    return log_dir
c275feaeef3627adc3984740da2d05cfeb4deaf6
29,504
from typing import Iterable
from pathlib import Path
import os


def check_files_exist(file_list: Iterable[str]) -> list[str]:
    """Check that all files exist; return a sorted list of the missing ones."""
    file_errors: list[str] = []
    cwd = Path(os.getcwd())
    for file_ in file_list:
        if cwd.joinpath(file_).is_file() is False:
            file_errors.append(file_)
    return sorted(file_errors)
20fc5caba0fe8ad173020ce18eea109c59425243
29,505
import math


def _fill_arc_trigonometry_array():
    """
    Utility function to fill the trigonometry array used by some
    arc* functions (arcsin, arccos, ...)

    Returns
    -------
    The array filled with useful angle measures
    """
    arc_trig_array = [
        -1,
        -math.pi / 4,  # -45°
        -math.pi / 6,  # -30°
        0,             # 0°
        math.pi / 6,   # 30°
        math.pi / 4,   # 45°
        1
    ]
    return arc_trig_array
6b5c39dbacf028d84a397e2911f9c9b7241fe0f4
29,506
from typing import Optional


def get_default_tag_to_block_ctor(
    tag_name: str
) -> Optional[CurvatureBlockCtor]:
    """Returns the default curvature block constructor for the given tag name."""
    global _DEFAULT_TAG_TO_BLOCK_CTOR
    return _DEFAULT_TAG_TO_BLOCK_CTOR.get(tag_name)
33d002ef206aa13c963b951325a92f49c86eb202
29,507
def panel_list_tarefas(context, tarefas, comp=True, aluno=True):
    """Render a list of tasks (tarefas) from a list of tasks."""
    tarefas_c = []
    for tarefa in tarefas:
        tarefas_c.append((tarefa, None))
    context.update({'tarefas': tarefas_c, 'comp': comp})
    return context
3de659af41a6d7550104321640526f1970fd415c
29,508
import datetime

from dateutil import rrule


def get_bdy_times(init_time, fcst_hours, bdy_interval):
    """
    Returns a list of datetime objects representing the times of boundary
    conditions.

    Reads the init_time, fcst_hours and bdy_interval from config and returns
    a list of datetime objects representing the boundary condition times.
    """
    logger = get_logger()
    logger.debug("get_bdy_times called")
    hour = datetime.timedelta(0, 60 * 60)
    end_time = init_time + datetime.timedelta(0, fcst_hours * 60 * 60)

    #
    # Get the range of files representing boundary condition files.
    # Because rrule is not inclusive, we add one hour on to the end
    # time to make it so.
    #
    freq = rrule.HOURLY
    rec = rrule.rrule(freq, dtstart=init_time, until=end_time + hour,
                      interval=bdy_interval)
    bdy_times = list(rec)
    return bdy_times
b84ffca80240bb2291bc4c27d58631e41c06d9c0
29,509
def remove_stopwords(label):
    """
    Remove stopwords from a single label.
    """
    tokenized = label.split()
    # Keep removing stopwords until a word doesn't match.
    for i, word in enumerate(tokenized):
        if word not in STOPWORDS:  # and len(word) > 1:
            return ' '.join(tokenized[i:])
    # For the empty string.
    return ''
f15e50e5e11ecc6a0abca6b68219789e72070a69
29,510
import tensorflow as tf


def single_device_training_net(data_tensors, train_net_func):
    """
    generate the training net for a single device
    :param data_tensors: [ [batch_size, ...], [batch_size, ...], ... ]
    :param train_net_func: loss, display_outputs, first_device_output =
        train_net_func(data_tensors, default_reuse)
        Remark: loss can be a list or a dict, then the return of this
        function can be organized accordingly
    :return output_entry: a class instance with fields for a single device
    :return unique_variable_list: variable_list on the first/only device
    """
    old_variable_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    output_struct, new_variable_list = _single_device_training_net(
        data_tensors, train_net_func)
    unique_variable_list = list(set(new_variable_list) - set(old_variable_list))
    return output_struct, unique_variable_list
d94e7b552f3612d2f0fd16973612adea77de0bd0
29,511
import requests
import os
import json
import time


def get_grafana_session(config):
    """
    Connect to grafana and get a session. We try for 60 seconds.
    """
    session = requests.Session()
    tries = 30
    url = get_grafana_url(config)
    user = config['grafana']['user']
    password = config['grafana']['password']
    # Wait for grafana to be ready for 60 seconds
    while tries:
        try:
            session.post(
                os.path.join(url, 'login'),
                data=json.dumps({
                    'user': user,
                    'email': '',
                    'password': password
                }),
                headers={'content-type': 'application/json'})
            break
        except requests.exceptions.ConnectionError as connection_error:
            tries -= 1
            time.sleep(2)
            if tries == 0:
                print(connection_error)
                exit(1)
    return session
f0353f8e7be3db26e01bf4260d1f50dd23185f40
29,512
def ALMACopyTable(inObj, outObj, inTab, err,
                  inVer=1, outVer=0, logfile='', check=False, debug=False):
    """
    Copy AIPS Table

    Returns task error code, 0=OK, else failed
    * inObj    = Input Object (UV or Image)
    * outObj   = Output object
    * inTab    = Table type, e.g. "AIPS AN"
    * err      = Obit error/message stack
    * inVer    = input version
    * outVer   = output version
    * logfile  = logfile for messages
    * check    = Only check script, do not execute tasks
    * debug    = Run tasks debug, show input
    """
    ################################################################
    mess = "Copy "+inTab+" Table "+str(inVer)+" to "+str(outVer)
    printMess(mess, logfile)
    taco = ObitTask.ObitTask("TabCopy")
    try:
        taco.userno = OSystem.PGetAIPSuser()   # This sometimes gets lost
    except Exception as exception:
        pass
    if not check:
        setname(inObj, taco)
        setoname(outObj, taco)
    taco.inTab = inTab
    taco.inVer = inVer
    taco.outVer = outVer
    taco.taskLog = logfile
    if debug:
        taco.debug = debug
        taco.i
    # Trap failure
    try:
        if not check:
            taco.g
    except Exception as exception:
        print(exception)
        mess = "Copy of "+inTab+" table Failed retCode="+str(taco.retCode)
        printMess(mess, logfile)
        return 1
    else:
        pass
    return 0
    # end ALMACopyTable
b2fd0e0a0de0a2ff792719624441079cf0663c8a
29,513
from http import HTTPStatus
from os.path import join

import requests


def get_commit_date(component_path, repo_name, bug_id, version):
    """ Get date of triggering commit """
    bug_info_path = join(component_path,
                         DependencyAnalyzerConstants.PROJECTS_DIR,
                         repo_name,
                         DependencyAnalyzerConstants.BUGS_DIR,
                         str(bug_id),
                         DependencyAnalyzerConstants.BUG_INFO_FILENM)
    commit = None
    if version == 0:
        commit = get_value_from_info_file(
            bug_info_path, DependencyAnalyzerConstants.INFO_BUGGY_COMMIT)
    elif version == 1:
        commit = get_value_from_info_file(
            bug_info_path, DependencyAnalyzerConstants.INFO_FIXED_COMMIT)
    if not commit:
        return None
    commit = commit.replace(DependencyAnalyzerConstants.CHAR_DOUBLE_QUOTE,
                            DependencyAnalyzerConstants.CHAR_EMPTY)
    proj_info_path = join(
        component_path, DependencyAnalyzerConstants.PROJECTS_DIR, repo_name,
        DependencyAnalyzerConstants.PROJECT_INFO_FILE_NAME)
    proj_slug = get_value_from_info_file(
        proj_info_path, DependencyAnalyzerConstants.INFO_GIT_URL)
    if not proj_slug:
        return None
    proj_slug = proj_slug.replace(DependencyAnalyzerConstants.CHAR_DOUBLE_QUOTE,
                                  DependencyAnalyzerConstants.CHAR_EMPTY)
    proj_slug = proj_slug.replace(
        DependencyAnalyzerConstants.GITHUB_URL,
        DependencyAnalyzerConstants.CHAR_EMPTY)
    git_commit_api_url = DependencyAnalyzerConstants.GITHUB_COMMITS_API_URL.format(
        proj_slug, commit)
    git_commit_details = requests.get(git_commit_api_url, auth=(
        DependencyAnalyzerConstants.GITHUB_USER_NAME,
        DependencyAnalyzerConstants.GITHUB_AUTH_TOKEN))
    if git_commit_details.status_code != HTTPStatus.OK:
        return None
    git_commit_details = git_commit_details.json()
    commit_key = DependencyAnalyzerConstants.GITHUB_JSON_COMMIT_KEY
    if (commit_key in git_commit_details
            and 'author' in git_commit_details[commit_key]
            and 'date' in git_commit_details[commit_key]['author']):
        return git_commit_details[commit_key]['author']['date']
    return None
2bd26012204ea8cd14cd71c8646a0cf941f8f134
29,514
import numpy as onp
from scipy.stats import beta


def clopper_pearson(k, n, alpha):
    """Confidence intervals for a binomial distribution of k expected
    successes on n trials:
    http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval

    Parameters
    ----------
    k : array_like
        number of successes
    n : array_like
        number of trials
    alpha : float
        confidence level

    Returns
    -------
    lo, hi : array_like
        lower and upper bounds on the expected number of successes
    """
    lo = beta.ppf(alpha/2, k, n-k+1)
    lo[onp.isnan(lo)] = 0   # hack to remove NaNs where we only have 0 samples
    hi = beta.ppf(1 - alpha/2, k+1, n-k)
    hi[onp.isnan(hi)] = 1   # hack to remove NaNs where the marginal is 1
    return lo, hi
c10db17a4fb75cc0d7304aceacde9cf716a5cb77
29,515
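A quick sanity check of the clopper_pearson record above, assuming the function is in scope; alpha=0.05 gives a 95% interval, and the returned bounds should bracket the observed proportion elementwise.

import numpy as onp

k = onp.array([0., 5., 10.])
n = onp.array([10., 10., 10.])
lo, hi = clopper_pearson(k, n, alpha=0.05)
# The NaN patch-ups handle the k == 0 and k == n edge cases.
assert onp.all(lo <= k / n) and onp.all(k / n <= hi)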
def add_link(news_list: list) -> list:
    """
    Description: Function to remove the [readmore] tag and add the url as a
    marked-up link for the 'content' key in the 'articles' dictionary.
    Arguments: news_list {list} : list containing the news articles dictionaries
    Returns: news_list {list} : list containing the news articles dictionaries
    """
    x = 0
    for i in news_list:
        # retrieves the content to be changed
        content = news_list[x]['content']
        # retrieves the url to be added to content
        url = news_list[x]['url']
        size = len(content)
        # removes [readmore] and adds the link to the webpage in markup
        content = content[:size-13].replace('[', '') + \
            Markup(f"<a href='{url}'>Read More</a>")
        # adds the new content
        news_list[x]['content'] = content
        x = x + 1
    return news_list
f1339ddb8854800ae241b7cbb0badc6654c30696
29,516
from numpy import array, sign


def taitnumber(c):
    """
    Return Tait number from signed edge list of Tait graph.
    """
    c = array(c)   # in case type(c) != ndarray
    tau = sum(sign(c[:, 0]))
    return tau
b30e3d294ae4af80e5d09b7c5de0a6a0682d6d27
29,517
import numpy as np


def _difference_map(image, color_axis):
    """Difference map of the image.

    Approximate derivatives of the function image[c, :, :]
    (e.g. PyTorch) or image[:, :, c] (e.g. Keras).

    dfdx, dfdy = difference_map(image)

    In:
        image: numpy.ndarray
            of shape C x h x w or h x w x C, with C = 1 or C = 3
            (color channels), h, w >= 3, and [type] is 'Float' or 'Double'.
            Contains the values of functions f_b: R ^ 2 -> R ^ C,
            b = 1, ..., B, on the grid {0, ..., h - 1} x {0, ..., w - 1}.

    Out:
        dfdx: numpy.ndarray
        dfdy: numpy.ndarray
            of shape C x h x w or h x w x C contain the x and y derivatives
            of f at the points on the grid, approximated by central
            differences (except on boundaries):
            For c = 0, ... , C, i = 1, ..., h - 2, j = 1, ..., w - 2,
            e.g. for shape = c x h x w:
            dfdx[c, i, j] = (image[c, i, j + 1] - image[c, i, j - 1]) / 2
            dfdy[c, i, j] = (image[c, i + 1, j] - image[c, i - 1, j]) / 2

    positive x-direction is along rows from left to right.
    positive y-direction is along columns from above to below.
    """
    if color_axis == 2:
        image = _transpose_image(image)

    # Derivative in x direction (rows from left to right)
    dfdx = np.zeros_like(image)
    # forward difference in first column
    dfdx[:, :, 0] = image[:, :, 1] - image[:, :, 0]
    # backwards difference in last column
    dfdx[:, :, -1] = image[:, :, -1] - image[:, :, -2]
    # central difference elsewhere
    dfdx[:, :, 1:-1] = 0.5 * (image[:, :, 2:] - image[:, :, :-2])

    # Derivative in y direction (columns from above to below)
    dfdy = np.zeros_like(image)
    # forward difference in first row
    dfdy[:, 0, :] = image[:, 1, :] - image[:, 0, :]
    # backwards difference in last row
    dfdy[:, -1, :] = image[:, -1, :] - image[:, -2, :]
    # central difference elsewhere
    dfdy[:, 1:-1, :] = 0.5 * (image[:, 2:, :] - image[:, :-2, :])

    return dfdx, dfdy
deff16dbe73005d52444babf05857c2cfea25e0b
29,518
import math

import numpy as np


def force_grid(force_parameters, position_points, velocity_min, velocity_max):
    """Calculates the force on a grid of points in phase space."""
    velocity_min_index = velocity_index(velocity_min)
    velocity_max_index = velocity_index(velocity_max)
    spacing = 2 * math.pi / position_points
    force = np.zeros((velocity_max_index - velocity_min_index,
                      position_points + 1))
    for vel_index in range(velocity_min_index, velocity_max_index):
        for position_index in range(position_points + 1):
            position = spacing * position_index
            features = fourier_basis(position)
            force[vel_index - velocity_min_index][position_index] = (
                force_parameters[vel_index] @ features)
    return force
d8d74604c8e313904f97e97364778b0db8db801c
29,519
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import convolve


def calc_coordination(mysupport, debugging=0):
    """Calculate the coordination number of the support using a 3x3x3 kernel."""
    nbz, nby, nbx = mysupport.shape

    mykernel = np.ones((3, 3, 3))
    mycoord = np.rint(convolve(mysupport, mykernel, mode="same"))
    mycoord = mycoord.astype(int)

    if debugging == 1:
        plt.figure(figsize=(18, 15))
        plt.subplot(2, 2, 1)
        plt.imshow(mycoord[:, :, nbx // 2])
        plt.colorbar()
        plt.axis("scaled")
        plt.title("Coordination matrix in middle slice in YZ")
        plt.subplot(2, 2, 2)
        plt.imshow(mycoord[:, nby // 2, :])
        plt.colorbar()
        plt.title("Coordination matrix in middle slice in XZ")
        plt.axis("scaled")
        plt.subplot(2, 2, 3)
        plt.imshow(mycoord[nbz // 2, :, :])
        plt.gca().invert_yaxis()
        plt.colorbar()
        plt.title("Coordination matrix in middle slice in XY")
        plt.axis("scaled")
        plt.pause(0.1)
    return mycoord
827d36ee297ead88e885dff086323261a25f97f3
29,520
def valueFromMapping(procurement, subcontract, grant, subgrant, mapping):
    """We configure mappings between FSRS field names and our needs above.
    This function uses that config to derive a value from the provided
    grant/subgrant"""
    subaward = subcontract or subgrant
    if mapping is None:
        return ''
    elif isinstance(mapping, str):
        return getattr(subaward, mapping)
    elif isinstance(mapping, tuple) and subcontract:
        return valueFromMapping(procurement, subcontract, grant, subgrant,
                                mapping[0])
    elif isinstance(mapping, tuple) and subgrant:
        return valueFromMapping(procurement, subcontract, grant, subgrant,
                                mapping[1])
    else:
        raise ValueError("Unknown mapping type: {}".format(mapping))
1bf2dda830183d1c8289e957b83b1c0d01619160
29,521
def get_cards_in_hand_values_list(player):
    """Gets all the cards in a player's hand and returns them as a values list"""
    return list(
        Card.objects.filter(
            cardgameplayer__player=player,
            cardgameplayer__status=CardGamePlayer.HAND,
        ).values('pk', 'name', 'text')
    )
474ac071950857783dfd76b50ae08483a03fc8bc
29,522
def get_dGdE(fp, tau_E, a_E, theta_E, wEE, wEI, I_ext_E, **other_pars):
    """
    Compute dGdE

    Args:
        fp : fixed point (E, I), array
        Other arguments are parameters of the Wilson-Cowan model

    Returns:
        dGdrE : the J[0, 0] entry of the 2x2 Jacobian matrix
    """
    rE, rI = fp
    # Calculate the J[0, 0] entry
    dGdrE = (-1 + wEE * dF(wEE * rE - wEI * rI + I_ext_E,
                           a_E, theta_E)) / tau_E
    return dGdrE
9a53cc9b0cadea8f8884b64d687a2397c0a973a7
29,523
def convert_data_to_ints(data, vocab2int, word_count, unk_count, eos=True):
    """
    Converts the words in the data into their corresponding integer values.

    Input:
        data: a list of texts in the corpus
        vocab2int: conversion dictionary
        word_count: an integer to count the words in the dataset
        unk_count: an integer to count the <UNK> tokens in the dataset
        eos: boolean whether to append <EOS> token at the end or not
             (default true)

    Returns:
        converted_data: a list of corpus texts converted to integers
        word_count: updated word count
        unk_count: updated unk_count
    """
    converted_data = []
    for text in data:
        converted_text = []
        for token in text.split():
            word_count += 1
            if token in vocab2int:
                # Convert each token in the paragraph to int and append it
                converted_text.append(vocab2int[token])
            else:
                # If it's not in the dictionary, use the int for <UNK> token instead
                converted_text.append(vocab2int['<UNK>'])
                unk_count += 1
        if eos:
            # Append <EOS> token if specified
            converted_text.append(vocab2int['<EOS>'])
        converted_data.append(converted_text)

    assert len(converted_data) == len(data)
    return converted_data, word_count, unk_count
c415aea164f99bc2a44d5098b6dbcc3d723697a6
29,524
import jax.numpy as jnp


def apply_objective_fn(state, obj_fn, precision, scalar_factor=None):
    """Applies a local ObjectiveFn to a state.

    This function should only be called inside a pmap, on a pmapped state.
    `obj_fn` will usually be the return value of
    `operators.gather_local_terms`. See the docstrings of
    `SevenDiscretedOperator` and `operators.gather_local_terms` for more
    information.

    Args:
        state: The probabilityfunction.
        obj_fn: The ObjectiveFn, given as a sequence of
            `SevenDiscretedOperator`s, that represent local terms collected
            together.
        precision: Jax matrix multiplication precision.
        scalar_factor: Optional; If `None`, return obj_fn|state>, otherwise
            return (1 + scalar_factor * obj_fn)|state>. `None` by default.

    Returns:
        Either (1 + scalar_factor * obj_fn)|state> or obj_fn|state>,
        depending on `scalar_factor`.
    """
    orig_shape = state.shape
    _, n_local_discretes = number_of_discretes(state)
    if scalar_factor is not None:
        result = state
    else:
        result = jnp.zeros_like(state)
    i = 0
    for n_term, term in enumerate(obj_fn):
        position_to_apply = i - term.left_pad
        if scalar_factor is not None:
            array = scalar_factor * term.array
        else:
            array = term.array
        result = result + _apply_building_block(
            state,
            array,
            position_to_apply,
            n_local_discretes,
            precision,
        ).reshape(result.shape)
        i += term.width
        if n_term < len(obj_fn) - 1:
            state, result, i = _apply_permutations(
                state,
                result,
                i=i,
                permutations=term.permutations_after,
            )
        else:
            # For the last term, avoid doing an unnecessary permutation on the
            # original state that is no longer needed.
            del state
            result, i = _apply_permutations(
                result,
                i=i,
                permutations=term.permutations_after,
            )
    return result.reshape(orig_shape)
c4fa72f84ce241aa765416fe21ff5426758d5303
29,525
import requests


def oauth_generate_token(
        consumer_key,
        consumer_secret,
        grant_type="client_credentials",
        env="sandbox"):
    """
    Authenticate your app and return an OAuth access token. This token gives
    you time bound access token to call allowed APIs.
    NOTE: The OAuth access token expires after an hour (3600 seconds), after
    which, you will need to generate another access token so you need to
    keep track of this.

    :param consumer_key:
    :param consumer_secret:
    :param grant_type:
    :param env:
    :return response:
    """
    url = urls.get_generate_token_url(env)
    try:
        req = requests.get(
            url,
            params=dict(grant_type=grant_type),
            auth=(consumer_key, consumer_secret))
    except Exception as e:
        logger.exception("Error in {} request. {}".format(url, str(e)))
        return None, None
    else:
        return req.json(), req.status_code
7ab44b7ba1eb569d0b498946e2936928612e3fa7
29,526
from typing import List
import shlex


def parse_quoted_string(string: str, preserve_quotes: bool) -> List[str]:
    """
    Parse a quoted string into a list of arguments
    :param string: the string being parsed
    :param preserve_quotes: if True, then quotes will not be stripped
    """
    if isinstance(string, list):
        # arguments are already a list, return the list we were passed
        lexed_arglist = string
    else:
        # Use shlex to split the command line into a list of arguments
        # based on shell rules
        lexed_arglist = shlex.split(string, posix=False)
    if not preserve_quotes:
        lexed_arglist = [utils.strip_quotes(arg) for arg in lexed_arglist]
    return lexed_arglist
6715778f5190445e74b8705542cbfdb1fe022ecc
29,527
def scalar(name, scalar_value):
    """Convert scalar data to protobuf format."""
    scalar = make_np(scalar_value)
    assert scalar.squeeze().ndim == 0, 'scalar should be 0D'
    scalar = float(scalar)
    metadata = SummaryMetadata(
        plugin_data=SummaryMetadata.PluginData(plugin_name='scalars'))
    return Summary(value=[
        Summary.Value(tag=name, simple_value=scalar, metadata=metadata)
    ])
e31046a00dc0e2ae6c33bd041b34652c08d2a439
29,528
def filter_citations_by_type(list_of_citations, violation_description):
    """Gets a list of the citations for a particular violation_description."""
    citations = []
    for citation in list_of_citations:
        filtered_citation = check_citation_type(citation, violation_description)
        if filtered_citation:
            citations.append(filtered_citation)
    return citations
398d7cbe43761070c8b5b9117478f6fe5c985a2c
29,529
def user_import_circular_database(
        injector, session, user_mock_circular_database) -> UserMockDataSource:
    """Return the circular data source and import its schema to the user's project."""
    facade = injector.get(DataSourceFacade)
    facade.import_schema(user_mock_circular_database.data_source)
    session.commit()
    return user_mock_circular_database
492b87c7cf8d5ef8306fb827620cba860677f5be
29,530
import torch


def loss_fn(model, data, marginal_prob_std, eps=1e-5):
    """The loss function for training score-based generative models.

    Args:
        model: A PyTorch model instance that represents a
            time-dependent score-based model.
        data: A mini-batch of training data.
        marginal_prob_std: A function that gives the standard deviation of
            the perturbation kernel.
        eps: A tolerance value for numerical stability.
    """
    node2graph = data.batch
    edge2graph = node2graph[data.edge_index[0]]
    graph_num = len(data.smiles)
    d = data.edge_length                                       # (num_edge, 1)
    random_t = torch.rand(graph_num, device=d.device) * (1. - eps) + eps  # (batch_size)
    z = torch.randn_like(d)                                    # (num_edge, 1)
    std = marginal_prob_std(random_t)[edge2graph]              # (num_edge)
    # std[:, None] broadcasts to (num_edge, 1), so perturbed_d has shape (num_edge, 1)
    perturbed_d = d + z * std[:, None]
    data.edge_length = perturbed_d
    score = model(data, random_t)
    loss = torch.mean((score[:, None] * std[:, None] + z) ** 2)
    return loss
f42dd43d1de865ec31c7702e747852a6df04e479
29,531
from functools import reduce
from operator import mul


def nCk(n, k):
    """ Combinations number """
    if n < 0:
        raise ValueError("Invalid value for n: %s" % n)
    if k < 0 or k > n:
        return 0
    if k in (0, n):
        return 1
    if k in (1, n - 1):
        return n
    low_min = 1
    low_max = min(n, k)
    high_min = max(1, n - k + 1)
    high_max = n
    return (reduce(mul, range(high_min, high_max + 1), 1)
            // reduce(mul, range(low_min, low_max + 1), 1))
9d84ba8fad27860f64980fb4165f72f0a7ec944c
29,532
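A cross-check of the nCk record above against the standard library, assuming the function is in scope; math.comb computes the same binomial coefficient.

import math

# The reduce-based product form should agree with math.comb everywhere.
for n in range(12):
    for k in range(n + 1):
        assert nCk(n, k) == math.comb(n, k), (n, k)
print("nCk agrees with math.comb")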
from flask import jsonify, request


def knowledge_extract_from_json():
    """
    Second step of knowledge extraction from semi-structured data:
    json <-> data table mapping

    Returns:
    """
    data = request.json
    result = extract_data_from_json(data)
    return jsonify({"data": result})
361d38891a8d90d30a75e3041e082e9c60395666
29,533
import random


def vote_random_ideas(request, owner, repository, full_repository_name):
    """ Get 2 random ideas """
    database_repository = get_object_or_404(models.Repository, owner=owner,
                                            name=repository)
    jb = jucybot.from_config()
    context = {}
    context = jb.get_issues(full_repository_name, context=context,
                            issues_to_get=['ready'])
    # If there are not enough issues ready, also get the new issues
    if len(context['issues']) < 4:
        context = jb.get_issues(full_repository_name, context=context,
                                issues_to_get=['new'])
    issues = get_issues_subscribers(request, database_repository,
                                    context['issues'])
    # Remove issues I already voted for
    issues = [issue for issue in issues if not issue['subscribed']]
    # If there are less than 2 issues, return null
    try:
        issues = random.sample(issues, 2)
    except ValueError:
        issues = None
    return JsonResponse({'issues': [{
        'title': issue['title'],
        'body': issue['body'],
        'number': issue['number'],
        'total_subscribers': issue['total_subscribers'],
    } for issue in issues] if issues else None})
dba4d24711e49f68e85ef6b9f5d3fb4428cb6351
29,534
def delta_EF_asym(ave, t_e, t_mu, comp, t_f, n, alpha=None, max_ave_H=1):
    """computes the EF with asymptotic f, f(N) = f_i*H_i*N_i/(N_i+H_i)

    For more information see S10
    H_i is uniformly distributed in [0, 2*ave_H]

    Input
        ave, t_e, t_mu, t_f, comp, n:
            As in output of rand_par
        alpha: optional
            Is not needed. They are just used s.t. one can run delta_EF_asym

    returns:
        deltaEF/EF: array
            Array containing 100*deltaEF/EF"""
    num = len(ave)   # number of communities
    # choose distribution of H: H ~ u[0, 2*ave]
    ave_H = uni(0, max_ave_H, num)
    t_H = uni(-1/sqrt, 1/sqrt, num)   # stdv/mean of H
    H = lambda x: ave_H*(1+t_H*sqrt*x)   # H_i for each species in a community

    # asymptotic EF in N, EF(N) = f_i*H_i*N_i/(N_i+H_i)
    # change to consider different contribution to function
    eco_fun = lambda x, N: n*(1+t_f*x*sqrt)*H(x)*N(x)/(N(x)+H(x))

    # computes the equilibrium densities of species N, in changed and ref site
    N_ref = lambda x: (1+t_mu*sqrt*x-comp)/(1+alpha)
    N_change = lambda x: ((1+x*t_mu*sqrt)*(1-ave*(1+t_e*sqrt*x)) -
                          comp*(1-ave*(1+t_mu*t_e)))/(1+alpha)

    # integrate over all species for EF
    x_simp = np.array(num*[np.linspace(-1, 1, 51)])   # x_axes
    y_ref = eco_fun(x_simp.T, N_ref).T        # y_values in ref
    y_change = eco_fun(x_simp.T, N_change).T  # y values in changed site
    EF_ref = simps(y_ref, x_simp)
    EF_change = simps(y_change, x_simp)
    return 100*(EF_change-EF_ref)/EF_ref
82abe7a6473b9a0b432654837fb8bffff86513e8
29,535
import numpy as np
from scipy.signal.spectral import _median_bias
from scipy.signal.windows import get_window


def time_average_psd(data, nfft, window, average="median",
                     sampling_frequency=1):
    """
    Estimate a power spectral density (PSD) by averaging over
    non-overlapping shorter segments.

    This is different from many other implementations as it does not account
    for the window power loss factor (<window ** 2>)

    Parameters
    ----------
    data: np.ndarray
        The input data to use to estimate the PSD
    nfft: int
        The number of input elements per segment
    window: [str, tuple]
        Input arguments for scipy.signal.windows.get_window to specify
        the window.
    average: str
        Time averaging method, should be either "mean" or "median"
    sampling_frequency: float
        The sampling frequency of the input data, used to normalize the PSD
        estimate to have dimensions of 1 / Hz.

    Returns
    -------
    psd: np.ndarray
        The estimated PSD
    """
    if not isinstance(window, np.ndarray):
        window = get_window(window, nfft)
    blocked_data = data.reshape(-1, nfft) * window
    blocked_psd = abs(np.fft.rfft(blocked_data, axis=-1) / sampling_frequency) ** 2
    if average == "median":
        normalization = 1 / _median_bias(len(blocked_data))
        func = np.median
    elif average == "mean":
        normalization = 1
        func = np.mean
    else:
        raise ValueError(f"PSD method should be mean or median, not {average}")
    psd = func(blocked_psd, axis=0) / 2 * normalization
    return psd
98fb788fdec7f2a868cc576f209c37e196880edf
29,536
import torch


def Variable(tensor, *args, **kwargs):
    """
    The augmented Variable() function which automatically applies cuda()
    when a gpu is available.
    """
    if use_cuda:
        return torch.autograd.Variable(tensor, *args, **kwargs).cuda()
    else:
        return torch.autograd.Variable(tensor, *args, **kwargs)
b8b0534efd0fd40966eaa70e78e6a8db41156cd4
29,537
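A hedged sketch of how a wrapper like the Variable record above is typically wired up; use_cuda is assumed to be a module-level flag derived from torch.cuda.is_available(). Note that in modern PyTorch, torch.autograd.Variable is deprecated in favor of plain tensors with requires_grad, so this pattern mostly survives in older codebases.

import torch

# Assumed module-level flag that the wrapper reads.
use_cuda = torch.cuda.is_available()

v = Variable(torch.zeros(3))
print(v.device)  # cuda:0 when a GPU is present, else cpu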
import time
import os


def get_log(device):
    """
    Gets log file from device.
    :param device: device identifier (e.g. "TA9890AMTG").
    """
    file_name = str(int(time.time() * 1000)) + ".txt"
    target_dir = os.getcwd()
    log_path = os.path.join(target_dir, file_name)
    clear_log_command = "adb -s " + device + " logcat -c"
    get_log_command = "adb -s " + device + " logcat -v time"
    console.execute(clear_log_command)
    log.info("Logging in progress to '" + log_path +
             "'... To finish press Ctrl+C")
    console.execute(get_log_command, False, log_path)
    return log_path
23a7ca9834c92510fdedb8da32bd45ae6db4f85f
29,538
import networkx as nx
from scipy.spatial import distance


def edit_distance(graph1, graph2, node_attr='h', edge_attr='e',
                  upper_bound=100, indel_mul=3, sub_mul=3):
    """
    Calculates exact graph edit distance between 2 graphs.

    Args:
        graph1 : networkx graph, graph with node and edge attributes
        graph2 : networkx graph, graph with node and edge attributes
        node_attr : str, key for node attribute
        edge_attr : str, key for edge attribute
        upper_bound : int, maximum edit distance to consider
        indel_mul : float, insertion/deletion cost
        sub_mul : float, substitution cost

    Returns:
        np.float, distance, how similar graph1 is to graph2
    """
    def node_substitution_scoring(dict_1, dict_2):
        """Calculates node substitution score."""
        multiplier = sub_mul if distance.rogerstanimoto(
            dict_1[node_attr], dict_2[node_attr]) != 0 else 0
        return multiplier * (1 - distance.rogerstanimoto(
            dict_1[node_attr], dict_2[node_attr]))

    def edge_substitution_scoring(dict_1, dict_2):
        """Calculates edge substitution score."""
        multiplier = sub_mul if distance.rogerstanimoto(
            dict_1[edge_attr], dict_2[edge_attr]) != 0 else 0
        return multiplier * (1 - distance.rogerstanimoto(
            dict_1[edge_attr], dict_2[edge_attr]))

    def constant_value(dict_1):
        """Returns constant score for insertion/deletion."""
        return indel_mul

    graph1 = feature_conversion(graph1, node_attr, edge_attr)
    graph2 = feature_conversion(graph2, node_attr, edge_attr)

    return min(
        nx.optimize_graph_edit_distance(
            graph1, graph2,
            node_subst_cost=node_substitution_scoring,
            edge_subst_cost=edge_substitution_scoring,
            upper_bound=upper_bound,
            node_del_cost=constant_value,
            node_ins_cost=constant_value,
            edge_del_cost=constant_value,
            edge_ins_cost=constant_value,
        ))
550f44e91e60a7c3308d5187af3d32054cf6dffa
29,539
import numpy as np
from vedo import Sphere


def get_node_elements(coord, scale, alpha, dof, bcPrescr=None, bc=None,
                      bc_color='red', fPrescr=None, f=None, f_color='blue6',
                      dofs_per_node=None):
    """
    Routine to get node actors.

    :param array coord: Nodal coordinates [number of nodes x 3]
    :param int scale: Node actor radius
    :param float alpha: Node actor transparency [0-1]
    :param array dof: Global degrees of freedom [number of nodes x degrees of freedom per node]
    :param array bcPrescr: Degrees of freedom with prescribed boundary conditions [number of prescribed boundary conditions x 1]
    :param array bc: Values for prescribed boundary conditions [number of prescribed boundary conditions x 1]
    :param string bc_color: Color for nodes with prescribed boundary conditions
    :param array fPrescr: Degrees of freedom with applied forces [number of applied forces x 1]
    :param array f: Values for forces [number of applied forces x 1]
    :param string f_color: Color for nodes with applied forces
    :param int dofs_per_node: Degrees of freedom per node [1-6]

    :return list nodes: Node actors
    """
    nnode = np.size(coord, axis=0)
    ncoord = np.size(coord, axis=1)
    nodes = []

    bc_dict = {}
    indx = 0
    if isinstance(bcPrescr, np.ndarray):
        for i in bcPrescr:
            bc_dict[i] = bc[indx]
            indx += 1

    f_dict = {}
    indx = 0
    if isinstance(fPrescr, np.ndarray):
        for i in fPrescr:
            f_dict[i] = f[indx]
            indx += 1

    for i in range(nnode):
        dofs = dof[i]
        if np.any(np.isin(bcPrescr, dofs, assume_unique=True)) == True:
            color = bc_color
        elif np.any(np.isin(fPrescr, dofs, assume_unique=True)) == True:
            color = f_color
        else:
            color = 'black'

        node = Sphere(c=color).scale(1.5 * scale).pos(
            [coord[i, 0], coord[i, 1], coord[i, 2]]).alpha(alpha)

        if np.any(np.isin(bcPrescr, dofs, assume_unique=True)) == True:
            node.name = f"Node nr. {i+1}, DoFs & BCs: ["
            for j in range(dofs_per_node):
                node.name += str(dof[i, j])
                if dof[i, j] in bc_dict:
                    node.name += (': ' + str(bc_dict[dof[i, j]]))
                if j == dofs_per_node - 1:
                    node.name += ']'
                else:
                    node.name += ', '
        elif np.any(np.isin(fPrescr, dofs, assume_unique=True)) == True:
            node.name = f"Node nr. {i+1}, DoFs & Forces: ["
            for j in range(dofs_per_node):
                node.name += str(dof[i, j])
                if dof[i, j] in f_dict:
                    node.name += (': ' + str(f_dict[dof[i, j]]))
                if j == dofs_per_node - 1:
                    node.name += ']'
                else:
                    node.name += ', '
        else:
            node.name = f"Node nr. {i+1}, DoFs: ["
            for j in range(dofs_per_node):
                node.name += str(dof[i, j])
                if j == dofs_per_node - 1:
                    node.name += ']'
                else:
                    node.name += ', '
        nodes.append(node)
    return nodes
f6e9c2eec12c1816331651d821fa907e5ce34d42
29,540
import logging
import time
import pickle


def temporal_testing(
        horizon, model, observ_interval, first_stage, bm_threshold, ratio,
        bootstrap, epsilon, solve):
    """
    first stage random forest, cross validation, not selecting a best model,
    without separate testing
    """
    model_name = "horizon-%s-ratio-%0.2f" % (horizon, ratio)

    # ====================== load data ========================
    observ_horizon = (horizon - 1) * 60
    interval = 5 if first_stage == "RF" else 12
    ML_data = pd.read_csv(
        'data/{}-{}.csv'.format(first_stage, horizon), index_col=False
    )
    sepsis_stream = ML_data.loc[ML_data['label'] == 1]
    sepsis_stream = sepsis_stream.reset_index(drop=True)
    sepsis_stream = sepsis_stream.drop(
        ['patientunitstayid', 'label'], axis=1
    )
    nonsep_stream = ML_data.loc[ML_data['label'] == 0]
    nonsep_stream = nonsep_stream.reset_index(drop=True)
    nonsep_stream = nonsep_stream.drop(
        ['patientunitstayid', 'label'], axis=1
    )

    # ===================== discretize data =========================
    sepsis_discr = discretize_data(
        stream_data=dcopy(sepsis_stream),
        levels=dcopy(model.observations)
    )
    nonsep_discr = discretize_data(
        stream_data=dcopy(nonsep_stream),
        levels=dcopy(model.observations)
    )

    # =========================== Bootstrapping ===============================
    # metrics
    sensitivity, specificity, precision, f_1, ave_time = {}, {}, {}, {}, {}
    bm_sensitivity, bm_specificity, bm_precision = {}, {}, {}
    bm_f_1, bm_ave_time = {}, {}

    # update trans_function according to observation_interval
    def trans_func(new_state, old_state, action):
        """transition function"""
        p = 0.99967 ** (observ_interval * interval)
        if old_state == "sepsis":
            if new_state == "sepsis":
                return 1.0
            if new_state == "nonsep":
                return 0.0
        if old_state == "nonsep":
            if new_state == "sepsis":
                return 1 - p
            if new_state == "nonsep":
                return p
        return 0

    model.trans_func = trans_func

    # start bootstrap
    for boot in range(bootstrap):
        logging.info("Bootstrap: {}\n".format(boot))
        # -------------- sample data ---------------
        # index
        sepsis_tr_ind = np.random.choice(
            range(sepsis_discr.shape[0]), 500, False
        )
        nonsep_tr_ind = np.random.choice(
            range(nonsep_discr.shape[0]), 500, False
        )
        # data
        sepsis_data, nonsep_data = {}, {}
        # train data
        sepsis_data['train'] = sepsis_discr.iloc[sepsis_tr_ind, :]
        nonsep_data['train'] = nonsep_discr.iloc[nonsep_tr_ind, :]
        # test data
        sepsis_data['test'] = sepsis_discr[
            ~sepsis_discr.index.isin(sepsis_tr_ind)
        ]
        nonsep_data['test'] = nonsep_discr.iloc[
            ~nonsep_discr.index.isin(nonsep_tr_ind)
        ]

        # -------------- estimate observation probability -----------------
        model.name = "{}_{}_{}".format(first_stage, horizon, boot)
        obs_mat = estimate_observation_pr(
            observations=dcopy(model.observations),
            sepsis_data=dcopy(sepsis_data['train']),
            nonsep_data=dcopy(nonsep_data['train']),
            interval=1
        )

        # update observ matrix
        def observ_func(observation, state, action):
            """observation function"""
            obser_matrix = obs_mat
            return obser_matrix.loc[
                "{}".format(state), observation
            ]

        model.observ_func = observ_func
        logging.info("Problem Loaded!\n")

        # ---------------------- solving --------------------------
        solve_time = time.time()
        if not solve:
            alpha_vectors = pickle.load(open(
                'solutions/{}-{}-boot_{}.pickle'.format(
                    first_stage, horizon, boot
                ), 'rb'
            ))
        else:
            alpha_vectors = PBVI_OS(
                POMDP_OS=model,
                epsilon=epsilon,
                iterations=10,
                fig_dir='figures/solution'
            )
            pickle.dump(alpha_vectors, open(
                'solutions/{}-{}-boot_{}.pickle'.format(
                    first_stage, horizon, boot
                ), 'wb'
            ))
        logging.info("Solving Time = {}\n".format(
            time.time() - solve_time
        ))

        # -------------------- testing -------------------------
        logging.info("Testing...")
        prediciton_time, sepsis_cohort, nonsep_cohort = [], [], []
        bm_prediciton_time = []
        bm_sepsis_cohort, bm_nonsep_cohort = [], []
        for test_name in ["sepsis", "nonsep"]:
            if test_name == "sepsis":
                test_data = sepsis_data['test']
                iter_list = range(int(ratio * test_data.shape[0]))
            elif test_name == "nonsep":
                test_data = nonsep_data['test']
                iter_list = range(test_data.shape[0])
            # for each patient
            for i in iter_list:
                # ------------ benchmark test -----------------
                bm_result = []
                for t in range(len(test_data.iloc[i, ])):
                    if test_data.iloc[i, t] > bm_threshold:
                        bm_result.append(1)
                    else:
                        bm_result.append(0)
                try:
                    bm_prediciton_time.append(np.sum([
                        -1 * (observ_horizon + 60),
                        observ_interval * bm_result.index(1)
                    ]))
                    if test_name == "sepsis":
                        bm_sepsis_cohort.append(1)
                    elif test_name == "nonsep":
                        bm_nonsep_cohort.append(1)
                except ValueError:
                    if test_name == "sepsis":
                        bm_sepsis_cohort.append(0)
                    elif test_name == "nonsep":
                        bm_nonsep_cohort.append(0)
                # --------------- POMDP test ----------------
                result = test_POMDP(
                    POMDP=model,
                    policy=alpha_vectors,
                    test_data=test_data.iloc[i],
                    status=test_name
                )
                try:
                    prediciton_time.append(np.sum([
                        -1 * (observ_horizon + 60),
                        observ_interval * result.index("sepsis")
                    ]))
                    if test_name == "sepsis":
                        sepsis_cohort.append(1)
                    elif test_name == "nonsep":
                        nonsep_cohort.append(1)
                except ValueError:
                    if test_name == "sepsis":
                        sepsis_cohort.append(0)
                    elif test_name == "nonsep":
                        nonsep_cohort.append(0)

        # ----------------- benchmark statistics ----------------
        tn, fp, fn, tp = confusion_matrix(
            y_true=[0] * len(bm_nonsep_cohort) + [1] * len(bm_sepsis_cohort),
            y_pred=bm_nonsep_cohort + bm_sepsis_cohort
        ).ravel()
        bm_sensitivity[boot] = tp / (tp + fn)
        bm_specificity[boot] = 'Inf' if tn + fp == 0 else tn / (tn + fp)
        bm_precision[boot] = 'Inf' if tp + fp == 0 else tp / (tp + fp)
        bm_f_1[boot] = 'Inf' if 2 * tp + fp + fn == 0 else 2*tp / (2*tp+fp+fn)
        bm_ave_time[boot] = np.mean(bm_prediciton_time)

        # ----------------- POMDP statistics -------------------
        tn, fp, fn, tp = confusion_matrix(
            y_true=[0] * len(nonsep_cohort) + [1] * len(sepsis_cohort),
            y_pred=nonsep_cohort + sepsis_cohort
        ).ravel()
        sensitivity[boot] = tp / (tp + fn)
        specificity[boot] = 'Inf' if tn + fp == 0 else tn / (tn + fp)
        precision[boot] = 'Inf' if tp + fp == 0 else tp / (tp + fp)
        f_1[boot] = 'Inf' if 2 * tp + fp + fn == 0 else 2 * tp / (2*tp+fp+fn)
        ave_time[boot] = np.mean(prediciton_time)

    # ------------------ Output --------------------
    bm_output(
        model_name, bootstrap, bm_sensitivity, bm_specificity, bm_precision,
        bm_f_1, bm_ave_time, first_stage, horizon
    )
    POMDP_output(
        model_name, bootstrap, sensitivity, specificity, precision, f_1,
        ave_time, first_stage, horizon
    )

    # --------------- Done ---------------
    logging.info("Done!\n")
    return {
        'sens': list(sensitivity.values()),
        'spec': list(specificity.values()),
        'prec': list(precision.values()),
        'f_1': list(f_1.values()),
        'time': list(ave_time.values())
    }
c6320d2638ee98931af8523085d37981095e9f14
29,541
def _endian_char(big) -> str:
    """
    Returns the character that represents either big endian or little endian
    in struct unpack.

    Args:
        big: True if big endian.

    Returns:
        Character representing either big or little endian.
    """
    return '>' if big else '<'
2e1a63ec593ca6359947385019bcef45cb3749c0
29,542
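A quick check of the _endian_char record above, assuming the function is in scope; '>' and '<' are the struct format characters for big and little endian respectively.

import struct

assert struct.pack(_endian_char(True) + 'I', 1) == b'\x00\x00\x00\x01'
assert struct.pack(_endian_char(False) + 'I', 1) == b'\x01\x00\x00\x00'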
from math import atan2


def planar_angle2D(v1, v2):
    """returns the angle of one vector relative to the other in the plane
    defined by the normal (default is in the XY plane)

    NB This algorithm avoids carrying out a coordinate transformation of
    both vectors. However, it only works if both vectors are in that plane
    to start with.
    """
    return atan2(sin2D(v1, v2), cos_sim2D(v1, v2))
c244ce7a2bcd27e110062dba0c88f2537e0cb7dd
29,543
def test_agg_same_method_name(es):
    """
    Pandas relies on the function name when calculating aggregations.
    This means if two primitives with the same function name are
    applied to the same column, pandas can't differentiate them.
    We have a work around to this based on the name property that
    we test here.
    """
    # test with normally defined functions
    def custom_primitive(x):
        return x.sum()

    Sum = make_agg_primitive(custom_primitive, input_types=[Numeric],
                             return_type=Numeric, name="sum")

    def custom_primitive(x):
        return x.max()

    Max = make_agg_primitive(custom_primitive, input_types=[Numeric],
                             return_type=Numeric, name="max")

    f_sum = ft.Feature(es["log"]["value"], parent_entity=es["customers"],
                       primitive=Sum)
    f_max = ft.Feature(es["log"]["value"], parent_entity=es["customers"],
                       primitive=Max)

    fm = ft.calculate_feature_matrix([f_sum, f_max], entityset=es)
    assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]

    # test with lambdas
    Sum = make_agg_primitive(lambda x: x.sum(), input_types=[Numeric],
                             return_type=Numeric, name="sum")
    Max = make_agg_primitive(lambda x: x.max(), input_types=[Numeric],
                             return_type=Numeric, name="max")

    f_sum = ft.Feature(es["log"]["value"], parent_entity=es["customers"],
                       primitive=Sum)
    f_max = ft.Feature(es["log"]["value"], parent_entity=es["customers"],
                       primitive=Max)
    fm = ft.calculate_feature_matrix([f_sum, f_max], entityset=es)
    assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]
638447c081d2a5dcf4b2377943146876b7438e2c
29,544
def dispatch(request):
    """If user is admin, then show them admin dashboard;
    otherwise redirect them to trainee dashboard."""
    if request.user.is_admin:
        return redirect(reverse("admin-dashboard"))
    else:
        return redirect(reverse("trainee-dashboard"))
046107c46cbac5e7495fee19c4354a822c476a5b
29,545
import numpy as np
import tensorflow as tf


def log_pdf_factor_analysis(X, W, mu, sigma):
    """ log pdf of factor analysis

    Args:
        X: B X D
        W: D X K
        mu: D X 1
        sigma: D X 1

    Returns:
        log likelihood
    """
    Pi = tf.constant(float(np.pi))
    diff_vec = X - mu
    sigma_2 = tf.square(sigma)

    # phi = tf.eye(K) * sigma_2
    # M = tf.matmul(W, W, transpose_a=True) + phi

    # using Sherman-Morrison-Woodbury formula to compute the inverse
    # inv_M = tf.matrix_inverse(M)
    # inv_cov = tf.eye(DIM) / sigma_2 + tf.matmul(
    #     tf.matmul(W, inv_M), W, transpose_b=True) / sigma_2

    # using Sylvester's determinant identity to compute log determinant
    # implementation 1: directly compute determinant
    # log_det = tf.log(tf.matrix_determinant(M)) + 2.0 * (DIM - K) * tf.log(sigma)
    # implementation 2: using Cholesky decomposition
    # log_det = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(M)))) + 2.0 * (
    #     DIM - K) * tf.log(sigma)

    # phi = tf.eye(DIM) * sigma_2
    phi = tf.diag(sigma_2)
    M = phi + tf.matmul(W, W, transpose_b=True)
    inv_cov = tf.matrix_inverse(M)
    log_det = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(M))))

    log_likelihood = tf.matmul(
        tf.matmul(diff_vec, inv_cov), diff_vec, transpose_b=True)
    log_likelihood = tf.diag_part(log_likelihood)
    log_likelihood += DIM * tf.log(2 * Pi)
    log_likelihood += log_det
    log_likelihood = tf.reduce_sum(log_likelihood) * (-0.5)
    return log_likelihood
70eb515c3a7b7cc8ea49f6a0e79c11327629c7b5
29,546
import logging


def _assert_initial_conditions(scheduler_commands, num_compute_nodes):
    """Assert cluster is in expected state before test starts;
    return list of compute nodes."""
    compute_nodes = scheduler_commands.get_compute_nodes()
    logging.info(
        "Assert initial condition, expect cluster to have {num_nodes} idle nodes".format(
            num_nodes=num_compute_nodes)
    )
    _assert_num_nodes_in_scheduler(scheduler_commands, num_compute_nodes)
    _assert_compute_node_states(scheduler_commands, compute_nodes,
                                expected_states=["idle"])
    return compute_nodes
6a19830caf029dd2a28cdb2363988940610bbc14
29,547
from typing import Dict
from typing import Any


def _clean_parameters(parameters: Dict[str, Any]) -> Dict[str, str]:
    """Removes entries which have no value."""
    return {k: str(v) for k, v in parameters.items() if v}
b8e911674baee7a656f2dc7ba68514c63f84290c
29,548
def delete(run_id):
    """Submits a request to CARROT's runs delete mapping"""
    return request_handler.delete("runs", run_id)
8f106d83ba39995f93067a3f8eb67b430b8fd301
29,549
import math


def get_tile_lat_lng(zoom, x, y):
    """convert Google-style Mercator tile coordinate to
    (lat, lng) of top-left corner of tile"""
    # "map-centric" latitude, in radians:
    lat_rad = math.pi - 2 * math.pi * y / (2 ** zoom)
    # true latitude:
    lat_rad = gudermannian(lat_rad)
    lat = lat_rad * 180.0 / math.pi
    # longitude maps linearly to map, so we simply scale:
    lng = -180.0 + 360.0 * x / (2 ** zoom)
    return (lat, lng)
6bf0e31b30930f3916112d6540e4387a72238586
29,550
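The record above leans on an undefined gudermannian helper; the standard Gudermannian function gd(y) = 2*atan(e^y) - pi/2 is almost certainly what is meant, so a self-contained check of the zoom-0 corner looks like the sketch below (the 85.05 degree bound is the usual Web Mercator latitude limit).

import math

def gudermannian(y):
    # Maps Mercator y (radians) back to geodetic latitude (radians).
    return 2.0 * math.atan(math.exp(y)) - math.pi / 2.0

# Tile (0, 0) at zoom 0 covers the whole map; its top-left corner
# should come out near (85.05, -180).
lat_rad = gudermannian(math.pi - 2 * math.pi * 0 / (2 ** 0))
print(lat_rad * 180.0 / math.pi)  # ~85.0511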
def process_zdr_precip(procstatus, dscfg, radar_list=None):
    """
    Keeps only suitable data to evaluate the differential reflectivity in
    moderate rain or precipitation (for vertical scans)

    Parameters
    ----------
    procstatus : int
        Processing status: 0 initializing, 1 processing volume,
        2 post-processing
    dscfg : dictionary of dictionaries
        data set configuration. Accepted Configuration Keywords::

        datatype : list of string. Dataset keyword
            The input data types
        ml_filter : boolean. Dataset keyword
            indicates if a filter on data in and above the melting layer is
            applied. Default True.
        rmin : float. Dataset keyword
            minimum range where to look for rain [m]. Default 1000.
        rmax : float. Dataset keyword
            maximum range where to look for rain [m]. Default 50000.
        Zmin : float. Dataset keyword
            minimum reflectivity to consider the bin as precipitation [dBZ].
            Default 20.
        Zmax : float. Dataset keyword
            maximum reflectivity to consider the bin as precipitation [dBZ].
            Default 22.
        RhoHVmin : float. Dataset keyword
            minimum RhoHV to consider the bin as precipitation.
            Default 0.97
        PhiDPmax : float. Dataset keyword
            maximum PhiDP to consider the bin as precipitation [deg].
            Default 10.
        elmax : float. Dataset keyword
            maximum elevation angle where to look for precipitation [deg].
            Default None.
        ml_thickness : float. Dataset keyword
            assumed thickness of the melting layer. Default 700.
        fzl : float. Dataset keyword
            The default freezing level height. It will be used if no
            temperature field name is specified or the temperature field is
            not in the radar object. Default 2000.
    radar_list : list of Radar objects
        Optional. list of radar objects

    Returns
    -------
    new_dataset : dict
        dictionary containing the output
    ind_rad : int
        radar index
    """
    if procstatus != 1:
        return None, None

    temp_field = None
    iso0_field = None
    for datatypedescr in dscfg['datatype']:
        radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
        if datatype == 'ZDR':
            zdr_field = 'differential_reflectivity'
        if datatype == 'ZDRc':
            zdr_field = 'corrected_differential_reflectivity'
        if datatype == 'PhiDP':
            phidp_field = 'differential_phase'
        if datatype == 'PhiDPc':
            phidp_field = 'corrected_differential_phase'
        if datatype == 'RhoHV':
            rhohv_field = 'cross_correlation_ratio'
        if datatype == 'RhoHVc':
            rhohv_field = 'corrected_cross_correlation_ratio'
        if datatype == 'uRhoHV':
            rhohv_field = 'uncorrected_cross_correlation_ratio'
        if datatype == 'dBZc':
            refl_field = 'corrected_reflectivity'
        if datatype == 'dBZ':
            refl_field = 'reflectivity'
        if datatype == 'TEMP':
            temp_field = 'temperature'
        if datatype == 'H_ISO0':
            iso0_field = 'height_over_iso0'

    ind_rad = int(radarnr[5:8]) - 1
    if radar_list[ind_rad] is None:
        warn('No valid radar')
        return None, None
    radar = radar_list[ind_rad]

    if ((refl_field not in radar.fields) or
            (rhohv_field not in radar.fields) or
            (zdr_field not in radar.fields) or
            (phidp_field not in radar.fields)):
        warn('Unable to estimate ZDR in rain. Missing data')
        return None, None

    # if data in and above the melting layer has to be filtered determine
    # the field to use
    fzl = None
    ml_filter = True
    if 'ml_filter' in dscfg:
        ml_filter = dscfg['ml_filter']

    if ml_filter:
        # determine which freezing level reference
        temp_ref = 'temperature'
        if temp_field is None and iso0_field is None:
            warn('Field to obtain the freezing level was not specified. ' +
                 'Using fixed freezing level height')
            temp_ref = 'fixed_fzl'
        elif temp_field is not None:
            if temp_field not in radar.fields:
                warn('COSMO temperature field not available. ' +
                     'Using fixed freezing level height')
                temp_ref = 'fixed_fzl'
        elif iso0_field is not None:
            if iso0_field not in radar.fields:
                warn('Height over iso0 field not available. ' +
                     'Using fixed freezing level height')
                temp_ref = 'fixed_fzl'
            else:
                temp_ref = 'height_over_iso0'

        # determine freezing level height if necessary
        if temp_ref == 'fixed_fzl':
            if 'fzl' in dscfg:
                fzl = dscfg['fzl']
            else:
                fzl = 2000.
                warn('Freezing level height not defined. Using default ' +
                     str(fzl) + ' m')
    else:
        temp_ref = None

    # default values
    rmin = 1000.
    rmax = 50000.
    zmin = 20.
    zmax = 22.
    rhohvmin = 0.97
    phidpmax = 10.
    elmax = None
    thickness = 700.

    # user defined values
    if 'rmin' in dscfg:
        rmin = dscfg['rmin']
    if 'rmax' in dscfg:
        rmax = dscfg['rmax']
    if 'Zmin' in dscfg:
        zmin = dscfg['Zmin']
    if 'Zmax' in dscfg:
        zmax = dscfg['Zmax']
    if 'RhoHVmin' in dscfg:
        rhohvmin = dscfg['RhoHVmin']
    if 'PhiDPmax' in dscfg:
        phidpmax = dscfg['PhiDPmax']
    if 'elmax' in dscfg:
        elmax = dscfg['elmax']
    if 'ml_thickness' in dscfg:
        thickness = dscfg['ml_thickness']

    ind_rmin = np.where(radar.range['data'] > rmin)[0][0]
    ind_rmax = np.where(radar.range['data'] < rmax)[0][-1]

    zdr_precip = pyart.correct.est_zdr_precip(
        radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, zmin=zmin, zmax=zmax,
        rhohvmin=rhohvmin, phidpmax=phidpmax, elmax=elmax,
        thickness=thickness, doc=15, fzl=fzl, zdr_field=zdr_field,
        rhohv_field=rhohv_field, phidp_field=phidp_field,
        temp_field=temp_field, iso0_field=iso0_field, refl_field=refl_field,
        temp_ref=temp_ref)

    # prepare for exit
    new_dataset = {'radar_out': deepcopy(radar)}
    new_dataset['radar_out'].fields = dict()
    new_dataset['radar_out'].add_field(
        'differential_reflectivity_in_precipitation', zdr_precip)

    return new_dataset, ind_rad
44b58f755a103756a2cd6726d19b3a7d958d09c3
29,551
import random


def get_initators(filepath, n_lines):
    """
    Open text file with initiator words and sample a random initiator for
    each line in the poem.
    """
    with open(filepath, "r", encoding="utf-8") as file:
        # save indices of all keywords
        loaded_text = file.read()   # load text file
        lines = loaded_text.splitlines()   # separate initiator lines
        initiators_list = list(random.sample(lines, n_lines))   # sample random initiators
    return initiators_list
94792679a6ea4e0bb14afd5eb38b656a2cc8af67
29,552
def GSAOI_DARK():
    """
    No.  Name     Ver  Type        Cards  Dimensions    Format
    0    PRIMARY  1    PrimaryHDU  289    ()
    1    1             ImageHDU    144    (2048, 2048)  float32
    2    2             ImageHDU    144    (2048, 2048)  float32
    3    3             ImageHDU    144    (2048, 2048)  float32
    4    4             ImageHDU    144    (2048, 2048)  float32
    """
    return download_from_archive("S20150609S0023.fits")
c1cea8420ef518027d14bcf4d430c772268c6024
29,553
import traceback
import time


def wrapLoop(loopfunc):
    """Wraps a thread in a wrapper function to restart it if it exits."""
    def wrapped():
        while True:
            try:
                loopfunc()
            except BaseException:
                print(f"Exception in thread {loopfunc},"
                      " restarting in 10s...")
                traceback.print_exc()
            else:
                print(f"Thread {loopfunc} exited, restarting in 10s...")
            time.sleep(10)
    return wrapped
86c48bc850bb1cf17121130ee9349dd529acf5e3
29,554
def get_version(tp):
    """
    Get Version based on input parameters
    `tp` - Object of class: Transport
    """
    response = None
    try:
        response = tp.send_data('proto-ver', '---')
    except RuntimeError as e:
        on_except(e)
        response = ''
    return response
276dae2599ec99906ea954aae8ad9f79eb2de7d7
29,555
def _decode_feed_ids(option_feeds):
    """
    >>> _decode_feed_ids('123,456')
    [123, 456]
    """
    return [int(x) for x in option_feeds.strip().split(',')]
9218a170c445b3b8d83f08c39d1547c3ff6e2d20
29,556
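The docstring above is already doctest-ready; assuming the function lives in a module named feeds.py (hypothetical), the example can be verified with `python -m doctest feeds.py -v`. Whitespace around items is also tolerated because of the int() coercion:

print(_decode_feed_ids('123, 456'))  # [123, 456]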
import numpy as np


def append_to_phase(phase, data, amt=0.05):
    """ Add additional data outside of phase 0-1. """
    indexes_before = [i for i, p in enumerate(phase) if p > 1 - amt]
    indexes_after = [i for i, p in enumerate(phase) if p < amt]

    phase_before = [phase[i] - 1 for i in indexes_before]
    data_before = [data[i] for i in indexes_before]
    phase_after = [phase[i] + 1 for i in indexes_after]
    data_after = [data[i] for i in indexes_after]

    return (
        np.concatenate((phase_before, phase, phase_after)),
        np.concatenate((data_before, data, data_after)),
    )
1b416e5352efdff9e578e77f8a068a8f6a446a38
29,557
import numpy as np


def timedelta2s(t_diff):
    """return number of seconds from :class:`numpy.timedelta64` object

    Args:
        t_diff: time difference as :class:`numpy.timedelta64` object

    Returns:
        scalar corresponding to number of seconds
    """
    return t_diff / np.timedelta64(1, 's')
47d3b41717c877aa9c57a0f2745b95888738523b
29,558
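A short usage sketch for the timedelta2s record above, assuming the function is in scope; dividing a timedelta64 by a one-second timedelta64 yields a plain float.

import numpy as np

t0 = np.datetime64('2021-01-01T00:00:00')
t1 = np.datetime64('2021-01-01T00:02:30')
print(timedelta2s(t1 - t0))  # 150.0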
import pandas as pd


def window(MT_seq, WT_seq, window_size=5):
    """
    Chop two sequences with a sliding window
    """
    if len(MT_seq) != len(WT_seq):
        raise Exception("len(MT_seq) != len(WT_seq)")
    pos = []
    mt = []
    wt = []
    for i in range(len(MT_seq) - window_size + 1):
        pos.append(i)
        mt.append(MT_seq[i:i + window_size])
        wt.append(WT_seq[i:i + window_size])
    dt = pd.DataFrame({"position": pos, "MT": mt, "WT": wt})
    return dt
67fecea9ed7155a2c85e9cd7acae9ff5a17402e7
29,559
def splits_for_blast(target, NAME):
    """Create slices for BLAST

    This function creates multiple slices of 400 nucleotides given a fasta
    sequence. The step size is 50. The gaps are excluded from the sequence;
    that's why sequences with less than 400 nucleotides are excluded.

    Args:
        target (np.array): Fasta sequence in an array.
        NAME (str): Global variable. Internal index of SNAPPy for this fasta.

    Returns:
        List of fasta file slices. Each is a proper fasta.
    """
    target_seq = target[1:]
    no_gap_t = target_seq[target_seq != '-']
    target_length = no_gap_t.shape[0]
    sub_aligns = [[[f'>{NAME}_{x}'], no_gap_t[x:x + 400]]
                  for x in range(0, target_length, 50)
                  if len(no_gap_t[x:x + 400]) == 400]
    return sub_aligns
6ad193fe494a6387fbb06d2c2a3b6a059b903a5f
29,560
from io import StringIO

import pandas as pd
import pytest


def test_load_items_errors() -> None:
    """
    Test error cases when creating a list of classification Items from a dataframe
    """
    def load(csv_string: StringIO) -> str:
        df = pd.read_csv(csv_string, sep=",", dtype=str)
        numerical_columns = ["scalar2", "scalar1"]
        non_image_channels = _get_non_image_dict(["label", "image2"],
                                                 ["scalar2", "scalar1"])
        with pytest.raises(Exception) as ex:
            DataSourceReader(data_frame=df,
                             # Provide values in a different order from the file!
                             image_channels=["image2", "image1"],
                             image_file_column="path",
                             label_channels=["label"],
                             label_value_column="value",
                             # Provide values in a different order from the file!
                             non_image_feature_channels=non_image_channels,
                             numerical_columns=numerical_columns).load_data_sources()
        return str(ex)

    csv_string = StringIO("""subject,channel,path,value,scalar1
S1,image1,foo1.nii,,2.1
""")
    assert "columns are missing: scalar2" in load(csv_string)
    csv_string = StringIO("""subject,channel,path,scalar1,scalar2
S1,image1,foo1.nii,2.1,2.2
""")
    assert "columns are missing: value" in load(csv_string)
    csv_string = StringIO("""id,channel,path,value,scalar1,scalar2
S1,image,foo.nii
S1,label,,True,1.1,1.2
""")
    assert "columns are missing: subject" in load(csv_string)
649358c42db33e178a4269ed48b186999903bbdb
29,561
import imghdr


def validate_image(stream):
    """Ensure the image is valid and return its file extension.

    Args:
        stream (byte stream): The image

    Returns:
        str: the file extension with a leading dot, or None if the
        format could not be detected
    """
    header = stream.read(512)
    stream.seek(0)
    fmt = imghdr.what(None, header)  # renamed to avoid shadowing the builtin 'format'
    if not fmt:
        return None
    return '.' + (fmt if fmt != 'jpeg' else 'jpg')
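A small sketch with an in-memory stream; imghdr recognises the PNG signature from the header bytes alone:

import io

fake_png = io.BytesIO(b'\x89PNG\r\n\x1a\n' + b'\x00' * 100)
print(validate_image(fake_png))  # '.png'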
1a1976f5b009c2400071ebf572d886c1f7d12ab0
29,562
def my_mean(my_list):
    """Calculate the mean of a given list.

    Args:
        my_list (list): Given list.

    Returns:
        float: Mean of the given list.
    """
    # my_sum is assumed to be defined elsewhere in the same module.
    return my_sum(my_list) / len(my_list)
2423d51bf457a85ee8a6a8f1505a729b6d1d3f6f
29,563
from sklearn.metrics import auc, precision_recall_curve


def pr(labels, predictions):
    """Compute precision-recall curve and its AUC.

    Arguments:
        labels {array} -- numpy array of labels {0, 1}
        predictions {array} -- numpy array of predictions, [0, 1]

    Returns:
        tuple -- precision array, recall array, area float, threshold array
    """
    precision, recall, threshold = precision_recall_curve(labels, predictions)
    area = auc(recall, precision)
    return precision, recall, area, threshold
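Usage sketch with toy scores (values are illustrative):

import numpy as np

labels = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])
precision, recall, area, threshold = pr(labels, scores)
print(round(area, 2))  # AUC of the precision-recall curve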
5cf18052875396483f7a76e4c6c0b55f1541803d
29,564
def grouped_evaluate(population: list, problem, max_individuals_per_chunk: int = None) -> list: """Evaluate the population by sending groups of multiple individuals to a fitness function so they can be evaluated simultaneously. This is useful, for example, as a way to evaluate individuals in parallel on a GPU.""" if max_individuals_per_chunk is None: max_individuals_per_chunk = len(population) def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i:i + n] fitnesses = [] for chunk in chunks(population, max_individuals_per_chunk): phenomes = [ ind.decode() for ind in chunk ] fit = problem.evaluate_multiple(phenomes) fitnesses.extend(fit) for fit, ind in zip(fitnesses, population): ind.fitness = fit return population
ea43be334def0698272ba7930cc46dc84ce78de9
29,565
from django.shortcuts import render


def ZeusPaypalAccounts(request):
    """
    Zeus Paypal Account Credentials
    """
    if request.method == "GET":
        return render(request, "lost-empire/site_templates/zeus/paypal_accounts.html")
34a0fc616beac2869d501d1652ccd7c9d8ff2489
29,566
import scipy.sparse as sp


def upper_tri_to_full(n):
    """Returns a coefficient matrix to create a symmetric matrix.

    Parameters
    ----------
    n : int
        The width/height of the matrix.

    Returns
    -------
    SciPy CSC matrix
        The coefficient matrix.
    """
    entries = n*(n+1)//2

    val_arr = []
    row_arr = []
    col_arr = []
    count = 0
    for i in range(n):
        for j in range(i, n):
            # Index in the original matrix.
            col_arr.append(count)
            # Index in the filled matrix.
            row_arr.append(j*n + i)
            val_arr.append(1.0)
            if i != j:
                # Index in the original matrix.
                col_arr.append(count)
                # Index in the filled matrix.
                row_arr.append(i*n + j)
                val_arr.append(1.0)
            count += 1

    return sp.coo_matrix((val_arr, (row_arr, col_arr)),
                         (n*n, entries)).tocsc()
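A sketch of what the coefficient matrix does for n=2: three packed upper-triangular entries expand into a full 2x2 symmetric matrix.

import numpy as np

A = upper_tri_to_full(2)
packed = np.array([1.0, 2.0, 3.0])   # upper triangle of [[1, 2], [2, 3]]
print((A @ packed).reshape(2, 2))
# [[1. 2.]
#  [2. 3.]]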
5fdca1868f0824d9539bd785aa99b20c6195b7c0
29,567
def prod(a, axis=None, dtype=None, out=None, keepdims=False): """Returns the product of an array along given axes. Args: a (cupy.ndarray): Array to take product. axis (int or sequence of ints): Axes along which the product is taken. dtype: Data type specifier. out (cupy.ndarray): Output array. keepdims (bool): If ``True``, the specified axes are remained as axes of length one. Returns: cupy.ndarray: The result array. .. seealso:: :func:`numpy.prod` """ if _fusion_thread_local.is_fusing(): if keepdims: raise NotImplementedError( 'cupy.prod does not support `keepdims` in fusion yet.') if dtype is None: func = _math._prod_auto_dtype else: func = _math._prod_keep_dtype return _fusion_thread_local.call_reduction( func, a, axis=axis, dtype=dtype, out=out) # TODO(okuta): check type return a.prod(axis, dtype, out, keepdims)
567ca4b23d2828b9a978e44729ff10f823d13113
29,568
import os import sys def get_dataset_path(filename): """Searches for filename in SEARCH_PATH""" for p in SEARCH_PATH: candidate = os.path.join(p, filename) if os.path.exists(candidate): print("Found %s in %s" % (filename, candidate)) sys.stdout.flush() return candidate raise FileNotFoundError("Could not find %s in search path" % filename)
b1e0c7359aa0868acf78bae1e2ef561500b48804
29,569
def sort_dnfs(x, y):
    """Sort dnf riders by code and riderno (Python 2 cmp-style comparator)."""
    if x[2] == y[2]:  # same code
        if x[2]:
            return cmp(strops.bibstr_key(x[1]), strops.bibstr_key(y[1]))
        else:
            return 0  # don't alter order on unplaced riders
    else:
        return strops.cmp_dnf(x[2], y[2])
ccf20fb43df26219ce934e18b2d036e3cf6d13b7
29,570
import torch
from scipy import sparse


def gen_diag(dim):
    """Generate a sparse diagonal matrix."""
    # 'args' and '_torch_from_scipy' are module-level helpers defined elsewhere.
    diag = torch.randn(dim)
    a_sp = sparse.diags(diag.numpy(), format=args.format)
    a_pt = _torch_from_scipy(a_sp)
    return a_pt, a_sp
9ca2842553a1b6331347210bd1da049f53e67361
29,571
def one_of(patterns, eql=equal): """Return a predicate which checks an object matches one of the patterns. """ def oop(ob): for p in patterns: if validate_object(ob, p, eql=eql): return True return False return oop
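Illustrative only, since validate_object is a project helper; this assumes it reduces to eql(ob, pattern) for plain scalar patterns:

is_vowel = one_of(['a', 'e', 'i', 'o', 'u'])
# is_vowel('e') -> True, is_vowel('z') -> False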
058f1f64780760d9e996858dcfad4c0a47c07448
29,572
import os

import networkx as nx
import pandas as pd


def load_dataset_BlogCatalog3(location):
    """
    This method loads the BlogCatalog3 network dataset
    (http://socialcomputing.asu.edu/datasets/BlogCatalog3) into a networkx
    undirected heterogeneous graph.

    The graph has two types of nodes, 'user' and 'group', and two types of edges,
    'friend' and 'belongs'. The 'friend' edges connect two 'user' nodes and the
    'belongs' edges connect 'user' and 'group' nodes.

    The node and edge types are not included in the dataset, which is a collection
    of node and group ids along with the list of edges in the graph.

    Important note about the node IDs: The dataset uses integers for node ids.
    However, the integers from 1 to 39 are used as IDs for both users and groups.
    This would cause confusion when constructing the networkx graph object. As a
    result, we convert all IDs to string and append the character 'u' to the
    integer ID for user nodes and the character 'g' to the integer ID for group
    nodes.

    Args:
        location: <str> The directory where the dataset is located

    Returns:
        A networkx Graph object.
    """
    location = os.path.expanduser(location)

    if not os.path.isdir(location):
        raise NotADirectoryError("The location {} is not a directory.".format(location))

    # load the raw data
    user_node_ids = pd.read_csv(os.path.join(location, "nodes.csv"), header=None)
    group_ids = pd.read_csv(os.path.join(location, "groups.csv"), header=None)
    edges = pd.read_csv(os.path.join(location, "edges.csv"), header=None)
    group_edges = pd.read_csv(os.path.join(location, "group-edges.csv"), header=None)

    # convert the dataframes to lists because that is what networkx expects as input
    user_node_ids = user_node_ids[0].tolist()
    group_ids = group_ids[0].tolist()
    edges = list(edges.itertuples(index=False, name=None))  # convert to list of tuples
    group_edges = list(group_edges.itertuples(index=False, name=None))

    # The dataset uses integers for node ids. However, the integers from 1 to 39 are
    # used as IDs for both users and groups. This would cause confusion when
    # constructing the networkx graph object. As a result, we convert all IDs to
    # string and append the character 'u' to the integer ID for user nodes and the
    # character 'g' to the integer ID for group nodes.
    user_node_ids = ["u" + str(user_node_id) for user_node_id in user_node_ids]
    group_ids = ["g" + str(group_id) for group_id in group_ids]

    edges = [("u" + str(from_node), "u" + str(to_node)) for from_node, to_node in edges]
    group_edges = [
        ("u" + str(from_node), "g" + str(to_node)) for from_node, to_node in group_edges
    ]

    g_nx = nx.Graph()  # create the graph

    # add user and group nodes with labels 'user' and 'group' respectively
    g_nx.add_nodes_from(user_node_ids, label="user")
    g_nx.add_nodes_from(group_ids, label="group")

    # add the user-user edges with label 'friend'
    g_nx.add_edges_from(edges, label="friend")

    # add user-group edges with label 'belongs'
    g_nx.add_edges_from(group_edges, label="belongs")

    return g_nx
709fa29d0493b90ebc5cf986c9ad2a2c2a836e7d
29,573
import networkx as nx


def convert_to_MultiDiGraph(G):
    """
    Takes any graph object and loads it into a MultiDiGraph-type networkx object.

    :param G: a graph object
    """
    a = nx.MultiDiGraph()

    node_bunch = []
    for u, data in G.nodes(data=True):
        node_bunch.append((u, data))

    a.add_nodes_from(node_bunch)

    edge_bunch = []
    for u, v, data in G.edges(data=True):
        if 'Wkt' in data.keys():
            data['Wkt'] = str(data['Wkt'])
        edge_bunch.append((u, v, data))

    a.add_edges_from(edge_bunch)

    return a
bde49710bed50386bd7bb09816e6f18089ed8030
29,574
import numpy as np


def x_to_world_transformation(transform):
    """
    Get the transformation matrix from x (it can be vehicle or sensor)
    coordinates to world coordinates.

    Parameters
    ----------
    transform : carla.Transform
        The transform that contains location and rotation.

    Returns
    -------
    matrix : np.ndarray
        The transformation matrix.
    """
    rotation = transform.rotation
    location = transform.location

    # used for rotation matrix
    c_y = np.cos(np.radians(rotation.yaw))
    s_y = np.sin(np.radians(rotation.yaw))
    c_r = np.cos(np.radians(rotation.roll))
    s_r = np.sin(np.radians(rotation.roll))
    c_p = np.cos(np.radians(rotation.pitch))
    s_p = np.sin(np.radians(rotation.pitch))

    matrix = np.identity(4)

    # translation matrix
    matrix[0, 3] = location.x
    matrix[1, 3] = location.y
    matrix[2, 3] = location.z

    # rotation matrix
    matrix[0, 0] = c_p * c_y
    matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
    matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
    matrix[1, 0] = s_y * c_p
    matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
    matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
    matrix[2, 0] = s_p
    matrix[2, 1] = -c_p * s_r
    matrix[2, 2] = c_p * c_r

    return matrix
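A hedged sketch that stands in for carla.Transform with SimpleNamespace, since the function only reads the .location and .rotation attributes:

from types import SimpleNamespace

transform = SimpleNamespace(
    location=SimpleNamespace(x=10.0, y=5.0, z=1.0),
    rotation=SimpleNamespace(yaw=90.0, pitch=0.0, roll=0.0),
)
M = x_to_world_transformation(transform)
print(M[:3, 3])  # [10.  5.  1.] -- the translation column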
718227deea6a6be4a0b24ebf4eda40d78be20fcf
29,575
def protobuf_get_constant_type(proto_type) : """About protobuf write types see : https://developers.google.com/protocol-buffers/docs/encoding#structure +--------------------------------------+ + Type + Meaning + Used For + +--------------------------------------+ + + + int32, int64, uint32+ + 0 + Varint + uint64,sint32,sint64+ + + + boolean, enum + +--------------------------------------+ + + + + + 1 + 64-bit + fixed64, sfixed64, + + + + double + +--------------------------------------+ + 2 + string + string + +--------------------------------------+ + 5 + 32-bit + float + +--------------------------------------+ """ if 'uInt32' == proto_type or \ 'sInt32' == proto_type or \ 'int32' == proto_type : return 0 elif 'double' == proto_type : return 1 elif 'string' == proto_type : return 2 elif 'float' == proto_type : return 5 return 2
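A quick check of the wire-type mapping, including the fall-through default:

for t in ('int32', 'double', 'string', 'float', 'bytes'):
    print(t, '->', protobuf_get_constant_type(t))
# int32 -> 0, double -> 1, string -> 2, float -> 5,
# and unknown names such as 'bytes' fall back to 2.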
46ce7e44f8499e6c2bdcf70a2bc5e84cb8786956
29,576
def edit_delivery_products(request, delivery): """Edit a delivery (name, state, products). Network staff only.""" delivery = get_delivery(delivery) if request.user not in delivery.network.staff.all(): return HttpResponseForbidden('Réservé aux administrateurs du réseau '+delivery.network.name) if request.method == 'POST': # Handle submitted data _parse_form(request) JournalEntry.log(request.user, "Edited products for delivery %s/%s", delivery.network.name, delivery.name) return redirect('edit_delivery', delivery.id) else: # Create and populate forms to render vars = { 'user': request.user, 'delivery': delivery} vars.update(csrf(request)) return render_to_response('edit_delivery_products.html', vars)
fc734e5ded0a17d20a79d36e8ae599ee763ea73a
29,577
def _tc_imul_ ( self , other ) : """Multiplication for TCut objects >>> cut = ... >>> other = ... >>> cut *= other """ ## self.strip() ## if isinstance ( other , num_types ) : if self : self.SetTitle ( "(%s)*%s" % ( self , other ) ) else : self.SetTitle ( "%s" % other ) return self elif isinstance ( other , ( str , ROOT.TCut ) ) : other = ROOT.TCut ( other.strip() ) else : return NotImplemented if other and self : self.SetTitle("(%s)*(%s)" % ( self , other ) ) return self elif other : self.SetTitle ( "%s" % other ) ## logger.debug ('(*=) empty argument is ignored, the result is "%s"' % self) return self
003ca68e1995e7741cf23576e7515b626d49ff20
29,578
import pprint def format_locals(sys_exc_info): """Format locals for the frame where exception was raised.""" current_tb = sys_exc_info[-1] while current_tb: next_tb = current_tb.tb_next if not next_tb: frame_locals = current_tb.tb_frame.f_locals return pprint.pformat(frame_locals) current_tb = next_tb
b5a21f42c8543d9de060ff7be2b3ad6b23065de9
29,579
def binarize_garcia(label: str) -> str: """ Streamline Garcia labels with the other datasets. :returns (str): streamlined labels. """ if label == 'hate': return 'abuse' else: return 'not-abuse'
5cc26303e0c496d46b285e266604a38a0c88e8d7
29,580
import numpy as np


def diagstack(K1, K2):
    """
    Combine two kernel matrices along the diagonal: [[K1 0][0 K2]].
    Useful for placing two kernels in temporal sequence.

    Inputs
    -------
    K1, K2 : numpy arrays
        kernel matrices

    Returns
    --------
    matrix of kernel values
    """
    r1, c1 = K1.shape
    r2, c2 = K2.shape
    Kt = np.hstack((K1, np.zeros([r1, c2])))
    Kb = np.hstack((np.zeros([r2, c1]), K2))
    return np.vstack((Kt, Kb))
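Small sketch of the block-diagonal stacking:

import numpy as np

K1 = np.ones((2, 2))
K2 = 2 * np.ones((3, 3))
K = diagstack(K1, K2)
print(K.shape)  # (5, 5): K1 in the top-left block, K2 in the bottom-right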
6e163bf62ca2639e5bacebad6c03700b1056de2e
29,581
import string

from flask import jsonify, render_template, request

# BuildInterface, SendConfig, GetInterfacesInfo, netconf_session,
# unassigned_ints and interface_nums are provided by the surrounding project.


def submit_new_inteface():
    """POST interface configuration from form data"""
    global unassigned_ints, interface_nums

    ip = None
    mask = None
    status = None
    descr = None
    vrf = None
    negotiation = None

    int_num = [i for i in request.form.get("interface") if i not in string.ascii_letters]
    int_type = [i for i in request.form.get("interface") if i in string.ascii_letters]
    interface = BuildInterface.Templates(''.join(int_type), ''.join(int_num))

    if request.form.get('ip') and request.form.get('mask'):
        ip = request.form.get('ip')
        mask = request.form.get('mask')
    if request.form.get('status'):
        status = request.form.get('status')
    if request.form.get('description'):
        descr = request.form.get('description')
    if request.form.get('vrf'):
        vrf = request.form.get('vrf')
    if request.form.get('negotiation'):
        negotiation = request.form.get('negotiation')

    config = interface.build_interface(ip, mask, status, descr, vrf, negotiation)
    status = SendConfig.send_configuration(netconf_session, config)

    if status == 'Success':
        show_interfaces = GetInterfacesInfo.get_single_interfaces(netconf_session, request.form.get("interface"))
        return jsonify({'data': render_template('new_interface_table.html', interfaces=show_interfaces)})
    else:
        return jsonify({'data': render_template('config_failed.html', status=status)})
f746071d1f1ce2c1bdd9c0b4d4401edbb1119c36
29,582
from typing import Optional def component_clause(): # type: ignore """ component_clause = type_prefix type_specifier array_subscripts? component_list """ return ( syntax.type_prefix, syntax.type_specifier, Optional(syntax.array_subscripts), syntax.component_list, )
cd788687645d028c39f7ad439aab1ee21e5ad495
29,583
import numpy as np def clean_time_series(time, val, nPoi): """ Clean doubled time values and checks with wanted number of nPoi :param time: Time. :param val: Variable values. :param nPoi: Number of result points. """ # Create shift array Shift = np.array([0.0], dtype='f') # Shift time to right and left and subtract time_sr = np.concatenate((Shift, time)) time_sl = np.concatenate((time, Shift)) time_d = time_sl - time_sr time_dn = time_d[0:-1] # Get new values for time and val tol = 1E-5 timen = time[time_dn > tol] valn = val[time_dn > tol] if len(timen) != nPoi: raise ValueError( "Error: In clean_time_series, length and number of results \ points do not match.") return timen, valn
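A quick sketch; the doubled sample at t=1.0 keeps its first occurrence:

import numpy as np

time = np.array([0.5, 1.0, 1.0, 2.0])
val = np.array([1.0, 2.0, 3.0, 4.0])
t, v = clean_time_series(time, val, nPoi=3)
print(t)  # [0.5 1.  2. ]
print(v)  # [1. 2. 4.]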
35a4cea11a0dbf33916f3df6f8aae5c508a0c838
29,584
from copy import deepcopy
from typing import Callable

# _has_eq_defined is assumed to be a helper defined elsewhere in this module.


def deep_se_print(func: Callable) -> Callable:
    """Transforms the function to print nested side effects.

    Searches recursively for changes on deep inner attributes of the arguments.
    Goes down a tree until it finds some element which has no __dict__.
    For each element of the tree, if it has no __dict__ or if __eq__ is
    user-defined, it verifies equality (==/__eq__) with the previous state.
    """

    def g(*args):
        previous_states = deepcopy(args)
        arg_names = [str(previous_state) for previous_state in previous_states]
        gen = (f"{previous_state}" for previous_state in previous_states)
        print(f"Call of {func.__qualname__} on args "
              f"{tuple(gen)}:")
        result = func(*args)
        search_recursive(args, previous_states, arg_names)
        return result

    def search_recursive(args, previous_states, arg_names, tree: str = ""):
        for arg, previous_state, arg_name in zip(args, previous_states, arg_names):
            if (not hasattr(previous_state, "__dict__")
                    or _has_eq_defined(previous_state)):
                if arg != previous_state:
                    print(f"    {str(arg_name)}{tree} changed"
                          f" from {str(previous_state)}"
                          f" to {str(arg)}.")
            else:
                tree = f" of {arg}" + tree
                for key, attr in vars(arg).items():
                    if not hasattr(attr, "__dict__"):
                        if attr != getattr(previous_state, key):
                            print(f"    {str(key)}{tree} changed"
                                  f" from {str(getattr(previous_state, key))}"
                                  f" to {str(attr)}.")
                        continue
                    search_recursive(tuple(vars(attr).values()),
                                     tuple(vars(getattr(previous_state, key)).values()),
                                     vars(attr).keys(),
                                     f" of {str(attr)}" + tree)

    return g
e5c2ee57f9f5ecd992ac36a30ac6e32c7afdbd8a
29,585
import os
from pathlib import Path

from natsort import natsorted


def prepare_checkpoints(path_to_checkpoints: str,
                        link_keys=["link1", "link2", "link3", "link4"],
                        real_data=True, *args, **kwargs) -> str:
    """
    The main function preparing checkpoints for pre-trained SinGANs of Polyp images.

    Parameters
    -----------
    path_to_checkpoints: str
        A directory path to download checkpoints.
    link_keys: list
        A list of link keys: link1, link2, link3, link4. One or multiple link keys
        can be put in this list.
    real_data: bool
        If True, the real images and masks used to train SinGANs will be downloaded
        to the checkpoint directory.

    Return
    ------
    checkpoint_paths_list, real_image_mask_pair_list
        A sorted list of paths to downloaded checkpoints.
        A sorted (image_path, mask_path) tuple list.
    """
    # load_configs and download_and_extract_single_file are project helpers.
    all_links = load_configs()["links"]
    real_data_links = load_configs()["real_data_links"]
    # alls_in_one_dir = os.path.join(path_to_checkpoints, "all")
    # os.makedirs(alls_in_one_dir, exist_ok=True)
    checkpoint_paths = []
    for link_key in link_keys:
        print(all_links[link_key])
        download_link = all_links[link_key]
        directory = download_and_extract_single_file(download_link, path_to_checkpoints)
        # print("Directory=", directory)
        checkpoint_paths = checkpoint_paths + list(Path(directory).iterdir())
        # moving checkpoints to root directory
        # for sub_dir in tqdm(Path(directory).iterdir()):
        #     shutil.move(str(sub_dir), alls_in_one_dir)
    checkpoint_paths_str = [str(p) for p in checkpoint_paths]

    # Download and prepare real images and masks
    real_data_paths = None
    if real_data:
        image_directory = download_and_extract_single_file(real_data_links["images_link"], path_to_checkpoints)
        mask_directory = download_and_extract_single_file(real_data_links["masks_link"], path_to_checkpoints)
        image_paths = [str(p) for p in Path(image_directory).iterdir()]
        mask_paths = [str(p) for p in Path(mask_directory).iterdir()]
        image_paths = natsorted(image_paths)
        mask_paths = natsorted(mask_paths)
        real_data_paths = list(zip(image_paths, mask_paths))
    return natsorted(checkpoint_paths_str), real_data_paths
4e49a495b3dd587c4b9b350d5e329f7dab36ef30
29,586
def data_count():
    """
    :return: the size of the dataset
    """
    return 300
1582c3782cd77ee79727a7874afbb74539f3ff9e
29,587
import logging
import re
from collections import defaultdict


def grid_name_lookup(engine):
    """Constructs a lookup table of Institute names to ids by combining names
    with aliases and cleaned names containing country names in brackets.
    Multinationals are detected.

    Args:
        engine (:obj:`sqlalchemy.engine.base.Engine`): connection to the database

    Returns:
        (:obj:`list` of :obj:`dict`): lookup table [{name: [id1, id2, id3]}]
            Where ids are different country entities for multinational institutes.
            Most entities just have a single [id1]
    """
    # db_session, Institute and Alias are provided by the surrounding project.
    institute_name_id_lookup = defaultdict(set)
    with db_session(engine) as session:
        for institute in session.query(Institute).all():
            name = institute.name.lower()
            institute_name_id_lookup[name].add(institute.id)
        logging.info(f"{len(institute_name_id_lookup)} institutes in GRID")

        for alias in session.query(Alias).all():
            name = alias.alias.lower()
            institute_name_id_lookup[name].add(alias.grid_id)
        logging.info(f"{len(institute_name_id_lookup)} institutes after adding aliases")

        # look for institute names containing brackets: IBM (United Kingdom)
        n_countries = 0
        for bracketed in (session
                          .query(Institute)
                          .filter(Institute.name.contains('(')
                                  & Institute.name.contains(')'))
                          .all()):
            found = re.match(r'(.*) \((.*)\)', bracketed.name)
            if found:
                # strip the country name, so e.g. 'ibm (united kingdom)' also
                # registers under 'ibm' -> {grid_id1, grid_id2}
                name = found.groups()[0].lower()
                institute_name_id_lookup[name].add(bracketed.id)
                n_countries += 1
    logging.info(f"{n_countries} institutes with country name in the title")

    # Convert dict of sets --> dict of lists
    institute_name_id_lookup = {k: list(v)
                                for k, v in institute_name_id_lookup.items()}
    logging.info(f"{len(institute_name_id_lookup)} total institute names in lookup")
    return institute_name_id_lookup
0a0fef49c722d6c8c40e2d00f1d87d8f41efbbef
29,588
import numpy as np
from warnings import warn


def sample_vMF(theta, kappa, size=1):
    """ Sampling from vMF

    This is based on the implementation I found online here:
    http://stats.stackexchange.com/questions/156729/sampling-from-von-mises-fisher-distribution-in-python
    (**** NOTE THE FIX BY KEVIN *****)
    which is based on:
    Directional Statistics (Mardia and Jupp, 1999) and on the
    Ulrich-Wood's algorithm for sampling.
    """
    # _rvMF is assumed to be defined elsewhere in the same module.
    warn('Not sure about sampling vMF, use with caution!!!! ')
    np.testing.assert_array_almost_equal(np.linalg.norm(theta), 1)
    assert kappa > 0, "kappa must be positive !"
    if np.ndim(theta) == 2:
        theta = np.squeeze(theta)
    assert np.ndim(theta) == 1, "theta should be one-dimensional!"

    res_sampling = _rvMF(size, kappa * theta)

    return np.vstack(res_sampling)
1e8d327b5613d9f2e5f77c26eab86d09d9d8338b
29,589
def fbx_mat_properties_from_texture(tex):
    """
    Returns a set of FBX material properties that are affected by the given texture.
    Quite obviously, this is a fuzzy and far-from-perfect mapping! Amounts of influence are completely lost, e.g.
    Note tex is actually expected to be a texture slot.
    """
    # Mapping Blender -> FBX (blend_use_name, blend_fact_name, fbx_name).
    blend_to_fbx = (
        # Lambert & Phong...
        ("diffuse", "diffuse", b"DiffuseFactor"),
        ("color_diffuse", "diffuse_color", b"DiffuseColor"),
        ("alpha", "alpha", b"TransparencyFactor"),
        ("diffuse", "diffuse", b"TransparentColor"),  # Uses diffuse color in Blender!
        ("emit", "emit", b"EmissiveFactor"),
        ("diffuse", "diffuse", b"EmissiveColor"),  # Uses diffuse color in Blender!
        ("ambient", "ambient", b"AmbientFactor"),
        # ("", "", b"AmbientColor"),  # World stuff in Blender, for now ignore...
        ("normal", "normal", b"NormalMap"),
        # Note: unsure about those... :/
        # ("", "", b"Bump"),
        # ("", "", b"BumpFactor"),
        # ("", "", b"DisplacementColor"),
        # ("", "", b"DisplacementFactor"),
        # Phong only.
        ("specular", "specular", b"SpecularFactor"),
        ("color_spec", "specular_color", b"SpecularColor"),
        # See Material template about those two!
        ("hardness", "hardness", b"Shininess"),
        ("hardness", "hardness", b"ShininessExponent"),
        ("mirror", "mirror", b"ReflectionColor"),
        ("raymir", "raymir", b"ReflectionFactor"),
    )

    tex_fbx_props = set()
    for use_map_name, name_factor, fbx_prop_name in blend_to_fbx:
        # Always export enabled textures, even if they have a null influence...
        if getattr(tex, "use_map_" + use_map_name):
            tex_fbx_props.add(fbx_prop_name)

    return tex_fbx_props
363c9f60084a55aa8d9c01c2f06d4d30d5e45993
29,590
import h5py
import numpy as np


def get_from_STEAD(key=None, h5file_path='/mnt/GPT_disk/DL_datasets/STEAD/waveforms.hdf5'):
    """
    Input: key, h5file_path
    Output: data, p_t, s_t
    """
    HDF5 = h5py.File(h5file_path, 'r')
    if key.split('_')[-1] == 'EV':
        dataset = HDF5.get('earthquake/local/' + str(key))
        p_t = int(dataset.attrs['p_arrival_sample'])
        s_t = int(dataset.attrs['s_arrival_sample'])
    elif key.split('_')[-1] == 'NO':
        dataset = HDF5.get('non_earthquake/noise/' + str(key))
        p_t = None
        s_t = None
    data = np.array(dataset).astype(np.float32)
    return data, p_t, s_t
9bab2db49eab81abe72cb27e86d3cdf787c4e902
29,591
import traceback def _safeFormat(formatter, o): """ Helper function for L{safe_repr} and L{safe_str}. """ try: return formatter(o) except: io = NativeStringIO() traceback.print_exc(file=io) className = _determineClassName(o) tbValue = io.getvalue() return "<%s instance at 0x%x with %s error:\n %s>" % ( className, id(o), formatter.__name__, tbValue)
610e8063fa91d211e749be829c2d562fa1b86ea6
29,592
import os

from django.core.exceptions import ImproperlyConfigured  # assumed source of this exception


def get_int(name, default):
    """
    Get an environment variable as an int.

    Args:
        name (str): An environment variable name
        default (int): The default value to use if the environment
            variable doesn't exist.

    Returns:
        int: The environment variable value parsed as an int
    """
    value = os.environ.get(name)
    if value is None:
        return default
    try:
        parsed_value = int(value)
    except ValueError as ex:
        raise ImproperlyConfigured("Expected value in {name}={value} to be an int".format(
            name=name,
            value=value,
        )) from ex
    return parsed_value
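Usage sketch (the variable names are illustrative):

import os

os.environ['WORKER_COUNT'] = '4'
print(get_int('WORKER_COUNT', 1))  # 4
print(get_int('MISSING_VAR', 1))   # 1, the default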
6adb80b14034c4561a5bae8fe9983e2165678a55
29,593
import datetime

import pytz


def now_func():
    """Return current datetime"""
    # Import the module rather than the class, so that the
    # 'datetime.datetime' reference below resolves correctly.
    func = get_now_func()
    dt = func()
    if isinstance(dt, datetime.datetime):
        if dt.tzinfo is None:
            return dt.replace(tzinfo=pytz.utc)
    return dt
c715be9fde2d245c79536d792b775678bc743aaa
29,594
import itertools def flatten_search_result(search_result): """ Converts all nested objects from the provided search result into non-nested `field->field-value` dicts. Raw values (such as memory size, timestamps and durations) are transformed into easy-to-read values. :param search_result: result to flatten :return: the flattened result """ return list( itertools.chain.from_iterable( map( lambda result_entry: transform_definition_result( definition_id=result_entry[0], definition_result=result_entry[1] ), search_result['definitions'].items() ) ) )
380b244bcee0d968532db512b6bf79cc062ef962
29,595
import os


def get_stretch_directory(sub_directory=''):
    """Returns path to stretch_user dir if HELLO_FLEET_PATH env var exists

    Parameters
    ----------
    sub_directory : str
        valid sub_directory within stretch_user/

    Returns
    -------
    str
        dirpath to stretch_user/ or dir within it if stretch_user/ exists, else /tmp
    """
    base_path = os.environ.get('HELLO_FLEET_PATH', None)
    if base_path is None:
        return '/tmp/'
    return base_path + '/' + sub_directory
0af8b46c160008750c62b4aada700ed46b87aff9
29,596
def xroot(x, mu): """The equation of which we must find the root.""" return -x + (mu * (-1 + mu + x))/abs(-1 + mu + x)**3 - ((-1 + mu)*(mu + x))/abs(mu + x)**3
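This is the collinear-point equation of the circular restricted three-body problem, with primaries at x = -mu and x = 1 - mu. A hedged root-finding sketch with scipy (the mass ratio and bracket are illustrative):

from scipy.optimize import brentq

mu = 0.0121505856  # approximate Earth-Moon mass ratio
x_l1 = brentq(xroot, -mu + 1e-6, 1 - mu - 1e-6, args=(mu,))
print(x_l1)  # about 0.837: the L1 point between the primaries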
5db07cc197f1bc4818c4591597099cd697576df2
29,597
import random def spliter(data_dict, ratio=[6, 1, 1], shuffle=True): """split dict dataset into train, valid and tests set Args: data_dict (dict): dataset in dict ratio (list): list of ratio for train, valid and tests split shuffle (bool): shuffle or not """ if len(ratio) != 3: raise ValueError(f'ratio must include three int numbers') train = {'x': list(), 'y': list()} valid = {'x': list(), 'y': list()} tests = {'x': list(), 'y': list()} for _, [samples, labels] in data_dict.items(): samples_lens = len(samples) train_ratio = round(samples_lens * (ratio[0] / sum(ratio))) tests_ratio = round(samples_lens * (ratio[2] / sum(ratio))) valid_ratio = samples_lens - train_ratio - tests_ratio data = list(zip(samples, labels)) if shuffle: random.shuffle(data) x, y = zip(*data) train['x'].extend(x[:train_ratio]) train['y'].extend(y[:train_ratio]) valid['x'].extend(x[train_ratio:train_ratio + valid_ratio]) valid['y'].extend(y[train_ratio:train_ratio + valid_ratio]) tests['x'].extend(x[-tests_ratio:]) tests['y'].extend(y[-tests_ratio:]) return train, valid, tests
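Usage sketch with a toy two-class dataset (8 samples per class):

data = {
    'class_a': [[f'a{i}' for i in range(8)], [0] * 8],
    'class_b': [[f'b{i}' for i in range(8)], [1] * 8],
}
train, valid, tests = spliter(data, ratio=[6, 1, 1], shuffle=False)
print(len(train['x']), len(valid['x']), len(tests['x']))  # 12 2 2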
793af274e3962d686f2ef56b34ae5bc0a53aac5b
29,598
import numpy as np
import scipy.signal


def smooth(x, window_len=None, window='flat', method='zeros'):
    """Smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.

    :param x: the input signal (numpy array)
    :param window_len: the dimension of the smoothing window; should be an odd integer
    :param window: the type of window from 'flat', 'hanning', 'hamming',
        'bartlett', 'blackman'
        flat window will produce a moving average smoothing.
    :param method: handling of border effects\n
        'zeros': zero padding on both ends (len(smooth(x)) = len(x))\n
        'reflect': pad reflected signal on both ends (same)\n
        'clip': pad signal on both ends with the last valid value (same)\n
        None: no handling of border effects
        (len(smooth(x)) = len(x) - len(window_len) + 1)
    """
    if window_len is None:
        return x
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming',"
                         "'bartlett', 'blackman'")
    if method == 'zeros':
        s = np.r_[np.zeros((window_len - 1) // 2), x, np.zeros(window_len // 2)]
    elif method == 'reflect':
        s = np.r_[x[(window_len - 1) // 2:0:-1], x, x[-1:-(window_len + 1) // 2:-1]]
    elif method == 'clip':
        s = np.r_[x[0] * np.ones((window_len - 1) // 2), x,
                  x[-1] * np.ones(window_len // 2)]
    else:
        s = x
    if window == 'flat':
        w = np.ones(window_len, 'd')
    else:
        w = getattr(np, window)(window_len)
    return scipy.signal.fftconvolve(w / w.sum(), s, mode='valid')
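Usage sketch (noise values are random, so output varies):

import numpy as np

t = np.linspace(0, 2 * np.pi, 200)
noisy = np.sin(t) + 0.2 * np.random.randn(200)
smoothed = smooth(noisy, window_len=11, window='hanning')
print(len(smoothed) == len(noisy))  # True with the default 'zeros' padding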
148c1f4b420ce825d3b658e90329dac7b9360c2c
29,599