content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
import numpy as np
import matplotlib.pyplot as plt


def estimate_performance_plan(sims, ntra, stateinit, destination, plan=None, plot=False, verbose=True):
    """
    Estimates the performance of a plan by simulating it on several scenarios.

    :param list sims: List of :class:`simulatorTLKT.Simulator`
    :param int ntra: Number of trajectories used to estimate the performances on each scenario
    :param list(int,float,float) stateinit: [t_index, lat, lon], starting point of the plan
    :param list(int,float,float) destination: [t_index, lat, lon], destination point of the plan
    :param list plan: list of actions to apply
    :param bool plot: if True, displays the mean trajectory per scenario
    :param bool verbose: if True, prints the results
    :return: mean arrival times (global mean first, then one per scenario) and the
        corresponding variances, so len(list) = len(sims) + 1
    :rtype: list(float), list(float)
    """
    if plan is None:
        plan = []

    ################### Arrival Time #############################
    meantrajs = []
    mean_arrival_times = []
    var_arrival_times = []
    all_arrival_times = []
    nb_actions = len(plan)

    for _, sim in enumerate(sims):
        arrivaltimes = []
        trajsofsim = np.zeros((ntra, len(sims[0].times), 3))

        for ii in range(ntra):
            traj = []
            sim.reset(stateinit)
            traj.append(list(sim.state))

            # Apply the plan's actions first.
            compte_action = 0
            while compte_action < nb_actions:
                action = plan[compte_action]
                compte_action += 1
                sim.doStep(action)
                traj.append(list(sim.state))

            # With an empty plan, take one step towards the destination before the loop.
            if nb_actions == 0:
                _, action = sim.getDistAndBearing(sim.state[1:], destination)
                sim.doStep(action)
                traj.append(list(sim.state))

            atDest, frac = Tree.is_state_at_dest(destination, sim.prevState, sim.state)

            # Head straight for the destination until arrival or a terminal state.
            while (not atDest) \
                    and (not Tree.is_state_terminal(sim, sim.state)):
                _, action = sim.getDistAndBearing(sim.state[1:], destination)
                sim.doStep(action)
                traj.append(list(sim.state))
                atDest, frac = Tree.is_state_at_dest(destination, sim.prevState, sim.state)

            if atDest:
                # Interpolate the arrival time between the last two time steps.
                finalTime = sim.times[sim.state[0]] - \
                    (1 - frac) * (sim.times[sim.state[0]] - sim.times[sim.state[0] - 1])
            else:
                finalTime = sim.times[-1]
            arrivaltimes.append(finalTime)
            all_arrival_times.append(finalTime)

            # Pad the trajectory with its final state, restore the time indices in
            # column 0, then copy the actual trajectory over the first len(traj) rows.
            trajsofsim[ii, :, :] = traj[-1]
            trajsofsim[ii, :, 0] = [i for i in range(len(sim.times))]
            trajsofsim[ii, :len(traj), :] = traj

        meantrajs.append(np.mean(trajsofsim, 0))
        average_scenario = np.mean(arrivaltimes)
        mean_arrival_times.append(average_scenario)

        variance_scenario = 0
        for value in arrivaltimes:
            variance_scenario += (average_scenario - value) ** 2
        variance_scenario = variance_scenario / ntra
        var_arrival_times.append(variance_scenario)

    global_mean_time = np.mean(all_arrival_times)
    variance_globale = 0
    for value in all_arrival_times:
        variance_globale += (global_mean_time - value) ** 2
    variance_globale = variance_globale / len(all_arrival_times)

    if plot:
        basemap_time = sims[0].prepareBaseMap(proj='aeqd', centerOfMap=stateinit[1:])
        plt.title('Mean trajectory for minimal travel time estimation')

        colors = plt.get_cmap("tab20")
        colors = colors.colors[:len(sims)]

        xd, yd = basemap_time(destination[2], destination[1])  # (lon, lat)
        xs, ys = basemap_time(stateinit[2], stateinit[1])

        basemap_time.scatter(xd, yd, zorder=0, c="red", s=100)
        plt.annotate("destination", (xd, yd))
        basemap_time.scatter(xs, ys, zorder=0, c="green", s=100)
        plt.annotate("start", (xs, ys))

        for ii, sim in enumerate(sims):
            sim.plotTraj(meantrajs[ii], basemap_time, color=colors[ii],
                         label="Scen. num : " + str(ii))
        plt.legend()

    if verbose:
        for nb in range(len(sims)):
            print("isochrone scenario time ", nb, " = ", mean_arrival_times[nb])
            print("isochrone scenario variance = ", var_arrival_times[nb])
            print()
        print("mean of isochrone times = ", global_mean_time)
        print("global variance of isochrone times = ", variance_globale)

    return [global_mean_time] + mean_arrival_times, [variance_globale] + var_arrival_times
274ebadafa7f7637e27a0f25a013171a0955d4ce
22,300
def xls_dslx_ir_impl(ctx, src, dep_src_list):
    """The implementation of the 'xls_dslx_ir' rule.

    Converts a DSLX source file to an IR file.

    Args:
      ctx: The current rule's context object.
      src: The source file.
      dep_src_list: A list of source file dependencies.

    Returns:
      DslxModuleInfo provider
      ConvIRInfo provider
      DefaultInfo provider
    """
    ir_file = _convert_to_ir(ctx, src, dep_src_list)
    dslx_module_info = ctx.attr.dep[DslxModuleInfo]
    return [
        dslx_module_info,
        ConvIRInfo(
            dslx_source_file = src,
            conv_ir_file = ir_file,
        ),
        DefaultInfo(files = depset([ir_file])),
    ]
119112184086ccb469157eae1b17e1a0f38b57ef
22,301
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf


def split_data(images, labels):
    """
    Split data into training (80%), validation (10%), and testing (10%) datasets.

    Returns (images_train, images_validate, images_test,
             labels_train, labels_validate, labels_test)

    Assumes that num_covid_points <= num_normal_points and num_virus_points
    """
    images, labels = shuffle_data_pair(images, labels)

    num_covid_points = sum(map(lambda label: label == 0, labels))

    # Calculate split
    num_test = int(num_covid_points * 0.1)
    num_covid_train = num_covid_points - num_test * 2
    num_other_train = int(num_covid_train * 1.1)

    # (train, validate, test) points added
    num_points_added = [
        [0, 0, 0],  # COVID-19
        [0, 0, 0],  # Viral pneumonia
        [0, 0, 0]   # Normal
    ]

    # Datasets
    images_train = []
    labels_train = []
    images_validate = []
    labels_validate = []
    images_test = []
    labels_test = []

    # Add images and labels to datasets
    notifier.send(" Adding images and labels to dataset...")
    # Enough of label added; initialized once, outside the loop, so the
    # early exit below can actually trigger
    completed_labels = [False, False, False]
    for i, label in enumerate(labels):
        print(f" Point: {i} / {len(labels)}")

        if all(completed_labels):
            break

        for j in range(3):
            # 0: COVID-19, 1: Viral pneumonia, 2: Normal
            if completed_labels[j]:
                continue

            if label == j:
                # Add training data
                can_add_training = False
                if j == 0:  # COVID-19
                    if num_points_added[j][0] < num_covid_train:
                        can_add_training = True
                        num_points_added[j][0] += 1
                elif num_points_added[j][0] < num_other_train:  # Not COVID-19
                    can_add_training = True
                    num_points_added[j][0] += 1

                if can_add_training:
                    images_train.append(images[i])
                    labels_train.append(labels[i])
                    break

                # Add validation data
                if num_points_added[j][1] < num_test:
                    num_points_added[j][1] += 1
                    images_validate.append(images[i])
                    labels_validate.append(labels[i])
                    break

                # Add testing data
                if num_points_added[j][2] < num_test:
                    num_points_added[j][2] += 1
                    images_test.append(images[i])
                    labels_test.append(labels[i])
                    break

                # Point couldn't be added anywhere: label is complete
                completed_labels[j] = True
                break

    # Shuffle all data
    notifier.send(" Shuffling data...")
    images_train, labels_train = shuffle_data_pair(images_train, labels_train)
    images_validate, labels_validate = shuffle_data_pair(images_validate, labels_validate)
    images_test, labels_test = shuffle_data_pair(images_test, labels_test)

    if PLOT_LABELS:
        # Plot data frequencies
        plt.hist(labels, bins=3)
        plt.title("Labels")

        plt.hist(labels_train, bins=3)
        plt.title("Train Labels")

        plt.hist(labels_validate, bins=3)
        plt.title("Validate Labels")

        plt.hist(labels_test, bins=3)
        plt.title("Test Labels")

        plt.show()

    # Make labels categorical
    notifier.send(" Making labels categorical: train...")
    labels_train = tf.keras.utils.to_categorical(labels_train)
    notifier.send(" Making labels categorical: validate...")
    labels_validate = tf.keras.utils.to_categorical(labels_validate)
    notifier.send(" Making labels categorical: test...")
    labels_test = tf.keras.utils.to_categorical(labels_test)

    notifier.send(" Converting data to NumPy arrays...")
    return \
        np.array(images_train), np.array(images_validate), np.array(images_test), \
        np.array(labels_train), np.array(labels_validate), np.array(labels_test)
87950ef842781abb8500961a11d997b254bde6af
22,302
import random


def randomlyInfectRegions(network, regions, age_groups, infected):
    """Randomly infect regions to initialize the random simulation

    :param network: object representing the network of populations
    :type network: A NetworkOfPopulation object
    :param regions: The number of regions to expose.
    :type regions: int
    :param age_groups: Age groups to infect
    :type age_groups: list
    :param infected: People to infect
    :type infected: int
    :return: Structure of initially infected regions with number
    :rtype: dict
    """
    infections = {}
    for regionID in random.choices(list(network.graph.nodes()), k=regions):
        infections[regionID] = {}
        for age in age_groups:
            infections[regionID][age] = infected
    return infections
213450bfbdba56a8671943905d6ac888a548c8aa
22,303
def timestamp_to_uint64(timestamp):
    """Convert timestamp to milliseconds since epoch."""
    return int(timestamp.timestamp() * 1e3)
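A quick usage sketch for the entry above (the datetime value is illustrative):

from datetime import datetime, timezone

# 2021-01-01T00:00:00Z is 1,609,459,200 seconds after the Unix epoch.
ts = datetime(2021, 1, 1, tzinfo=timezone.utc)
assert timestamp_to_uint64(ts) == 1609459200000  # milliseconds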
165df202cb5f8cee5792bfa5778114ea3e98fa65
22,304
from functools import wraps

# Module-level registries assumed by this decorator:
#   extensible_functions: list of names of functions marked extensible
#   extensions: dict mapping a function name to the handlers extending it


def extensible(x):
    """
    Enables a function to be extended by some other function.
    The function will get an attribute (extensible) which will return True.
    The function will also get a function (extendedby) which will return a list
    of all the functions that extend it.
    """
    extensible_functions.append(x.__name__)

    @wraps(x)
    def wrapper(*args, **kwargs):
        if x.__name__ in extensions:
            for f in extensions[x.__name__]:
                if not f.after:
                    f.func(*args, **kwargs)
        result = x(*args, **kwargs)
        if x.__name__ in extensions:
            for f in extensions[x.__name__]:
                if f.after:
                    f.func(*args, **kwargs)
        return result

    wrapper.extensible = True

    def extended_by():
        return extensions[x.__name__]

    wrapper.extendedby = extended_by
    return wrapper
a810e90e386441e8b223824c77ee452b4f7ff6d5
22,305
def _validate_user_deploy_steps(task, user_steps, error_prefix=None):
    """Validate the user-specified deploy steps.

    :param task: A TaskManager object
    :param user_steps: a list of deploy steps. A deploy step is a dictionary
        with required keys 'interface', 'step', 'args', and 'priority'::

              { 'interface': <driver_interface>,
                'step': <name_of_deploy_step>,
                'args': {<arg1>: <value1>, ..., <argn>: <valuen>},
                'priority': <priority_of_deploy_step> }

        For example::

              { 'interface': 'bios',
                'step': 'apply_configuration',
                'args': { 'settings': [ { 'foo': 'bar' } ] },
                'priority': 150 }

    :param error_prefix: String to use as a prefix for exception messages,
        or None.
    :raises: InvalidParameterValue if validation of deploy steps fails.
    :raises: InstanceDeployFailure if there was a problem getting the
        deploy steps from the driver.
    :return: validated deploy steps updated with information from the driver
    """
    driver_steps = _get_deployment_steps(task, enabled=False, sort=False)
    return _validate_user_steps(task, user_steps, driver_steps, 'deploy',
                                error_prefix=error_prefix)
58cf55b444c533ec96a86ad09b76ca9bc275f7dd
22,306
import numpy as np
import pandas as pd
from pandas._libs import lib
from pandas.core.dtypes.generic import ABCSeries


def is_period_arraylike(arr):
    """ return if we are period arraylike / PeriodIndex """
    if isinstance(arr, pd.PeriodIndex):
        return True
    elif isinstance(arr, (np.ndarray, ABCSeries)):
        return arr.dtype == object and lib.infer_dtype(arr) == 'period'
    return getattr(arr, 'inferred_type', None) == 'period'
f675f56dbca7ef80dc75bbe454a4f6e11a419c50
22,307
def reset_password_step_2(token):
    """Processing the second step of changing the password (password change)"""
    email = confirm_token_reset_password(token)
    if not email:
        return redirect(url_for('web_pages.reset_password_step_1'))
    form = EditPassword()
    if form.validate_on_submit():
        password = form.password.data
        session = create_session()
        user = session.query(User).filter(User.email == email).first()
        if not user:
            abort(404)
        user.set_password(password)
        session.merge(user)
        session.commit()
        flash('Password changed successfully', 'success')
        return redirect(url_for('web_pages.login_page'))
    return render_template('reset_password_step_2.html', form=form)
dcec97ba112ff96af4510488f801926190cfe221
22,308
def FStarTypeRole(typ, rawtext, text, lineno, inliner, options={}, content=[]):
    """An inline role to highlight F* types."""
    #pylint: disable=dangerous-default-value, unused-argument
    return nodes.literal(typ, rawtext, text, lineno, inliner,
                         options=options, content=content)
970ed43558e87a4319aed91c33d781fbe6a39d20
22,309
import numpy as np
import scipy.io as sio


def matobj2dict(matobj):
    """A recursive function which converts a nested mat object to nested python dictionaries

    Arguments:
        matobj {sio.matlab.mio5_params.mat_struct} -- nested mat object

    Returns:
        dict -- a nested dictionary
    """
    ndict = {}
    for fieldname in matobj._fieldnames:
        attr = matobj.__dict__[fieldname]
        if isinstance(attr, sio.matlab.mio5_params.mat_struct):
            ndict[fieldname] = matobj2dict(attr)
        elif isinstance(attr, np.ndarray) and fieldname == "move":
            for ind, val in np.ndenumerate(attr):
                ndict[
                    fieldname
                    + str(ind).replace(",", "").replace(")", "").replace("(", "_")
                ] = matobj2dict(val)
        elif fieldname == "skel":
            tree = []
            for ind in range(len(attr)):
                tree.append(matobj2dict(attr[ind]))
            ndict[fieldname] = tree
        else:
            ndict[fieldname] = attr
    return ndict
6b8413fd0c4dc9bb4e778944e7a6d4c260b56fa1
22,310
import io

import vt


def download_from_vt(client: vt.Client, file_hash: str) -> bytes:
    """
    Download file from VT.

    :param vt.Client client: the VT client
    :param str file_hash: the file hash
    :rtype: bytes
    :return: the downloaded data
    :raises ValueError: in case of any error
    """
    try:
        buffer = io.BytesIO()
        client.download_file(file_hash, buffer)
        buffer.seek(0, 0)
        return buffer.read()
    except (IOError, vt.APIError) as e:
        raise ValueError(str(e)) from e
055cd636d853d81921034d197bac9ad7a9c206c2
22,311
import torch


def divide_and_conquer(x, k, mul):
    """
    Divide and conquer method for polynomial expansion.

    x is a 2d tensor of size (n_classes, n_roots).
    The objective is to obtain the k first coefficients of the expanded polynomial.
    """
    to_merge = []

    while x[0].dim() > 1 and x[0].size(0) > 1:
        size = x[0].size(0)
        half = size // 2
        if 2 * half < size:
            to_merge.append([t[-1] for t in x])
        x = mul([t[:half] for t in x],
                [t[half: 2 * half] for t in x])

    for row in to_merge:
        x = mul(x, row)

    x = torch.cat(x)
    return x
64bdf2d50cf7cbf7da814b93521df5cee41623fe
22,312
from pyomo.environ import ConcreteModel, value

# solve_indexed_blocks and check_solve are assumed to be the project's solver helpers.


def calculate_operating_pressure(feed_state_block=None, over_pressure=0.15,
                                 water_recovery=0.5, NaCl_passage=0.01, solver=None):
    """
    Estimate operating pressure for an RO unit model given the following arguments:

        feed_state_block: the state block of the RO feed that has the non-pressure
            state variables initialized to their values (default=None)
        over_pressure: the amount of operating pressure above the brine osmotic
            pressure, represented as a fraction (default=0.15)
        water_recovery: the mass-based fraction of inlet H2O that becomes
            permeate (default=0.5)
        NaCl_passage: the mass-based fraction of inlet NaCl that becomes
            permeate (default=0.01)
        solver: solver object to be used (default=None)
    """
    t = ConcreteModel()  # create temporary model
    prop = feed_state_block.config.parameters
    t.brine = prop.build_state_block([0], default={})

    # specify state block
    t.brine[0].flow_mass_phase_comp['Liq', 'H2O'].fix(
        value(feed_state_block.flow_mass_phase_comp['Liq', 'H2O']) * (1 - water_recovery))
    t.brine[0].flow_mass_phase_comp['Liq', 'NaCl'].fix(
        value(feed_state_block.flow_mass_phase_comp['Liq', 'NaCl']) * (1 - NaCl_passage))
    t.brine[0].pressure.fix(101325)  # valid when osmotic pressure is independent of hydraulic pressure
    t.brine[0].temperature.fix(value(feed_state_block.temperature))

    # calculate osmotic pressure
    # since properties are created on demand, we must touch the property to create it
    t.brine[0].pressure_osm

    # solve state block
    results = solve_indexed_blocks(solver, [t.brine])
    check_solve(results)

    return value(t.brine[0].pressure_osm) * (1 + over_pressure)
2252910515ad6b6188c06bbf3add2a36b37da1ea
22,313
from bs4 import BeautifulSoup


def parse_pypi_index(text):
    """Parses the text and returns all the packages

    Parameters
    ----------
    text : str
        the html of the website (https://pypi.org/simple/)

    Returns
    -------
    List[str]
        the list of packages
    """
    soup = BeautifulSoup(text, "lxml")
    return [i.get_text() for i in soup.find_all("a")]
68d831aab69f3ffdd879ea1fa7ca5f28fc1b1e75
22,314
import os


def fetch(data_dir, dest="wmt14"):
    """
    Fetches most data from the WMT14 shared task.
    Creates the `dest` if it doesn't exist.

    Args:
        data_dir (str): absolute path to the dir where datasets are stored
        dest (str): name for dir where WMT14 datasets will be extracted

    Returns:
        final_dir (str): absolute path where WMT14 datasets were extracted
    """
    # Create folder
    wmt_dir = os.path.join(data_dir, dest)
    utils.create_folder(wmt_dir)

    # Download all datasets
    for f, url in CORPORA.items():
        utils.urlretrieve(url, os.path.join(wmt_dir, f))

    return wmt_dir
3bf699362fdb6c80427fe3b6fbd6450601187aca
22,315
def _get_score_measure(func, alphabeta, color, board, alpha, beta, depth, pid):
    """_get_score_measure
    """
    measure(pid)
    return _get_score(func, alphabeta, color, board, alpha, beta, depth, pid)
e36723d03c2ee686177ea3f8ce34874b250c2058
22,316
def mousePressed():
    """
    Return True if the mouse has been left-clicked since the last time
    mousePressed was called, and False otherwise.
    """
    global _mousePressed
    if _mousePressed:
        _mousePressed = False
        return True
    return False
37fd34e71ee7e9c4a671a5ba5a4a946a7441c0da
22,317
import tensorflow as tf


def variable_on_cpu(name, shape, initializer):
    """
    Next we concern ourselves with graph creation. However, before we do so we must
    introduce a utility function ``variable_on_cpu()`` used to create a variable in
    CPU memory.
    """
    # Use the /cpu:0 device for scoped operations
    with tf.device('/cpu:0'):
        # Create or get the appropriate variable
        var = tf.get_variable(name=name, shape=shape, initializer=initializer)
    return var
10e724f900d7c7334e81f3380fc4764ca935b284
22,318
from collections import defaultdict


def adaptive_generate_association_rules(patterns, confidence_threshold):
    """
    Given a set of frequent itemsets, return a dict of association rules
    in the form {(left): (right)}.
    It has a check against 2048 and thus will only retain multimodal rules.
    """
    missed = 0
    rules = defaultdict(set)
    for setn, support in patterns.items():
        if len(setn) > 1:
            itemset = list(setn)  # the itemset I with n elements
            for i in range(len(itemset) - 1, -1, -1):
                # the last pos is the inference item i for I->i;
                # every elem goes to the last once, the itemset remains sorted
                itemset[i], itemset[-1] = itemset[-1], itemset[i]
                setn_1 = tuple(itemset[:-1])
                if max(itemset[:-1]) < 2048 <= itemset[-1]:
                    if setn_1 in patterns:
                        confidence = patterns[setn] / patterns[setn_1]
                        if confidence >= confidence_threshold:
                            rules[setn_1].add(itemset[-1])
                    else:
                        missed += 1
                        print("missed", setn_1)
    print('%d freq missed.' % missed)
    return rules
35589916f91aab789a8d31559bcdbaca37bfdcd1
22,319
import subprocess


def cli():
    """
    Rebuild the docker container

    :return: Subprocess call result
    """
    cmd = "docker-compose down && docker-compose build"
    return subprocess.call(cmd, shell=True)
e9d0eec7fbe97efad859ef8da28486075d3daa80
22,320
from functools import partial
from typing import Collection, Union

import pandas as pd
from pandas import Series


def scored_ngrams(
    docs: Documents,
    n: int = 2,
    metric: str = "pmi",
    tokenizer: Tokenizer = DEFAULT_TOKENIZER,
    preprocessor: CallableOnStr = None,
    stopwords: Union[str, Collection[str]] = None,
    min_freq: int = 0,
    fuse_tuples: bool = False,
    sep: str = " ",
) -> Series:
    """Get Series of collocations and scores.

    Parameters
    ----------
    docs : str or iterable of str
        Documents to scan for ngrams.
    n : int, optional
        Size of collocations, by default 2.
    metric : str, optional
        Scoring metric to use. Valid options include:
        'raw_freq', 'pmi', 'mi_like', 'likelihood_ratio', 'jaccard',
        'poisson_stirling', 'chi_sq', 'student_t'. See nltk.BigramAssocMeasures,
        nltk.TrigramAssocMeasures, and nltk.QuadgramAssocMeasures for additional
        size-specific options.
    tokenizer : callable, optional
        Callable for tokenizing docs.
    preprocessor : callable, optional
        Callable for preprocessing docs before tokenization, by default None.
    stopwords : str or collection of str, optional
        Name of known stopwords set or collection of stopwords to remove from
        docs. By default None.
    min_freq : int, optional
        Drop ngrams below this frequency, by default 0.
    fuse_tuples : bool, optional
        Join ngram tuples with `sep`, by default False.
    sep : str, optional
        Separator to use for joining ngram tuples, by default " ".
        Only relevant if `fuse_tuples=True`.

    Returns
    -------
    Series
        Series {ngrams -> scores}.
    """
    _validate_strings(docs)

    # Get collocation finder and measures
    if not isinstance(n, int):
        raise TypeError(f"Expected `n` to be int, got {type(n)}.")
    if 1 < n < 5:
        n = int(n)
        finder = NGRAM_FINDERS[n]
        measures = NGRAM_METRICS[n]()
    else:
        raise ValueError(f"Valid `n` values are 2, 3, and 4. Got {n}.")

    pre_pipe = []
    if preprocessor is not None:
        # Apply preprocessing
        pre_pipe.append(preprocessor)
    # Tokenize
    pre_pipe.append(tokenizer)
    if stopwords is not None:
        # Fetch stopwords if passed str
        if isinstance(stopwords, str):
            stopwords = fetch_stopwords(stopwords)
        # Remove stopwords
        pre_pipe.append(partial(remove_stopwords, stopwords=stopwords))
    docs = chain_processors(docs, pre_pipe)

    # Find and score collocations
    ngrams = finder.from_documents(docs)
    ngrams.apply_freq_filter(min_freq)
    ngram_score = ngrams.score_ngrams(getattr(measures, metric))

    # Put the results in a DataFrame, squeeze into Series
    kind = {2: "bigram", 3: "trigram", 4: "quadgram"}[n]
    ngram_score = pd.DataFrame(ngram_score, columns=[kind, "score"])
    if fuse_tuples:
        # Join ngram tuples
        ngram_score[kind] = ngram_score[kind].str.join(sep)
    ngram_score.set_index(kind, inplace=True)
    if ngram_score.shape[0] > 1:
        ngram_score = ngram_score.squeeze()
    return ngram_score
a77b42eb1361c55cb23a1b168e99d4abb1ef9af1
22,321
import argparse


def parse_command_line_arguments() -> argparse.Namespace:
    """Specifies the command line parser and returns a
    :class:`argparse.Namespace` containing the arguments."""
    parser = argparse.ArgumentParser(
        description=f"supreme-pancake v{__version__}")
    parser.add_argument(
        "-c",
        "--credentials",
        action="store",
        help="Credential JSON file",
    )
    parser.add_argument(
        "-k",
        "--sheet-key",
        action="store",
        help="Google Sheet key",
    )
    parser.add_argument(
        "-l",
        "--logging-level",
        action="store",
        default="INFO",
        help='Logging level, either "DEBUG", "INFO", "WARNING", "ERROR", '
             'or "CRITICAL"',
    )
    parser.add_argument(
        "--one-shot",
        action="store_true",
        default=False,
        help="Runs all queries once and exit",
    )
    parser.add_argument(
        "-s",
        "--secret",
        action="append",
        default=[],
        help='Adds a secret. Example: "-s PASS=123456789". Can be used '
             'multiple times',
    )
    return parser.parse_args()
707493e5100145142426315f903b70d9cfdc0ea5
22,322
from io import BytesIO


def imurl(image_url, return_as_array=False, **kwargs):
    """
    Read image from url and convert to bytes or ndarray

    Parameters
    ----------
    image_url:
        http / https url of image
    return_as_array:
        Convert image directly to numpy array
        default: False
    kwargs:
        Keyword arguments of imread can be passed for image modification.
        Example:
            imurl(image_url, return_as_array=True, resize=(224, 224),
                  color_mode='rgb', dtype='float32')
        Note: kwargs only work with return_as_array=True

    Returns
    -------
    PIL Image by default.
    If return_as_array is True, the image will be returned as a numpy array.
    Additional params like resize, color_mode, dtype, return_original can also
    be passed in order to refine the image.

    Raises
    ------
    ImportError if the requests library is not installed
    """
    if request_image is None:
        raise ImportError('requests library is required for reading an image from a url. '
                          'Install it using pip install requests')
    if not image_url.startswith('http'):
        raise ValueError(f'invalid url found. Required http or https url but got {image_url} instead')
    image_response = request_image.get(image_url)
    imbytes = BytesIO(image_response.content)
    if return_as_array:
        return imread(imbytes, **kwargs)
    image = pilimage.open(imbytes)
    return image
c6c93ab7a2b97b522bca2d6673bfd843fdc8bb72
22,323
def generate_command(config, work_dir, output_analysis_id_dir, errors, warnings):
    """Build the main command line command to run.

    Args:
        config (GearToolkitContext.config): run-time options from config.json
        work_dir (path): scratch directory where non-saved files can be put
        output_analysis_id_dir (path): directory where output will be saved
        errors (list of str): error messages
        warnings (list of str): warning messages

    Returns:
        cmd (list of str): command to execute
    """
    # start with the command itself:
    cmd = [
        BIDS_APP,
        str(work_dir / "bids"),
        str(output_analysis_id_dir),
        ANALYSIS_LEVEL,
    ]  # 3 positional args: bids path, output dir, 'participant'
    # This should be done here in case there are nargs='*' arguments
    # These follow the BIDS Apps definition (https://github.com/BIDS-Apps)

    # editme: add any positional arguments that the command needs

    # get parameters to pass to the command by skipping gear config parameters
    # (which start with "gear-").
    command_parameters = {}
    for key, val in config.items():
        # these arguments are passed directly to the command as is
        if key == "bids_app_args":
            bids_app_args = val.split(" ")
            for baa in bids_app_args:
                cmd.append(baa)
        elif not key.startswith("gear-"):
            command_parameters[key] = val

    # editme: Validate the command parameter dictionary - make sure everything is
    # ready to run so errors will appear before launching the actual gear
    # code. Add descriptions of problems to errors & warnings lists.
    # print("command_parameters:", json.dumps(command_parameters, indent=4))
    if "bad_arg" in cmd:
        errors.append("A bad argument was found in the config.")
    num_things = command_parameters.get("num-things")
    if num_things and num_things > 41:
        warnings.append(
            f"The num-things config value should not be > 41. It is {command_parameters['num-things']}."
        )

    cmd = build_command_list(cmd, command_parameters)

    # editme: fix --verbose argparse argument
    for ii, cc in enumerate(cmd):
        if cc.startswith("--verbose"):
            # handle a 'count' argparse argument where manifest gives
            # enumerated possibilities like v, vv, or vvv
            # e.g. replace "--verbose=vvv" with "-vvv"
            cmd[ii] = "-" + cc.split("=")[1]
        elif " " in cc:
            # then it is a space-separated list so take out "="
            # this allows argparse "nargs" to work properly
            cmd[ii] = cc.replace("=", " ")

    log.info("command is: %s", str(cmd))

    return cmd
bb24ff62f3c4fa579eedf721708e84bf4cf3920c
22,324
def ip(
    context,
    api_client,
    api_key,
    input_file,
    output_file,
    output_format,
    verbose,
    ip_address,
):
    """Query GreyNoise for all information on a given IP."""
    ip_addresses = get_ip_addresses(context, input_file, ip_address)
    results = [api_client.ip(ip_address=ip_address) for ip_address in ip_addresses]
    return results
b4c52e1bb1abb03679b977d4b15f5e0295c1e0c2
22,325
import numpy as np


def get_layer(neurons, neuron_loc, depth=None, return_closest: bool = False):
    """Obtain the layer of neurons corresponding to layer number or specific depth."""
    layers = np.unique(neuron_loc[2, :])
    if depth is not None:
        if depth in layers:
            pass
        elif return_closest:
            depth = layers[np.argmin(np.abs(layers - depth))]
        else:
            raise Exception('Provided depth does not correspond to layer.')
    neuron_mask = neuron_loc[2, :] == depth
    return neurons[:, neuron_mask]
d221d294bbe974554b0180ea9d41394294de41dc
22,326
def _format_unpack_code_level(message, signal_names, variable_lines, helper_kinds):
    """Format one unpack level in a signal tree.
    """
    body_lines = []
    muxes_lines = []

    for signal_name in signal_names:
        if isinstance(signal_name, dict):
            mux_lines = _format_unpack_code_mux(message,
                                                signal_name,
                                                body_lines,
                                                variable_lines,
                                                helper_kinds)

            if muxes_lines:
                muxes_lines.append('')

            muxes_lines += mux_lines
        else:
            _format_unpack_code_signal(message,
                                       signal_name,
                                       body_lines,
                                       variable_lines,
                                       helper_kinds)

    if body_lines:
        if body_lines[-1] != '':
            body_lines.append('')

    if muxes_lines:
        muxes_lines.append('')

    body_lines = body_lines + muxes_lines

    if body_lines:
        body_lines = [''] + body_lines

    return body_lines
b88362f6fd3cb5ccaf3a3f76472f2002ac9c1518
22,327
import select
import socket
from sys import stdout


def find_phones():
    """
    This function broadcasts on the LAN to the Shexter ports, and looks for a
    reply from a phone.

    :return: (IP, Port) tuple representing the phone the user selects.
        None if no phone found.
    """
    sock_sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock_sender.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

    # IP, Port tuple representing the phone
    phone = None
    rejected_hosts = []

    broadcast_addrs = _get_broadcast_addrs()
    if not broadcast_addrs:
        print('There was a problem running the phone finder. You will have to configure manually.')
        return None

    print('Ready to search for phones.')
    manual = input('Press Enter when the app is open on your phone, or type "m" to skip to manual configuration.\n')
    if manual.lower() == 'm':
        return None

    for port in range(PORT_MIN, PORT_MAX + 1):
        count = 0
        # Search more on the earlier ports which are much more likely to be the right one
        #if port == PORT_MIN:
        #    tries = 4
        #else:
        #    tries = 2
        tries = 2
        print('Searching on port ' + str(port), end="")
        while not phone and count < tries:
            count += 1
            print('.', end='')
            stdout.flush()

            # Send on ALL the interfaces (required by Windows!)
            for broadcast_addr in broadcast_addrs:
                #print('\nbroadcasting on ' + broadcast_addr + ' to ' + str(port))
                discover_bytes = bytes(DISCOVER_REQUEST, ENCODING)
                sock_sender.sendto(discover_bytes, (broadcast_addr, port))

            sock_recvr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock_recvr.bind(('', port))

            # Wait for phone to respond
            # I don't know what an appropriate timeout for this would be - shorter
            # is better but how short is too short?
            ready = select.select([sock_recvr], [], [sock_sender, sock_recvr], 0.25)
            if ready[0]:
                # Buffsize must match ConnectionInitThread.BUFFSIZE
                data, other_host = sock_recvr.recvfrom(256)
                data = data.decode(ENCODING).rstrip(' \0')
                if not data.startswith(DISCOVER_CONFIRM):
                    print('Received a strange response: ' + data)
                    continue

                # Skip over rejected hosts
                if not other_host[0] in rejected_hosts:
                    print()
                    print('Got a response from ' + str(other_host))
                    # The first line of the response is a confirm, the second is
                    # phone info, the third is port#.
                    # Print out the phone info received, and get the user to confirm.
                    print('Phone info: ' + data.splitlines()[1])
                    confirm = input('Is this your phone? y/N: ')
                    if confirm.lower() == 'y':
                        # Get the port the TCP socket is listening on from the third line of the request
                        tcp_port_str = data.splitlines()[2]
                        # Convert to an int
                        tcp_port = port_str_to_int(tcp_port_str)
                        if not tcp_port:
                            # Cannot recover from this; it's a server bug. Manual config is the only workaround.
                            print('Received invalid port "{}" from phone; cannot continue.'.format(tcp_port_str))
                            return None
                        return other_host[0], tcp_port
                    else:
                        rejected_hosts.append(other_host[0])
            if ready[2]:
                print('There was an error selecting ' + str(ready[2]))
            sock_recvr.close()
        print()
    return None
8d449adf26de552087ae303f76caab1f338444e1
22,328
from pyspark.sql.types import (
    BinaryType,
    LongType,
    StringType,
    StructField,
    StructType,
)


def fileGDB_schema() -> StructType:
    """Schema for dummy FileGDB."""
    return StructType(
        [
            StructField("id", LongType()),
            StructField("category", StringType()),
            StructField("geometry", BinaryType()),
        ]
    )
0ef7ad136d64f19e392bb8a9ff471478094193fe
22,329
def set_atom_stereo_parities(sgr, atm_par_dct):
    """ set atom parities
    """
    atm_dct = mdict.set_by_key_by_position(atoms(sgr), atm_par_dct, ATM_STE_PAR_POS)
    return _create.from_atoms_and_bonds(atm_dct, bonds(sgr))
1e733291ce12e614b538054c2c05fc3892ce3206
22,330
def clean(expr):
    """
    cleans up an expression string

    Arguments:
        expr: string, expression
    """
    expr = expr.replace("^", "**")
    return expr
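A minimal usage sketch of the entry above:

assert clean("x^2 + y^2") == "x**2 + y**2"  # caret exponent becomes Python's **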
f7c990146094c43d256fe15f9543a0ba90877ee3
22,331
def atom_stereo_keys(sgr):
    """ keys to atom stereo-centers
    """
    atm_ste_keys = dict_.keys_by_value(_atom_stereo_parities(sgr),
                                       lambda x: x in [True, False])
    return atm_ste_keys
c084c30f4601d18941d98c313d3a74b93153cd80
22,332
def get_node_rd(graph, k=3):
    """
    Get k nodes to defend based on Recalculated Degree (RD) Removal
    :cite:`holme2002attack`.

    :param graph: an undirected NetworkX graph
    :param k: number of nodes to defend
    :return: a list of nodes to defend
    """
    return get_node_rd_attack(graph, k)
dbbf501353133a1cb222f6d2d4f632faa07bad1c
22,333
def get_frog():
    """Returns the interface object to frog NLP. (There should only be one
    instance, because it spawns a frog process that consumes a lot of RAM.)
    """
    global FROG
    if FROG is None:
        FROG = frog.Frog(frog.FrogOptions(
            tok=True, lemma=True, morph=False, daringmorph=False, mwu=True,
            chunking=False, ner=False, parser=False
        ), "/home/rahiel/hortiradar/venv/share/frog/nld/frog.cfg")
    return FROG
5701b2856532241d797eb77d9734fd67ee838312
22,334
import requests
from parsel import Selector  # assumed: the DOM Selector used here matches parsel's API


def fetch(url: str, **kwargs) -> Selector:
    """
    Send HTTP request and parse it as a DOM selector.

    Args:
        url (str): The url of the site.

    Returns:
        Selector: allows you to select parts of HTML text using CSS or XPath expressions.
    """
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    # 'encoding' is not a requests.get() kwarg, so pop it before the request
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
    try:
        res = requests.get(url, **kwargs)
        res.encoding = encoding
        res.raise_for_status()
    except requests.RequestException as e:
        print(e)
    else:
        html = res.text
        tree = Selector(text=html)
        return tree
f5bbe41f3b7bc83d0092d0b2165681df096413d1
22,335
import math

import numpy as np


def growth(x, a, b):
    """
    Growth model. a is the value at t=0. b is the so-called R number.
    Doesn't work. FIX IT
    """
    return np.power(a * 0.5, (x / (4 * (math.log(0.5) / math.log(b)))))
6276fd00f270ef72f52ed7493f431dd0e3b34326
22,336
from datetime import datetime

import pytz


def __to_localdatetime(val):
    """Convert val into a local datetime for tz Europe/Amsterdam."""
    try:
        # "timestamp": "2019-02-03T19:20:00",
        dt = datetime.strptime(val, __DATE_FORMAT)
        dt = pytz.timezone(__TIMEZONE).localize(dt)
        return dt
    except (ValueError, TypeError):
        return None
e2eea5da625a3514b6872e5604336d5dfb6f0ccb
22,337
import warnings

import numpy as np


def imgMinMaxScaler(img, scale_range):
    """
    :param img: image to be rescaled
    :param scale_range: (tuple) (min, max) of the desired rescaling
    """
    warnings.filterwarnings("ignore")
    img = img.astype("float64")
    img_std = (img - np.min(img)) / (np.max(img) - np.min(img))
    img_scaled = img_std * float(scale_range[1] - scale_range[0]) + float(scale_range[0])
    # round to the closest integer and convert to unsigned 8-bit
    img_scaled = np.rint(img_scaled).astype("uint8")
    return img_scaled
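A quick check of the entry above with an illustrative 2x2 input:

import numpy as np

img = np.array([[0.0, 0.5], [1.0, 0.25]])
print(imgMinMaxScaler(img, (0, 255)))
# [[  0 128]
#  [255  64]]   (the minimum maps to 0 and the maximum to 255)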
f55795167f6a284ea81609413edc73c1336a2a5e
22,338
import gpflow
import tensorflow as tf
from gpflow import covariances as cov  # assumed: module providing the Kuu/Kuf dispatchers


def conditional_vff(Xnew, inducing_variable, kernel, f, *, full_cov=False,
                    full_output_cov=False, q_sqrt=None, white=False):
    """
    - Xnew are the points of the data or minibatch, size N x D (tf.array, 2d)
    - feat is an instance of features.InducingFeature that provides `Kuu` and `Kuf`
      methods for Fourier features; this contains the limits of the bounding box
      and the frequencies
    - f is the value (or mean value) of the features (i.e. the weights)
    - q_sqrt (default None) is the Cholesky factor of the uncertainty about f
      (to be propagated through the conditional as per the GPflow inducing-point
      implementation)
    - white (defaults False) specifies whether the whitening has been applied

    Given the GP represented by the inducing points specified in `feat`, produce
    the mean and (co-)variance of the GP at the points Xnew.

       Xnew :: N x D
       Kuu :: M x M
       Kuf :: M x N
       f :: M x K, K = 1
       q_sqrt :: K x M x M, with K = 1
    """
    if full_output_cov:
        raise NotImplementedError

    # num_data = tf.shape(Xnew)[0]  # M
    num_func = tf.shape(f)[1]  # K

    Kuu = cov.Kuu(inducing_variable, kernel)  # this is now a LinearOperator
    Kuf = cov.Kuf(inducing_variable, kernel, Xnew)  # still a Tensor

    KuuInv_Kuf = Kuu.solve(Kuf)

    # compute the covariance due to the conditioning
    if full_cov:
        fvar = kernel(Xnew) - tf.matmul(Kuf, KuuInv_Kuf, transpose_a=True)
        shape = (num_func, 1, 1)
    else:
        KufT_KuuInv_Kuf_diag = tf.reduce_sum(Kuf * KuuInv_Kuf, axis=-2)
        fvar = kernel(Xnew, full=False) - KufT_KuuInv_Kuf_diag
        shape = (num_func, 1)
    fvar = tf.expand_dims(fvar, 0) * tf.ones(shape, dtype=gpflow.default_float())  # K x N x N or K x N

    # another backsubstitution in the unwhitened case
    if white:
        raise NotImplementedError

    A = KuuInv_Kuf

    # construct the conditional mean
    fmean = tf.matmul(A, f, transpose_a=True)

    if q_sqrt is not None:
        if q_sqrt.get_shape().ndims == 2:
            # LTA = A * tf.expand_dims(q_sqrt, 2)  # K x M x N  # won't work; make ticket for this?
            raise NotImplementedError
        elif q_sqrt.get_shape().ndims == 3:
            # L = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # K x M x M
            # A_tiled = tf.expand_dims(A.get(), 0) * tf.ones((num_func, 1, 1), dtype=float_type)  # K x M x N
            # LTA = tf.matmul(L, A_tiled, transpose_a=True)  # K x M x N
            # TODO the following won't work for K > 1
            assert q_sqrt.shape[0] == 1
            # LTA = (A.T @ DenseMatrix(q_sqrt[:,:,0])).T.get()[None, :, :]
            ATL = tf.matmul(A, q_sqrt, transpose_a=True)
        else:
            raise ValueError("Bad dimension for q_sqrt: %s" %
                             str(q_sqrt.get_shape().ndims))
        if full_cov:
            # fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True)  # K x N x N
            fvar = fvar + tf.matmul(ATL, ATL, transpose_b=True)  # K x N x N
        else:
            # fvar = fvar + tf.reduce_sum(tf.square(LTA), 1)  # K x N
            fvar = fvar + tf.reduce_sum(tf.square(ATL), 2)  # K x N
    fvar = tf.transpose(fvar)  # N x K or N x N x K

    return fmean, fvar
ad4f2b69f4ea0cfb59247cf9d360514dc180774e
22,339
def xor(text, key):
    """Returns the given string XORed with given key."""
    while len(key) < len(text):
        key += key
    key = key[:len(text)]
    return "".join(chr(ord(a) ^ ord(b)) for (a, b) in zip(text, key))
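A usage sketch of the entry above; XOR with the same key is its own inverse, so applying it twice recovers the plaintext:

msg = "attack at dawn"
enc = xor(msg, "key")
assert xor(enc, "key") == msg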
3cae903ef4751b2f39e0e5e28d448b8d079ce249
22,340
from pathlib import Path

import pandas as pd


def get_emojis_voc_counts(path):
    """
    Generate a value count of words for every emoji present in the csv files
    found in the child directories of "path"

    Args:
        path (str): parent path of the csv files

    Return:
        em2vocab [dict of dict]: a dict associating each word to its count,
        mapped for each emoji
    """
    path = Path(path)
    em2vocab = {}
    for csv_path in path.glob("**/[0-9]*.csv"):
        df = pd.read_csv(csv_path)
        emojis = [col for col in df.columns if col in EMOJIS]
        for em in emojis:
            vocab = em2vocab.get(em, {})
            for word, count in df[em].value_counts().items():
                pre_count = vocab.get(word, 0)
                pre_count += count
                vocab[word] = pre_count
            em2vocab[em] = vocab
    return em2vocab
b4525be35e191c84a9ea0d781d510f348724ff42
22,341
import asyncio
from unittest.mock import MagicMock, Mock, patch


async def test_camera_snapshot_connection_closed(driver):
    """Test camera snapshot when the other side closes the connection."""
    loop = MagicMock()
    transport = MagicMock()
    transport.is_closing = Mock(return_value=True)
    connections = {}

    async def _async_get_snapshot(*_):
        return b"fakesnap"

    acc = Accessory(driver, "TestAcc")
    acc.async_get_snapshot = _async_get_snapshot
    driver.add_accessory(acc)

    hap_proto = hap_protocol.HAPServerProtocol(loop, connections, driver)
    hap_proto.connection_made(transport)
    hap_proto.hap_crypto = MockHAPCrypto()
    hap_proto.handler.is_encrypted = True

    with patch.object(hap_proto.transport, "write") as writer:
        hap_proto.data_received(
            b'POST /resource HTTP/1.1\r\nHost: HASS\\032Bridge\\032BROZ\\0323BF435._hap._tcp.local\r\nContent-Length: 79\r\nContent-Type: application/hap+json\r\n\r\n{"image-height":360,"resource-type":"image","image-width":640,"aid":1411620844}'  # pylint: disable=line-too-long
        )
        hap_proto.close()
        await hap_proto.response.task
        await asyncio.sleep(0)

    assert writer.call_args_list == []

    hap_proto.close()
636f22f167d07699d7e591f74ae92ecde8f460c4
22,342
import numpy
import theano
from theano import config
from theano import tensor as T


def _as_scalar(res, dtype=None):
    """Return None or a TensorVariable whose type is in T.float_scalar_types"""
    if dtype is None:
        dtype = config.floatX
    if numpy.all(res.type.broadcastable):
        while res.owner and isinstance(res.owner.op, T.DimShuffle):
            res = res.owner.inputs[0]
        # may still have some number of True's
        if res.type.broadcastable:
            rval = res.dimshuffle()
        else:
            rval = res

        if rval.type.dtype[:3] in ('int', 'uin'):
            # We check that the upcast of res and dtype won't change dtype.
            # If dtype is float64, we will cast int64 to float64.
            # This is valid when res is a scalar used as input to a dot22
            # as the cast of the scalar can be done before or after the dot22
            # and this will give the same result.
            if theano.scalar.upcast(res.dtype, dtype) == dtype:
                return T.cast(rval, dtype)
            else:
                return None

        return rval
c5a8b6041a6eb160cec23f6957c9d9cc9147d4f7
22,343
import torch
import torch.distributed as dist
from torchvision import datasets, transforms


def partition_dataset():
    """ Partitioning MNIST """
    dataset = datasets.MNIST(
        './data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]))
    size = dist.get_world_size()
    bsz = 128 / float(size)
    partition_sizes = [1.0 / size for _ in range(size)]
    partition = DataPartitioner(dataset, partition_sizes)
    partition = partition.use(dist.get_rank())
    train_set = torch.utils.data.DataLoader(
        partition, batch_size=int(bsz), shuffle=True)
    return train_set, bsz
6922ae2cc80655d93eeee23d31f1224f172de1cc
22,344
import numpy as np


def add_size_to_nus(demo_graph, pop, time_left):
    """
    Adds either nu, or [nu0, growth_rate], where nu0 is the size at the
    beginning of the epoch. Uses time_left to set nu0 to the size at the
    beginning of the epoch.
    """
    if 'nu' in demo_graph.nodes[pop]:
        return demo_graph.nodes[pop]['nu']
    else:
        tt = demo_graph.nodes[pop]['T'] - time_left
        if 'nu0' in demo_graph.nodes[pop] and 'nuF' in demo_graph.nodes[pop]:
            growth_rate = np.log(demo_graph.nodes[pop]['nuF']
                                 / demo_graph.nodes[pop]['nu0']) / demo_graph.nodes[pop]['T']
            nu0 = demo_graph.nodes[pop]['nu0'] * np.exp(growth_rate * tt)
            return [nu0, growth_rate]
        elif 'growth_rate' in demo_graph.nodes[pop] and 'nuF' in demo_graph.nodes[pop]:
            growth_rate = demo_graph.nodes[pop]['growth_rate']
            nu0_pop = demo_graph.nodes[pop]['nuF'] * np.exp(-growth_rate * demo_graph.nodes[pop]['T'])
            nu0 = nu0_pop * np.exp(growth_rate * tt)
            return [nu0, growth_rate]
        elif 'growth_rate' in demo_graph.nodes[pop] and 'nu0' in demo_graph.nodes[pop]:
            nu0 = demo_graph.nodes[pop]['nu0'] * np.exp(demo_graph.nodes[pop]['growth_rate'] * tt)
            return [nu0, demo_graph.nodes[pop]['growth_rate']]
6e655b157389ca8672433b26baa1f2362f5dde34
22,345
import numpy as np


def _rand_lognormals(logs, sigma):
    """Mock-point"""
    return np.random.lognormal(mean=logs, sigma=sigma, size=logs.shape)
8fbf51e548293ff6c4dee8f385af69ecaaf34cde
22,346
import pywikibot


def add_start_end_qualifiers(statement, startVal, endVal):
    """Add start/end qualifiers to a statement if non-None, or return None.

    @param statement: The statement to decorate
    @type statement: WD.Statement
    @param startVal: An ISO date string for the starting point
    @type startVal: str, unicode, or None
    @param endVal: An ISO date string for the end point
    @type endVal: str, unicode, or None
    @return: A statement decorated with start/end qualifiers
    @rtype: WD.Statement, or None
    """
    if not isinstance(statement, WD.Statement):
        raise pywikibot.Error(u'Non-statement received: %s' % statement)
    if statement.isNone():
        return None

    # add qualifiers
    quals = []
    if startVal:
        quals.append(
            WD.Qualifier(
                P=START_P,
                itis=iso_to_WbTime(startVal)))
    if endVal:
        quals.append(
            WD.Qualifier(
                P=END_P,
                itis=iso_to_WbTime(endVal)))
    for q in quals:
        statement.addQualifier(q)
    return statement
9a87feff53aca00ce257a5d0b967621461a5d15a
22,347
def _CheckFilter(text):
    """Checks if a string could be a filter.

    @rtype: bool
    """
    return bool(frozenset(text) & FILTER_DETECTION_CHARS)
0d0dfed55df78ea6f49e4f615e9f7fe5758f9bc1
22,348
def listProxyServers():
    """return a list of proxy servers as a list of lists.
    E.g. [['nodename','proxyname'], ['nodename','proxyname']].

    Typical usage:
        for (nodename, proxyname) in listProxyServers():
            callSomething(nodename, proxyname)
    """
    return listServersOfType("PROXY_SERVER")
0e2ae4a874fa0ca030a04e694c7eacefde4f45f6
22,349
from functools import wraps


def api_version(func):
    """
    Decorator that validates the API version.

    :param func:
    :return:
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Validate the API version
        verify_result = verify_version(kwargs.get('version'))
        if not verify_result:
            # Raise an exception: the response has status code 400, message: api version is invalid
            raise ApiVersionException()
        return func(*args, **kwargs)
    return wrapper
2e73bc7899a4052004246c1e3392001507469c86
22,350
from typing import List, Union


def is_prefix(a: List[Union[int, str]], b: List[Union[int, str]]):
    """Check if `a` is a prefix of `b`."""
    if len(a) >= len(b):
        return False
    for i in range(len(a)):
        if a[i] != b[i]:
            return False
    return True
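A few illustrative checks of the entry above; note the test is for a proper prefix, so equal lists are not prefixes of each other:

assert is_prefix([1, 2], [1, 2, 3])
assert not is_prefix([1, 2, 3], [1, 2, 3])  # equal length fails the len() guard
assert not is_prefix([1, 9], [1, 2, 3])     # mismatched element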
4b0605af536aa5fa188cfca0cee62588fe41bf5d
22,351
import glob


def shm_data_find(ifo, ldr_type, start, stride, directory='.', verbose=False):
    """a routine to automate discovery of frames within /dev/shm
    """
    end = start + stride
    frames = []
    for frame in sorted(glob.glob(shm_glob_tmp % (directory, ifo, ifo, ldr_type))):
        s, d = utils.extract_start_dur(frame, suffix=".gwf")
        if (s <= end) and (s + d > start):  # there is some overlap!
            frames.append((frame, s, d))
    return frames
f4aba39ba77edf5d22cdaa0da16f888c26999512
22,352
from itertools import chain


def backward_inference(protocol, subsys_x, t_x, subsys_y, t_y, silent=True):
    """
    Backward inference answers the question: Given a measurement result of
    'subsys_y' at the end of the protocol, what can I say about the result an
    Agent would have received had she done a measurement of 'subsys_x' before
    running the protocol?
    """
    forward_mapping = forward_inference(protocol, subsys_x, t_x, subsys_y, t_y, silent)['table']
    output_vals = list(set(chain(*forward_mapping.values())))
    backward_mapping = {v: [] for v in output_vals}
    for inpt, possible_outputs in forward_mapping.items():
        for output in possible_outputs:
            backward_mapping[output] += [inpt]
    return InferenceTable(subsys_y, t_y, subsys_x, t_x, backward_mapping)
22e73ff5c4b90b535e9387cf71829bf88745a95d
22,353
import pcraster as pcr  # assumed: pcr is the PCRaster package


def rainfall_interception_hbv(Rainfall, PotEvaporation, Cmax, InterceptionStorage):
    """
    Returns:
        TF, Interception, IntEvap, InterceptionStorage
    """
    Interception = pcr.min(Rainfall, Cmax - InterceptionStorage)  #: Interception in mm/timestep
    InterceptionStorage = InterceptionStorage + Interception  #: Current interception storage
    TF = Rainfall - Interception
    IntEvap = pcr.min(InterceptionStorage, PotEvaporation)  #: Evaporation from interception storage
    InterceptionStorage = InterceptionStorage - IntEvap

    return TF, Interception, IntEvap, InterceptionStorage
0e95a1088a36d25d0d1210384a56945d0b032fda
22,354
import numpy as np

from rspn.learning.structure_learning import get_next_operation, learn_structure


def learn_mspn(
        data,
        ds_context,
        cols="rdc",
        rows="kmeans",
        min_instances_slice=200,
        threshold=0.3,
        max_sampling_threshold_cols=10000,
        max_sampling_threshold_rows=100000,
        ohe=False,
        leaves=None,
        memory=None,
        rand_gen=None,
        cpus=-1
):
    """
    Adapts normal learn_mspn to use custom identity leaves and use sampling
    for structure learning.

    :param max_sampling_threshold_rows:
    :param max_sampling_threshold_cols:
    :param data:
    :param ds_context:
    :param cols:
    :param rows:
    :param min_instances_slice:
    :param threshold:
    :param ohe:
    :param leaves:
    :param memory:
    :param rand_gen:
    :param cpus:
    :return:
    """
    if leaves is None:
        leaves = create_custom_leaf

    if rand_gen is None:
        rand_gen = np.random.RandomState(17)

    def l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe):
        split_cols, split_rows = get_splitting_functions(max_sampling_threshold_rows,
                                                         max_sampling_threshold_cols,
                                                         cols, rows, ohe, threshold,
                                                         rand_gen, cpus)
        nextop = get_next_operation(min_instances_slice)
        node = learn_structure(data, ds_context, split_rows, split_cols, leaves,
                               next_operation=nextop)
        return node

    if memory:
        l_mspn = memory.cache(l_mspn)

    spn = l_mspn(data, ds_context, cols, rows, min_instances_slice, threshold, ohe)
    return spn
6ac8117b4d448c89fe148c4c97828da4a09dc471
22,355
import os


def _sanitize_filename(dfile, no_symlink=True):
    """Check and sanitize 'dfile' for use as a target file.
    """
    dirname, basename = os.path.split(dfile)
    dirname = os.path.abspath(dirname)
    dfile = os.path.join(dirname, basename)
    if no_symlink:
        if os.path.islink(dfile):
            msg = ('{} is a symlink and will be changed into a regular file if '
                   'the compiler writes a compiled file to it')
            raise FileExistsError(msg.format(dfile))
        elif os.path.exists(dfile) and not os.path.isfile(dfile):
            msg = ('{} is a non-regular file and will be changed into a regular '
                   'one if the compiler writes a compiled file to it')
            raise FileExistsError(msg.format(dfile))
    os.makedirs(dirname, exist_ok=True)
    return dfile
f612bc587209ee21411fd5d132b2bf9285767f77
22,356
import matplotlib.pyplot as plt
from matplotlib import animation


def generate_image_anim(img, interval=200, save_path=None):
    """
    Given a CT img, return an animation across the axial slices.

    img: [D,H,W] or [D,H,W,3]
    interval: interval between each slice, default 200
    save_path: path to save the animation if not None, default None
    return: matplotlib.animation.Animation
    """
    fig = plt.figure()
    ims = []
    for i in range(len(img)):
        im = plt.imshow(img[i], animated=True)
        ims.append([im])
    anim = animation.ArtistAnimation(fig, ims, interval=interval, blit=True,
                                     repeat_delay=1000)
    if save_path:
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
        anim.save(save_path, writer=writer)
    return anim
90ebd9d0e21b58f75a2eca8623ac7a9d12b4a820
22,357
import json
import os
import subprocess
import sys


def get_routes(config, prefix=None, group_by=None):
    """Executes the helper script that extracts the routes out of the pyramid app."""
    python = sys.executable
    script = os.path.join(os.path.dirname(__file__), "extract.py")
    config = os.path.expanduser(config)
    args = [python, script, config]
    if group_by:
        args.append("--group=" + group_by)
    if prefix:
        args.append("--prefix=" + prefix)
    p = subprocess.Popen(args=args, stdout=subprocess.PIPE)
    (stdout, _) = p.communicate()
    return json.loads(stdout.decode("utf-8"))
b71d896f7b3fc23848899efc332d2e9008ff1f9c
22,358
def square_root(s):
    """ Function to compute square roots using the Babylonian method """
    x = s / 2
    while True:
        temp = x
        x = (1 / 2) * (x + (s / x))
        if temp == x:
            return x

# Since convergence is reached quickly, at some point the error drops below the
# machine precision and the value no longer changes from one step to the next.
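A quick check of the entry above:

print(square_root(81))   # 9.0
print(square_root(100))  # 10.0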
9af22ce073bcb8d131736efba6133a92d9d7dc74
22,359
def quisort(uslist, lo=None, hi=None):
    """Sort in-place an unsorted list or slice of a list.

    lo and hi correspond to the start and stop indices for the list slice."""
    if hi is None:
        hi = len(uslist) - 1
    if lo is None:
        lo = 0

    def partition(uslist, lo, hi):
        """Compare and swap values over list slice"""
        p = uslist[hi]
        i = lo - 1
        j = lo
        while j < hi:
            if uslist[j] <= p:
                i = i + 1
                uslist[i], uslist[j] = uslist[j], uslist[i]
            j += 1
        i += 1
        uslist[i], uslist[hi] = uslist[hi], uslist[i]
        return i

    if lo < hi:
        p = partition(uslist, lo, hi)
        quisort(uslist, lo, p - 1)
        quisort(uslist, p + 1, hi)
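A usage sketch of the entry above; the sort is in place and returns None:

data = [5, 3, 8, 1, 9, 2]
quisort(data)
print(data)  # [1, 2, 3, 5, 8, 9]

Sorting only a slice works too: quisort(data, lo=1, hi=3) orders just indices 1 through 3.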
a33adbe819ec1c60149e6d9a50ab78555f6021d5
22,360
from inspect import CO_GENERATOR


def is_generator(f):
    """Return True if a function is a generator."""
    isgen = (f.__code__.co_flags & CO_GENERATOR) != 0
    return isgen
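A quick check of the entry above; the flag test works on plain Python functions (they must have a __code__ attribute, so builtins are out of scope):

def countdown(n):
    while n:
        yield n
        n -= 1

def plain(n):
    return n

assert is_generator(countdown)
assert not is_generator(plain)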
239d0854e27a16d9e99102ff9c698086119b8e35
22,361
import torch
from torch.autograd import Variable
from scipy.spatial import ConvexHull


def reward(sample_solution, use_cuda=True, name='reward'):
    """
    Args:
        sample_solution seq_len of [batch_size]
    """
    '''
    if 'TSP' in name:
        batch_size = sample_solution[0].size(0)
        n = len(sample_solution)
        tour_len = Variable(torch.zeros([batch_size]))

        if use_cuda:
            tour_len = tour_len.cuda()

        for i in range(n - 1):
            distance = torch.norm(sample_solution[i] - sample_solution[i + 1], dim=1)
            tour_len += distance

        distance = torch.norm(sample_solution[n - 1] - sample_solution[0], dim=1)
        tour_len += distance
        reward = tour_len
    '''
    if 'CH' in name:
        batch_size = sample_solution[0].size(0)
        n = len(sample_solution)
        #print "batch_size batch_size batch_size"
        #print batch_size
        #print "n n n"
        #print n
        #tour_area = Variable(torch.zeros([batch_size]))
        vec_area = Variable(torch.zeros([batch_size]))
        #if use_cuda:
        #    area = area.cuda()
        for s in range(batch_size):
            points = []
            poly_area = 0
            for t in range(n):
                points.append(sample_solution[t][s].tolist())
                if t >= 2:
                    hull = ConvexHull(points)
                    poly_area = max(hull.area, poly_area)
            vec_area[s] = poly_area
        #for i in range(n - 1):
        #    area = torch.norm(sample_solution[i] - sample_solution[i + 1], dim=1)
        #    tour_area += area
        #area = torch.norm(sample_solution[n - 1] - sample_solution[0], dim=1)
        #tour_area += area
        #reward = tour_area
        reward = vec_area
    return reward
fed916437085d15b2c9c6a04486e43251c3b0422
22,362
import jpype


def addRegionEntry(Id: int, parentId: int, name: str, RegionType: RegionType, alias=''):
    """
    Add a custom address entry.

    :param Id: ID of the address
    :param parentId: parent ID of the address; must already exist
    :param name: name of the address
    :param RegionType: type of the address, a RegionType
    :param alias: alias of the address, default=''
    :return:
    """
    geocoding = jpype.JClass('io.patamon.geocoding.Geocoding')
    try:
        geocoding.addRegionEntry(Id, parentId, name, RegionType, alias)
        return True
    except Exception:
        return False
ba6c78842f847939f1a44b859156d15738adca58
22,363
def check_movement(pagination):
    """Check for ability to navigate backward or forward between pages."""
    pagination_movements = pagination.find_element_by_xpath(
        './/div[@class="search_pagination_right"]'
    ).find_elements_by_class_name("pagebtn")

    # Check for ability to move back
    try:
        move_back_a = pagination_movements[0]
        assert move_back_a.text == "<"
        can_move_back = True
        print("Can move back, ", end="")
    except Exception:
        can_move_back = False
        print("Can not move back, ", end="")

    # Check for ability to move forward
    try:
        move_forward_a = pagination_movements[-1]
        assert move_forward_a.text == ">"
        can_move_forward = True
        print("Can move forward")
    except Exception:
        can_move_forward = False
        print("Can not move forward, ", end="")

    return [can_move_back, can_move_forward]
37bb55ae4509f8bdc98d3bf52bbef4a4a1e5d600
22,364
def glint_correct_image(imarr, glintarr, nir_band=7):
    """
    Apply the sunglint removal algorithm from section III of Lyzenga et al.
    2006 to a multispectral image array.

    Parameters
    ----------
    imarr : numpy array (RxCxBands shape)
        The multispectral image array. See `OpticalRS.RasterDS` for more info.
    glintarr : numpy array
        A subset of `imarr` from an optically deep location with sun glint.
    nir_band : int (Default value = 7)
        The default `nir_band` value of 7 selects the NIR2 band in WorldView-2
        imagery. If you're working with a different type of imagery, you will
        need to figure out the appropriate value to use instead. This is a
        zero indexed number (the first band is 0, not 1).

    Returns
    -------
    numpy array
        A de-glinted copy of `imarr`.

    Notes
    -----
    This deglinting method may not work well on WorldView-2 imagery because
    the bands are not captured exactly concurrently. See section II B of
    Eugenio et al. 2015 [1]_ for more information and a different sunglint
    correction algorithm that may be more appropriate.

    References
    ----------
    .. [1] Eugenio, F., Marcello, J., Martin, J., 2015. High-Resolution Maps
       of Bathymetry and Benthic Habitats in Shallow-Water Environments Using
       Multispectral Remote Sensing Imagery. IEEE Transactions on Geoscience
       and Remote Sensing 53, 3539–3549. doi:10.1109/TGRS.2014.2377300
    """
    # calculate the covariance ratios
    cov_rats = cov_ratios(glintarr, nir_band)
    # get the NIR mean
    nirm = nir_mean(glintarr, nir_band)
    # we don't want to try to apply the correction to the NIR band
    nbands = imarr.shape[-1]
    bands = list(range(nbands))
    bands.remove(nir_band)
    outarr = imarr.copy()
    for i, band in enumerate(bands):
        outarr[:, :, band] = imarr[:, :, band] - cov_rats[i] * (imarr[:, :, nir_band] - nirm)
    # this will leave the NIR band unchanged
    return outarr
2982883b37fa2452b12311c62f4d0c404f1718f9
22,365
def get_named_game(id):
    """Get specific game from GB API."""
    query_uri = f"{GB_GAME_URL}{id}?format=json&api_key={API_KEY}"
    return query_for_goty(query_uri, expect_list=False, always_return_something=False)
4b4c7efeecace2d07b5ce7052cfa550d233a61bb
22,366
from datetime import datetime

import isoweek
import pytz


def isoweek_datetime(year, week, timezone='UTC', naive=False):
    """
    Returns a datetime matching the starting point of a specified ISO week in
    the specified timezone (default UTC). Returns a naive datetime in UTC if
    requested (default False).

    >>> isoweek_datetime(2017, 1)
    datetime.datetime(2017, 1, 2, 0, 0, tzinfo=<UTC>)
    >>> isoweek_datetime(2017, 1, 'Asia/Kolkata')
    datetime.datetime(2017, 1, 1, 18, 30, tzinfo=<UTC>)
    >>> isoweek_datetime(2017, 1, 'Asia/Kolkata', naive=True)
    datetime.datetime(2017, 1, 1, 18, 30)
    >>> isoweek_datetime(2008, 1, 'Asia/Kolkata')
    datetime.datetime(2007, 12, 30, 18, 30, tzinfo=<UTC>)
    """
    naivedt = datetime.combine(isoweek.Week(year, week).day(0), datetime.min.time())
    if isinstance(timezone, str):
        tz = pytz.timezone(timezone)
    else:
        tz = timezone
    dt = tz.localize(naivedt).astimezone(pytz.UTC)
    if naive:
        return dt.replace(tzinfo=None)
    else:
        return dt
d109d8ca0443b6454c7ab58a9482d5c52ec90799
22,367
def returned(n):
    """Generate a random walk and return True if the walker has returned to
    the origin after taking `n` steps.
    """
    ## `takei` yields lazily so we can short-circuit and avoid computing the rest of the walk
    for pos in randwalk() >> drop(1) >> takei(xrange(n - 1)):
        if pos == Origin:
            return True
    return False
6c501a58c6d2abe9d9fa76736fabf75f3f78dbd9
22,368
import numpy as np


def get_ego_as_agent(frame: np.ndarray) -> np.ndarray:
    """Get a valid agent with information from the AV. Ford Fusion extent is used.

    :param frame: The frame from which the Ego states are extracted
    :return: An agent numpy array of the Ego states
    """
    ego_agent = np.zeros(1, dtype=AGENT_DTYPE)
    ego_agent[0]["centroid"] = frame["ego_translation"][:2]
    ego_agent[0]["yaw"] = rotation33_as_yaw(frame["ego_rotation"])
    ego_agent[0]["extent"] = np.asarray((EGO_EXTENT_LENGTH, EGO_EXTENT_WIDTH, EGO_EXTENT_HEIGHT))
    return ego_agent
249ca88c8aa01c7f06c6acf2d8427ca158926603
22,369
import json
from os.path import join


def load_users(dir="private/users"):
    """load_users will load up all of the user json files in the dir."""
    files = get_files_in_dir(dir)
    users = {}
    for filename in files:
        user = {}
        filepath = join(dir, filename)
        with open(filepath) as file:
            try:
                user = json.load(file)
            except json.JSONDecodeError:
                print("Could not decode file {0}".format(filepath))
            except UnicodeDecodeError:
                print("Could not decode unicode in {0}".format(filepath))
        id = user.get("user_id")
        users[id] = user
    return users
e9181ff8f34a6c351f874649ec328d14b4ba2784
22,370
def _scale_annots_dict(annot, new_sz, ann_im_sz): """Scale annotations to the new_sz, provided the original ann_im_sz. :param annot: bounding box in dict format :param new_sz: new size of image (after linear transforms like resize) :param ann_im_sz: original size of image for which the bounding boxes were given. :return: """ d = {} for k, v in annot.items(): if k.startswith('x'): v_ = new_sz[0] * v / ann_im_sz[0] elif k.startswith('y'): v_ = new_sz[1] * v / ann_im_sz[1] else: # don't destroy other keys v_ = v d.update({k: v_}) return d
44a0f9bf0b1a9befbaea95fd6b6fd5d9440178a4
22,371
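A quick worked example for `_scale_annots_dict` above; the bounding-box key names are hypothetical, since the function only dispatches on the `x`/`y` key prefix.

# Hypothetical bbox keys; only the 'x'/'y' prefixes matter to the function.
annot = {'x_min': 10, 'y_min': 20, 'x_max': 50, 'y_max': 80, 'label': 'car'}
scaled = _scale_annots_dict(annot, new_sz=(200, 300), ann_im_sz=(100, 100))
print(scaled)
# {'x_min': 20.0, 'y_min': 60.0, 'x_max': 100.0, 'y_max': 240.0, 'label': 'car'}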
from typing import Any from typing import Tuple from typing import List import inspect def get_handlers_in_instance(inst: Any) -> Tuple[List[Handler], List[Handler]]: """Get all handlers from the members of an instance. Args: inst: Instance to get handlers from. Returns: 2-tuple containing the list of all registration and all subscription handlers. Raises: TypeError: If inst isn't an instance. """ if inspect.isclass(inst): raise TypeError("expected instance, not class. " "Please create an instance of your template class first") registrations = [] subscriptions = [] for _, value in inspect.getmembers(inst, callable): if inspect.ismethod(value): reg, sub = get_bound_handlers(value) else: reg, sub = get_handlers(value) if reg is not None: registrations.append(reg) if sub is not None: subscriptions.append(sub) return registrations, subscriptions
c4f268d06fba208ce2a40bac3700b2c43d394051
22,372
def django_op_to_flag(op): """ Converts a django admin operation string to the matching grainy permission flag Arguments: - op <str> Returns: - int """ return DJANGO_OP_TO_FLAG.get(op, 0)
6d221271d69db3ed923395b920ee7aba30b50bab
22,373
import numpy as np


def rgb2gray(images):
    """Convert RGB images to grayscale."""
    # Y' = 0.299 R + 0.587 G + 0.114 B
    # https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
    return np.dot(images[..., :3], [0.299, 0.587, 0.114])
f011345d43f49e1b7d625a4d379a72ec684cab00
22,374
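A small sanity check for `rgb2gray` above, on synthetic data; the `...` indexing means batched input works too.

import numpy as np

img = np.random.rand(4, 4, 3)        # one 4x4 RGB image
gray = rgb2gray(img)
assert gray.shape == (4, 4)

batch = np.random.rand(8, 4, 4, 3)   # a batch of 8 images
assert rgb2gray(batch).shape == (8, 4, 4)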
import os def wav2vec2_local(ckpt, *args, **kwargs): """ The model from local ckpt ckpt (str): PATH """ assert os.path.isfile(ckpt) return _UpstreamExpert(ckpt, *args, **kwargs)
a6e63598deccb99edfe798871e026fed7995381d
22,375
import numpy as np


def mIou(y_true, y_pred, n_classes):
    """
    Mean Intersect over Union metric. Computes the one versus all IoU for each
    class and returns the average. Classes that do not appear in the provided
    set are not counted in the average.

    Args:
        y_true (1D-array): True labels
        y_pred (1D-array): Predicted labels
        n_classes (int): Total number of classes

    Returns:
        mean IoU (float)
    """
    iou = 0
    n_observed = n_classes
    for i in range(n_classes):
        y_t = (np.array(y_true) == i).astype(int)
        y_p = (np.array(y_pred) == i).astype(int)

        inter = np.sum(y_t * y_p)
        union = np.sum((y_t + y_p > 0).astype(int))

        if union == 0:
            # class i appears in neither y_true nor y_pred: skip it
            n_observed -= 1
        else:
            iou += inter / union

    return iou / n_observed
aebb9a367f45172b999ddda8eb024371f3e0df3d
22,376
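A worked example for `mIou` above, small enough to verify by hand.

y_true = [0, 0, 1, 1, 2]
y_pred = [0, 1, 1, 1, 2]
# class 0: 1/2, class 1: 2/3, class 2: 1/1 -> (0.5 + 0.6667 + 1.0) / 3
print(mIou(y_true, y_pred, n_classes=3))  # ~0.7222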
import operator
from itertools import islice, tee


def drop_last(iterable, n=1):
    """Drops the last `n` items of iterable (lazily, without needing its length)."""
    t1, t2 = tee(iterable)
    return map(operator.itemgetter(0), zip(t1, islice(t2, n, None)))
edef599cc1697cd4d8f1e1df2d479e123945aa41
22,377
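Usage examples for `drop_last` above; the second tee'd iterator, advanced `n` items ahead, acts as a lookahead that ends the zip early.

print(list(drop_last([1, 2, 3, 4])))     # [1, 2, 3]
print(list(drop_last(range(10), n=3)))   # [0, 1, 2, 3, 4, 5, 6]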
import numpy as np


def density(height: float) -> float:
    """
    Returns the air density in slugs/ft^3 based on altitude
    Equations from https://www.grc.nasa.gov/www/k-12/rocket/atmos.html
    :param height: Altitude in feet
    :return: Density in slugs/ft^3
    """
    if height < 36152.0:
        # troposphere
        temp = 59 - 0.00356 * height
        p = 2116 * ((temp + 459.7) / 518.6) ** 5.256
    elif 36152 <= height < 82345:
        # lower stratosphere
        temp = -70
        p = 473.1 * np.exp(1.73 - 0.000048 * height)
    else:
        # upper stratosphere
        temp = -205.05 + 0.00164 * height
        p = 51.97 * ((temp + 459.7) / 389.98) ** -11.388
    rho = p / (1718 * (temp + 459.7))
    return rho
ec85f9384035808084a024eb5a374ecfe7a64a2f
22,378
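A spot check for `density` above: at sea level the model should land near the standard 0.002377 slugs/ft^3, and the other calls exercise the two stratosphere branches.

print(density(0.0))       # ~0.002377 (sea level)
print(density(36152.0))   # lower-stratosphere branch
print(density(100000.0))  # upper-stratosphere branch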
import logging
import os
from xml import sax
from xml.etree import ElementTree

import generateDS.generateDS


def parse(opts):
    """
    Entry point for XML Schema parsing into an OME Model.
    """
    # The following two statements are required to "prime" the generateDS
    # code and ensure we have reasonable namespace support.
    filenames = opts.args
    namespace = opts.namespace

    schemas = dict()

    logging.debug("Namespace: %s" % namespace)
    set_type_constants(namespace)
    generateDS.generateDS.XsdNameSpace = namespace
    logging.debug("Type map: %s" % opts.lang.type_map)

    parser = sax.make_parser()
    ch = XschemaHandler()
    parser.setContentHandler(ch)

    for filename in filenames:
        parser.parse(filename)
        schemaname = os.path.split(filename)[1]
        schemaname = os.path.splitext(schemaname)[0]
        schema = ElementTree.parse(filename)
        schemas[schemaname] = schema

    root = ch.getRoot()
    if root is None:
        raise ModelProcessingError(
            "No model objects found, have you set the correct namespace?")
    root.annotate()
    return OMEModel.process(ch, schemas, opts)
04e36d09693a0065e3284d43a517a9de977dab39
22,379
def has_paired_before() -> bool: """Simple check for whether a device has previously been paired. This does not verify that the pairing information is valid or up to date. The assumption being - if it's previously paired, then it has previously connected to the internet. """ identity = IdentityManager.get() return identity.uuid != ""
f43ddf1290fcb101f0a0ae3d0fb6eabc368113c2
22,380
def caller_linkedin(user_input: dict) -> dict: """ Call LinkedIn scraping methods to get info about found and potential subjects. Args: `user_input`: user input represented as a dictionary. Returns: `dict`: the dictionary with information about found or potential subjects. """ results_to_filter = {} linkedin_obj = LinkedinSearchSubjects(user_input) linkedin_obj.linkedin_search() linkedin_obj.linkedin_find_ids() linkedin_obj.linkedin_search_for_info() if linkedin_obj.found_subjects_info: results_to_filter["linkedin"] = {"found_subjects": linkedin_obj.found_subjects_info} else: results_to_filter["linkedin"] = { "potential_subjects_after_filtering": linkedin_obj.potential_subjects_info_after_filtering } return results_to_filter
abb6277e699efa184949faf2b5c6585734be2f53
22,381
def service_request_eqf(stub_response): """ Return a function to be used as the value matching a ServiceRequest in :class:`EQFDispatcher`. """ def resolve_service_request(service_request_intent): eff = concretize_service_request( authenticator=object(), log=object(), service_configs=make_service_configs(), throttler=lambda stype, method, tid: None, tenant_id='000000', service_request=service_request_intent) # "authenticate" eff = resolve_authenticate(eff) # make request return resolve_effect(eff, stub_response) return resolve_service_request
f2a052f975ad8c94a58de50c1eb8aaa563522ca1
22,382
def get_nearest_point_distance(points, wire1, wire2):
    """
    >>> get_nearest_point_distance([(0, 0), (158, -12), (146, 46), (155, 4), (155, 11)], [((0, 0), (75, 0)), ((75, 0), (75, -30)), ((75, -30), (158, -30)), ((158, -30), (158, 53)), ((158, 53), (146, 53)), ((146, 53), (146, 4)), ((146, 4), (217, 4)), ((217, 4), (217, 11)), ((217, 11), (145, 11))], [((0, 0), (0, 62)), ((0, 62), (66, 62)), ((66, 62), (66, 117)), ((66, 117), (100, 117)), ((100, 117), (100, 46)), ((100, 46), (155, 46)), ((155, 46), (155, -12)), ((155, -12), (238, -12))])
    610
    >>> get_nearest_point_distance([(0, 0), (107, 47), (124, 11), (157, 18), (107, 71), (107, 51)], [((0, 0), (98, 0)), ((98, 0), (98, 47)), ((98, 47), (124, 47)), ((124, 47), (124, -16)), ((124, -16), (157, -16)), ((157, -16), (157, 71)), ((157, 71), (95, 71)), ((95, 71), (95, 51)), ((95, 51), (128, 51)), ((128, 51), (128, 104)), ((128, 104), (179, 104))], [((0, 0), (0, 98)), ((0, 98), (91, 98)), ((91, 98), (91, 78)), ((91, 78), (107, 78)), ((107, 78), (107, 11)), ((107, 11), (147, 11)), ((147, 11), (147, 18)), ((147, 18), (162, 18)), ((162, 18), (162, 24)), ((162, 24), (169, 24))])
    410
    """
    def get_distance(point):
        # walk along both wires, accumulating each segment's full Manhattan
        # length until reaching the segment that contains `point`
        d = 0
        for wire in (wire1, wire2):
            for part in wire:
                intersection = get_intersection_point(part, (point, point))
                if not intersection:
                    d += abs(part[0][0] - part[1][0]) + abs(part[0][1] - part[1][1])
                else:
                    d += abs(part[0][0] - point[0]) + abs(part[0][1] - point[1])
                    break
        return d

    points.sort(key=get_distance)
    # points[0] is the shared origin (combined distance 0), so the nearest
    # real intersection is points[1]
    return get_distance(points[1])
917ae2370497ec4ea753daed89a2ee82724887fc
22,383
def strip_tokens(tokenized: str) -> str: """Replaces all tokens with the token's arguments.""" result = [] pos = 0 match = RX_TOKEN.search(tokenized, pos) while match: start, end = match.span() result.append(tokenized[pos:start]) result.append(match.groupdict()['argument']) pos = end match = RX_TOKEN.search(tokenized, pos) result.append(tokenized[pos:]) return ''.join(result)
b70c58ee45fc24e88269c99067a2f161b2b37e75
22,384
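`strip_tokens` above relies on a module-level `RX_TOKEN` regex with an `argument` named group that isn't shown here. A runnable sketch with a hypothetical `{{name:argument}}` token syntax:

import re

# Hypothetical token pattern: {{name:argument}} with a named 'argument' group.
RX_TOKEN = re.compile(r'\{\{(?P<name>\w+):(?P<argument>[^}]*)\}\}')

print(strip_tokens('Hello {{bold:world}} and {{link:friends}}!'))
# Hello world and friends!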
import numpy as _np


def circular(P=365, K=0.1, T=0, gamma=0, t=None):
    """
    circular() simulates the radial velocity signal of a planet in a
    circular orbit around a star. The algorithm needs improvements.

    Parameters:
        P = period in days
        K = semi-amplitude of the signal
        T = phase offset of the signal (radians)
        gamma = average velocity of the star
        t = time array

    Returns:
        t = time
        RV = rv signal generated
    """
    if t is None:
        raise ValueError('A time array t is required')
    RV = [K * _np.sin(2 * _np.pi * x / P - T) + gamma for x in t]  # m/s
    return t, RV
33d3ea97d21ce1a14b07d02216597fe8977b2400
22,385
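A usage sketch for `circular` above, on a synthetic sampling grid:

import numpy as np

t = np.linspace(0, 365, 200)                  # one year, 200 samples
t, rv = circular(P=365, K=0.1, gamma=5.0, t=t)
print(min(rv), max(rv))                       # ~4.9 to ~5.1 m/s around gamma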
def set(isamAppliance, dsc, check_mode=False, force=False):
    """
    Updating the tracing levels
    """
    check_value, warnings = _check(isamAppliance, dsc)
    if force is True or check_value is False:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True, warnings=warnings)
        else:
            return isamAppliance.invoke_put(
                "Updating the tracing levels",
                "/isam/cluster/tracing/v1",
                {
                    'dsc': dsc
                }, requires_model=requires_model)

    return isamAppliance.create_return_object(warnings=warnings)
1451ace2ed5ef6820e34ebb05d0879e3d5e3917b
22,386
import logging
import json
import tempfile
import os
import zipfile
import requests


def submit_annotations(ann_srv_url, annotations, send_zip=False):
    """
    Call the Annotation Storage Service to save annotations.

    :param ann_srv_url: URL of the annotation service where the annotations
                        will be stored.
    :param annotations: Annotations to append to the annotations Document.
    :param send_zip: indicates if the annotations should be sent in a zip file
    :type annotations: list
    """
    logger = logging.getLogger(__name__)
    logger.info("Submitting annotations to target %s", ann_srv_url)

    if not isinstance(annotations, list):
        raise InvalidAnnotationFormat("Annotations should be an object of type"
                                      " list")

    cur_try = 1
    max_tries = 5
    result = None
    payload = json.dumps({'common': {}, 'data': annotations})

    logger.debug("Upload URL is %s", ann_srv_url)
    logger.debug("Submitted data is %s", payload)

    files = None
    temp_text_file = None
    temp_zip_file = None
    if send_zip:
        headers = {'accept': 'application/json'}

        # creating annotations.txt file to be zipped
        temp_dir = tempfile.gettempdir()
        text_file_name = "annotations.txt"
        zip_file_name = "annotations.zip"
        temp_text_file = os.path.join(temp_dir, text_file_name)
        temp_zip_file = os.path.join(temp_dir, zip_file_name)

        with open(temp_text_file, 'w') as file_to_zip:
            file_to_zip.write(str(payload))

        # creating zipped file (the context manager closes it for us, so no
        # explicit close() is needed)
        with zipfile.ZipFile(temp_zip_file, "w",
                             compression=zipfile.ZIP_DEFLATED) as zippy:
            zippy.write(temp_text_file, text_file_name)

        # Opened for transport (HTTP POST via form-data, as bytes)
        opened_zipped_file = open(temp_zip_file, "rb")
        files = {"file": opened_zipped_file}
    else:
        headers = {'content-type': 'application/json',
                   'accept': 'application/json'}

    try:
        while cur_try <= max_tries and not result:
            logger.debug("Trying HTTP POST request %s/%s", cur_try, max_tries)
            try:
                if files is None:
                    result = requests.post(ann_srv_url,
                                           data=payload,
                                           timeout=TIMEOUT,
                                           headers=headers)
                else:
                    result = requests.post(ann_srv_url,
                                           files=files,
                                           timeout=TIMEOUT,
                                           headers=headers)
                if result.status_code not in [200, 201, 204]:
                    logger.error("Got following code : %s", result.status_code)
                    result.raise_for_status()
            except requests.exceptions.Timeout as error:
                # Handle timeout error separately
                if cur_try < max_tries:
                    cur_try += 1
                    logger.debug("Current try : %s", cur_try)
                    logger.warning("Timeout occurred while uploading document "
                                   "to %s. Retry (%s/%s)",
                                   ann_srv_url, cur_try, max_tries)
                    if files is not None:
                        # rewind the zip so the retry re-sends the whole file
                        files["file"].seek(0)
                else:
                    logger.error("Could not upload document to %s", ann_srv_url)
                    raise UploadError(error)
            except requests.exceptions.RequestException as error:
                logger.error("Could not upload document to %s", ann_srv_url)
                raise UploadError(error)
    finally:
        # Delete artifacts once, after all attempts, and close the transport
        # file handle (the original deleted them inside the retry loop and
        # never closed the handle)
        if files is not None:
            files["file"].close()
        if temp_text_file and os.path.exists(temp_text_file) and \
                os.path.isfile(temp_text_file):
            os.remove(temp_text_file)
        if temp_zip_file and os.path.exists(temp_zip_file) and \
                os.path.isfile(temp_zip_file):
            os.remove(temp_zip_file)
    return result
8b69af6ca4019c84a8d90df7390e763c143810d8
22,387
from astropy.table import Table
from astropy.time import Time


def parse_logfile(logfile):
    """
    Read iotime log entries from logfile

    Returns a Table with columns:
        function, duration, readwrite, filename, timestamp, datetime
    """
    rows = list()
    with open(logfile) as fx:
        for line in fx:
            row = parse(line)
            if row is not None:
                rows.append(row)

    timing = Table(rows=rows)
    timing['datetime'] = Time(timing['timestamp']).datetime
    return timing
8fb27648694df32e9035d0d24cae80f9ff9e654a
22,388
import numpy as np

# Note: the numba-jitted kernel `_histogram2d` and the fast `aminmax` helper
# are defined elsewhere in this module.


def histogram2d(x, y, n=10, range=None, density=False, keep_outliers=False, out=None):
    """2D histogram with uniform bins. Accelerated by numba

    x, y: array_like
        x and y coordinates of each point. x and y will be flattened
    n : scalar or (nx, ny)
        number of bins in x and y
    range : None or ((xmin,xmax),(ymin,ymax))
        range of bins. If any is None, the min/max is computed
    density : optional, bool
        if True, compute bin_count / (sample_count * bin_area)
    keep_outliers : optional, bool
        if True, add rows and columns to each edge of the histogram to count
        the outliers
    out : array_like, optional, shape = (nx, ny)
        Array to store output. Note that for compatibility with numpy's
        histogram2d, out is indexed out[x,y]. If keep_outliers is True, out
        must have shape (nx+2,ny+2)
    """
    x = np.asarray(x)
    y = np.asarray(y)
    if x.shape != y.shape:
        raise RuntimeError("x and y must be same shape")
    x = x.reshape(-1)
    y = y.reshape(-1)
    if range is None:
        xmin, xmax = None, None
        ymin, ymax = None, None
    else:
        xmin, xmax = range[0]
        ymin, ymax = range[1]
    if xmin is None or xmax is None:
        xmm = aminmax(x)
        if xmin is None:
            xmin = xmm[0]
        if xmax is None:
            xmax = xmm[1]
    if ymin is None or ymax is None:
        ymm = aminmax(y)
        if ymin is None:
            ymin = ymm[0]
        if ymax is None:
            ymax = ymm[1]
    if np.isscalar(n):
        nx, ny = n, n
    else:
        nx, ny = n
    if keep_outliers:
        out_shape = (nx + 2, ny + 2)
    else:
        out_shape = (nx, ny)
    if density:
        # 1 / (sample_count * bin_area)
        d = (nx * ny) / (len(x) * (xmax - xmin) * (ymax - ymin))
        if out is None:
            out = np.empty(out_shape, np.float64)
    else:
        d = 1
        if out is None:
            out = np.empty(out_shape, np.uint64)
    _histogram2d(out, x, y, nx, ny, xmin, xmax, ymin, ymax, d, keep_outliers)
    return out
8faf591e769630540345565ff3a34f33e83f70ce
22,389
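A hedged equivalence check for `histogram2d` above, assuming the module's numba kernel `_histogram2d` and `aminmax` helper are importable; for in-range data it should agree with numpy up to bin-edge conventions.

import numpy as np

pts = np.random.rand(10000, 2)
h = histogram2d(pts[:, 0], pts[:, 1], n=20, range=((0, 1), (0, 1)))
h_np, _, _ = np.histogram2d(pts[:, 0], pts[:, 1], bins=20, range=((0, 1), (0, 1)))
# expected to match np.histogram2d up to bin-edge conventions
print(np.abs(h.astype(float) - h_np).max())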
import os import inspect def createSplash(app): """Creates a splash screen object to show while the Window is loading. Return: SplashScreen object. """ uiDir = os.path.dirname(inspect.getfile(KrakenUI)) splashPixmap = QtGui.QPixmap() splashImgPath = os.path.join(uiDir, 'images', 'KrakenUI_Splash.png') splashPixmap.load(splashImgPath) splash = QtGui.QSplashScreen(splashPixmap, QtCore.Qt.WindowStaysOnTopHint) splash.setMask(splashPixmap.mask()) splash.showMessage("Loading Extensions...", QtCore.Qt.AlignBottom | QtCore.Qt.AlignLeft, QtCore.Qt.white) splash.show() app.processEvents() return splash
c3f4d33071e3a8b1d67d0a69801c706aab652714
22,390
def _anatomical_swaps(pd):
    """Return swap and flip arrays for data transform to anatomical

    use_hardcoded: no-brain implementation for 90deg rots
    """
    use_hardcoded = True  # hardcoded for 90deg rotations

    if use_hardcoded:
        if not _check90deg(pd):
            raise Exception('Not implemented')
        ori = pd['orient']
        if ori == 'trans':
            swap = [0, 2, 1]
            flip = [1, 2]
        elif ori == 'trans90':
            swap = [2, 0, 1]
            flip = [0]
        elif ori == 'sag':
            swap = [2, 1, 0]
            flip = [1, 2]
        elif ori == 'sag90':
            swap = [1, 2, 0]
            flip = [0]
        elif ori == 'cor':
            swap = [0, 1, 2]
            flip = [1]
        elif ori == 'cor90':
            swap = [1, 0, 2]
            flip = []
        else:
            # the original fell through silently here, raising an
            # UnboundLocalError on the return below
            raise ValueError('Unknown orientation: {}'.format(ori))
        return swap, flip
    else:
        # with rot matrix
        rot_matrix = vj.core.niftitools._qform_rot_matrix(pd)
        inv = np.linalg.inv(rot_matrix).astype(int)
        swap = inv.dot(np.array([1, 2, 3], dtype=int))
        flipaxes = []
        for num, i in enumerate(swap):
            if i < 0:
                flipaxes.append(num)
        swapaxes = (np.abs(swap) - 1).astype(int)
        return swapaxes, flipaxes
2b958571597b72ca38de1310ca9a9e6a2caa69ac
22,391
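The swap/flip pair returned by `_anatomical_swaps` above is typically consumed with `transpose` and `flip`; a sketch, assuming a minimal `pd` dict and that the module's `_check90deg` check passes:

import numpy as np

swap, flip = _anatomical_swaps({'orient': 'trans'})   # -> [0, 2, 1], [1, 2]
data = np.zeros((64, 64, 32))                         # stand-in volume
anat = np.flip(np.transpose(data, axes=swap), axis=flip)
print(anat.shape)  # (64, 32, 64)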
from typing import Dict, List, Tuple

from pyspark.sql import DataFrame


def block_variants_and_samples(variant_df: DataFrame, sample_ids: List[str],
                               variants_per_block: int,
                               sample_block_count: int) -> Tuple[DataFrame, Dict[str, List[str]]]:
    """
    Creates a blocked GT matrix and index mapping from sample blocks to a list
    of corresponding sample IDs. Uses the same sample-blocking logic as the
    blocked GT matrix transformer.

    Requires that:

    - Each variant row has the same number of values
    - The number of values per row matches the number of sample IDs

    Args:
        variant_df : The variant DataFrame
        sample_ids : The list of sample ID strings
        variants_per_block : The number of variants per block
        sample_block_count : The number of sample blocks

    Returns:
        tuple of (blocked GT matrix, index mapping)
    """
    assert check_argument_types()
    first_row = variant_df.selectExpr("size(values) as numValues").take(1)
    if not first_row:
        raise Exception("DataFrame has no values.")
    num_values = first_row[0].numValues
    if num_values != len(sample_ids):
        raise Exception(
            f"Number of values does not match between DataFrame ({num_values}) "
            f"and sample ID list ({len(sample_ids)})."
        )
    __validate_sample_ids(sample_ids)
    blocked_gt = glow.transform("block_variants_and_samples",
                                variant_df,
                                variants_per_block=variants_per_block,
                                sample_block_count=sample_block_count)
    index_map = __get_index_map(sample_ids, sample_block_count, variant_df.sql_ctx)
    output = blocked_gt, index_map
    assert check_return_type(output)
    return output
4cf50f74b235adf5ef92ebde3a4a9259a3a49d87
22,392
from SPARQLWrapper import SPARQLWrapper, POST, POSTDIRECTLY


def sparql_service_update(service, update_query):
    """
    Helper function to update (DELETE DATA, INSERT DATA, DELETE/INSERT) data.
    """
    sparql = SPARQLWrapper(service)
    sparql.setMethod(POST)
    sparql.setRequestMethod(POSTDIRECTLY)
    sparql.setQuery(update_query)
    # SPARQLWrapper throws an exception if the response status is not 200,
    # so reaching the return means the update succeeded
    sparql.query()
    return 'Done'
8e68c6672222a831203e457deaaac1ed73169fbf
22,393
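A usage sketch for `sparql_service_update` above; the endpoint URL is hypothetical (any SPARQL 1.1 Update endpoint, such as a local Fuseki dataset, would do).

update = """
PREFIX ex: <http://example.org/>
INSERT DATA { ex:subject ex:predicate "object" . }
"""
sparql_service_update("http://localhost:3030/dataset/update", update)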
def filtered_events(request):
    """Render the found-events page for filtered stocking events, passing in
    the API url used to fetch the data and the maximum number of events to
    display.
    """
    dataUrl = reverse("api:api-get-stocking-events")
    maxEvents = settings.MAX_FILTERED_EVENT_COUNT

    return render(
        request,
        "stocking/found_events.html",
        context={"dataUrl": dataUrl, "maxEvents": maxEvents},
    )
b99d772656bab246b9f412f50674e402b7ca7476
22,394
import requests
from datetime import datetime


def track_user_session(user=None, request=None):
    """Creates, filters and updates UserSessions on the core and sends
    UserSessions to the hub on the next login.

    Filter the local UserSession objects per user and get their most recent
    user_session object.
    If it's a LOGIN request and the UserSession exists, send this UserSession
    to the hub, where the FeedbackActivity model's hours_used_release field is
    calculated; then create a new UserSession on the core-service.
    For any other type of request (e.g. verify, refresh), overwrite
    session_end.

    Args:
        user: The user object from people.models
        request: The request object from django_rest_framework

    Returns:
        None
    """
    user_session = UserSession.objects.filter(user_email=user.email).order_by('session_start').last()
    # serialize, then transform to JSON format (.data)
    user_session_serial = UserSessionSerializer(user_session).data
    request_url = request.META.get('PATH_INFO', '')
    if '/api-token-auth/' in request_url:
        # User login, new session start
        if user_session:
            hub_token = ServiceToServiceAuth().generate_hub_token()
            request_params = {
                'url': settings.HUB_URL + 'feedback/user-sessions/',
                'json': user_session_serial,
                'headers': {"Authorization": "STS-JWT {0}".format(hub_token)}
            }
            try:
                requests.post(**request_params)
                UserSession.objects.create(user_email=user.email,
                                           session_start=datetime.now(),
                                           session_end=datetime.now(),
                                           tag=settings.VERSION)
            except requests.ConnectionError:
                return create_error_response('Failed to connect to hub service: {url}'.format(**request_params))
    else:
        # Any other request, e.g. refresh, verify
        user_session.session_end = datetime.now()
        user_session.save()
f5733eed845d23dac945c7d23d09a4cccb2f4e14
22,395
def score(string, goal):
    """
    Compare a randomly generated string to the goal, check how many letters
    are correct and return the count.
    """
    check_counter = 0
    # note: the original re-generated `string` here via generate(values),
    # clobbering the argument; the comparison should use the string passed in
    for i in range(len(string)):
        if string[i] == goal[i]:
            check_counter += 1
    return check_counter
afcfcc565a898b2cdd22e05793144c710fb58e26
22,396
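A quick check for `score` above:

print(score("hellx", "hello"))  # 4 of 5 positions match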
from datetime import datetime


def validate_parent():
    """
    This api validates a parent in the DB.
    """
    parent_id = request.json.get('parent_id', None)
    decision = request.json.get('decision', 0)
    parent = query_existing_user(parent_id)
    if parent:
        parent.validated = decision
        parent.approver_id = get_jwt_identity().get('id')
        parent.approve_time = datetime.utcnow()
        db.session.add(parent)
        db.session.commit()
        return jsonify(message="Parent validation updated"), 201
    else:
        # a missing user is an error, not a successful creation
        return jsonify(message='User does not exist'), 404
3fe52863b4de24705c96aeaa67ee264916b37dbe
22,397
def parse_pdu(data, **kwargs): """Parse binary PDU""" command = pdu.extract_command(data) if command is None: return None new_pdu = make_pdu(command, **kwargs) new_pdu.parse(data) return new_pdu
0a5a84368793f8d5983b08bc2123bd0adb994be6
22,398
from collections import defaultdict
import os.path as osp

import numpy as np
from rdkit import Chem, RDConfig
from rdkit.Chem import ChemicalFeatures

# `F` is DGL's backend tensor namespace; the atom_* helper featurizers
# (atom_type_one_hot etc.) are defined elsewhere in this module.
import dgl.backend as F


def alchemy_nodes(mol):
    """Featurization for all atoms in a molecule. The atom indices
    will be preserved.

    Parameters
    ----------
    mol : rdkit.Chem.rdchem.Mol
        RDKit molecule object

    Returns
    -------
    atom_feats_dict : dict
        Dictionary for atom features
    """
    atom_feats_dict = defaultdict(list)
    is_donor = defaultdict(int)
    is_acceptor = defaultdict(int)

    fdef_name = osp.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
    mol_featurizer = ChemicalFeatures.BuildFeatureFactory(fdef_name)
    mol_feats = mol_featurizer.GetFeaturesForMol(mol)
    mol_conformers = mol.GetConformers()
    assert len(mol_conformers) == 1

    for i in range(len(mol_feats)):
        if mol_feats[i].GetFamily() == 'Donor':
            node_list = mol_feats[i].GetAtomIds()
            for u in node_list:
                is_donor[u] = 1
        elif mol_feats[i].GetFamily() == 'Acceptor':
            node_list = mol_feats[i].GetAtomIds()
            for u in node_list:
                is_acceptor[u] = 1

    num_atoms = mol.GetNumAtoms()
    for u in range(num_atoms):
        atom = mol.GetAtomWithIdx(u)
        atom_type = atom.GetAtomicNum()
        num_h = atom.GetTotalNumHs()
        atom_feats_dict['node_type'].append(atom_type)

        h_u = []
        h_u += atom_type_one_hot(atom, ['H', 'C', 'N', 'O', 'F', 'S', 'Cl'])
        h_u.append(atom_type)
        h_u.append(is_acceptor[u])
        h_u.append(is_donor[u])
        h_u += atom_is_aromatic(atom)
        h_u += atom_hybridization_one_hot(atom, [Chem.rdchem.HybridizationType.SP,
                                                 Chem.rdchem.HybridizationType.SP2,
                                                 Chem.rdchem.HybridizationType.SP3])
        h_u.append(num_h)
        atom_feats_dict['n_feat'].append(F.tensor(np.array(h_u).astype(np.float32)))

    atom_feats_dict['n_feat'] = F.stack(atom_feats_dict['n_feat'], dim=0)
    atom_feats_dict['node_type'] = F.tensor(np.array(
        atom_feats_dict['node_type']).astype(np.int64))

    return atom_feats_dict
8bccd62bafa77f6dc95bfba8992df48756b9ac0e
22,399