Columns: content (string, 35 to 762k characters), sha1 (string, 40 characters), id (int64, 0 to 3.66M)
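The three columns above follow a common code-corpus layout: the raw source text, its SHA-1 hash, and a numeric row id. As a hedged illustration only — the dataset's actual identifier is not given here, so "user/code-snippets" below is a hypothetical placeholder — rows with this schema could be streamed with the Hugging Face datasets library:

# Minimal sketch, assuming a Hugging Face-hosted dataset with columns content/sha1/id.
# "user/code-snippets" is a hypothetical placeholder, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/code-snippets", split="train", streaming=True)
for row in ds.take(3):
    # Print the row id, a short prefix of the hash, and the snippet length.
    print(row["id"], row["sha1"][:8], len(row["content"]))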
def download_from_url_if_not_in_cache(cloud_path: str, cache_dir: str = None): """ :param cloud_path: e.g., https://public-aristo-processes.s3-us-west-2.amazonaws.com/wiqa-model.tar.gz :param cache_dir: will be regarded as a cache directory. :return: the path to which the file is downloaded. """ return cached_path(url_or_filename=cloud_path, cache_dir=cache_dir)
f0549e14b4219303ce48f992d684330338958370
3,648,800
from mne.viz.backends.renderer import _get_renderer from mne_connectivity.base import BaseConnectivity def plot_sensors_connectivity(info, con, picks=None, cbar_label='Connectivity'): """Visualize the sensor connectivity in 3D. Parameters ---------- info : dict | None The measurement info. con : array, shape (n_channels, n_channels) | Connectivity The computed connectivity measure(s). %(picks_good_data)s Indices of selected channels. cbar_label : str Label for the colorbar. Returns ------- fig : instance of Renderer The 3D figure. """ _validate_type(info, "info") if isinstance(con, BaseConnectivity): con = con.get_data() renderer = _get_renderer(size=(600, 600), bgcolor=(0.5, 0.5, 0.5)) picks = _picks_to_idx(info, picks) if len(picks) != len(con): raise ValueError('The number of channels picked (%s) does not ' 'correspond to the size of the connectivity data ' '(%s)' % (len(picks), len(con))) # Plot the sensor locations sens_loc = [info['chs'][k]['loc'][:3] for k in picks] sens_loc = np.array(sens_loc) renderer.sphere(np.c_[sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2]], color=(1, 1, 1), opacity=1, scale=0.005) # Get the strongest connections n_con = 20 # show up to 20 connections min_dist = 0.05 # exclude sensors that are less than 5cm apart threshold = np.sort(con, axis=None)[-n_con] ii, jj = np.where(con >= threshold) # Remove close connections con_nodes = list() con_val = list() for i, j in zip(ii, jj): if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: con_nodes.append((i, j)) con_val.append(con[i, j]) con_val = np.array(con_val) # Show the connections as tubes between sensors vmax = np.max(con_val) vmin = np.min(con_val) for val, nodes in zip(con_val, con_nodes): x1, y1, z1 = sens_loc[nodes[0]] x2, y2, z2 = sens_loc[nodes[1]] tube = renderer.tube(origin=np.c_[x1, y1, z1], destination=np.c_[x2, y2, z2], scalars=np.c_[val, val], vmin=vmin, vmax=vmax, reverse_lut=True) renderer.scalarbar(source=tube, title=cbar_label) # Add the sensor names for the connections shown nodes_shown = list(set([n[0] for n in con_nodes] + [n[1] for n in con_nodes])) for node in nodes_shown: x, y, z = sens_loc[node] renderer.text3d(x, y, z, text=info['ch_names'][picks[node]], scale=0.005, color=(0, 0, 0)) renderer.set_camera(azimuth=-88.7, elevation=40.8, distance=0.76, focalpoint=np.array([-3.9e-4, -8.5e-3, -1e-2])) renderer.show() return renderer.scene()
3d236d8e8802f65c6388eeeafa327a252f9a75be
3,648,801
import re import json def str_to_list_1(string): """ Parameters ---------- string : str The str of first line in each sample of sample.txt Returns --------- final_list: lst """ final_list = [] li = re.findall(r'\[.*?\]', string) for ele in li: final_list.append(json.loads(ele)) return final_list
92b4b11a339d2101a0af5408caee58cc9b9668a1
3,648,802
import torch def batched_nms(boxes, scores, idxs, iou_threshold): """ Same as torchvision.ops.boxes.batched_nms, but safer. """ assert boxes.shape[-1] == 4 # TODO may need better strategy. # Investigate after having a fully-cuda NMS op. if len(boxes) < 40000: return box_ops.batched_nms(boxes, scores, idxs, iou_threshold) result_mask = scores.new_zeros(scores.size(), dtype=torch.bool) for id in torch.unique(idxs).cpu().tolist(): mask = (idxs == id).nonzero().view(-1) keep = nms(boxes[mask], scores[mask], iou_threshold) result_mask[mask[keep]] = True keep = result_mask.nonzero().view(-1) keep = keep[scores[keep].argsort(descending=True)] return keep
2800d7e488fd018350c98c846138675b2ef79090
3,648,803
def one_mini_batch(data, batch_indices): """ Generate one mini-batch. :param data: full dataset (list of samples) :param batch_indices: indices of the samples in this batch :return: dict with the raw samples, their word id lists and label vectors """ batch_data = { "raw_data": [data[i] for i in batch_indices], "word_id_list": [], "label_vector": [] } for sample in batch_data["raw_data"]: batch_data["word_id_list"].append(sample["word_id_list"]) batch_data["label_vector"].append(sample["label_vector"]) return batch_data
2bbbd62a00422431bb3322ebfce26d7fe95edc09
3,648,804
def reset_password(reset_key): """Checks the reset key. If successful, displays the password reset prompt.""" username = auth_utils.check_reset_key(reset_key) if username is None: flask.flash( 'Invalid request. If your link has expired, then you will need to generate a new one. ' 'If you continue to encounter problems, please contact devteam@donut.caltech.edu.' ) return flask.redirect(flask.url_for('auth.forgot_password')) return flask.render_template( 'reset_password.html', username=username, reset_key=reset_key)
4f8e30a1669837c31b3dc2f77df441c50c6439dd
3,648,805
import scipy def williams_diff_test(corr_func: SummaryCorrFunc, X: np.ndarray, Y: np.ndarray, Z: np.ndarray, two_tailed: bool) -> float: """ Calculates the p-value for the difference in correlations using Williams' Test. """ # In the math, Z is metric 1. We take the absolute value of the correlations because # it does not matter whether they are positively or negatively correlated with each other. The WMT scripts # do the same before calling r.test r12 = abs(corr_func(X, Z)) r13 = abs(corr_func(Y, Z)) r23 = abs(corr_func(X, Y)) n = _get_n(corr_func, X) # Implementation based on https://github.com/cran/psych/blob/master/R/r.test.R diff = r12 - r13 det = 1 - (r12 ** 2) - (r23 ** 2) - (r13 ** 2) + (2 * r12 * r23 * r13) av = (r12 + r13) / 2 cube = (1 - r23) ** 3 t2 = diff * np.sqrt((n - 1) * (1 + r23) / (((2 * (n - 1) / (n - 3)) * det + av ** 2 * cube))) # r.test implicitly assumes that r12 > r13 because it takes the absolute value of the t statistic. Since we don't, # we have to have special handling for one-tailed tests so we don't map a negative t statistic to a positive one. if two_tailed: pvalue = scipy.stats.t.sf(abs(t2), n - 3) * 2 else: pvalue = scipy.stats.t.sf(t2, n - 3) return pvalue
afda90296b544233ba34f3abdd87d72b360de832
3,648,806
from typing import Tuple from typing import List import sqlite3 def load_students(max_meeting_seconds: int) -> Tuple[List[str], int]: """Loads student names and wait times from the database.""" try: with sqlite3.connect("students.db") as conn: cursor = conn.cursor() try: cursor.execute("SELECT name FROM students") student_names = [row[0] for row in cursor.fetchall()] cursor.execute("SELECT seconds FROM students") individual_seconds = cursor.fetchall()[0][0] return student_names, individual_seconds except IndexError: pass except sqlite3.OperationalError: create_students_table() return [], max_meeting_seconds
b5b2a003216507df413cba7bea1171cd4667ee1f
3,648,807
def coords(gd0, c, pad=True): """Return coordinates along one of the three axes. Useful for plotting:: import matplotlib.pyplot as plt plt.plot(gd.coords(0), data[:, 0, 0]) plt.show() """ L = np.linalg.norm(gd0.cell_cv[c]) N = gd0.N_c[c] h = L / N p = gd0.pbc_c[c] or pad return np.linspace((1 - p) * h, L, N - 1 + p, False)
42541198f7a57fe6346b49eeaa4961336bd47c3a
3,648,808
def get_associated_genes(variants_list: list) -> pd.DataFrame: """ Get variant gene information from BioMart. More information on BioMart here: https://www.ensembl.org/info/data/biomart/index.html :param variants_list: the list with variant ids. :return: dataframe with variant and gene information """ snp_dataset = Dataset(name='hsapiens_snp', host='http://www.ensembl.org') variant_gene_df = snp_dataset.query(attributes=['refsnp_id', 'ensembl_gene_stable_id'], filters={'snp_filter': variants_list}) gene_dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org') gene_df = gene_dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'], only_unique=False, filters={'link_ensembl_gene_id': list(variant_gene_df['Gene stable ID'])}) merged_df = pd.merge(variant_gene_df, gene_df, on='Gene stable ID') interaction = ['association' for ind, row in merged_df.iterrows()] merged_df['interaction'] = interaction return merged_df
e267afb387496a99701872db94b46543e8c7406a
3,648,809
def crc16(data) : """Compute CRC16 for bytes/bytearray/memoryview data""" crc = _CRC16_START for b in data : crc = ((crc << 8) & 0xFFFF) ^ _CRC16_TABLE[(crc >> 8) ^ b] return crc
ac7dc27ebc47d1bc444050b9adba81d0ac26167a
3,648,810
def sigma(j: int, N: int = 1) -> np.ndarray: """ Return the j-th N-site operator built as the tensor product of the single-site matrices s0..s3, with j interpreted as an N-digit base-4 index. """ s = [s0, s1, s2, s3] dims = [4] * N idx = np.unravel_index(j, dims) return tensor(s[x] for x in idx)
c312222f5a037723f9b7920a971d93e36e3b3e4b
3,648,811
def backcasting( predictor, window, curves, distance="RMS", columns=("cases", "deaths"), min_series=14, step=1, ): """ Perform a backcasting performance analysis of the given model. For the sake of this method, the model is just a function that receives an epidemic curve dataframe and a list of time windows and returns the forecasts for cases and deaths for the specified times. """ windows = np.array(as_seq(window)) min_window = windows.min(initial=len(curves)) def distance(x, y): return (x - y).dropna().abs() / x results = [] for k in range(min_window, len(curves) - min_series, step): data = curves.iloc[:-k] prediction = predictor(data, windows) results.append(distance(curves, prediction)) st.write(results[-1]) return pd.concat(results, axis=0)
0e0eafc06ab6ab4578be1b299fc70ae88796a72d
3,648,812
from typing import Dict from typing import Callable from typing import List from typing import TypeVar K = TypeVar("K") V = TypeVar("V") def find_keys(d: Dict[K, V], predicate: Callable[[V], bool]) -> List[K]: """Find keys where values match predicate.""" return [k for k, v in d.items() if predicate(v)]
68febd42bcd65ff52a786e4941dd5abf7d6a36ee
3,648,813
def get_maya_property_name(prop, ignore_channel=False): """ Given a property, return a reasonable Maya name to use for it. If ignore_channel is True, return the property for the whole vector, eg. return '.translate' instead of '.translateX'. This doesn't create or query anything. It just generates a name to use elsewhere. """ prop_parts = prop.path.split('/') # Get the property key, without any channel suffixes attached. prop_key = prop_parts[0] mapping = { 'translation': 'translate', 'rotation': 'rotate', 'scale': 'scale', } maya_key = None if prop_key in mapping: prop_key = mapping[prop_key] if prop.path.count('/') == 1 and not ignore_channel: # If we've been given a single channel, eg. rotation/x, return it. assert len(prop_parts) == 2, prop_parts assert prop_parts[1] in ('x', 'y', 'z'), prop_parts return '%s%s' % (prop_key, prop_parts[1].upper()) else: # Otherwise, return the vector itself. return prop_key
591a49f054db3936d5a345919a2c69491b6f345e
3,648,814
from keras.layers import BatchNormalization, LSTM, Dropout, Dense, Concatenate from keras.models import Model def model_deepFlavourReference_test(Inputs,nclasses,dropoutRate=0.1,momentum=0.6): """ reference 1x1 convolutional model for 'deepFlavour' with recurrent layers and batch normalisation standard dropout rate is 0.1 should be trained for flavour prediction first. afterwards, all layers can be fixed that do not include 'regression' and the training can be repeated focusing on the regression part (check function fixLayersContaining with invert=True) """ globalvars = BatchNormalization(momentum=momentum,name='globals_input_batchnorm') (Inputs[0]) cpf = BatchNormalization(momentum=momentum,name='cpf_input_batchnorm') (Inputs[1]) npf = BatchNormalization(momentum=momentum,name='npf_input_batchnorm') (Inputs[2]) vtx = BatchNormalization(momentum=momentum,name='vtx_input_batchnorm') (Inputs[3]) cpf,npf,vtx = block_deepFlavourConvolutions(charged=cpf, neutrals=npf, vertices=vtx, dropoutRate=dropoutRate, active=True, batchnorm=True, batchmomentum=momentum) # cpf = LSTM(150,go_backwards=True,implementation=2, name='cpf_lstm')(cpf) cpf=BatchNormalization(momentum=momentum,name='cpflstm_batchnorm')(cpf) cpf = Dropout(dropoutRate)(cpf) npf = LSTM(50,go_backwards=True,implementation=2, name='npf_lstm')(npf) npf=BatchNormalization(momentum=momentum,name='npflstm_batchnorm')(npf) npf = Dropout(dropoutRate)(npf) vtx = LSTM(50,go_backwards=True,implementation=2, name='vtx_lstm')(vtx) vtx=BatchNormalization(momentum=momentum,name='vtxlstm_batchnorm')(vtx) vtx = Dropout(dropoutRate)(vtx) x = Concatenate()( [globalvars,cpf,npf,vtx ]) x = block_deepFlavourDense(x,dropoutRate,active=True,batchnorm=True,batchmomentum=momentum) flavour_pred=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x) predictions = [flavour_pred] model = Model(inputs=Inputs, outputs=predictions) return model
f92f977a5570e647bf394d450bd5a5dea918aeba
3,648,815
import pathlib def load_spyrelet_class(spyrelet_name, cfg): """Load a spyrelet class from a file (whose location is defined in cfg)""" # discover spyrelet file and class spyrelet_path_str, _ = get_config_param(cfg, [CONFIG_SPYRELETS_KEY, spyrelet_name, CONFIG_SPYRELETS_FILE_KEY]) spyrelet_class_name, spyrelet_cfg_path_str = get_config_param(cfg, [CONFIG_SPYRELETS_KEY, spyrelet_name, CONFIG_SPYRELETS_CLASS_KEY]) # resolve the spyrelet file location # if the path isn't absolute resolve it relative to the config file spyrelet_path = pathlib.Path(spyrelet_path_str) if not spyrelet_path.is_absolute(): spyrelet_path = pathlib.Path(spyrelet_cfg_path_str).parent / spyrelet_path spyrelet_path = spyrelet_path.resolve() if not spyrelet_path.is_file(): raise SpyreletLoadError(None, f'spyrelet [{spyrelet_name}] file [{spyrelet_path}] doesn\'t exist') return load_class_from_file(spyrelet_path, spyrelet_class_name)
877c8a626e7abe3e41146475dc030966c0b9f41e
3,648,816
def see_documentation(): """ This function redirects to the api documentation """ return jsonify({ '@context': responses.CONTEXT, 'rdfs:comment': 'See http://www.conceptnet.io for more information about ConceptNet, and http://api.conceptnet.io/docs for the API documentation.' })
46de921c855797b1b7d231a4cb88c57026ece947
3,648,817
import time def fit_imputer(df, tolerance=0.2, verbose=2, max_iter=20, nearest_features=20, imputation_order='ascending', initial_strategy='most_frequent'): """ A function to train an IterativeImputer using machine learning Args: df: dataset to impute tolerance: Tolerance of stopping function verbose: Verbosy flag, controls the debug messages that are issued as functions are evaluated max_iter: Maximum number of imputation rounds nearest_features: Number of other features to use to estimate the missing values imputation_order: ascending or descending - the order in which the features will be imputed initial_strategy: e.g. 'most_frequent' or 'mean' Returns: dataset with no missing values """ start = time.time() # restrict the values to be predicted to a min / max range minimum_before = list(df.iloc[:, :].min(axis=0)) maximum_before = list(df.iloc[:, :].max(axis=0)) imputer = IterativeImputer(random_state=0, imputation_order=imputation_order, n_nearest_features=nearest_features, initial_strategy=initial_strategy, max_iter=max_iter, min_value=minimum_before, max_value=maximum_before, skip_complete=True, tol=tolerance, verbose=verbose) imputer.fit(df) end = time.time() print('Execution time for IterativeImputer: {} sec'.format(end - start)) return imputer
9ca798c61ee555ad7d58da16660aeb12518c9b7e
3,648,818
from django.shortcuts import render from django.urls import reverse def jhtml_render(request, file_type=None,json_file_url=None, html_template=None, json_render_dict=None, json_render_func=None, file_path=None, url_name=None, app_name=None): """ :param request: :param file_type: json/temp_json :param json_file_url: :param html_template: template file path, not including the templates directory :param json_render_dict: dict of variables used for rendering :return: """ path = request.path print(path) if file_type=='temp_json': try: json_file_url = reverse(url_name+'_tjson', current_app=app_name) except Exception as e: print('ERROR: no json file url found:', file_path) render_dict = {'json_to_render_file_url': json_file_url} if json_render_dict is not None: render_dict.update(json_render_dict) if json_render_func is not None: render_dict.update(json_render_func(request, json_render_dict)) page_name = html_template if (page_name is not None) and len(page_name) > 0: page_name = page_name # settings.BASE_DIR / else: page_name = 'html/index_for_json.html' if len(json_file_url) > 3: if json_file_url[-4:] == 'html': page_name = json_file_url #static amis json file render ##if re_one.file_type=='temp_json': return render(request, page_name, render_dict)
b5d61d69a2c27d883aad60953c7366c6724b905e
3,648,819
import sys import os import tempfile def intermediate_dir(): """ Location in temp dir for storing .cpp and .o files during builds. """ python_name = "python%d%d_intermediate" % tuple(sys.version_info[:2]) path = os.path.join(tempfile.gettempdir(),"%s"%whoami(),python_name) if not os.path.exists(path): os.makedirs(path, mode=0o700) return path
123f18287ae54bf257cbb74e0fe2d4bfca1df564
3,648,820
from PIL import Image import os def image(cache_path, width, height): """ Generate a custom-sized sample image """ # Create unique path size = (width, height) filename = "%sx%s.png" % (width, height) path = os.path.join(cache_path, filename) # Check if image has already been created if not os.path.exists(path): # Generate new image sample = breakdown.pkg_path("img/sample.png") if not os.path.exists(sample): return Markup(u"<img/>") else: try: # Try scaling the image using PIL source = Image.open(sample) scaled = source.resize(size, Image.BICUBIC) scaled.save(path) except ImportError: # If we couldnt find PIL, just copy the image inf = open(sample, "rb") outf = open(path, "wb") outf.write(inf.read()) return Markup(u'<img src="%s%s">' % (STATIC_URL, filename))
df58a08937b5740fb5e4bc433f99c8de9b779c73
3,648,821
def truncate(text, length=30, indicator='...', whole_word=False): """Truncate ``text`` with replacement characters. ``length`` The maximum length of ``text`` before replacement ``indicator`` If ``text`` exceeds the ``length``, this string will replace the end of the string ``whole_word`` If true, shorten the string further to avoid breaking a word in the middle. A word is defined as any string not containing whitespace. If the entire text before the break is a single word, it will have to be broken. Example:: >>> truncate('Once upon a time in a world far far away', 14) 'Once upon a...' TODO: try to replace it with built-in `textwrap.shorten` (available starting from Python 3.4) when support for Python 2 completely dropped. """ if not text: return "" if len(text) <= length: return text short_length = length - len(indicator) if not whole_word: return text[:short_length] + indicator # Go back to end of previous word. i = short_length while i >= 0 and not text[i].isspace(): i -= 1 while i >= 0 and text[i].isspace(): i -= 1 if i <= 0: # Entire text before break is one word, or we miscalculated. return text[:short_length] + indicator return text[:i + 1] + indicator
82bf86407f57fc8f3524120c27c9231ad39ec2b2
3,648,822
def prefix_sums(A): """ This function calculates the sums of elements in a given slice (contiguous segment of an array). Its main idea uses prefix sums, which are defined as the consecutive totals of the first 0, 1, 2, . . . , n elements of an array. Args: A: an array representing the number of mushrooms growing on consecutive spots along a road. Returns: an array containing the consecutive sums of the first n elements of array A To use: >> A=[2,3,7,5,1,3,9] >> print(prefix_sums(A)) [0, 2, 5, 12, 17, 18, 21, 30] Time Complexity: O(n) """ n = len(A) P = [0] * (n + 1) for k in range(1, n + 1): P[k] = P[k - 1] + A[k - 1] return P
d61e49eb4a973f7718ccef864d8e09adf0e09ce2
3,648,823
from run4it.api.scripts import script_import_polar_exercices as script_func def polar_import(): """Import data from Polar and save as workouts""" return script_func('polar_import')
6a7075184e5c44a3092670fffc94360ef9a363c4
3,648,824
def dijkstra(G, Gextra, source, target_set, required_datarate, max_path_latency): """ :returns a successful path from source to a target from target_set with lowest path length """ q = DynamicPriorityQueue() q.put((source, 0.0), priority=0.0) marked = set() parents = {source: None} while not q.empty(): path_length, (current_node, current_path_latency) = q.pop() marked.add(current_node) if current_node in target_set: return _compute_path(parents, current_node) for neighbor in G.neighbors_iter(current_node): if neighbor not in marked: edata = G.edge[current_node][neighbor] new_path_latency = current_path_latency + edata["l_cap"] if (required_datarate <= Gextra.get_edge(current_node, neighbor).b_rem and new_path_latency <= max_path_latency): new_path_length = path_length + 1 if not config.USE_HOP_PATH_LENGTH: new_path_length = new_path_latency if q.put_or_decrease((neighbor, new_path_latency), other_priority=new_path_length): parents[neighbor] = current_node return None
6a8ff88b7a56308e099d3f9e50c8645c3281a68e
3,648,825
def build_single_class_dataset(name, class_ind=0, **dataset_params): """ wrapper for the base skeletor dataset loader `build_dataset` this will take in the same arguments, but the loader will only iterate over examples of the given class I'm just going to overwrite standard cifar loading data for now """ trainloader, testloader = build_dataset(name, **dataset_params) def _filter(loader, mode='train'): dataset = loader.dataset assert name in ['cifar10', 'svhn'],\ 'we only support cifar and svhn right now' if name == 'cifar10': data_attr = mode + '_data' # e.g. train imgs in dataset.train_data label_attr = mode + '_labels' else: data_attr = 'data' label_attr = 'labels' data = getattr(dataset, data_attr) targets = np.array(getattr(dataset, label_attr)) class_inds = np.where(targets == int(class_ind)) data, targets = data[class_inds], targets[class_inds] setattr(dataset, data_attr, data) setattr(dataset, label_attr, targets) return loader return _filter(trainloader, mode='train'), _filter(testloader, mode='test')
c8d05ecc1292562e846bc62724a224c20746037a
3,648,826
def gamma_trace(t): """ trace of a single line of gamma matrices Examples ======== >>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, \ gamma_trace, LorentzIndex >>> from sympy.tensor.tensor import tensor_indices, tensorhead >>> p, q = tensorhead('p, q', [LorentzIndex], [[1]]) >>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', LorentzIndex) >>> ps = p(i0)*G(-i0) >>> qs = q(i0)*G(-i0) >>> gamma_trace(G(i0)*G(i1)) 4*metric(i0, i1) >>> gamma_trace(ps*ps) - 4*p(i0)*p(-i0) 0 >>> gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0) 0 """ if isinstance(t, TensAdd): res = TensAdd(*[_trace_single_line(x) for x in t.args]) return res t = _simplify_single_line(t) res = _trace_single_line(t) return res
8eb5bf4ba1f1d0e170a88a7b798b65273db8c1fd
3,648,827
import copy def preprocess(comment): """Pre-Process the comment""" copy_comment = copy.deepcopy(comment) # Replacing link final_comment = replace_link(copy_comment) nftokens = get_nf_tokens(comment) return final_comment, nftokens
f7286d5ca3e668b70385cd72485bb81eb8f9eec1
3,648,828
def voc_label_indices(colormap, colormap2label): """Map a RGB color to a label.""" colormap = colormap.astype('int32') idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256 + colormap[:, :, 2]) return colormap2label[idx]
481eccab328da13c4a49b2cf69d8e0e1cf1e48ab
3,648,829
def make_noisy_linear(w=1, std=1): """Factory for linear function <w,x> perturbed by gaussian noise N(0,std^2)""" @Oracle def noisy_linear(x): return np.dot(x, w) + np.random.normal(scale=std) return noisy_linear
80ec4a37dbbe6dc837707fa9a6e93e27d8dea9b9
3,648,830
def distance(turtle, x, y=None): """Return the distance from the turtle to (x,y) in turtle step units. Arguments: turtle -- the turtle x -- a number or a pair/vector of numbers or a turtle instance y -- a number if x is a number, else None call: distance(turtle, x, y) # two coordinates --or: distance(turtle, (x, y)) # a pair (tuple) of coordinates --or: distance(turtle, vec) # e.g. as returned by pos(turtle) --or: distance(turtle, mypen) # where mypen is another turtle Example: >>> pos(turtle) (0.00, 0.00) >>> distance(turtle, 30,40) 50.0 >>> pen = Turtle() >>> pen.forward(77) >>> distance(turtle, pen) 77.0 """ if type(turtle) != _turtle.Turtle: raise(TypeError("turtle argument to distance is not a valid turtle")) return turtle.distance(x, y)
f09b320c2b07374bebd2fd8c16084e7bf676523d
3,648,831
import copy def asy_ts(gp, anc_data): """ Returns a recommendation via TS in the asynchronous setting. """ anc_data = copy.copy(anc_data) # Always use a random optimiser with a vectorised sampler for TS. if anc_data.acq_opt_method != 'rand': anc_data.acq_opt_method = 'rand' anc_data.max_evals = 4 * anc_data.max_evals gp_sample = _get_gp_sampler_for_parallel_strategy(gp, anc_data) return _maximise_acquisition(gp_sample, anc_data, vectorised=True)
1514263314cd92b053bfcd655872a03785b47af0
3,648,832
import re def checkParams(opts): """ Check that the module name follows the naming rules and that the target directory exists. """ res = {} for opt, arg in opts: if opt == '--name': if re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', arg): res['name'] = arg else: return res elif opt == '--dir': res['dir'] = arg elif opt == '--type': res['type'] = arg else: print("Unknown option " + arg) res['dir'] = res['dir'] + res['name'] + '/' return res
5b8306a1c9805786e4a98509dcea3af59ffd04d1
3,648,833
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'): """ Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf https://github.com/bharatsingh430/soft-nms """ best_bboxes = [] while len(bboxes) > 0: max_ind = np.argmax(bboxes[:, 4]) best_bbox = bboxes[max_ind] best_bboxes.append(list(best_bbox)) bboxes = np.concatenate([bboxes[: max_ind], bboxes[max_ind + 1:]]) iou = bboxes_iou(best_bbox[np.newaxis, :4], bboxes[:, :4]) weight = np.ones((len(iou),), dtype=np.float32) assert method in ['nms', 'soft-nms'] if method == 'nms': iou_mask = iou > iou_threshold weight[iou_mask] = 0.0 if method == 'soft-nms': weight = np.exp(-(1.0 * iou ** 2 / sigma)) bboxes[:, 4] = bboxes[:, 4] * weight score_mask = bboxes[:, 4] > 0. bboxes = bboxes[score_mask] return best_bboxes
10f3f65bd00599aa77f2d832754febfeeed7ca55
3,648,834
def smart_cast(value): """Intelligently cast the given value to a Python data type. :param value: The value to be cast. :type value: str """ # Handle integers first because is_bool() may interpret 0s and 1s as booleans. if is_integer(value, cast=True): return int(value) elif is_float(value): return float(value) elif is_bool(value): return to_bool(value) else: return value
73676278e8c8bf54536fd3c9982cad7f6064cb75
3,648,835
from rdkit.Chem import Draw from rdkit.Chem import AllChem from IPython.display import SVG, display import io import matplotlib.pyplot as plt import matplotlib.image as mpimg def _draw_mol_with_property( mol, property, **kwargs ): """ http://rdkit.blogspot.com/2015/02/new-drawing-code.html Parameters --------- property : dict key atom idx, val the property (need to be stringfiable) """ def run_from_ipython(): try: __IPYTHON__ return True except NameError: return False AllChem.Compute2DCoords(mol) for idx in property: # opts.atomLabels[idx] = mol.GetAtomWithIdx( idx ).SetProp( 'molAtomMapNumber', "({})".format( str(property[idx]))) mol = Draw.PrepareMolForDrawing(mol, kekulize=False) #enable adding stereochem if run_from_ipython(): if "width" in kwargs and type(kwargs["width"]) is int and "height" in kwargs and type(kwargs["height"]) is int: drawer = Draw.MolDraw2DSVG(kwargs["width"], kwargs["height"]) else: drawer = Draw.MolDraw2DSVG(500,250) drawer.DrawMolecule(mol) drawer.FinishDrawing() display(SVG(drawer.GetDrawingText().replace("svg:", ""))) else: if "width" in kwargs and type(kwargs["width"]) is int and "height" in kwargs and type(kwargs["height"]) is int: drawer = Draw.MolDraw2DCairo(kwargs["width"], kwargs["height"]) else: drawer = Draw.MolDraw2DCairo(500,250) #cairo requires anaconda rdkit # opts = drawer.drawOptions() drawer.DrawMolecule(mol) drawer.FinishDrawing() # # with open("/home/shuwang/sandbox/tmp.png","wb") as f: # f.write(drawer.GetDrawingText()) buff = io.BytesIO() buff.write(drawer.GetDrawingText()) buff.seek(0) plt.figure() i = mpimg.imread(buff) plt.imshow(i) plt.show() # display(SVG(drawer.GetDrawingText()))
5f680f750b01d2f178df125dbaff6f737bbbcfc8
3,648,836
from typing import List from typing import Dict import math def find_host_biz_relations(bk_host_ids: List[int]) -> Dict: """ Query the topology relations that the hosts belong to :param bk_host_ids: list of host IDs, e.g. [1, 2, 3] :return: topology relations of the hosts [ { "bk_biz_id": 3, "bk_host_id": 3, "bk_module_id": 59, "bk_set_id": 11, "bk_supplier_account": "0" } ] """ # CMDB limits the number of records per query, so split into batches and request them concurrently param_list = [ {"bk_host_id": bk_host_ids[count * constants.QUERY_CMDB_LIMIT : (count + 1) * constants.QUERY_CMDB_LIMIT]} for count in range(math.ceil(len(bk_host_ids) / constants.QUERY_CMDB_LIMIT)) ] host_biz_relations = request_multi_thread(client_v2.cc.find_host_biz_relations, param_list, get_data=lambda x: x) return host_biz_relations
9cd9891a97b5ad3db88a0e8a631775b1dc8c24c7
3,648,837
def atom_to_atom_line(atom): """Takes an atomium atom and turns it into a .cif ATOM record. :param Atom atom: the atom to read. :rtype: ``str``""" name = get_atom_name(atom) res_num, res_insert = split_residue_id(atom) return "ATOM {} {} {} . {} {} . {} {} {} {} {} 1 {} {} {} {} {} {} 1".format( atom.id, atom.element, name, atom.het._name if atom.het else "?", atom.het._internal_id if atom.het and isinstance( atom.het, Ligand ) else atom.chain._internal_id if atom.chain else ".", res_num, res_insert, atom.location[0], atom.location[1], atom.location[2], atom.bvalue, atom.charge, res_num, atom.het._name if atom.het else "?", atom.chain.id if atom.chain else ".", name )
30e9f9191947b23dffd9e3f6d63f697de325e5f0
3,648,838
from .....main import _get_bot from typing import Union from typing import Optional async def edit_chat_invite_link( token: str = TOKEN_VALIDATION, chat_id: Union[int, str] = Query(..., description='Unique identifier for the target chat or username of the target channel (in the format @channelusername)'), invite_link: str = Query(..., description='The invite link to edit'), name: Optional[str] = Query(None, description='Invite link name; 0-32 characters'), expire_date: Optional[int] = Query(None, description='Point in time (Unix timestamp) when the link will expire'), member_limit: Optional[int] = Query(None, description='Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999'), creates_join_request: Optional[bool] = Query(None, description="True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified"), ) -> JSONableResponse: """ Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the edited invite link as a ChatInviteLink object. https://core.telegram.org/bots/api#editchatinvitelink """ bot = await _get_bot(token) try: entity = await get_entity(bot, chat_id) except BotMethodInvalidError: assert isinstance(chat_id, int) or (isinstance(chat_id, str) and len(chat_id) > 0 and chat_id[0] == '@') entity = chat_id except ValueError: raise HTTPException(404, detail="chat not found?") # end try result = await bot.edit_chat_invite_link( entity=entity, invite_link=invite_link, name=name, expire_date=expire_date, member_limit=member_limit, creates_join_request=creates_join_request, ) data = await to_web_api(result, bot) return r_success(data.to_array())
7c83316e0e86eb223b40ed9bf69126d79a4651b4
3,648,839
def post_live_migrate_at_source(adapter, host_uuid, instance, vif): """Performs the post live migrate on the source host. :param adapter: The pypowervm adapter. :param host_uuid: The host UUID for the PowerVM API. :param instance: The nova instance object. :param vif: The virtual interface of the instance. This may be called network_info in other portions of the code. """ vif_drv = _build_vif_driver(adapter, host_uuid, instance, vif) return vif_drv.post_live_migrate_at_source(vif)
0a4165abe0373a96b2b222d4eaa9316649d607b2
3,648,840
import re def conv2date(dtstr,tstart=None): """Convert epoch string or time interval to matplotlib date""" #we possibly have a timeinterval as input so wrap in exception block m=re.search("([\+\-])([0-9]+)([dm])",dtstr) if m: if m.group(3) == "m": dt=30.5*float(m.group(2)) #scale with average days per month elif m.group(3) == "d": dt=float(m.group(2)) if m.group(1) == "+": fac=1 else: fac=-1 if not tstart: tstart=0 #Compute timedeltas only dout=tstart+fac*dt else: dout=datestr2num(dtstr) return dout
b848f45c04bf9ef77fa3af395afb992f6302fb4f
3,648,841
def resnet18(pretrained=False, **kwargs): """Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(MaskedBasicblock, [2, 2, 2, 2], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) print('ResNet-18: using pretrained model for initialization') return model
94e339a390723e7dbdec4d95b7f4bb3600faae1f
3,648,842
def logioinfo(func): """ This function is to add IO information """ def write(exec_info): """ This function is to add bucket and object Io information Parameters: exec_info Returns: write """ log.info('in write') log.info(exec_info) ret_val = func(exec_info) if ret_val is False: return ret_val gen_basic_io_info_structure = BasicIOInfoStructure() gen_extra_io_info_structure = ExtraIOInfoStructure() write_bucket_info = BucketIoInfo() write_key_info = KeyIoInfo() obj = exec_info['obj'] resource_name = exec_info['resource'] extra_info = exec_info.get('extra_info', None) log.info('obj_name :%s' % obj) log.info('resource_name: %s' % resource_name) if 's3.Bucket' == type(obj).__name__: log.info('in s3.Bucket logging') resource_names = ['create'] if resource_name in resource_names: access_key = extra_info['access_key'] log.info('adding io info of create bucket') bucket_info = gen_basic_io_info_structure.bucket(**{'name': obj.name}) write_bucket_info.add_bucket_info(access_key, bucket_info) if 's3.Object' == type(obj).__name__: log.info('in s3.Object logging') resource_names = ['upload_file', 'initiate_multipart_upload'] if resource_name in resource_names: log.info('writing log for upload_type: %s' % extra_info.get('upload_type','normal')) access_key = extra_info['access_key'] # setting default versioning status to disabled extra_info['versioning_status'] = extra_info.get('versioning_status', 'disabled') log.info('versioning_status: %s' % extra_info['versioning_status']) if extra_info.get('versioning_status') == 'disabled' or \ extra_info.get('versioning_status') == 'suspended': log.info('adding io info of upload objects') key_upload_info = gen_basic_io_info_structure.key( **{'name': extra_info['name'], 'size': extra_info['size'], 'md5_local': extra_info['md5'], 'upload_type': extra_info.get('upload_type','normal')}) write_key_info.add_keys_info(access_key, obj.bucket_name, key_upload_info) if extra_info.get('versioning_status') == 'enabled' and extra_info.get('version_count_no') == 0: log.info('adding io info of upload objects, version enabled, so only key name will be added') key_upload_info = gen_basic_io_info_structure.key( **{'name': extra_info['name'], 'size': None, 'md5_local': None, 'upload_type': extra_info.get('upload_type','normal')}) write_key_info.add_keys_info(access_key, obj.bucket_name, key_upload_info) log.debug('writing log for %s' % resource_name) return ret_val return write
ef8f1361e87cd246353debab11d7ad5c97f62058
3,648,843
import pytz def weather(api_token, city, start, end): """ Returns an hourly report of cloud cover, wind and temperature data for the given city. The report is always in full days. Timestamps are in UTC. Start and end dates are interpreted as UTC. """ a = Astral() city = a[city] # hour=0 would give us the previous day. Dark Sky always returns full days so # we can just make one request per day from start to end, always at midday. d = start.replace(hour=12, tzinfo=pytz.UTC) dfs = [] for i in range(_num_days(start, end)): weather = _raw_weather(api_token, city.latitude, city.longitude, d) df = _as_dataframe(weather, d) dfs.append(df) d = d + timedelta(days=1) return _tidy(pd.concat(dfs))
2d8457cc8388613825dad54686988194eed85b2b
3,648,844
from skimage.transform import iradon def skimage_radon_back_projector(sinogram, geometry, range, out=None): """Calculate back-projection using skimage. Parameters ---------- sinogram : `DiscreteLpElement` Sinogram (projections) to backproject. geometry : `Geometry` The projection geometry to use. range : `DiscreteLp` Range of this back-projection (volume space). out : ``range`` element, optional An element in range that the result should be written to. Returns ------- out : ``range`` element Back-projection in the volume space. """ # Lazy import due to significant import time theta = skimage_theta(geometry) skimage_range = skimage_sinogram_space(geometry, range, sinogram.space) skimage_sinogram = skimage_range.element() skimage_sinogram.sampling(clamped_interpolation(range, sinogram)) if out is None: out = range.element() else: # Only do asserts here since these are backend functions assert out in range # Rotate back from (rows, cols) to (x, y) backproj = iradon(skimage_sinogram.asarray().T, theta, output_size=range.shape[0], filter=None, circle=False) out[:] = np.rot90(backproj, -1) # Empirically determined value, gives correct scaling scaling_factor = 4.0 * float(geometry.motion_params.length) / (2 * np.pi) # Correct in case of non-weighted spaces proj_extent = float(sinogram.space.partition.extent.prod()) proj_size = float(sinogram.space.partition.size) proj_weighting = proj_extent / proj_size scaling_factor *= (sinogram.space.weighting.const / proj_weighting) scaling_factor /= (range.weighting.const / range.cell_volume) # Correctly scale the output out *= scaling_factor return out
8158569eca46907091bfbca6aba57cd2a6afa6bf
3,648,845
def get_segment_hosts(master_port): """ """ gparray = GpArray.initFromCatalog( dbconn.DbURL(port=master_port), utility=True ) segments = GpArray.getSegmentsByHostName( gparray.getDbList() ) return segments.keys()
565921e4b7d46ec357666d50dee7dcdb7127759e
3,648,846
from typing import List from typing import Dict from typing import Any def get_saved_albums(sp: Spotify) -> List[Dict[str, Any]]: """Returns the list of albums saved in user library""" albums = [] # type: List[Dict[str, Any]] results = sp.current_user_saved_albums(limit=50) albums.extend(results["items"]) while results["next"]: results = sp.next(results) albums.extend(results["items"]) return albums
525074d9f957b71c0b355d3d343e088d29792363
3,648,847
def createMergerCatalog(hd_obj, obj_conditions, cosmo, time_since_merger=1): """ Function to create Major Merger (MM) catalog @hd_obj :: header file for the object of interest @obj_conditions :: prior conditions to define the object sample @cosmo :: cosmology used in the notebook (Flat Lambda CDM) @mass_range :: [lower, upper] limits on range on galaxy stellar masses to create pair sample @time_since_merger :: int to decide the objects with mergers < x Gyr """ # converting the time since merger into scale factor merger_z = z_at_value(cosmo.lookback_time, time_since_merger*u.Gyr) merger_scale = 1/(1+merger_z) # defining the merger condition merger_condition = (hd_obj['HALO_scale_of_last_MM']>merger_scale) downsample = obj_conditions & merger_condition return hd_obj[downsample], downsample
ee0ac59fe1a8fa9a40a934caa32ff53cd171f3dc
3,648,848
import subprocess def get_test_subprocess(cmd=None, **kwds): """Return a subprocess.Popen object to use in tests. By default stdout and stderr are redirected to /dev/null and the python interpreter is used as test process. It also attempts to make sure the process is in a reasonably initialized state. """ kwds.setdefault("stdin", DEVNULL) kwds.setdefault("stdout", DEVNULL) if cmd is None: safe_rmpath(_TESTFN) pyline = "from time import sleep;" pyline += "open(r'%s', 'w').close();" % _TESTFN pyline += "sleep(60)" cmd = [PYTHON, "-c", pyline] sproc = subprocess.Popen(cmd, **kwds) wait_for_file(_TESTFN, delete_file=True, empty=True) else: sproc = subprocess.Popen(cmd, **kwds) wait_for_pid(sproc.pid) _subprocesses_started.add(sproc) return sproc
85d62aeb20b56c604199fc3c2812cf366e7fa1ee
3,648,849
from typing import Union from typing import Dict from typing import List from typing import Any import json def make_response(code: int, body: Union[Dict, List]) -> Dict[str, Any]: """Build a response. Args: code: HTTP response code. body: Python dictionary or list to jsonify. Returns: Response object compatible with AWS Lambda Proxy Integration """ return { "statusCode": code, "headers": { "Content-Type": "application/json", "Access-Control-Allow-Origin": "*", "Access-Control-Allow-Credentials": "true", }, "body": json.dumps(body, default=json_custom), }
bae0a8720085bdf3734724b00df8d856e362602a
3,648,850
def sql2dict(queryset): """Return a SQL alchemy style query result into a list of dicts. Args: queryset (object): The SQL alchemy result. Returns: result (list): The converted query set. """ if queryset is None: return [] return [record.__dict__ for record in queryset]
c55fa18773142cca591aac8ed6bdc37657569961
3,648,851
from collections import OrderedDict import itertools import torch.nn as nn def build_DNN(input_dim, hidden_dim, num_hidden, embedding_dim=1, vocab_size=20,output_dim=1 ,activation_func=nn.Sigmoid): """ Function that automates the generation of a DNN by providing a template for pytorch's nn.Sequential class Parameters ---------- input_dim : int Number of dimensions of input vector hidden_dim : int Number of dimensions for each hidden layer num_hidden : int Number of hidden layers to construct output_dim : int, default=1 Number of output (label) dimensions activation_func : nn.Function Activation function applied to all but the penultimate layer return nn.Module The feedforward network as a PyTorch model """ embed = OrderedDict([("Embedding", nn.Embedding(vocab_size,embedding_dim))]) input = OrderedDict([("Input", nn.Linear(input_dim,hidden_dim)),("Sig1", activation_func())]) hidden_structure = [[('Hidden{}'.format(i), nn.Linear(hidden_dim,hidden_dim)), ('Sig{}'.format(i+1), nn.Sigmoid())] for i in range(1,num_hidden+1)] hiddens = OrderedDict(list(itertools.chain.from_iterable(hidden_structure))) output = OrderedDict([("Output", nn.Linear(hidden_dim,output_dim))]) return nn.Sequential(OrderedDict(**embed, **input, **hiddens, **output))
5b7476b20aacb0d6b0f78da6f97f9a1d3262d43c
3,648,852
def float_to_bin(x, m_digits:int): """ Convert a number x in range [0,1] to a binary string truncated to length m_digits arguments: x: float m_digits: integer return: x_bin: string The decimal representation of digits AFTER '0.' Ex: Input 0.75 has binary representation 0.11 Then this function would return '11' """ if x < 0 or x >= 1: raise ValueError("x must be in interval [0,1)") x_round = round(x * 2**m_digits) # print(x_round) # print(2**m_digits) if x_round == 2**m_digits: x_round = 0 x_raw = bin(x_round) x_bin = x_raw[2:].zfill(m_digits) return x_bin
f95e72d9449b66681575b230f6c858e8b3833cc2
3,648,853
from typing import Callable from typing import List def apply(func: Callable, args: List): """Call `func` expanding `args`. Example: >>> def add(a, b): >>> return a + b >>> apply(add, [1, 2]) 3 """ return func(*args)
f866087d07c7c036b405f8d97ba993f12c392d76
3,648,854
def random_energy_model_create(db: Session) -> EnergyModelCreate: """ Generate a random energy model create request. """ dataset = fixed_existing_dataset(db) component_1 = fixed_existing_energy_source(db) return EnergyModelCreate(name=f"EnergyModel-{dataset.id}-" + random_lower_string(), ref_dataset=dataset.id, description="EnergyModel description", parameters=[ EnergyModelParameterCreate(component=component_1.component.name, attribute=EnergyModelParameterAttribute.yearly_limit, operation=EnergyModelParameterOperation.set, value=366.6), ] )
db5ac3decf6094bef271005732fd9b78a3870be3
3,648,855
def _indices_3d(f, y, x, py, px, t, nt, interp=True): """Compute time and space indices of parametric line in ``f`` function Parameters ---------- f : :obj:`func` Function computing values of parametric line for stacking y : :obj:`np.ndarray` Slow spatial axis (must be symmetrical around 0 and with sampling 1) x : :obj:`np.ndarray` Fast spatial axis (must be symmetrical around 0 and with sampling 1) py : :obj:`float` Slowness/curvature in slow axis px : :obj:`float` Slowness/curvature in fast axis t : :obj:`int` Time sample (time axis is assumed to have sampling 1) nt : :obj:`int` Size of time axis interp : :obj:`bool`, optional Apply linear interpolation (``True``) or nearest interpolation (``False``) during stacking/spreading along parametric curve Returns ------- sscan : :obj:`np.ndarray` Spatial indices tscan : :obj:`np.ndarray` Time indices dtscan : :obj:`np.ndarray` Decimal time variations for interpolation """ tdecscan = f(y, x, t, py, px) if not interp: sscan = (tdecscan >= 0) & (tdecscan < nt) else: sscan = (tdecscan >= 0) & (tdecscan < nt - 1) tscan = tdecscan[sscan].astype(int) if interp: dtscan = tdecscan[sscan] - tscan else: dtscan = None return sscan, tscan, dtscan
43a1f8761fb4e2ad32225ebf9e96f0aa2cdf0afd
3,648,856
def indicators_listing(request,option=None): """ Generate Indicator Listing template. :param request: Django request object (Required) :type request: :class:`django.http.HttpRequest` :param option: Whether or not we should generate a CSV (yes if option is "csv") :type option: str :returns: :class:`django.http.HttpResponse` """ if option == "csv": return generate_indicator_csv(request) return generate_indicator_jtable(request, option)
772ec90af7b104b4a9712742064d3aba758aab6f
3,648,857
def parse_sensor(csv): """ Ideally, the output from the sensors would be standardized and a simple list to dict conversion would be possible. However, there are differences between the sensors that need to be accommodated. """ lst = csv.split(";") sensor = lst[SENSOR_QUANTITY] if sensor in SENSORS: result = SENSORS[sensor](lst) else: result = parse_generic_sensor(lst) return result
6673e12403090d130f0ac5590097794ae8f191aa
3,648,858
import datetime def samiljeol(year=None): """ :param year: int :return: Independence Movement Day of Korea """ year = year if year else _year return datetime.date(int(year), 3, 1)
6ae717e12aa3dc5bd1d273e240294d2bc6a294ff
3,648,859
def get_entries(xml_file): """Get every entry from a given XML file: the words, their roots and their definitions. """ tree = get_tree(xml_file) # each <drv> is one entry entries = [] for drv_node in tree.iter('drv'): node_words = get_words_from_kap(drv_node.find('kap')) root = get_word_root(drv_node) try: definitions = get_all_definitions(drv_node) except AssertionError: print("Error whilst processing %s: %r" % (xml_file, node_words)) raise for word in node_words: entries.append(Entry(word, root, definitions)) return entries
f9647cf79be68afa03908433890e1abbff9284bf
3,648,860
def comoving_radial_distance(cosmo, a, status): """comoving_radial_distance(cosmology cosmo, double a, int * status) -> double""" return _ccllib.comoving_radial_distance(cosmo, a, status)
72066b4b51a7728608d52c920bade33ecef0b920
3,648,861
import dateutil def make_legacy_date(date_str): """ Converts a date from the UTC format (used in api v3) to the form in api v2. :param date_str: :return: """ date_obj = dateutil.parser.parse(date_str) try: return date_obj.strftime('%Y%m%d') except: return None
5a2ed526c7bd0dae5a73a55c93d14ec158a0e6df
3,648,862
import torch def l2_mat(b1, b2): """b1 has size B x M x D, b2 has size b2 B x N x D, res has size P x M x N Args: b1: b2: Returns: """ b1_norm = b1.pow(2).sum(dim=-1, keepdim=True) b2_norm = b2.pow(2).sum(dim=-1, keepdim=True) res = torch.addmm(b2_norm.transpose(-2, -1), b1, b2.transpose(-2, -1), alpha=-2).add_(b1_norm) # mask = 1.0 - torch.ones(res.shape[0]).diag().to(res.device) res = res.clamp_min_(torch.finfo(torch.float32).eps).sqrt_() # res = res * mask return res
ad254c2c11dccab5dd97c7e72ef3b00c7b6143fb
3,648,863
import fnmatch import os def find_files(base, pattern): """Return list of files matching pattern in base folder.""" return [n for n in fnmatch.filter(os.listdir(base), pattern) if os.path.isfile(os.path.join(base, n))]
e84dd19e6746d92de1852f162eaa997734ac245c
3,648,864
def take_rich(frame, n, offset=0, columns=None): """ A take operation which also returns the schema, offset and count of the data. Not part of the "public" API, but used by other operations like inspect """ if n is None: data = frame.collect(columns) else: data = frame.take(n, offset, columns) schema = frame.schema if not columns else sparktk.frame.schema.get_schema_for_columns(frame.schema, columns) return TakeRichResult(data=data, n=n, offset=offset, schema=schema)
de3514d64a74addae76628c37f679693ba68550b
3,648,865
def default_name(class_or_fn): """Default name for a class or function. This is the naming function by default for registries expecting classes or functions. Args: class_or_fn: class or function to be named. Returns: Default name for registration. """ return camelcase_to_snakecase(class_or_fn.__name__)
1ed04a87916ae5d0fa9f1173d5fb9f97c26b32e9
3,648,866
import pathlib import shutil import random import logging def main(config_file: str, log_level: int) -> int: """Main function Parameters ---------- TODO """ coloredlogs.install( level=log_level * 10, logger=LOG, milliseconds=True, ) # Parse config file config_file = pathlib.Path(config_file).resolve() config = parse_config(config_file) if config is None: LOG.error("Could not read config file") return 1 env_size = tuple(map(int, config["env"]["env_size"].split(", "))) # Set up image dir image_dir = PROJ_DIR.joinpath(config["paths"]["image_dir"]) image_dir.mkdir(mode=0o775, exist_ok=True) image_dir = image_dir.joinpath(config_file.stem) shutil.rmtree(image_dir, ignore_errors=True) image_dir.mkdir(mode=0o775, exist_ok=True) # Create environment env = Environment( log_level, config["general"]["name"], image_dir, env_size, float(config["env"]["grid_size"]), float(config["env"]["time_step_size"]), int(config["env"]["epochs"]), float(config["env"]["air_conductivity"]), float(config["env"]["initial_temp"]), float(config["env"]["ambient_temp"]), (config["general"]["make_gif"] == "True"), ) # Add agents to environment added_penguins = 0 max_penguins = int(config["penguin"]["count"]) max_iterations = max_penguins * 10 for _ in range(max_iterations): penguin = Penguin( random.randrange(env_size[0]), random.randrange(env_size[1]), int(config["penguin"]["body_radius"]), int(config["penguin"]["sense_radius"]), float(config["penguin"]["body_temp"]), float(config["penguin"]["low_death_threshold"]), float(config["penguin"]["high_death_threshold"]), float(config["penguin"]["low_move_threshold"]), float(config["penguin"]["high_move_threshold"]), float(config["penguin"]["internal_conductivity"]), float(config["penguin"]["external_conductivity"]), float(config["penguin"]["insulation_thickness"]), float(config["penguin"]["density"]), config["penguin"]["movement_policy"], int(config["penguin"]["movement_speed"]), float(config["penguin"]["metabolism"]), ) if env.add_agent(penguin): added_penguins += 1 if added_penguins >= max_penguins: break LOG.info(f"Added {added_penguins} agents.") # Run the simulation env.run() LOG.info("Done.") logging.shutdown() return 0
8d176c597f28588a54c2e24016be0e2caf048c0d
3,648,867
def get_ip_result_by_input_method( set_input_method, module_input_method, var_ip_selector, username, bk_biz_id, bk_supplier_account, filter_set, filter_service_template, produce_method, var_module_name="", ): """ @summary Get IPs according to the input method @param var_module_name: module attribute name @param set_input_method: tag code of the set input method @param module_input_method: tag code of the module input method @param var_ip_selector: form data @param username: username @param bk_biz_id: business ID @param bk_supplier_account: supplier account @param filter_set: set filter @param filter_service_template: service template (module) filter @param produce_method: input method @return: comma-separated IP string """ produce_method = "var_ip_{}_value".format(produce_method) select_method = var_ip_selector[produce_method] # Get the full list of sets set_list = get_set_list(username, bk_biz_id, bk_supplier_account) # If sets are not all selected, resolve the selected set names to a set list if ALL_SELECTED_STR not in select_method[set_input_method]: selected_set_names = select_method[set_input_method] # Get the selected sets by their names set_list = get_list_by_selected_names(selected_set_names, set_list) # Get the full list of service templates service_template_list = get_service_template_list(username, bk_biz_id, bk_supplier_account) # If service templates are not all selected, resolve them by name if ALL_SELECTED_STR not in select_method[module_input_method]: selected_service_template_names = select_method[module_input_method] # Get the service template list from the selected or entered template names service_template_list = get_service_template_list_by_names( selected_service_template_names, service_template_list ) # Get idle-machine module IDs from the input service_template_list.extend( get_biz_inner_module_list( var_ip_selector, username, bk_biz_id, bk_supplier_account, produce_method, set_input_method=set_input_method, module_input_method=module_input_method, ) ) # Get the list of module IDs module_ids = get_module_id_list( bk_biz_id, username, set_list, service_template_list, filter_set, filter_service_template, bk_supplier_account ) if not var_module_name or var_module_name == "ip": # Get IPs by module ID list and return them data = get_ip_list_by_module_id(username, bk_biz_id, bk_supplier_account, module_ids) else: # Get module info by module attribute name kwargs = {"bk_ids": module_ids, "fields": var_module_name.split(",")} data = [module_attr[var_module_name] for module_attr in get_module_list(username, bk_biz_id, kwargs=kwargs)] return data
aa12179a5706f213894962579e5d0be30209f14e
3,648,868
from typing import cast from typing import Sized def function_size(container: Result) -> Result: """ The size() function applied to a Value. Delegate to Python's :py:func:`len`. (string) -> int string length (bytes) -> int bytes length (list(A)) -> int list size (map(A, B)) -> int map size For other types, this will raise a Python :exc:`TypeError`. (This is captured and becomes an :exc:`CELEvalError` Result.) .. todo:: check container type for celpy.celtypes.StringType, celpy.celtypes.BytesType, celpy.celtypes.ListType and celpy.celtypes.MapType """ if container is None: return celpy.celtypes.IntType(0) sized_container = cast(Sized, container) result = celpy.celtypes.IntType(len(sized_container)) logger.debug(f"function_size({container!r}) = {result!r}") return result
33470b886ba2a632c98d2de8342e8a793a5b1ac4
3,648,869
def cluster_from_metis_config(config):
    """Construct a Cluster from a metis-flavored object.

    Args:
      config (dict): Metis data.

    Returns:
      Cluster
    """
    curie_settings = curie_server_state_pb2.CurieSettings()
    cluster = curie_settings.Cluster()
    cluster.cluster_name = config["cluster.name"]
    log.info("Using cluster %s", cluster.cluster_name)

    # Manager.
    if config["manager.type"].lower() == "prism":
        cluster.cluster_hypervisor_info.ahv_info.SetInParent()
        prism_info = cluster.cluster_management_server_info.prism_info
        prism_info.prism_host = config["prism.address"]
        prism_info.prism_username = config["prism.username"]
        prism_info.prism_password = config["prism.password"]
        prism_info.prism_cluster_id = config["prism.cluster"]
        prism_info.prism_container_id = config["prism.container"]
        prism_info.prism_network_id = config["prism.network"]
    elif config["manager.type"].lower() == "vcenter":
        cluster.cluster_hypervisor_info.esx_info.SetInParent()
        vcenter_info = cluster.cluster_management_server_info.vcenter_info
        vcenter_info.vcenter_host = config["vcenter.address"]
        vcenter_info.vcenter_user = config["vcenter.username"]
        vcenter_info.vcenter_password = config["vcenter.password"]
        vcenter_info.vcenter_datacenter_name = config["vcenter.datacenter"]
        vcenter_info.vcenter_cluster_name = config["vcenter.cluster"]
        vcenter_info.vcenter_datastore_name = config["vcenter.datastore"]
        vcenter_info.vcenter_network_name = config["vcenter.network"]
    elif config["manager.type"].lower() == "scvmm":
        cluster.cluster_hypervisor_info.hyperv_info.SetInParent()
        vmm_info = cluster.cluster_management_server_info.vmm_info
        vmm_info.vmm_server = config["scvmm.address"]
        vmm_info.vmm_user = config["scvmm.username"]
        vmm_info.vmm_password = config["scvmm.password"]
        vmm_info.vmm_library_server = config["scvmm.library_server_address"]
        vmm_info.vmm_library_server_share_path = config["scvmm.library_server_share_path"]
        vmm_info.vmm_cluster_name = config["scvmm.cluster"]
        vmm_info.vmm_share_path = config["scvmm.share_path"]
        vmm_info.vmm_network_name = config["scvmm.network"]
    else:
        raise ValueError("Unsupported manager.type '%s'" % config["manager.type"])

    # OoB.
    oob_management_info = curie_settings.ClusterNode.NodeOutOfBandManagementInfo
    oob_interface_types = dict(oob_management_info.InterfaceType.items())
    oob_vendors = dict(oob_management_info.Vendor.items())
    if "k" + config["oob.type"].title() not in oob_interface_types:
        raise ValueError("Unsupported oob.type '%s'" % config["oob.type"])
    if "k" + config["ipmi.vendor"].title() not in oob_vendors:
        raise ValueError("Unsupported ipmi.vendor '%s'" % config["ipmi.vendor"])

    # Nodes.
    for node_config in config["nodes"]:
        cluster_node = cluster.cluster_nodes.add()
        cluster_node.id = node_config["hypervisor_addr"]
        try:
            cluster_node.svm_addr = node_config["svm_addr"]
        except KeyError as err:
            log.warning("Error parsing JSON: %s", err.message)
        cluster_node.node_out_of_band_management_info.SetInParent()
        oob = cluster_node.node_out_of_band_management_info
        oob.interface_type = oob_interface_types["k" + config["oob.type"].title()]
        oob.vendor = oob_vendors["k" + config["ipmi.vendor"].title()]
        oob.username = config["ipmi.username"]
        oob.password = config["ipmi.password"]
        oob.ip_address = node_config["ipmi_addr"]

    # Cluster.
    cluster_software_info = cluster.cluster_software_info
    if config["cluster.type"].lower() == "nutanix":
        cluster_software_info.nutanix_info.SetInParent()
        nutanix_info = cluster_software_info.nutanix_info
        # TODO (jklein): Remove once all of the CI configs have been updated.
        if not config.get("prism.address"):
            raise ValueError("Nutanix cluster is missing required virtual IP")
        nutanix_info.prism_host = config["prism.address"]
        nutanix_info.prism_user = config["prism.username"]
        nutanix_info.prism_password = config["prism.password"]
    elif config["cluster.type"].lower() == "vsan":
        cluster_software_info.vsan_info.SetInParent()
    else:
        cluster_software_info.generic_info.SetInParent()

    if cluster.cluster_hypervisor_info.HasField("esx_info"):
        if cluster.cluster_software_info.HasField("nutanix_info"):
            return NutanixVsphereCluster(cluster)
        else:
            return GenericVsphereCluster(cluster)
    elif cluster.cluster_hypervisor_info.HasField("hyperv_info"):
        if cluster.cluster_software_info.HasField("nutanix_info"):
            return NutanixHypervCluster(cluster)
        else:
            return HyperVCluster(cluster)
    elif cluster.cluster_hypervisor_info.HasField("ahv_info"):
        return AcropolisCluster(cluster)
    else:
        raise ValueError("Unsupported set of hypervisor and cluster type")
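A minimal sketch of the config dict this parser expects, exercising the vCenter branch. The key names are taken from the function above; all values are hypothetical placeholders and the OoB values may not pass the enum validation on a real system.

# Illustrative only: a minimal metis-style config for the vCenter branch.
example_config = {
    "cluster.name": "ci-cluster-01",
    "cluster.type": "vsan",
    "manager.type": "vcenter",
    "vcenter.address": "vcenter.example.com",
    "vcenter.username": "administrator@vsphere.local",
    "vcenter.password": "secret",
    "vcenter.datacenter": "DC1",
    "vcenter.cluster": "Cluster1",
    "vcenter.datastore": "vsanDatastore",
    "vcenter.network": "VM Network",
    "oob.type": "ipmi",              # must map to a "k<Title>" InterfaceType enum name
    "ipmi.vendor": "supermicro",     # must map to a "k<Title>" Vendor enum name
    "ipmi.username": "ADMIN",
    "ipmi.password": "ADMIN",
    "nodes": [
        {"hypervisor_addr": "10.0.0.11", "ipmi_addr": "10.0.1.11"},
    ],
}
# cluster = cluster_from_metis_config(example_config)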
3c5abca482d89e7142129b2fb76accb2fc5aa5f2
3,648,870
def _single_style_loss(a, g):
    """ Calculate the style loss at a certain layer
    Inputs:
        a is the feature representation of the real image
        g is the feature representation of the generated image
    Output:
        the style loss at a certain layer (which is E_l in the paper)
    """
    N = a.shape[3]  # number of filters
    M = a.shape[1] * a.shape[2]  # height times width of the feature map
    A = _gram_matrix(a, N, M)
    G = _gram_matrix(g, N, M)
    return tf.reduce_sum((G - A) ** 2 / ((2 * N * M) ** 2))
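The `_gram_matrix` helper is not included in this snippet. A minimal sketch consistent with how it is called above (a (1, H, W, C) feature map reshaped to M x N before the Gram product) could look like the following; the original implementation may differ.

import tensorflow as tf

def _gram_matrix_sketch(f, n, m):
    # Hypothetical stand-in for the _gram_matrix helper referenced above:
    # reshape the feature map to (M, N) and return F^T F.
    f = tf.reshape(f, (m, n))
    return tf.matmul(tf.transpose(f), f)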
f19d8fcfc467d4760a44d2cdb872791cc2ad2ffe
3,648,871
def hyp_dist_o(x):
    """
    Computes hyperbolic distance between x and the origin.
    """
    x_norm = x.norm(dim=-1, p=2, keepdim=True)
    return 2 * arctanh(x_norm)
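The `arctanh` helper is defined elsewhere in the original module. A common definition, shown here only as an assumption, clamps its argument before applying the inverse hyperbolic tangent for numerical stability.

import torch

def arctanh_sketch(x, eps=1e-5):
    # Hypothetical helper matching the arctanh call above; clamping keeps
    # the argument strictly inside (-1, 1).
    x = torch.clamp(x, -1 + eps, 1 - eps)
    return 0.5 * torch.log((1 + x) / (1 - x))

# Usage sketch: points inside the unit ball, one distance per row.
# d = hyp_dist_o(torch.rand(4, 2) * 0.1)   # shape (4, 1)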
8864d8625798a8b41e2dd645cfe11e8d73d6d9d3
3,648,872
def check_image(url):
    """A little wrapper for the :func:`get_image_info` function.
    If the image doesn't match the ``flaskbb_config`` settings it will
    return a tuple whose first value is the custom error message and whose
    second value is ``False`` for not passing the check.
    If the check is successful, it will return ``None`` for the error message
    and ``True`` for the passed check.

    :param url: The image url to be checked.
    """
    img_info = get_image_info(url)
    error = None

    if not img_info["content-type"] in flaskbb_config["AVATAR_TYPES"]:
        error = "Image type is not allowed. Allowed types are: {}".format(
            ", ".join(flaskbb_config["AVATAR_TYPES"])
        )
        return error, False

    if img_info["width"] > flaskbb_config["AVATAR_WIDTH"]:
        error = "Image is too wide! {}px width is allowed.".format(
            flaskbb_config["AVATAR_WIDTH"]
        )
        return error, False

    if img_info["height"] > flaskbb_config["AVATAR_HEIGHT"]:
        error = "Image is too high! {}px height is allowed.".format(
            flaskbb_config["AVATAR_HEIGHT"]
        )
        return error, False

    if img_info["size"] > flaskbb_config["AVATAR_SIZE"]:
        error = "Image is too big! {}kb are allowed.".format(
            flaskbb_config["AVATAR_SIZE"]
        )
        return error, False

    return error, True
d0587dc987a079d49eb9a863d5203908acab41c4
3,648,873
def preprocess(dataset_file_path, len_bound, num_examples=None, reverse=False):
    """
    Reads the dataset file and creates input/output sentence pairs.
    """
    min_sentence_length = len_bound[0]
    max_sentence_length = len_bound[1]

    lines = open(str(dataset_file_path), encoding='utf-8',
                 errors='ignore').read().strip().split('\n')

    if num_examples is not None:
        lines = lines[:num_examples]  # keep only the first num_examples lines

    input_lang = []
    output_lang = []
    seen = set()
    for line in lines:
        _line = line.split('\t')  # separate the input line and output line
        if (len(_line[0].split(" ")) > min_sentence_length
                and len(_line[0].split(" ")) < max_sentence_length
                and len(_line[1].split(" ")) > min_sentence_length
                and len(_line[1].split(" ")) < max_sentence_length):
            inp = clean_text(_line[0])
            if inp in seen:
                continue
            seen.add(inp)
            input_lang.append(inp)
            output_lang.append(clean_text(_line[1]))

    assert len(input_lang) == len(output_lang)  # sanity check: both sides must match
    print("Read %s sentence pairs" % len(input_lang))

    if reverse:
        return (input_lang, output_lang)
    else:
        return (output_lang, input_lang)
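A usage sketch, assuming a tab-separated file where each line holds a source sentence and its target; the path and parameter values are placeholders, not taken from the original project.

# Hypothetical invocation: keep pairs with 3-30 words per side, first 10000 lines.
# src, tgt = preprocess("data/eng-fra.txt", len_bound=(3, 30),
#                       num_examples=10000, reverse=True)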
5849c1957ccab997bcf835bce2fec71b0a93cd6d
3,648,874
def read_transcriptome(transcriptome):
    """
    Parse transcriptome as a dictionary.
    """
    result_dict = {}
    for sequence in SeqIO.parse(transcriptome, 'fasta'):
        result_dict[sequence.name] = sequence.seq
    return result_dict
008df223435de465cd6f36978305ca95bb15b270
3,648,875
from sympy import limit, symbols

# x, y, X, Y and eps are assumed to be sympy symbols available at module
# level; they are defined here so the snippet is self-contained.
x, y, X, Y, eps = symbols('x y X Y eps')


def magnus(w, n):
    """ The 'Magnus' map """
    expr = w.subs(x, 1 + eps*X).subs(y, 1 + eps*Y) - 1
    return limit(expr / eps**n, eps, 0)
7faf1935b9348f41e6968b7da5fa59576ad874a5
3,648,876
import logging
from optparse import OptionParser, OptionGroup


def initCmdLineParser():
    """
    Initiate the optparse object, add all the groups and general command line flags
    and return the optparse object
    """

    # Init parser and all general flags
    logging.debug("initiating command line option parser")
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("--gen-answer-file",
                      help="Generate a template of an answer file, using this option excludes all other options")
    parser.add_option("--answer-file",
                      help="Runs the configuration in non-interactive mode, extracting all information from the "
                           "configuration file. Using this option excludes all other options")

    # For each group, create a group option
    for group in controller.getAllGroups():
        groupParser = OptionGroup(parser, group.getKey("DESCRIPTION"))

        for param in group.getAllParams():
            cmdOption = param.getKey("CMD_OPTION")
            paramUsage = param.getKey("USAGE")
            optionsList = param.getKey("OPTION_LIST")
            useDefault = param.getKey("USE_DEFAULT")
            if not useDefault:
                if optionsList:
                    groupParser.add_option("--%s" % cmdOption,
                                           metavar=optionsList,
                                           help=paramUsage,
                                           choices=optionsList)
                else:
                    groupParser.add_option("--%s" % cmdOption, help=paramUsage)

        # Add group parser to main parser
        parser.add_option_group(groupParser)

    return parser
0a311909888b441bf6dfc559df6f31ea5a5c9c5a
3,648,877
def translate_node_coordinates(wn, offset_x, offset_y):
    """
    Translate node coordinates

    Parameters
    -----------
    wn: wntr WaterNetworkModel
        A WaterNetworkModel object

    offset_x: float
        Translation in the x direction, in meters

    offset_y: float
        Translation in the y direction, in meters

    Returns
    --------
    A WaterNetworkModel object with updated node coordinates
    """
    wn2 = _deepcopy_wn(wn)
    for name, node in wn2.nodes():
        pos = node.coordinates
        node.coordinates = (pos[0] + offset_x, pos[1] + offset_y)
    return wn2
da886a624b9038296d47ffe85a04e62f71f49def
3,648,878
def get_demo_board():
    """Get a demo board"""
    demo_board_id = 1
    query = Board.query.filter(Board.id == demo_board_id)
    query = query.options(joinedload(Board.tasks)).options(raiseload('*'))
    board = query.one()
    return BoardDetailsSchema().dump(board).data
69b20a6c7446dc3813ec8d8c454a7a35443bf103
3,648,879
def cool_KI(n, T):
    """
    Returns Koyama & Inutsuka (2002) cooling function
    """
    return 2e-19*n*n*(np.exp(-1.184e5/(T + 1e3)) + 1.4e-9*T**0.5*np.exp(-92.0/T))
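A quick evaluation sketch over a temperature grid, assuming numpy is available at module level as the function requires; the density value and grid are illustrative only.

import numpy as np

T = np.logspace(1, 4, 4)      # 10 K .. 10^4 K
print(cool_KI(1.0, T))        # cooling term at each temperature for n = 1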
707b9e8d42e4d1b7db069c05b3b74e3f0b37f2e6
3,648,880
def main(args):
    """ main entry point for the manifest CLI """
    if len(args) < 2:
        return usage("Command expected")

    command = args[1]
    rest = args[2:]

    if "create".startswith(command):
        return cli_create(rest)
    elif "query".startswith(command):
        return cli_query(rest)
    elif "verify".startswith(command):
        return cli_verify(rest)
    else:
        return usage("Unknown command: %s" % command)
b89e68c6ef98722a55ff15e8473dec8c8437bf8d
3,648,881
def compute_correlations(states):
    """compute_correlations.

    Calculate the average correlation of spin 0 and every other spin.

    Parameters
    ----------
    states : list of states.
        ``len(states)`` must be >= 1!

    Returns
    -------
    correlations : list of floats.
    """
    return [
        sum(s[0] * s[i] for s in states) / len(states)
        for i in range(len(states[0]))
    ]
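A small usage sketch with made-up ±1 spin configurations; the first entry is always 1 because it is the correlation of spin 0 with itself.

# Three sampled states of a 4-spin system (values are illustrative).
states = [
    [1, 1, -1, 1],
    [1, -1, -1, 1],
    [-1, -1, 1, -1],
]
print(compute_correlations(states))   # [1.0, 0.333..., -1.0, 1.0]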
471949aa63a3d65b262fb9dad1c77d160a3f5ac7
3,648,882
from typing import Sequence
from typing import Any


def parse_sample_str(elems: Sequence[Any]) -> AOList[str]:
    """
    Choose n floats from a distribution.

    Examples:
        >>> c = parse_sample_str([4, ["choose", ["one", "two"]]])
        >>> c
        Sample(4, ChooseS([StrConst('one'), StrConst('two')]))
    """
    str_func = "sample"
    check_n_params(["n", "dist"], elems, str_func)
    n = check_true_int_param(0, elems, "n", str_func)
    this_dist = check_str_param(1, elems, "dist", str_func)
    return dist.Sample(n, this_dist)
5996a3b0ed072d4a7a00d7e01cc74efdc65aa8ee
3,648,883
def htlc(TMPL_RCV, TMPL_OWN, TMPL_FEE, TMPL_HASHIMG, TMPL_HASHFN, TMPL_TIMEOUT):
    """This contract implements a "hash time lock". The contract will approve
    transactions spending algos from itself under two circumstances:

    - If an argument arg_0 is passed to the script such that
      TMPL_HASHFN(arg_0) is equal to TMPL_HASHIMG, then funds may be closed
      out to TMPL_RCV.

    - If txn.FirstValid is greater than TMPL_TIMEOUT, then funds may be
      closed out to TMPL_OWN.

    The idea is that by knowing the preimage to TMPL_HASHIMG, funds may be
    released to TMPL_RCV (Scenario 1). Alternatively, after some timeout
    round TMPL_TIMEOUT, funds may be closed back to their original owner,
    TMPL_OWN (Scenario 2). Note that Scenario 1 may occur up until Scenario 2
    occurs, even if TMPL_TIMEOUT has already passed.

    Parameters:
    TMPL_RCV: the address to send funds to when the preimage is supplied
    TMPL_HASHFN: the specific hash function (sha256 or keccak256) to use
        (sha256 in this example)
    TMPL_HASHIMG: the image of the hash function for which knowing the
        preimage under TMPL_HASHFN will release funds
    TMPL_TIMEOUT: the round after which funds may be closed out to TMPL_OWN
    TMPL_OWN: the address to refund funds to on timeout
    TMPL_FEE: maximum fee of any transactions approved by this contract
    """
    # First, check that the fee of this transaction is less than TMPL_FEE
    fee_check = Txn.fee() < Int(TMPL_FEE)

    # Next, check that this is a payment transaction.
    pay_check = Txn.type_enum() == TxnType.Payment

    # Next, check that the Receiver field for this transaction is empty.
    # Because this contract can approve transactions that close out its entire balance,
    # it should never have a receiver.
    rec_field_check = Txn.receiver() == Global.zero_address()

    # Next, check that the Amount of algos transferred is 0. This is for the same reason as
    # above: we only allow transactions that close out this account completely, which
    # having a non-zero-address CloseRemainderTo will handle for us.
    amount_check = Txn.amount() == Int(0)

    # Always verify that the RekeyTo property of any transaction is set to the ZeroAddress
    # unless the contract is specifically involved in a rekeying operation.
    rekey_check = Txn.rekey_to() == Global.zero_address()

    # fold all the above checks into a single boolean.
    common_checks = And(
        fee_check,
        pay_check,
        rec_field_check,
        amount_check,
        rekey_check
    )

    # Payout scenarios: At this point in the execution, there is one boolean variable on the
    # stack that must be true in order for the transaction to be valid. The checks we have done
    # above apply to any transaction that may be approved by this script. We will now check if we
    # are in one of the two payment scenarios described in the functionality section.

    # Scenario 1: Hash preimage has been revealed
    # First, check that the CloseRemainderTo field is set to be the TMPL_RCV address.
    recv_field_check = Txn.close_remainder_to() == TMPL_RCV

    # Next, we will check that arg_0 is the correct preimage for TMPL_HASHIMG under TMPL_HASHFN.
    preimage_check = TMPL_HASHFN(Arg(0)) == Bytes("base64", TMPL_HASHIMG)

    # Fold the "Scenario 1" checks into a single boolean.
    scenario_1 = And(recv_field_check, preimage_check)

    # Scenario 2: Contract has timed out
    # First, check that the CloseRemainderTo field is set to be the TMPL_OWN address
    # (presumably initialized to be the original owner of the funds).
    owner_field_check = Txn.close_remainder_to() == TMPL_OWN

    # Next, check that this transaction has only occurred after the TMPL_TIMEOUT round.
    timeout_check = Txn.first_valid() > Int(TMPL_TIMEOUT)

    # Fold the "Scenario 2" checks into a single boolean.
    scenario_2 = And(owner_field_check, timeout_check)

    # At this point in the program's execution, the stack has three values. At the base of the
    # stack is a boolean holding the results of the initial transaction validity checks.
    # This is followed by two booleans indicating the results of the scenario 1 and 2 checks.

    # We want to approve this transaction if we are in scenario 1 or 2.
    # So we logically OR the results of those checks together.
    # Finally, we logically AND the scenario checks with the initial checks.
    # At this point, the stack contains just one value: a boolean indicating
    # whether or not it has been approved by this contract.
    return And(Or(scenario_1, scenario_2), common_checks)
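A hedged compilation sketch using PyTeal, assuming sha256 as the hash primitive; the fee, timeout, and TEAL version below are illustrative placeholders, not values from the original source.

from pyteal import Addr, Mode, Sha256, compileTeal

def compile_htlc(recv_addr: str, own_addr: str, hash_image_b64: str) -> str:
    # Hypothetical helper: compile the contract above to TEAL source.
    # recv_addr / own_addr must be real 58-character Algorand addresses and
    # hash_image_b64 the base64-encoded sha256 digest of the secret.
    expr = htlc(
        TMPL_RCV=Addr(recv_addr),
        TMPL_OWN=Addr(own_addr),
        TMPL_FEE=1000,
        TMPL_HASHIMG=hash_image_b64,
        TMPL_HASHFN=Sha256,
        TMPL_TIMEOUT=3000,
    )
    return compileTeal(expr, mode=Mode.Signature, version=2)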
9288458b228dabc1663901e03011feaa8ff9765c
3,648,884
def parse(*args, is_flag=False, **kwargs):
    """alias of parser.parse"""
    return _parser.parse(*args, is_flag=is_flag, **kwargs)
f40499277a12bd6e492e43fd7e4328124ac59814
3,648,885
def oauth_callback():
    """
    return: str
    """
    auth = tweepy.OAuthHandler(env.TWITTER_API_KEY, env.TWITTER_API_SECRET)
    try:
        auth.request_token = session['REQUEST_TOKEN']
        verifier = request.args.get('oauth_verifier')
        auth.get_access_token(verifier)
        session['AUTH_TOKEN'], session['AUTH_TOKEN_SECRET'] = auth.access_token, auth.access_token_secret
        redirect_url = '/share'
    except Exception:
        redirect_url = '/'

    return redirect_url
a15d7c88c97b23a3ce625e363882fff3197c55b5
3,648,886
from typing import Tuple
from typing import List
import random


def generate_random_instance(n_instants: int, cost_dim: int, items_per_instant: int = 1) -> \
        Tuple[List[List[float]], List[List[List[float]]], float, float]:
    """Generates random values, costs and capacity for a Packing Problem instance.
    Instances generated here may not respect guarantee constraints.

    Parameters
    ----------
    n_instants : int
        Number of instants to be generated.
    cost_dim : int
        Dimension of the cost vectors to be generated.
    items_per_instant : int
        Number of items that should be available in each instant.

    Returns
    -------
    values : list of list of float
        A list containing, for each instant, a list with that instant item's values.
    costs : list of list of list of float
        A list containing, for each instant, a list with that instant item's cost vectors.
    cap : float
        A random problem capacity.
    e : float
        The best theoretical epsilon for the generated problem.
    """
    assert items_per_instant > 0
    assert cost_dim > 0

    values: List[List[float]] = _get_random_values(n_instants, items_per_instant)
    costs: List[List[List[float]]] = _get_random_costs(n_instants, items_per_instant, cost_dim)
    cap = random.random() * n_instants / 2
    e = sqrt(log(cost_dim, 2) / cap)

    return values.copy(), deepcopy(costs), cap, e
57ccf4cd5410d2358c434d94beb9bfbb0ca04820
3,648,887
def recommend_tags_questions(professional_id, threshold=0.01, top=5):
    """
    Recommends tags for a professional depending on answered questions.

    :param professional_id: ID of the professional
    :param threshold: Minimum percentage of questions with the tags.
    :param top: Top N recommended tags (default: 5)
    :return top_tags: DataFrame with the top tags and how many answered questions have these.
    """
    professional_tags = get_user_tags(professional_id)
    professional = professionals[professionals['professionals_id'] == professional_id]
    professional_questions = answers[answers['answers_author_id'] == professional_id]['answers_question_id']

    top_tags = tag_questions[tag_questions['tag_questions_question_id'].isin(professional_questions)]
    top_tags = pd.merge(top_tags, tags, left_on='tag_questions_tag_id', right_on='tags_tag_id')
    top_tags = top_tags[~top_tags['tags_tag_name'].isin(professional_tags)]
    top_tags = top_tags.groupby('tags_tag_name').size() / len(professional_questions.index)
    top_tags = top_tags[top_tags > threshold]
    top_tags = top_tags.sort_values(ascending=False).head(top)

    return top_tags
1b4bc6d37569d4794294028036e59437f66dc552
3,648,888
from .tools import make_simulationtable
from .model import reservoirs


def simulationtable(request):
    """
    called when the simulation page starts to get used
    """
    # convert to the right name syntax so you can get the COM ids from the database
    selected_reservoir = request.body.decode("utf-8")
    reservoir_map = reservoirs()  # local name avoids shadowing the imported callable
    for reservoir in reservoir_map:
        if reservoir_map[reservoir] == selected_reservoir:
            selected_reservoir = reservoir
            break

    return JsonResponse(make_simulationtable(selected_reservoir))
eaa60d02ee095d5efcc6a4f458bd4bb6745675d0
3,648,889
from datetime import datetime


def get_rate_limits(response):
    """Returns a list of rate limit information from a given response's headers."""
    periods = response.headers['X-RateLimit-Period']
    if not periods:
        return []

    rate_limits = []
    periods = periods.split(',')
    limits = response.headers['X-RateLimit-Limit'].split(',')
    remaining = response.headers['X-RateLimit-Remaining'].split(',')
    reset = response.headers['X-RateLimit-Reset'].split(',')

    for idx, period in enumerate(periods):
        rate_limit = {}
        limit_period = get_readable_time_string(period)
        rate_limit["period"] = limit_period
        rate_limit["period_seconds"] = period
        rate_limit["request_limit"] = limits[idx]
        rate_limit["requests_remaining"] = remaining[idx]

        reset_datetime = get_datetime_from_timestamp(reset[idx])
        rate_limit["reset"] = reset_datetime

        right_now = datetime.now()
        if (reset_datetime is not None) and (right_now < reset_datetime):
            # add 1 second because of rounding
            seconds_remaining = (reset_datetime - right_now).seconds + 1
        else:
            seconds_remaining = 0

        rate_limit["reset_in_seconds"] = seconds_remaining
        rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
        rate_limits.append(rate_limit)

    return rate_limits
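A sketch of the header layout this parser expects, using a hypothetical response stub; the helpers `get_readable_time_string` and `get_datetime_from_timestamp` are assumed to exist elsewhere in the module, so the call is left commented.

class _FakeResponse:
    # Stand-in for an HTTP response: comma-separated values, one per window.
    headers = {
        'X-RateLimit-Period': '60,3600',
        'X-RateLimit-Limit': '120,5000',
        'X-RateLimit-Remaining': '118,4990',
        'X-RateLimit-Reset': '1735689600,1735693200',
    }

# limits = get_rate_limits(_FakeResponse())   # two dicts, one per period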
eed6504d712e91110763e28f400dab5faf9300a1
3,648,890
import numpy


def plot_breakdown_percents(runs, event_labels=[], title=None, colors=None):
    """
    Plots a bar chart with the percent of the total wall-time
    of all events for multiple runs.

    Parameters
    ----------
    runs: Run object or list of Run objects
        The list of runs to display on the figure.
    event_labels: string or list of strings, optional
        Names of the events to display on the figure;
        default: [].
    title: string, optional
        Title of the figure;
        default: None.
    colors: iterator, optional
        Colors to use;
        default: None.

    Returns
    -------
    fig: Matplotlib Figure object
        The figure.
    ax: Matplotlib Axes object
        Single or array of axes.
    """
    if not isinstance(runs, (list, tuple)):
        runs = [runs]
    if not isinstance(event_labels, (list, tuple)):
        event_labels = [event_labels]

    fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
    ax.yaxis.grid(zorder=0)
    ax.set_ylabel('% of wall-time', fontsize=16)
    indices = numpy.arange(len(runs))
    bar_width = 0.5
    bar_offsets = numpy.zeros(len(runs))
    for label in event_labels:
        if colors:
            color = next(colors)
        else:
            color = next(ax._get_lines.prop_cycler)['color']
        percents = []
        for run in runs:
            if label in run.events.keys():
                percents.append(run.events[label]['percent'])
            else:
                percents.append(0.0)
        ax.bar(indices, percents, bar_width,
               label=label, bottom=bar_offsets, color=color,
               linewidth=0, zorder=0)
        bar_offsets += percents

    ax.legend(bbox_to_anchor=(1.0, 1.0), frameon=False)
    ax.set_xticks(indices + 0.25 * bar_width)
    ax.set_xticklabels([run.label for run in runs], rotation=0, fontsize=16)
    ax.set_yticks([0.0, 25.0, 50.0, 75.0, 100.0], ('0', '25', '50', '75', '100'))
    ax.set_xlim(indices[0] - 0.5, indices[-1] + 1.0)
    ax.set_ylim(0.0, 100.0)
    if title:
        fig.suptitle(title)
    return fig, ax
788c0c466223a2e2aaa695c616fdfc649248b963
3,648,891
def gen3_file(mock_gen3_auth):
    """
    Mock Gen3File with auth
    """
    return Gen3File(endpoint=mock_gen3_auth.endpoint, auth_provider=mock_gen3_auth)
ee2af5d8b89c02e205101e0fe56dc58025d72e38
3,648,892
def rhs_of_rule(rule):
    """ This function takes a grammatical rule, and returns its RHS """
    return rule[0]
004b99ac97c50f7b33cc798997463a28c3ae9a6f
3,648,893
from typing import Union
from typing import Optional
from typing import Any


def flow_duration_curve(
        x: Union[np.ndarray, pd.Series],
        log: bool = True,
        plot: bool = True,
        non_exceeding: bool = True,
        ax: Optional[Union[SubplotBase, Any]] = None,
        **kwargs
) -> Union[np.ndarray, Figure]:
    """Calculate a flow duration curve

    Calculate flow duration curve from the discharge measurements. The
    function can either return a ``matplotlib`` plot or return the ordered
    (non-)exceeding probabilities of the observations. These values can then
    be used in any external plotting environment.

    In case x.ndim > 1, the function will be called iteratively along axis 0.

    Parameters
    ----------
    x : numpy.ndarray, pandas.Series
        Series of preferably discharge measurements
    log : bool, default=True
        if `True` plot on loglog axis, ignored when plot is `False`
    plot : bool, default=True
        if `False` plotting will be suppressed and the resulting array will
        be returned
    non_exceeding : bool, default=True
        if `True` use non-exceeding probabilities
    ax : matplotlib.AxesSubplot | bokeh.Figure , default=None
        if not None, will plot into that AxesSubplot or Figure instance.

        .. note::
            If you pass an object, be sure to set the correct plotting
            backend first.
    kwargs : kwargs,
        will be passed to the ``matplotlib.pyplot.plot`` function

    Returns
    -------
    matplotlib.Figure :
        if `plot` was `True`
    numpy.ndarray :
        if `plot` was `False`

    Notes
    -----
    The probabilities are calculated using the Weibull empirical probability.
    Following [1]_, this probability can be calculated as:

    .. math:: p = m / (n + 1)

    where `m` is the rank of an observation in the ordered time series and
    `n` are the total observations. The increase by one will prevent 0% and
    100% probabilities.

    References
    ----------
    ..  [1] Sloto, R. a., & Crouse, M. Y. (1996). Hysep: a computer program
        for streamflow hydrograph separation and analysis. U.S. Geological
        Survey Water-Resources Investigations Report, 96(4040), 54.

    """
    # omit the Series index
    if isinstance(x, pd.Series):
        x = x.values

    # if x has more than one dimension call this func recursive along axis=0
    if x.ndim > 1:
        # check if plot was None, then iterate along axis=0
        if not plot:
            return np.apply_along_axis(flow_duration_curve, 0, x,
                                       non_exceeding=non_exceeding, plot=False)
        else:
            # plot, if ax is None, create
            if ax is None:
                fig, ax = plt.subplots(1, 1)
            last_ax = list(map(
                lambda x: flow_duration_curve(x, log=log,
                                              non_exceeding=non_exceeding,
                                              ax=ax),
                x.T))[-1]
            return last_ax

    # calculate the ranks
    ranks = rankdata(x, method='average')

    # calculate weibull pdf
    N = x.size

    # calculate probabilities
    p = np.fromiter(map(lambda r: r / (N + 1), ranks), dtype=float)

    # create sorting index
    if non_exceeding:
        index = np.argsort(p)
    else:
        index = np.argsort(p)[::-1]

    if not plot:
        return p[index]
    else:
        pfunc = plot_function_loader('flow_duration_curve')
        fig = pfunc(
            func_args=dict(
                x=x[index],
                y=p[index],
                non_exceeding=non_exceeding,
                log=log,
                figure=ax),
            plot_args=kwargs
        )
    return fig
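A usage sketch for the non-plotting path with synthetic discharge data; it assumes the module-level names the function relies on (numpy, pandas, scipy's rankdata) are in place, and the random series is purely illustrative.

import numpy as np

rng = np.random.default_rng(0)
discharge = rng.lognormal(mean=1.0, sigma=0.8, size=365)   # fake daily flows
probs = flow_duration_curve(discharge, plot=False)         # Weibull probabilities
print(probs.min(), probs.max())                            # roughly 1/366 .. 365/366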
3bec0159553a814ac4c68b198a29bf3075f6d202
3,648,894
def get_fields(filters):
    """ Return sql fields ready to be used on query """
    fields = (
        ("(SELECT p.posting_date FROM `tabPurchase Invoice` p Join `tabPurchase Invoice Item` i On p.name = i.parent WHERE i.item_code = `tabItem`.item_code And p.docstatus = 1 limit 1) as pinv_date"),
        ("CONCAT(`tabItem`._default_supplier, ' - ', `tabAddress`.city, ', ', `tabAddress`.state) as location"),
        ("Item", "vim_number"),
        ("Item", "make"),
        ("Item", "model"),
        ("Item", "bl"),
        ("Item", "item_type"),
        ("Item", "booking_no"),
        ("Item", "container_no"),
        ("Item", "part_type"),
        ("Item", "year"),
        ("Item", "exterior_color"),
        ("Item", "status"),
        ("Delivery Checklist", "status", "vehicle_release"),
        ("Sales Invoice Item", "item_code"),
        ("Sales Invoice Item", "vim_number", "cont_vim"),
        ("Sales Invoice Item", "item_name"),
        # ("Sales Invoice", "due_date", "due_date"),
        ("""(SELECT SUM(b.grand_total) FROM `tabSales Invoice` as b WHERE b.is_return = 1 and b.docstatus = 1 and b.return_against = `tabSales Invoice`.name ) as credit_note"""),
        ("""0 as gst_total"""),
        ("""0 as pst_total"""),
        ("""0 as g_gst_total"""),
        # ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'GST', `tabSales Taxes and Charges`.tax_amount, 0) ) as gst_total"""),
        # ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'PST', `tabSales Taxes and Charges`.tax_amount, 0 ) ) as pst_total"""),
        # ("""SUM( IF(`tabSales Taxes and Charges`.tax_type = 'GST', `tabSales Taxes and Charges`.g_tax, 0 ) ) as g_gst_total"""),
        ("Sales Invoice", "company"),
        ("Sales Invoice", "is_return"),
        ("Sales Invoice", "posting_date", "sinv_date"),
        ("Sales Invoice", "customer"),
        ("Sales Invoice", "invoice_type"),
        ("Sales Invoice", "net_total"),
        ("Sales Invoice", "currency"),
        ("Sales Invoice", "base_grand_total"),
        ("Sales Invoice", "grand_total"),
        ("Sales Invoice", "name", "sinv_name"),
        ("Sales Invoice", "outstanding_amount"),
        ("Sales Invoice", "total_g", "gprice"),
        ("Payment Entry", "posting_date", "p_posting_date"),
        ("Payment Entry", "mode_of_payment"),
        ("Payment Entry Reference", "parent", "payment_entry"),
        ("Payment Entry Reference", "allocated_amount", "breakdown"),
        ("`viewPayment and Refunds`.paid_amount"),
        ("`viewPayment and Refunds`.refund_amount"),
        ("(SELECT `view_vehicle_g_cost`.purchase_cost + COALESCE(view_vehicle_g_cost.net_lcv, 0) from `view_vehicle_g_cost` where `view_vehicle_g_cost`.item_code = `tabItem`.item_code) as net_cost")
    )

    sql_fields = []

    for args in fields:
        sql_field = get_field(args)
        sql_fields.append(sql_field)

    # frappe.errprint(", ".join(sql_fields))

    return ", ".join(sql_fields)
592d7c051e3af4cb510e43caa774054976f68865
3,648,895
from collections import Counter


def count_POS_tag(df_pos):
    """Count how often each POS tag occurs

    Args:
        df_pos ([dataframe]): dataframe, where the entries are lists of tuples (token, POS tag)

    Returns:
        df_pos_stats ([dataframe]): dataframe containing POS tag statistics
    """
    # POS tag list
    tag_lst = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR', 'JJS', 'LS',
               'MD', 'NN', 'NNS', 'NNP', 'NNPS', 'PDT', 'POS', 'PRP', 'PRP$',
               'RB', 'RBR', 'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD',
               'VBG', 'VBN', 'VBP', 'VBZ', 'WDT', 'WP', 'WP$', 'WRB',
               '$', "''", '(', ')', ',', '.', ':', '``']

    # init dataframe
    df_pos_stats = pd.DataFrame(0, index=range(len(df_pos)), columns=tag_lst)

    # count POS tag
    for index, pos in enumerate(df_pos):
        count_dict = Counter(tag for _, tag in pos)
        for tag, count in count_dict.items():
            if tag in tag_lst:
                df_pos_stats.loc[index, tag] = count

    return df_pos_stats
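A usage sketch assuming entries shaped like NLTK's `pos_tag` output, i.e. lists of (token, tag) tuples, and pandas imported as `pd` at module level as the function requires; the sentences are made up.

import pandas as pd

# Hypothetical input: one tagged sentence per row.
tagged = pd.Series([
    [("The", "DT"), ("cat", "NN"), ("sleeps", "VBZ")],
    [("Dogs", "NNS"), ("bark", "VBP"), ("loudly", "RB")],
])
stats = count_POS_tag(tagged)
print(stats[["DT", "NN", "NNS", "VBZ", "VBP", "RB"]])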
a9ac14f34c020b78b02d6ae629cbddcdde39af8d
3,648,896
import json


def catch_all(path):
    """ Gets dummy message. """
    return json.dumps({
        'message': 'no one was here',
        'ms': get_epochtime_ms()
    })
b93190b546705c1115c1612e4bd79210ab0d8f85
3,648,897
import os
import zipfile


def make_archive_obj(filepath, fileobj=None, inmemory_processing=True,
                     allow_unsafe_extraction=False):
    """This method allows for smart opening of an archive file. Currently this
    method can handle tar and zip archives. For the tar files, if the python
    library has issues, the file is attempted to be processed by using the tar
    command. (Note: the native classes are implemented to work only on posix
    machines)"""
    if not fileobj:
        assert os.path.isfile(filepath)
        test_arg = filepath
    else:
        test_arg = fileobj

    obj = None
    if zipfile.is_zipfile(test_arg):
        obj = ZipFile(filepath, fileobj, inmemory_processing, allow_unsafe_extraction)
    elif is_tarfile(test_arg):
        try:
            obj = TarFile(filepath, fileobj, inmemory_processing, allow_unsafe_extraction)
        except:
            obj = NativeTarFile(filepath, fileobj, inmemory_processing, allow_unsafe_extraction)
    else:
        raise Exception("Unknown Archive Type: " +
                        "You should really just give me something I can digest!")
    return obj
7a5490b091ae0ca55c591fe16139d0df793a71e5
3,648,898
import typing
from typing import List
from functools import reduce


def dimensions_to_space_time_index(dims, t_idx=(), t_len=(), s_idx=(), s_len=(),
                                   next_idx_valid=0, invalid=False,
                                   min_port_width=0, max_port_width=0,
                                   total_time=0, first_call=True) -> typing.Tuple[List[SpaceTimeIndex], int]:
    """
    Convert a space-time Type to a flat list of SpaceTimeIndexs with the s and t
    values along with the flat_idx.

    This is a recursive function. The parameters other than dims are the status
    of the current call. The values are needed to compute the flat t, s, and
    flat_idx of each inner value.

    :param dims: The type, its space and time dimensions
    :param t_idx: The index in each of the parent calls' that are TSeqs
    :param t_len: The lengths of each of the parent calls' TSeqs
    :param s_idx: The index in each of the parent calls' that are SSeqs
    :param s_len: The lengths of each of the parent calls' SSeqs
    :param next_idx_valid: The next flat_idx to use for valids
    :param invalid: Whether this call is in an invalid part of a type.
        Any invalid parent makes all the children invalid
    :param min_port_width: The minimum width of this type and the other (output or input).
        This is used when adding padding at end of top call.
    :param max_port_width: The maximum width of this type and the other (output or input).
        This is used when adding padding at end of top call.
    :param total_time: The total time required by this type.
        This is used when adding padding at end of top call.
    :param first_call: Whether this is the top, non-recursive call to this function
    :return: A list of SpaceTimeIndex
    """
    if type(dims) == ST_SSeq or type(dims) == ST_SSeq_Tuple:
        nested_result = []
        for s in range(dims.n):
            (res, next_idx_valid) = \
                dimensions_to_space_time_index(dims.t, t_idx, t_len,
                                               tuple([s]) + s_idx,
                                               tuple([dims.n]) + s_len,
                                               next_idx_valid, invalid,
                                               0, 0, 0, False)
            nested_result += [res]
        result = flatten(nested_result), next_idx_valid
    elif type(dims) == ST_TSeq:
        nested_result = []
        for t in range(dims.n + dims.i):
            (res, next_idx_valid) = \
                dimensions_to_space_time_index(dims.t,
                                               tuple([t]) + t_idx,
                                               tuple([dims.n + dims.i]) + t_len,
                                               s_idx, s_len,
                                               next_idx_valid,
                                               invalid or (t >= dims.n),
                                               0, 0, 0, False)
            nested_result += [res]
        result = flatten(nested_result), next_idx_valid
    else:
        # track how much time each t_idx indicates due to nested index structure
        # drop the last value because each t_idx time is the product of all
        # time dimensions inside of it. No t_idx contains last dimension time
        time_per_t_len = list(accumulate([1] + list(t_len), lambda x, y: x * y))[:-1]
        t_idx_with_time_per_len = zip(time_per_t_len, list(t_idx))
        time_per_t_idx = list(map(lambda x: x[0] * x[1], t_idx_with_time_per_len))
        t = reduce(lambda x, y: x + y, [0] + time_per_t_idx)

        # do same computation for space
        time_per_s_len = list(accumulate([1] + list(s_len), lambda x, y: x * y))
        s_idx_with_time_per_len = zip(time_per_s_len, list(s_idx))
        time_per_s_idx = list(map(lambda x: x[0] * x[1], s_idx_with_time_per_len))
        s = reduce(lambda x, y: x + y, [0] + time_per_s_idx)

        if invalid:
            result = [SpaceTimeIndex(FlatIndex(True, (t, s)), s, t)], next_idx_valid
        else:
            next_idx_valid += 1
            result = [SpaceTimeIndex(FlatIndex(False, next_idx_valid - 1), s, t)], next_idx_valid

    if first_call:
        padded_result = pad_space_dimension_with_invalids(result[0],
                                                          min_port_width,
                                                          max_port_width,
                                                          total_time)
        return fix_invalid_indexes(padded_result), result[1]
    else:
        return result
bdb24e237ba99288be98112db0f09d6782193594
3,648,899