def convert_reconstruction(bruker_directory, series, reconstruction, iod_converter, writer):
    """ Convert and save a single reconstruction.

    :param bruker_directory: Bruker directory object
    :param series: series number in the Bruker directory
    :param reconstruction: reconstruction number in the series
    :param iod_converter: conversion function
    :param writer: writer object holding the target transfer syntax and destination
    """
    logger.info("Converting {}:{}".format(series, reconstruction))
    bruker_binary = bruker_directory.get_dataset(
        "{}{:04d}".format(series, int(reconstruction)))
    bruker_json = json.loads(bruker.as_json(bruker_binary))
    logger.info("Found {}:{} - {} ({})".format(
        series, reconstruction,
        bruker_json.get("VisuAcquisitionProtocol", ["(none)"])[0],
        bruker_json.get("RECO_mode", ["none"])[0]))
    bruker_json["reco_files"] = list(bruker_directory.get_used_files(
        "{}{:04d}".format(series, int(reconstruction))))
    dicom_data_sets = iod_converter(bruker_json, writer.transfer_syntax)
    for dicom_data_set in dicom_data_sets:
        writer(dicom_data_set)
26,400
def dash_min_det_areas(bars, m, with_min_det):
    """
    Dash the areas of the bars that represent cost incurred while being
    considered by the MIN^det algorithm.

    :param bars: Collection (tuple) of matplotlib bar objects
        (matplotlib.patches.Rectangle).
    """
    bar_count = 0
    alg_count = 0
    for bar in bars:
        if bar_count % m == alg_count % m:
            bar.set_hatch('///')
        bar_count += 1
        if bar_count % (m + int(with_min_det)) == 0:
            alg_count += 1
            if with_min_det:
                bar.set_hatch('///')
                alg_count += 1
26,401
def make3DArray(dim1, dim2, dim3, initValue):
    """
    Return a list of lists of lists representing a 3D array with dimensions
    dim1, dim2, and dim3, filled with initValue.
    """
    result = []
    for i in range(dim1):
        result = result + [make2DArray(dim2, dim3, initValue)]
    return result
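A minimal usage sketch; make2DArray is assumed to be the analogous 2D helper, so a hypothetical stand-in is defined here to make the example runnable:

# Hypothetical 2D helper, assumed to mirror make3DArray's behaviour.
def make2DArray(dim1, dim2, initValue):
    return [[initValue for _ in range(dim2)] for _ in range(dim1)]

arr = make3DArray(2, 3, 4, 0)
assert len(arr) == 2 and len(arr[0]) == 3 and len(arr[0][0]) == 4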
26,402
def generate_boxes(bounds=(-1, -1, 1, 1), method='size', size=math.inf):
    """ Generate a stream of random bounding boxes

    Has two methods for generating random boxes:

    - *size* - generates a random central point (x0, y0) within the bounding
      box, and then draws widths and heights from a logN(0, 0.25) distribution.
    - *range* - generates random ranges in x and y by drawing points from the
      bounding box and ordering them.

    Parameters:
        bounds - the bounding box to generate boxes in
        method - the method to use to generate the boxes. One of 'range' or 'size'
        size - the number of boxes to generate. If `size=math.inf` then the
            returned generator is unbounded.

    Returns: a generator
    """
    methods = {
        'size': size_box_stream,
        'range': range_box_stream
    }
    if method not in methods.keys():
        raise ValueError(f'Unknown method {method}, allowed values are {methods.keys()}')

    # Make the thing to return
    _generator = methods[method](bounds)
    return _generator if math.isinf(size) else islice(_generator, size)
26,403
def create_assets(asset_ids, asset_type, mk_parents):
    """Creates the specified assets if they do not exist.

    This is a fork of the original function in the 'ee.data' module, with the
    differences that:

    - If the asset already exists but its type differs from the one we want,
      an error is raised
    - Folder creation starts from 'user/username/'

    Will be here until I can send a pull request to the original repo.

    :param asset_ids: list of paths
    :type asset_ids: list
    :param asset_type: the type of the assets. Options: "ImageCollection" or
        "Folder"
    :type asset_type: str
    :param mk_parents: make the parent folders?
    :type mk_parents: bool
    :return: A description of the saved asset, including a generated ID
    """
    for asset_id in asset_ids:
        already = ee.data.getInfo(asset_id)
        if already:
            ty = already['type']
            if ty != asset_type:
                raise ValueError("{} is a {}. Can't create asset".format(asset_id, ty))
            print('Asset %s already exists' % asset_id)
            continue
        if mk_parents:
            parts = asset_id.split('/')
            root = "/".join(parts[:2])
            root += "/"
            for part in parts[2:-1]:
                root += part
                if ee.data.getInfo(root) is None:
                    ee.data.createAsset({'type': 'Folder'}, root)
                root += '/'
        return ee.data.createAsset({'type': asset_type}, asset_id)
26,404
def cp(from_path: str, to_path: str, fs, user):
    """cp [from:string] [to:string] [EXTEND]

    cp - copy file or folder from the first specified path to the second
    """
    fs.cp(user, from_path.split("/"), to_path.split("/"))
26,405
def decoration(markdown: str, separate: int = 0) -> str:
    """Converts markdown that uses headings into something Discord can render.

    It simply turns `# ...` into `**#** ...` and appends the given number of
    newlines after each heading.

    Parameters
    ----------
    markdown : str
        The markdown to convert.
    separate : int, default 0
        How many newlines to append after a heading once it has been wrapped
        in `**`."""
    new = ""
    for line in markdown.splitlines():
        if line.startswith(("# ", "## ", "### ", "#### ", "##### ")):
            line = f"**#** {line[line.find(' ')+1:]}"
        if line.startswith(("\n", "**#**")):
            line = f"{repeate(separate)}{line}"
        new += f"{line}\n"
    return new
26,406
def get_fingerprint(file_path: str) -> str:
    """
    Calculate a fingerprint for a given file.

    :param file_path: path to the file that should be fingerprinted
    :return: the file fingerprint, or an empty string
    """
    try:
        block_size = 65536
        hash_method = hashlib.md5()
        with open(file_path, 'rb') as input_file:
            buf = input_file.read(block_size)
            while buf:
                hash_method.update(buf)
                buf = input_file.read(block_size)
        return hash_method.hexdigest()
    except Exception:
        # if the file cannot be hashed for any reason, return an empty fingerprint
        return ''
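A quick self-contained check of the fingerprint helper (the temp file is arbitrary):

import hashlib, os, tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    path = tmp.name
print(get_fingerprint(path))             # md5 of b"hello": 5d41402abc4b2a76b9719d911017c592
print(get_fingerprint("/no/such/file"))  # '' - unreadable files yield an empty fingerprint
os.remove(path)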
26,407
def set_publish_cluster_args(args):
    """Set args to publish cluster
    """
    public_cluster = {}
    if args.public_cluster:
        public_cluster = {"private": False}
        if args.model_price:
            public_cluster.update(price=args.model_price)
        if args.cpp:
            public_cluster.update(credits_per_prediction=args.cpp)
    return public_cluster
26,408
def test_permutation_operator_standard_swap_list_dim():
    """Generates the standard swap operator on two qubits."""
    expected_res = np.array([[1, 0, 0, 0],
                             [0, 0, 1, 0],
                             [0, 1, 0, 0],
                             [0, 0, 0, 1]])
    res = permutation_operator([2, 2], [2, 1])
    bool_mat = np.isclose(res, expected_res)
    np.testing.assert_equal(np.all(bool_mat), True)
26,409
def create_session():
    """creates database session"""
    db_engine = sa.create_engine(os.environ.get('DATABASE_URL'), echo=True)
    return sessionmaker(bind=db_engine)
26,410
def _get_iforest_anomaly_score_per_node(children_left, children_right, n_node_samples):
    """
    Get anomaly score per node in isolation forest, which is
    node depth + _average_path_length(n_node_samples).
    Will be used to replace "value" in each tree.

    Args:
        children_left: left children
        children_right: right children
        n_node_samples: number of samples per node
    """
    # Get depth per node.
    node_depth = np.zeros(shape=n_node_samples.shape, dtype=np.int64)
    stack = [(0, -1)]  # seed is the root node id and its parent depth
    while len(stack) > 0:
        node_id, parent_depth = stack.pop()
        node_depth[node_id] = parent_depth + 1
        if children_left[node_id] != children_right[node_id]:
            stack.append((children_left[node_id], parent_depth + 1))
            stack.append((children_right[node_id], parent_depth + 1))
    return _average_path_length(n_node_samples) + node_depth
26,411
def get_invalid_value_message(value_name: str, value: str, line_no: int,
                              uid: str, expected_vals: "list[str]") -> str:
    """
    Returns the formatted message template for an invalid value encountered
    while parsing students data.
    """
    msg = f"Invalid {value_name} <span class=\"font-weight-bold\">{value}</span>\
 on line <span class=\"text-primary\">{line_no}</span>\
 of UID <span class=\"text-secondary\">{uid}</span>.\
 Should be one of {expected_vals}"
    return msg
26,412
def test_ShortestPairs_sparse_nacl(ph_nacl: Phonopy, helper_methods):
    """Test ShortestPairs (sparse) by NaCl."""
    scell = ph_nacl.supercell
    pcell = ph_nacl.primitive
    pos = scell.scaled_positions
    spairs = ShortestPairs(scell.cell, pos, pos[pcell.p2s_map])
    svecs = spairs.shortest_vectors
    multi = spairs.multiplicities
    np.testing.assert_array_equal(multi.ravel(), multi_nacl_ref)
    pos_from_svecs = svecs[:, 0, 0, :] + pos[0]
    np.testing.assert_allclose(svecs_nacl_ref10, svecs[1, 0, :2], atol=1e-8)
    np.testing.assert_allclose(svecs_nacl_ref30, svecs[3, 0, :4], atol=1e-8)
    helper_methods.compare_positions_with_order(pos_from_svecs, pos, scell.cell)
26,413
def compute_score_for_coagulation(platelets_count: int) -> int:
    """
    Computes score based on platelets count (unit is number per microliter).
    """
    if platelets_count < 20_000:
        return 4
    if platelets_count < 50_000:
        return 3
    if platelets_count < 100_000:
        return 2
    if platelets_count < 150_000:
        return 1
    return 0
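For instance, the thresholds map platelet counts to scores as follows:

assert compute_score_for_coagulation(10_000) == 4   # below 20,000
assert compute_score_for_coagulation(75_000) == 2   # 50,000-99,999
assert compute_score_for_coagulation(200_000) == 0  # 150,000 and above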
26,414
def check_horizontal_visibility(board: list):
    """
    Check row-wise visibility (left-right and vice versa)

    Return True if all horizontal hints are satisfiable, i.e., for line
    412453*, the hint is 4, and 1245 are the four buildings that could be
    observed from the hint looking to the right.

    >>> check_horizontal_visibility(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_horizontal_visibility(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    False
    >>> check_horizontal_visibility(['***21**', '452413*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    res_num = 0
    res = 1
    k = 1
    for i in board:
        if i[0] != '*':
            while i[k + 1] != i[-1]:
                if i[k] < i[k + 1]:
                    res += 1
                k += 1
            if res != int(i[0]):
                res_num += 1
        if i[-1] != '*':
            i = i[::-1]
            while i[k + 1] != i[-1]:
                if i[k] < i[k + 1]:
                    res += 1
                k += 1
            if res != int(i[0]):
                res_num += 1
        res = 1
        k = 1
    return res_num == 0
26,415
def _get_soup(header, url):
    """Gets the header and url, creates a session and generates the "soup"
    to pass to the other functions.

    Args:
        header (dict): The header parameters to be used in the session.
        url (string): The url address to create the session.

    Returns:
        bs4.BeautifulSoup: The BeautifulSoup object.
    """
    # Try to read data from URL, if it fails, return None
    try:
        session = requests.Session()
        session.headers["User-Agent"] = header["User-Agent"]
        session.headers["Accept-Language"] = header["Language"]
        session.headers["Content-Language"] = header["Language"]
        html = session.get(url)
        return bs(html.text, "html.parser")
    except Exception:
        print(f"ERROR: Unable to retrieve data from {url}")
        return None
26,416
def format_details(details, countries):
    """
    Format details given a countries object.

    The countries object can be retrieved from read_country_names.
    """
    details["country_name"] = countries.get(details.get("country"))
    details["latitude"], details["longitude"] = read_coords(details.get("loc"))
26,417
def test_model(model, name_model, X_train, y_train, X_test, y_test,
               details=False, normalize=False, weights=None,
               return_model=False, lib='scikit-learn', fit_params=None):
    """
    Function that does a detailed investigation of a given model.
    Confusion matrices are generated and various metrics are shown.
    Currently supported libraries: 'scikit-learn' (including Pipeline), 'keras'.

    For language classification additional features are implemented and
    recognized by pipelines named steps, if name:

    - 'vect': (CountVectorizer) word counts are displayed for most and least
      frequent words
    - 'tfidf': (TfidfTransformer) words with highest and lowest TFIDF scores
      are displayed
    - 'multNB': (MultinomialNB) words with highest and lowest weights are shown

    Parameters
    ----------
    model : object with attributes fit & predict (+ others...)
        The model being tested
    name_model : string
        Name of the model being tested
    X_train : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y_train : array-like, shape (n_samples) or (n_samples, n_features)
        Target relative to x_train for classification
    X_test : array-like, shape (n_samples, n_features)
        Test vector, where n_samples is the number of samples and
        n_features is the number of features.
    y_test : array-like, shape (n_samples) or (n_samples, n_features)
        Target relative to x_test for classification
    details : bool
        If True evaluation about every parameter configuration is shown
        default False
    normalize : bool
        Specifies whether or not the confusion matrix is normalized.
        default False
    weights : dict
        weights used in fit method. For example for KerasClassifier
        model.fit(x_train, y_train, class_weight=weights).
        weights requires a named step 'nn' in which it is applied.
    return_model : bool
        model is returned if True
        default False
    lib : string
        specifies which library the model belongs to
        Possible choices are: 'scikit-learn' (default), 'keras'
    fit_params : dict
        fitting parameters for the classifier - only works for lib="keras";
        pass weights via the separate argument, as the class labels need to be
        encoded otherwise.

    Returns
    -------
    model, if return_model True
    """
    # In the case where the labels don't need to be further encoded one could
    # simply pass the class weights with fit_params. In case of label encoding
    # one needs to pass the class weights via weights.
    if weights is not None or lib == 'keras':
        le = LabelEncoder()
        y_test_dec = y_test
        y_test = le.fit_transform(y_test)
        y_train_dec = y_train
        y_train = le.transform(y_train)
        # Encode the class label for the weights
        df = pd.DataFrame(weights, index=[0])
        df.columns = le.transform(df.columns)
        class_weights = df.iloc[0].to_dict()
        fit_params['nn__class_weight'] = class_weights
        # Note: scikit-learn's fit method may not accept a class_weight
        # fitting parameter for every estimator.
        model.fit(X_train, y_train, **fit_params)
    else:
        model.fit(X_train, y_train, **fit_params)

    print('############################################# \n '
          'model: {} \n'
          '#############################################'.format(name_model))

    if details and hasattr(model, 'named_steps'):
        print('the list of steps and parameters in the pipeline\n')
        for k, v in model.named_steps.items():
            print('{}: {}\n'.format(k, v))

    if lib == 'scikit-learn':
        y_pred = model.predict(X_test)
        y_pred_train = model.predict(X_train)
    elif lib == 'keras':
        y_pred = model.predict_classes(X_test)
        y_pred_train = model.predict_classes(X_train)
    else:
        print("No library recognized.")
        sys.exit()

    # make sure we work with the correct encoding
    if weights is not None or lib == 'keras':
        y_pred_dec = le.inverse_transform(y_pred)
        y_pred_train_dec = le.inverse_transform(y_pred_train)
        model_classes = le.classes_
    elif lib == 'scikit-learn':
        y_pred_dec = y_pred
        y_pred_train_dec = y_pred_train
        y_train_dec = y_train
        y_test_dec = y_test
        model_classes = model.classes_

    # print accuracy
    print('accuracy on test set: \n {} % \n'.format(accuracy_score(y_test_dec, y_pred_dec)))
    print('accuracy on train set: \n {} % \n'.format(accuracy_score(y_train_dec, y_pred_train_dec)))

    # print report
    rep = classification_report(y_test_dec, y_pred_dec)
    print('classification report: \n {} \n '.format(rep))

    cm = confusion_matrix(y_test_dec, y_pred_dec, labels=model_classes)
    if details:
        print('confusion matrix: \n {} \n'.format(cm))

    print('Actual labels:')
    for i, j in zip(np.sum(cm, axis=1), model_classes):
        print(' ', j, i)
    print('')
    print('Predicted labels:')
    for i, j in zip(np.sum(cm, axis=0), model_classes):
        print(' ', j, i)
    print('')

    # Plot non-normalized confusion matrix
    plt.figure()
    plt.figure(figsize=(12, 12))
    plot_confusion_matrix(cm, classes=model_classes, title='Confusion matrix',
                          normalize=normalize)
    plt.show()

    if details:
        # print the length of the vocabulary
        has_index = False
        if hasattr(model, 'named_steps'):
            if 'vect' in model.named_steps.keys():
                # '.vocabulary_': dictionary of item (word) and index
                # '.get_feature_names()': list of words from the vocabulary
                voc = model.named_steps['vect'].vocabulary_
                voc_list = sorted(voc.items(), key=lambda kv: kv[1], reverse=True)
                print('length of the vocabulary vector : \n{} {} '
                      '\n'.format(len(voc), len(model.named_steps['vect'].get_feature_names())))

                # looking at the word occurrence after CountVectorizer
                vect_fit = model.named_steps['vect'].transform(X_test)
                counts = np.asarray(vect_fit.sum(axis=0)).ravel().tolist()
                df_counts = pd.DataFrame({'term': model.named_steps['vect'].get_feature_names(),
                                          'count': counts})
                df_counts.sort_values(by='count', ascending=False, inplace=True)
                print(df_counts.head(30))
                print(df_counts.tail(10))
                print('')

                n = 0
                for i in voc_list:
                    n += 1
                    print(' ', i)
                    if n > 20:
                        break
                print('more frequent words: \n{} \n'.format(voc_list[0:20]))
                print('less frequent words: \n{} \n'.format(voc_list[-20:-1]))
                # print('longest word: \n{} \n'.format(max(voc, key=len)))
                # print('shortest word: \n{} \n'.format(min(voc, key=len)))
                index = model.named_steps['vect'].get_feature_names()
                has_index = True

            # print the tfidf values
            if 'tfidf' in model.named_steps.keys():
                tfidf_value = model.named_steps['tfidf'].idf_
                # print('model\'s methods: {}\n'.format(dir(model.named_steps['tfidf'])))
                if has_index:
                    # looking at the word occurrence after CountVectorizer
                    tfidf_fit = model.named_steps['tfidf'].transform(vect_fit)
                    tfidf = np.asarray(tfidf_fit.mean(axis=0)).ravel().tolist()
                    df_tfidf = pd.DataFrame({'term': model.named_steps['vect'].get_feature_names(),
                                             'tfidf': tfidf})
                    df_tfidf.sort_values(by='tfidf', ascending=False, inplace=True)
                    print(df_tfidf.head(20))
                    print(df_tfidf.tail(20))
                    print('')

                    tfidf_series = pd.Series(data=tfidf_value, index=index)
                    print('IDF:')
                    print('Smallest idf:\n{}'.format(tfidf_series.nsmallest(20).index.values.tolist()))
                    print('{} \n'.format(tfidf_series.nsmallest(20).values.tolist()))
                    print('Largest idf:\n{}'.format(tfidf_series.nlargest(20).index.values.tolist()))
                    print('{} \n'.format(tfidf_series.nlargest(20).values.tolist()))

            # print the parameters from the model
            if 'multNB' in model.named_steps.keys():
                values = model.named_steps['multNB'].coef_[0]
                if has_index:
                    features_series = pd.Series(data=values, index=index)
                    print('Model\'s parameters:')
                    print('Smallest coeff:\n{}'.format(features_series.nsmallest(20).index.values.tolist()))
                    print('{} \n'.format(features_series.nsmallest(20).values.tolist()))
                    print('Largest coeff:\n{}'.format(features_series.nlargest(20).index.values.tolist()))
                    print('{} \n'.format(features_series.nlargest(20).values.tolist()))

    # to find the list of labels: model_classes
    # to find the model and attributes:
    # print('model\'s attributes: {}\n'.format(model.__dict__))
    # to find all methods:
    # print('model\'s methods: {}\n'.format(dir(model)))
    print('')
    if return_model:
        return model
26,418
def get_text_block(dunning_type, language, doc):
    """ This allows the rendering of parsed fields in the jinja template """
    if isinstance(doc, string_types):
        doc = json.loads(doc)

    text_block = frappe.db.get_value('Dunning Type Text Block',
                                     {'parent': dunning_type, 'language': language},
                                     ['top_text_block', 'bottom_text_block'], as_dict=1)
    if text_block:
        return {
            'top_text_block': frappe.render_template(text_block.top_text_block, doc),
            'bottom_text_block': frappe.render_template(text_block.bottom_text_block, doc)
        }
26,419
def load(test=False, cols=None):
    """Loads data from FTEST if *test* is True, otherwise from FTRAIN.
    Pass a list of *cols* if you're only interested in a subset of the
    target columns.
    """
    fname = FTEST if test else FTRAIN
    df = pd.read_csv(os.path.expanduser(fname))  # load pandas dataframe

    # The Image column has pixel values separated by space; convert
    # the values to numpy arrays:
    df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))

    if cols:  # get a subset of columns
        df = df[list(cols) + ['Image']]

    print(df.count())  # prints the number of values for each column
    df = df.dropna()  # drop all rows that have missing values in them

    X = np.vstack(df['Image'].values) / 255.  # scale pixel values to [0, 1]
    X = X.astype(np.float32)

    if not test:  # only FTRAIN has any target columns
        y = df[df.columns[:-1]].values
        y = (y - 48) / 48  # scale target coordinates to [-1, 1]
        X, y = shuffle(X, y, random_state=42)  # shuffle train data
        y = y.astype(np.float32)
    else:
        y = None

    return X, y
26,420
def flow_read(src_file):
    """Read optical flow stored in a .flo, .pfm, or .png file

    Args:
        src_file: Path to flow file

    Returns:
        flow: optical flow in [h, w, 2] format

    Refs:
        - Interpret bytes as packed binary data
          Per https://docs.python.org/3/library/struct.html#format-characters:
          format: f -> C Type: float, Python type: float, Standard size: 4
          format: d -> C Type: double, Python type: float, Standard size: 8

    Based on:
        - To read optical flow data from 16-bit PNG file:
          https://github.com/ClementPinard/FlowNetPytorch/blob/master/datasets/KITTI.py
          Written by Clément Pinard, Copyright (c) 2017 Clément Pinard
          MIT License
        - To read optical flow data from PFM file:
          https://github.com/liruoteng/OpticalFlowToolkit/blob/master/lib/pfm.py
          Written by Ruoteng Li, Copyright (c) 2017 Ruoteng Li
          License Unknown
        - To read optical flow data from FLO file:
          https://github.com/daigo0927/PWC-Net_tf/blob/master/flow_utils.py
          Written by Daigo Hirooka, Copyright (c) 2018 Daigo Hirooka
          MIT License
    """
    # Read in the entire file, if it exists
    assert os.path.exists(src_file)

    if src_file.lower().endswith('.flo'):
        with open(src_file, 'rb') as f:
            # Parse .flo file header
            tag = float(np.fromfile(f, np.float32, count=1)[0])
            assert tag == TAG_FLOAT
            w = np.fromfile(f, np.int32, count=1)[0]
            h = np.fromfile(f, np.int32, count=1)[0]
            # Read in flow data and reshape it
            flow = np.fromfile(f, np.float32, count=h * w * 2)
            flow.resize((h, w, 2))
    elif src_file.lower().endswith('.png'):
        # Read in .png file
        flow_raw = cv2.imread(src_file, -1)
        # Convert from [H,W,1] 16bit to [H,W,2] float format
        flow = flow_raw[:, :, 2:0:-1].astype(np.float32)
        flow = flow - 32768
        flow = flow / 64
        # Clip flow values
        flow[np.abs(flow) < 1e-10] = 1e-10
        # Remove invalid flow values
        invalid = (flow_raw[:, :, 0] == 0)
        flow[invalid, :] = 0
    elif src_file.lower().endswith('.pfm'):
        with open(src_file, 'rb') as f:
            # Parse .pfm file header
            tag = f.readline().rstrip().decode("utf-8")
            assert tag == 'PF'
            dims = f.readline().rstrip().decode("utf-8")
            w, h = map(int, dims.split(' '))
            scale = float(f.readline().rstrip().decode("utf-8"))
            # Read in flow data and reshape it
            flow = np.fromfile(f, '<f') if scale < 0 else np.fromfile(f, '>f')
            flow = np.reshape(flow, (h, w, 3))[:, :, 0:2]
            flow = np.flipud(flow)
    else:
        raise IOError
    return flow
26,421
def group_create_factory(context, request):
    """Return a GroupCreateService instance for the passed context and request."""
    user_service = request.find_service(name="user")
    return GroupCreateService(
        session=request.db,
        user_fetcher=user_service.fetch,
        publish=partial(_publish, request),
    )
26,422
def test_cray_badger_create_suite(cli_runner, rest_mock):
    """ Test `cray badger suites create` with the default configuration """
    # pylint: disable=too-many-locals
    runner, cli, opts = cli_runner
    name = 'someName'
    description = 'someDescription'
    config = opts['default']
    hostname = config['hostname']
    app_id_0 = "c3370eca-e3af-4c6f-80ae-09bc93d5707b"
    app_id_1 = "f3a3d357-65c9-49f7-ae8d-ab98b265d1bc"
    applicationOrder = [
        {"applicationID": app_id_0},
        {"applicationID": app_id_1}]
    result = runner.invoke(cli, ['badger', 'suites', 'create',
                                 '--name', name,
                                 '--description', description,
                                 '--application-order-application-id',
                                 ",".join([app_id_0, app_id_1])])
    print(result.output)
    assert result.exit_code == 0
    data = json.loads(result.output)
    assert data['method'].lower() == 'post'
    assert data.get('body')
    body = data.get('body')
    assert body.get('description') == description
    applicationOrderFromBody = body.get('applicationOrder')
    print(applicationOrder)
    print(applicationOrderFromBody)
    assert applicationOrder == applicationOrderFromBody
    uri = data['url'].split(hostname)[-1]
    assert uri == '/apis/badger-api/v1/suites'
26,423
def verify(params, vk, m, sig):
    """ verify a signature on a clear message """
    (G, o, g1, hs, g2, e) = params
    (g2, X, Y) = vk
    sig1, sig2 = sig
    return not sig1.isinf() and e(sig1, X + m * Y) == e(sig2, g2)
26,424
def prepend_items():
    """ Return a function that prepends every item in the "paths" list with "prefix" """
    def prepend_func(prefix, paths):
        return [os.path.join(prefix, item) for item in paths]
    return prepend_func
26,425
def generate_data(n):
    """ Generate training data """
    X, y = make_classification(n_samples=n, n_features=4)
    data = pd.DataFrame(X, columns=["x1", "x2", "x3", "x4"])
    data["y"] = y
    return data
26,426
def glob_to_chain(chain, paths):
    """ Default TChain::Add functionality doesn't support wildcards in
    directories, only in the base filename. Work around this by adding
    individual files one by one.

    paths: wildcard or list of filepaths
    """
    if type(paths) is str:
        # glob wildcard
        paths = glob.glob(os.path.expanduser(paths))
    for path in paths:
        chain.AddFile(path)
26,427
def _configure_logging(app):
    """configure logging"""
    pass
26,428
def blsimpv(p, s, k, rf, t, div=0, cp=1):
    """
    Computes the implied Black vol from a given price, spot, strike and time.
    """
    f = lambda x: blsprice(s, k, rf, t, x, div, cp) - p
    result = brentq(f, 1e-9, 1e+9)
    return result
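A minimal round-trip sketch; blsprice is assumed to be a standard Black-Scholes pricer, so a hypothetical implementation with that signature is included here to make the example runnable:

import math
from scipy.stats import norm
from scipy.optimize import brentq

# Hypothetical Black-Scholes pricer matching the assumed blsprice signature.
def blsprice(s, k, rf, t, vol, div=0, cp=1):
    d1 = (math.log(s / k) + (rf - div + 0.5 * vol ** 2) * t) / (vol * math.sqrt(t))
    d2 = d1 - vol * math.sqrt(t)
    return cp * (s * math.exp(-div * t) * norm.cdf(cp * d1)
                 - k * math.exp(-rf * t) * norm.cdf(cp * d2))

price = blsprice(100, 100, 0.01, 1.0, 0.2)                     # price a call at 20% vol
assert abs(blsimpv(price, 100, 100, 0.01, 1.0) - 0.2) < 1e-6   # recover the vol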
26,429
def container_describe(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /container-xxxx/describe API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Containers-for-Execution#API-method%3A-%2Fcontainer-xxxx%2Fdescribe
    """
    return DXHTTPRequest('/%s/describe' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
26,430
def nested(fields: Union[Dict[str, Dict], DataSpec], **config) -> dict:
    """
    Constructs a nested Field Spec

    Args:
        fields: sub field specifications
        config: in kwargs format

    Returns:
        the nested spec
    """
    spec = {
        "type": "nested",
        "fields": utils.get_raw_spec(fields)
    }  # type: Dict[str, Any]
    if len(config) > 0:
        spec['config'] = config
    return spec
26,431
def test_standardize_teams(ps):
    """Tests standardize_teams"""
    teams = ['KCC', 'GBP', 'LAC']
    assert ps.standardize_teams(teams) == ['KC', 'GB', 'LAC']
26,432
def generate_graphs(dat_dir, graph_dir, graph_infra, platform_name):
    """
    Generate graphs using the existing .dat files and graph infrastructure.
    """
    genGraphs = os.path.join(get_chpl_util_dir(), 'genGraphs')
    cmd = [genGraphs,
           '--perfdir', dat_dir,
           '--outdir', graph_dir,
           '--graphlist', os.path.join(graph_infra, 'GRAPHLIST'),
           '--testdir', graph_infra,
           '--alttitle', 'Arkouda Performance Graphs']
    if platform_name:
        cmd += ['--name', platform_name]
    subprocess.check_output(cmd)
26,433
def make_cluster_cmap(labels, grey_pos='start'):
    """
    Creates an appropriate colormap for a vector of cluster labels.

    Parameters
    ----------
    labels : array_like
        The labels of multiple clustered points
    grey_pos : str
        Where to put the grey color for the noise

    Returns
    -------
    cmap : matplotlib colormap object
        A correct colormap

    Examples
    --------
    >>> my_cmap = make_cluster_cmap(labels=np.array([-1,3,5,2,4,1,3,-1,4,2,5]))
    """
    from matplotlib.colors import ListedColormap

    if labels.max() < 9:
        cmap = list(plt.get_cmap('tab10').colors)
        if grey_pos == 'end':
            cmap.append(cmap.pop(-3))
        elif grey_pos == 'start':
            cmap = [cmap.pop(-3)] + cmap
        elif grey_pos == 'del':
            del cmap[-3]
    else:
        cmap = list(plt.get_cmap('tab20').colors)
        if grey_pos == 'end':
            cmap.append(cmap.pop(-6))
            cmap.append(cmap.pop(-6))
        elif grey_pos == 'start':
            cmap = [cmap.pop(-5)] + cmap
            cmap = [cmap.pop(-5)] + cmap
        elif grey_pos == 'del':
            del cmap[-5]
            del cmap[-5]
    cmap = ListedColormap(cmap)
    return cmap
26,434
def init_weights(net, init_type='normal', init_gain=0.02):
    """
    Initialize network weights.

    Parameters:
        net (Cell): Network to be initialized
        init_type (str): The name of an initialization method:
            normal | xavier | KaimingUniform | constant.
        init_gain (float): Gain factor for normal and xavier.
    """
    for _, cell in net.cells_and_names():
        if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose)):
            if init_type == 'normal':
                cell.weight.set_data(init.initializer(
                    init.Normal(init_gain), cell.weight.shape))
            elif init_type == 'xavier':
                cell.weight.set_data(init.initializer(
                    init.XavierUniform(init_gain), cell.weight.shape))
            elif init_type == 'KaimingUniform':
                cell.weight.set_data(init.initializer(
                    init.HeUniform(init_gain), cell.weight.shape))
            elif init_type == 'constant':
                cell.weight.set_data(
                    init.initializer(0.001, cell.weight.shape))
            else:
                raise NotImplementedError(
                    'initialization method [%s] is not implemented' % init_type)
        elif isinstance(cell, nn.GroupNorm):
            cell.gamma.set_data(init.initializer('ones', cell.gamma.shape))
            cell.beta.set_data(init.initializer('zeros', cell.beta.shape))
26,435
def objects_from_array(
    objects_arr: np.ndarray,
    default_keys=constants.DEFAULT_OBJECT_KEYS
) -> List[btypes.PyTrackObject]:
    """Construct PyTrackObjects from a numpy array."""
    assert objects_arr.ndim == 2

    n_features = objects_arr.shape[1]
    assert n_features >= 3

    n_objects = objects_arr.shape[0]
    keys = default_keys[:n_features]
    objects_dict = {keys[i]: objects_arr[:, i] for i in range(n_features)}
    objects_dict["ID"] = np.arange(n_objects)
    return objects_from_dict(objects_dict)
26,436
def run_policy(env, policy, scaler, logger, episodes):
    """ Run policy and collect data for a minimum of min_steps and min_episodes

    Args:
        env: ai gym environment
        policy: policy object with sample() method
        scaler: scaler object, used to scale/offset each observation dimension
            to a similar range
        logger: logger object, used to save stats from episodes
        episodes: total episodes to run

    Returns: list of trajectory dictionaries, list length = number of episodes
        'observes' : NumPy array of states from episode
        'actions' : NumPy array of actions from episode
        'rewards' : NumPy array of (un-discounted) rewards from episode
        'unscaled_obs' : NumPy array of unscaled observations from episode
    """
    total_steps = 0
    trajectories = []
    for e in range(episodes):
        observes, actions, rewards, unscaled_obs = run_episode(env, policy, scaler)
        total_steps += observes.shape[0]
        trajectory = {'observes': observes,
                      'actions': actions,
                      'rewards': rewards,
                      'unscaled_obs': unscaled_obs}
        trajectories.append(trajectory)
    unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
    scaler.update(unscaled)  # update running statistics for scaling observations
    logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),
                'Steps': total_steps})
    return trajectories
26,437
def load_data():
    """Load database"""
    db = TinyDB(DATABASE_PATH)
    data = db.all()
    return pd.DataFrame(data)
26,438
def test_update_where_string(dataframe, spark_dataframe):
    """Test update_where and update with a string."""
    assert_frame_equal(
        spark_dataframe.update_where(
            conditions="""
            `decorated-elephant` = 1 AND `animals@#$%^` = 'rabbit'
            """,
            target_column_name="cities",
            target_val="Durham",
        ).toPandas(),
        dataframe.update_where(
            (dataframe["decorated-elephant"] == 1)
            & (dataframe["animals@#$%^"] == "rabbit"),
            "cities",
            "Durham",
        ),
    )
26,439
def uit2xml(source, target):
    """Converts UIT to Across XML files"""
    dlg = wx.MessageDialog(None, 'Not yet implemented. Use classic TFC for now',
                           'Error', wx.OK)
    dlg.ShowModal()
    return
26,440
def test_output_logs_path(output, tmpdir):
    """Testing if logs directory is created in the provided output_directory."""
    assert output.logs_path() == os.path.join(tmpdir, "logs")
26,441
def plotcmaponaxis(ax, surf, title, point_sets=None):
    """Plot a Surface as a 2D heatmap on a given matplotlib Axis"""
    surface = ax.pcolormesh(surf.X, surf.Y, surf.Z, cmap=cm.viridis)
    if point_sets:
        for x_y, z, style in point_sets:
            ax.scatter(x_y[:, 0], x_y[:, 1], **style)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title(title)
    return surface
26,442
def get_log_storage() -> TaskLogStorage:
    """Get current TaskLogStorage instance associated with the current application."""
    return current_app.config.get("LOG_STORAGE")
26,443
def test_similarity_forest_wrongly_the_same_pred(data):
    """Two models should not make identical predictions when no random_state is set."""
    X, y = data
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42)

    clf1 = SimilarityForestClassifier()
    clf1.fit(X_train, y_train)
    y_pred1 = clf1.predict_proba(X_test)

    clf2 = SimilarityForestClassifier()
    clf2.fit(X_train, y_train)
    y_pred2 = clf2.predict_proba(X_test)

    assert not np.array_equal(y_pred1, y_pred2)
26,444
def test_optional_posonly_args1(a, b=10, /, c=100):
    """
    >>> test_optional_posonly_args1(1, 2, 3)
    6
    >>> test_optional_posonly_args1(1, 2, c=3)
    6
    >>> test_optional_posonly_args1(1, b=2, c=3)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    TypeError: test_optional_posonly_args1() got ... keyword argument... 'b'
    >>> test_optional_posonly_args1(1, 2)
    103
    >>> test_optional_posonly_args1(1, b=2)  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    TypeError: test_optional_posonly_args1() got ... keyword argument... 'b'
    """
    return a + b + c
26,445
def _build_schema_resource(fields):
    """Generate a resource fragment for a schema.

    Args:
        fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
            schema to be dumped.

    Returns:
        Sequence[Dict]: Mappings describing the schema of the supplied fields.
    """
    return [field.to_api_repr() for field in fields]
26,446
def update_component_docs():
    """Update the components.rst file with currently implemented components."""
    BuildDocs().update_docs()
26,447
def modificar_nota(lista_alumnos):
    """ Ask for a student's name and update their grade in lista_alumnos. """
    # nombre_alumno was undefined in the original snippet; prompt for it here.
    nombre_alumno = input('Student name: ')
    posicion = buscar_alumno(lista_alumnos, nombre_alumno)
    if posicion == -1:
        print('Student not found')
    else:
        nota_nueva = float(input('New grade: '))
        lista_alumnos[posicion].nota = nota_nueva
        print('\nName: ', lista_alumnos[posicion].nombre,
              '\nGrade: ', lista_alumnos[posicion].nota)
26,448
def flatten_swtn(x):
    """ Flatten a list of arrays.

    Parameters
    ----------
    x: list of dict or ndarray
        the input data

    Returns
    -------
    y: ndarray 1D
        the flattened input list of arrays.
    shape: list of dict
        the structure of the input list of arrays.
    """
    # Check input
    if not isinstance(x, list):
        x = [x]
    elif len(x) == 0:
        return None, None

    # Flatten the dataset
    y = []
    shape_dict = []
    for i in range(len(x)):
        dict_lvl = {}
        for key in x[i].keys():
            dict_lvl[key] = x[i][key].shape
            y = np.concatenate((y, x[i][key].flatten()))
        shape_dict.append(dict_lvl)
    return y, shape_dict
26,449
def checkpoint(cache, **csv_args):
    """
    Return a decorator which automatically caches the result of a function
    which returns a pandas.DataFrame.

    Parameters
    ----------
    cache : str or path object
        The path to the file which contains the results of `func`.
    **csv_args : Optional[Mapping]
        Arguments to pass on to both `pandas.read_csv` to retrieve cached
        results and `pandas.DataFrame.to_csv` to save results. Should only
        contain parameters common to `read_csv` and `to_csv` (which is most
        of them).

    Returns
    -------
    _decorator : function
        A decorator which caches the result of any function returning a
        `pandas.DataFrame`.
    """
    def _decorator(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            if path.exists(cache):
                return pd.read_csv(cache, **csv_args)
            else:
                result = func(*args, **kwargs)
                result.to_csv(cache, **csv_args)
                return result
        return _wrapper
    return _decorator
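A usage sketch of the decorator (the file name and function are illustrative; sep=',' is one of the kwargs accepted by both read_csv and to_csv):

import pandas as pd

@checkpoint('expensive.csv', sep=',')
def expensive_query():
    # imagine a slow database pull here
    return pd.DataFrame({'a': [1, 2, 3]})

df1 = expensive_query()  # computed and written to expensive.csv
df2 = expensive_query()  # served from the cache on disk
# note: to_csv writes the index by default, so the cached frame gains an
# extra index column on re-read unless index handling is configured.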
26,450
def crossing(series, value, **options):
    """Find where a function crosses a value.

    series: Series
    value: number
    options: passed to interp1d (default is linear interp)

    returns: number
    """
    interp = interp1d(series.values, series.index, **options)
    return interp(value)
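For example, inverting a series whose index is time and whose values grow linearly:

import pandas as pd
from scipy.interpolate import interp1d

ts = pd.Series([0.0, 10.0, 20.0], index=[0, 1, 2])  # value = 10 * t
print(crossing(ts, 15.0))  # -> 1.5, the time at which the series reaches 15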
26,451
def param_docs(a: int, b, c: float) -> str:
    """Detailed param docs and annotations"""
26,452
def test_monitor_args_fails() -> None:
    """
    Check MonitorArguments object is not created if both run id and experiment
    name are not provided.
    """
    with pytest.raises(ValueError) as ex:
        patch_and_parse([])
    assert "list of run ids or an experiment name" in ex.value.args[0]
26,453
def equilSoundSpeeds(gas, rtol=1.0e-6, maxiter=5000):
    """
    Returns a tuple containing the equilibrium and frozen sound speeds for a
    gas with an equilibrium composition. The gas is first set to an
    equilibrium state at the temperature and pressure of the gas, since
    otherwise the equilibrium sound speed is not defined.
    """
    # set the gas to equilibrium at its current T and P
    gas.equilibrate('TP', rtol=rtol, maxiter=maxiter)

    # save properties
    s0 = gas.s
    p0 = gas.P
    r0 = gas.density

    # perturb the pressure
    p1 = p0 * 1.0001

    # set the gas to a state with the same entropy and composition but
    # the perturbed pressure
    gas.SP = s0, p1

    # frozen sound speed
    afrozen = math.sqrt((p1 - p0) / (gas.density - r0))

    # now equilibrate the gas holding S and P constant
    gas.equilibrate('SP', rtol=rtol, maxiter=maxiter)

    # equilibrium sound speed
    aequil = math.sqrt((p1 - p0) / (gas.density - r0))

    # compute the frozen sound speed using the ideal gas expression as a check
    gamma = gas.cp / gas.cv
    afrozen2 = math.sqrt(gamma * ct.gas_constant * gas.T /
                         gas.mean_molecular_weight)

    return aequil, afrozen, afrozen2
26,454
def yaml_to_json(yaml_file, json_file):
    """Convert YAML to JSON.

    Args:
        yaml_file (str): Input YAML file name.
        json_file (str): Output JSON file name.
    """
    with open(yaml_file) as f:
        with open(json_file, 'w') as f1:
            # safe_load avoids the bare yaml.load call, which requires an
            # explicit Loader in recent PyYAML versions
            f1.write(json.dumps(yaml.safe_load(f.read()), ensure_ascii=False))
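A quick end-to-end check (file names are arbitrary):

import json, yaml

with open('config.yaml', 'w') as f:
    f.write('name: demo\nvalues:\n  - 1\n  - 2\n')

yaml_to_json('config.yaml', 'config.json')
print(open('config.json').read())  # {"name": "demo", "values": [1, 2]}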
26,455
def benchmark(partitioner_list: list, item_list: list, bucket_list: list,
              iterations: int = 1, begin_range: int = 1, end_range: int = 10,
              specified_items_sizes: list = None, verbose: bool = False) \
        -> pd.DataFrame:
    """Benchmark a set of partitioners over combinations of item and bucket counts.

    Args:
        partitioner_list: partitioner objects to benchmark
        item_list: numbers of items to partition
        bucket_list: numbers of buckets to partition into
        iterations: iterations to run per combination
        begin_range, end_range: bounds for randomly generated item sizes
        specified_items_sizes: explicit item sizes to use instead of random ones
        verbose: if True, echo per-combination mean timings and variances

    Returns:
        A DataFrame with one row per (partitioner, num_items, buckets, iteration).
    """
    r = pd.DataFrame(columns=('partitioner', 'num_items', 'buckets', 'iteration',
                              'variance', 'elapsed_seconds', 'dividers', 'items'))
    for num_items in item_list:
        for num_buckets in bucket_list:
            results = []
            for i in range(1, iterations + 1):
                if specified_items_sizes is None:
                    items = np.random.randint(begin_range, end_range + 1, size=num_items)
                else:
                    items = specified_items_sizes[:num_items]
                for partitioner in partitioner_list:
                    start = time.time()
                    dividers, variance = partitioner.partition(items, num_buckets)
                    end = time.time()
                    results.append({
                        'partitioner': partitioner.name,
                        'num_items': num_items,
                        'buckets': num_buckets,
                        'iteration': i,
                        'variance': variance,
                        'elapsed_seconds': end - start,
                        'dividers': dividers,
                        'items': items
                    })
            r = r.append(results)
            mean = r[(r.num_items == num_items) & (r.buckets == num_buckets)].groupby('partitioner').mean()
            if verbose:
                click.echo(f'Items: {num_items} Buckets: {num_buckets} Mean values over {iterations} iterations:')
                click.echo(f'Partitioner\t\tTime (ms)\t\tVariance')
                for partitioner, record in mean.iterrows():
                    click.echo(f'{partitioner}\t\t\t{record.elapsed_seconds * 1000:.2f}\t\t\t{record.variance:.4f}')
    return r
26,456
def qso_template(outfil='Figures/qso_template.pdf'):
    """ Van den Berk QSO template """
    # Load
    telfer = pyicq.get_telfer_spec()
    clight = const.c.cgs

    # Beta spliced to Vanden Berk template with host galaxy removed
    van_file = resource_filename('pyigm', '/data/quasar/VanDmeetBeta_fullResolution.txt')
    van_tbl = Table.read(van_file, format='ascii')
    isort = np.argsort(van_tbl['nu'])
    nu_van = van_tbl['nu'][isort]
    fnu_van = van_tbl['f_nu'][isort]

    lam_van = (clight / (nu_van * u.Hz)).to('AA')
    flam_van = fnu_van * clight / lam_van**2
    nrm_pix = np.abs(lam_van - 1450 * u.AA) < 10 * u.AA
    nrm_van = np.median(flam_van[nrm_pix])
    flam_van = flam_van / nrm_van

    # Start the plot
    xmnx = (1050., 2300)
    pp = PdfPages(outfil)
    fig = plt.figure(figsize=(8.0, 5.0))
    plt.clf()
    gs = gridspec.GridSpec(1, 1)

    # Lya line
    ax = plt.subplot(gs[0])
    #ax.xaxis.set_minor_locator(plt.MultipleLocator(0.5))
    #ax.xaxis.set_major_locator(plt.MultipleLocator(20.))
    #ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1))
    #ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax.set_xlim(xmnx)
    #ax.set_ylim(ymnx)
    ax.set_ylabel('Relative Flux')
    ax.set_xlabel('Rest Wavelength (Angstroms)')

    lw = 1.
    ax.plot(telfer.wavelength, telfer.flux, 'k', linewidth=lw, label='Telfer (z~1)')
    ax.plot(lam_van, flam_van, 'b', linewidth=lw, label='SDSS (z~2)')

    # Legend
    legend = plt.legend(loc='upper right', scatterpoints=1, borderpad=0.3,
                        handletextpad=0.3, fontsize='large', numpoints=1)

    # Layout and save
    set_fontsize(ax, 17.)
    print('Writing {:s}'.format(outfil))
    plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.4)
    plt.subplots_adjust(hspace=0)
    pp.savefig(bbox_inches='tight')
    plt.close()
    # Finish
    pp.close()
26,457
def adj_by_strand(genes):
    """
    genes: list of hmm genes with homogeneous strand

    Check if each gene is in tandem with another and, if so, store the gene
    inside the set obj.TA_gene.linked. In parallel, clean up the list
    obj.TA_gene.genes by removing the genes that form a tandem, so that
    TA_gene.genes keeps only the lonely genes.
    """
    linked_genes = set()
    for gi, gene in enumerate(genes):
        # print obj.TA_gene.genes_plus[gi].gene_number, obj.TA_gene.genes_plus[gi].len_val
        for gpost in genes[gi + 1:]:
            if gpost.end - gene.end + 1 > obj.Gene.length_max + obj.Gene.distanceMax:
                # If the distance between gene.end and gpost.end exceeds
                # lenmax + distmax, then the two genes won't form a pair, and
                # neither will the next postgene, because genes are sorted by
                # their start. So we can break the gpost loop and check the
                # next gene.
                break
            # simple test that checks if the two genes are adjacent
            if gene.is_pre_adj_to(gpost):
                # store the prev/post information according to the strand
                if gene.strand == '+':
                    gene.post.append(gpost)
                    gpost.prev.append(gene)
                else:
                    gpost.post.append(gene)
                    gene.prev.append(gpost)
                # add both genes to the linked set of class TA_gene
                linked_genes.add(gene)
                linked_genes.add(gpost)
    return linked_genes
26,458
def register_user():
    """ register a user and take to profile page """
    form = RegisterForm()
    if form.validate_on_submit():
        username = form.username.data
        password = form.password.data
        email = form.email.data
        first_name = form.first_name.data
        last_name = form.last_name.data
        new_user = User.register(username, password, email, first_name, last_name)
        db.session.add(new_user)
        try:
            db.session.commit()
        except IntegrityError:
            form.username.errors.append('Username taken. Please pick another username')
            return render_template('register.html', form=form)
        session['username'] = new_user.username
        flash('Welcome! Successfully Created Your Account!', "success")
        return redirect(f'/users/{new_user.username}')
    return render_template('register.html', form=form)
26,459
def stack_init_image(init_image, num_images):
    """Create a list from a single image.

    Args:
        init_image: a single image to be copied and stacked
        num_images: number of copies to be included

    Returns:
        A list of copies of the original image (numpy ndarrays)
    """
    init_images = []
    for j in range(num_images):
        init_images.append(np.asarray(init_image.copy()))
    return init_images
26,460
def process_problem(_path, _lang=EN):
    """ Process a problem """
    path = os.path.join(_path, 'problem.xml')
    with open(path, 'r', encoding='utf-8') as file:
        root = ET.parse(file).getroot()

    titles = root.find('names').findall('name')
    title = titles[0].attrib['value']
    for t in titles:
        if t.attrib['language'] == _lang:
            title = t.attrib['value']

    statement = try_get_statement_resource(root, _lang)
    if statement.found:
        path = os.path.join(_path, statement.path)
        with open(path, 'r', encoding=statement.encoding) as file:
            statement_source = file.read()
    else:
        statement_source = ''

    solution = try_get_solution_resource(root, _lang)
    if solution.found:
        path = os.path.join(_path, solution.path)
        with open(path, 'r', encoding=solution.encoding) as file:
            solution_source = file.read()
    else:
        solution_source = ''

    checker_source = ''
    checker_lang = None
    source_node = root.find('assets/checker/source')
    if source_node is not None:
        path = os.path.join(_path, source_node.attrib['path'])
        checker_lang = try_get_checker_lang(path)
        with open(path, 'r') as checker_file:
            checker_source = checker_file.read()

    judging = root.find('judging')
    input_file = judging.attrib['input-file']
    output_file = judging.attrib['output-file']

    time_limit = 0
    memory_limit = 0
    tl_node = root.find('judging/testset/time-limit')
    if tl_node is not None:
        time_limit = int(float(tl_node.text) * 0.001)
    ml_node = root.find('judging/testset/memory-limit')
    if ml_node is not None:
        memory_limit = int(ml_node.text) // (1024 * 1024)

    problem = Problem()
    problem.codename = title
    problem.input_file = input_file
    problem.output_file = output_file
    problem.time_limit = time_limit
    problem.memory_limit = memory_limit
    # problem.statement = statement_source
    # problem.solutions = solution_source
    problem.checker = checker_source
    problem.checker_lang = checker_lang

    result = ImportResult(problem, get_tags(root))
    return result
26,461
def refresh_task(tenant_id, source_id):
    """Run the Refresh task"""
    logger.info("First checking its availability")
    svc = CheckSourceAvailability(source_id)
    svc.process()
    if svc.source.availability_status == "available":
        logger.info("Starting Inventory Refresh")
        obj = RefreshInventory(source_id)
        obj.process()
        logger.info("Updating Service Plans")
        upd_sp = UpdateServicePlans(tenant_id)
        upd_sp.process()
        logger.info(f"Updated {upd_sp.updated} Service Plans")
        logger.info("Finished Inventory Refresh")
    else:
        logger.error(
            "Source %s[%s] is unavailable, cannot refresh it",
            svc.source.name,
            svc.tower.url,
        )
26,462
def input_to_text(s):
    """Convert the given byte string or text type to text using the file
    system encoding of the current system.

    :param basestring s: String or text type to convert
    :return: The string as text
    :rtype: unicode
    """
    return avalon.compat.to_text(s, sys.getfilesystemencoding())
26,463
def doomed_parser(line):
    """Fixture to test error handling"""
    raise exceptions.LineParseException('Error occurred')
26,464
def f5(x, eps=0.0):
    """The function f(x) = tanh(4x) + noise"""
    return np.tanh(4 * x) + eps * np.random.normal(size=x.shape)
26,465
def graph_history_scan(dataset, df_search, best_models):
    """
    creates a graph of scores with various percentages of data (20%, 40%, ..,
    100%) for the 5 best models

    :param dataset: dataset object
    :param df_search: dataframe of the search history
    :param best_models: selection within df_search with best models
    :return: None
    """
    try:
        df = df_search[df_search.model_name.isin(best_models)]
        if len(df) < 1:
            return
        if dataset.best_is_min:
            # positive scores (e.g. loss or error: min is best)
            y_lim1, y_lim2 = __standard_range(df.cv_mean.abs(), 0, 100)
            y_lim1 -= (y_lim2 - y_lim1) * 0.1
        else:
            # negative scores (e.g. auc: max is best)
            y_lim1, y_lim2 = __standard_range(df.cv_mean.abs(), 0, 100)
            y_lim2 += (y_lim2 - y_lim1) * 0.1
        for dark, theme in [(True, 'dark_background'), (False, 'seaborn-whitegrid')]:
            with plt.style.context(theme, after_reset=True):
                plt.figure(figsize=(6, 6))
                for model_name in best_models[::-1]:
                    scan_scores = df[df.model_name == model_name].sort_values(by='pct')
                    plt.plot(scan_scores.pct.values, np.abs(scan_scores.cv_mean.values),
                             label=model_name)
                plt.title('performance on data')
                plt.xlabel('% data')
                plt.ylabel('score')
                plt.ylim(y_lim1, y_lim2)
                if dataset.best_is_min:
                    plt.legend(loc=1)
                else:
                    plt.legend(loc=4)
                __save_fig(dataset.dataset_id, '_scan', dark)
    except Exception:
        log.error('error in graph_history_scan with dataset_id %s' % dataset.dataset_id)
26,466
def test_max_chunk_settings_overrides(et_code, settings, basic_exporter_class, new_exporter):
    """
    Using EXPORTER_MAX_RC_CONFIG and EXPORTER_MAX_DC_CONFIG settings should
    override values set on the class when an exporter is instantiated.
    """
    expclass = basic_exporter_class(et_code)
    test_et_code = expclass.__name__
    new_rc_val, new_dc_val = 77777, 88888
    settings.EXPORTER_MAX_RC_CONFIG[test_et_code] = new_rc_val
    settings.EXPORTER_MAX_DC_CONFIG[test_et_code] = new_dc_val
    exporter = new_exporter(expclass, 'full_export', 'waiting')
    assert exporter.max_rec_chunk == new_rc_val
    assert exporter.max_del_chunk == new_dc_val
    assert new_rc_val != expclass.max_rec_chunk
    assert new_dc_val != expclass.max_del_chunk
26,467
def get_next_event(game, players):
    """
    return None if a player has to move before the next event
    otherwise return the corresponding Event enum entry
    """
    active_player = get_active_player(players, game.finish_time)
    if active_player is None:
        return None
    planet_rotation_event = (game.planet_rotation_event_time,
                             game.planet_rotation_event_move,
                             Event.PLANET_ROTATION)
    offer_demand_event = (game.offer_demand_event_time,
                          game.offer_demand_event_move,
                          Event.OFFER_DEMAND)
    no_event = (active_player.time_spent, active_player.last_move, None)
    events = [planet_rotation_event, offer_demand_event, no_event]
    if game.midgame_scoring:
        midgame_scoring_event = (game.midgame_scoring_event_time,
                                 game.midgame_scoring_event_move,
                                 Event.MIDGAME_SCORING)
        events.append(midgame_scoring_event)
    result = next_turn(events)
    return result
26,468
def _convert_requirements(requirements):
    """Convert the requirements to an array of strings.

    ["key op value", "key op value", ...]
    """
    # TODO(frossigneux) Support the "or" operator
    # Convert text to json
    if isinstance(requirements, six.string_types):
        try:
            requirements = json.loads(requirements)
        except ValueError:
            raise manager_ex.MalformedRequirements(rqrms=requirements)
    # Requirement list looks like ['<', '$ram', '1024']
    if _requirements_with_three_elements(requirements):
        result = []
        if requirements[0] == '=':
            requirements[0] = '=='
        string = (requirements[1][1:] + " " + requirements[0] + " " +
                  requirements[2])
        result.append(string)
        return result
    # Remove the 'and' element at the head of the requirement list
    elif _requirements_with_and_keyword(requirements):
        return [_convert_requirements(x)[0] for x in requirements[1:]]
    # Empty requirement list
    elif isinstance(requirements, list) and not requirements:
        return requirements
    else:
        raise manager_ex.MalformedRequirements(rqrms=requirements)
26,469
def max_width(string, cols, separator='\n'):
    """Returns a freshly formatted string, wrapped to the given width.

    :param string: string to be formatted
    :type string: basestring or clint.textui.colored.ColoredString
    :param cols: max width the text to be formatted
    :type cols: int
    :param separator: separator to break rows
    :type separator: basestring

    >>> formatters.max_width('123 5678', 8)
    '123 5678'
    >>> formatters.max_width('123 5678', 7)
    '123 \n5678'
    """
    is_color = isinstance(string, ColoredString)
    if is_color:
        string_copy = string._new('')
        string = string.s

    stack = tsplit(string, NEWLINES)
    for i, substring in enumerate(stack):
        stack[i] = substring.split()

    _stack = []
    for row in stack:
        _row = ['', ]
        _row_i = 0
        for word in row:
            if (len(_row[_row_i]) + len(word)) <= cols:
                _row[_row_i] += word
                _row[_row_i] += ' '
            elif len(word) > cols:
                # ensure empty row
                if len(_row[_row_i]):
                    _row[_row_i] = _row[_row_i].rstrip()
                    _row.append('')
                    _row_i += 1
                chunks = schunk(word, cols)
                for i, chunk in enumerate(chunks):
                    if not (i + 1) == len(chunks):
                        _row[_row_i] += chunk
                        _row[_row_i] = _row[_row_i].rstrip()
                        _row.append('')
                        _row_i += 1
                    else:
                        _row[_row_i] += chunk
                        _row[_row_i] += ' '
            else:
                _row[_row_i] = _row[_row_i].rstrip()
                _row.append('')
                _row_i += 1
                _row[_row_i] += word
                _row[_row_i] += ' '
        else:
            _row[_row_i] = _row[_row_i].rstrip()

        _row = map(str, _row)
        _stack.append(separator.join(_row))

    _s = '\n'.join(_stack)
    if is_color:
        _s = string_copy._new(_s)
    return _s
26,470
def print_params(params, ns):
    """ Print contents of param dictionary to screen """
    if type(params) == dict:
        for k, v in params.iteritems():
            if type(v) == dict:
                print_params(v, ns_join(ns, k))
            else:
                print("%s=%s" % (ns_join(ns, k), v))
    else:
        print(params)
26,471
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Check if any aliens have reached the bottom of the screen."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat this the same as if the ship got hit.
            ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
            break
26,472
def aes_encrypt(text, sec_key):
    """
    AES encrypt method.

    :param text: plaintext to encrypt
    :param sec_key: AES key
    :return: base64-encoded ciphertext
    """
    pad = 16 - len(text) % 16
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    text += pad * chr(pad)
    # mode 2 is AES.MODE_CBC; the third argument is the IV
    encryptor = AES.new(sec_key, AES.MODE_CBC, '0102030405060708')
    cipher_text = encryptor.encrypt(text)
    cipher_text = base64.b64encode(cipher_text)
    return cipher_text
26,473
def get_value_at_coords(matrix, x, y):
    """Returns the value of the matrix at given integer coordinates.

    Arguments:
        matrix {ndarray} -- Square matrix.
        x {int} -- x-coordinate.
        y {int} -- y-coordinate.

    Returns:
        int -- Value of the matrix.
    """
    offset = matrix_offset(matrix)
    return matrix[x + offset, y + offset]
26,474
def rms(vector):
    """Compute the root mean square of a vector.

    Parameters
    ----------
    vector : array_like
        Input values.

    Returns
    -------
    float
        The root mean square, sqrt(mean(vector**2)).
    """
    return np.sqrt(np.mean(np.square(vector)))
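For example:

import numpy as np
print(rms(np.array([3.0, -3.0, 3.0, -3.0])))  # 3.0, since every squared entry is 9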
26,475
def parse(text):
    """Parse a tag-expression as text and return the expression tree.

    .. code-block:: python

        tags = ["foo", "bar"]
        tag_expression = parse("foo and bar or not baz")
        assert tag_expression.evaluate(tags) == True

    :param text: Tag expression as text to parse.
    :return: Parsed expression
    """
    return TagExpressionParser.parse(text)
26,476
def average_distance(points, distance_func):
    """
    Given a set of points and their pairwise distances, calculate the average
    distance between a pair of points, averaged over all C(num_points, 2) pairs.
    """
    for p0, p1 in itertools.combinations(points, 2):
        # assert symmetry
        assert abs(distance_func(p0, p1) - distance_func(p1, p0)) < 1e-7, \
            '{} {} {} {}'.format(p0, p1, distance_func(p0, p1), distance_func(p1, p0))
    for p0, p1, p2 in itertools.combinations(points, 3):
        # assert triangle inequality
        assert distance_func(p0, p1) + distance_func(p1, p2) >= distance_func(p0, p2)
        assert distance_func(p0, p2) + distance_func(p1, p2) >= distance_func(p0, p1)
        assert distance_func(p0, p1) + distance_func(p0, p2) >= distance_func(
            p1, p2), '{p0}-{p1}={d01} {p0}-{p2}={d02} {p1}-{p2}={d12}'.format(
            p0=p0, p1=p1, p2=p2,
            d01=distance_func(p0, p1),
            d02=distance_func(p0, p2),
            d12=distance_func(p1, p2))

    # actual calculation happens below
    total_dist = 0.0
    all_pairs = list(itertools.combinations(points, 2))
    for p0, p1 in all_pairs:
        total_dist += distance_func(p0, p1)
    if all_pairs:
        return float(total_dist) / len(all_pairs)
    else:
        return 0.0
26,477
def get_bin_alignment(begin, end, freq):
    """Generate a few values needed for checking and filling a series if need be."""
    start_bin = get_expected_first_bin(begin, freq)
    end_bin = (end / freq) * freq
    expected_bins = expected_bin_count(start_bin, end_bin, freq)
    return start_bin, end_bin, expected_bins
26,478
def TagAndFilterWrapper(target, dontRemoveTag=False):
    """\
    Returns a component that wraps a target component, tagging all traffic
    coming from its outbox; and filtering out any traffic coming into its
    inbox with the same unique id.
    """
    if dontRemoveTag:
        Filter = FilterButKeepTag
    else:
        Filter = FilterTag

    return Graphline(
        TAGGER=UidTagger(),
        FILTER=Filter(),
        TARGET=target,
        linkages={
            ("TARGET", "outbox"): ("TAGGER", "inbox"),   # tag data coming from target
            ("TAGGER", "outbox"): ("self", "outbox"),
            ("TAGGER", "uid"): ("FILTER", "uid"),        # ensure filter uses right uid
            ("self", "inbox"): ("FILTER", "inbox"),      # filter data going to target
            ("FILTER", "outbox"): ("TARGET", "inbox"),
            ("self", "control"): ("TARGET", "control"),  # shutdown signalling path
            ("TARGET", "signal"): ("TAGGER", "control"),
            ("TAGGER", "signal"): ("FILTER", "control"),
            ("FILTER", "signal"): ("self", "signal"),
        },
    )
26,479
async def test_login_hack(logged_in_page):
    """Test that logged_in_page is in fact logged in."""
    # Wait for elements that should only be present if login succeeded
    await logged_in_page.waitForXPath('//button[contains(., "Logout")]')
    await logged_in_page.waitForXPath('//a[contains(., "My Images")]')
26,480
def tweets(url):
    """tweets count"""
    try:
        twitter_url = 'http://urls.api.twitter.com/1/urls/count.json?url=' + url
        r = requests.get(twitter_url, headers=headers)
        json_data = json.loads(r.text)
        return json_data['count']
    except Exception:
        return 0
26,481
def CLJPc(S):
    """Compute a C/F splitting using the parallel CLJP-c algorithm.

    CLJP-c, or CLJP in color, improves CLJP by perturbing the initial random
    weights with weights determined by a vertex coloring.

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix indicating the strength between nodes i
        and j (S_ij)

    Returns
    -------
    splitting : array
        Array of length of S of ones (coarse) and zeros (fine)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical.split import CLJPc
    >>> S = poisson((7,), format='csr')  # 1D mesh with 7 vertices
    >>> splitting = CLJPc(S)

    See Also
    --------
    MIS, PMIS, CLJP

    References
    ----------
    .. [1] David M. Alber and Luke N. Olson
       "Parallel coarse-grid selection"
       Numerical Linear Algebra with Applications 2007; 14:611-643.
    """
    S = remove_diagonal(S)
    return CLJP(S, color=True)
26,482
def displayTwoDimMapPOST():
    """Run displayTwoDimMap"""
    executionStartTime = int(time.time())

    # Status and message returned to the client.
    success = True
    message = "ok"
    plotUrl = ''
    dataUrl = ''

    # Get model, var, start time, end time, lon1, lon2, lat1, lat2, months, scale.
    jsonData = request.json
    model = jsonData['model']
    var = jsonData['var']
    startT = jsonData['start_time']
    endT = jsonData['end_time']
    lon1 = jsonData['lon1']
    lon2 = jsonData['lon2']
    lat1 = jsonData['lat1']
    lat2 = jsonData['lat2']
    months = jsonData['months']
    scale = jsonData['scale']

    userId = request.args.get('userid', '')
    print('from url, userId: ', userId)
    userId = int(userId) if userId else 0

    print('model: ', model)
    print('var: ', var)
    print('startT: ', startT)
    print('endT: ', endT)
    print('lon1: ', lon1)
    print('lon2: ', lon2)
    print('lat1: ', lat1)
    print('lat2: ', lat2)
    print('months: ', months)
    print('scale: ', scale)

    # Get where the input file and output file are.
    current_dir = os.getcwd()
    print('current_dir: ', current_dir)

    try:
        # Hash the parameter string so identical requests share one output directory.
        seed_str = model + var + startT + endT + lon1 + lon2 + lat1 + lat2 + months + scale
        tag = hashlib.md5(seed_str.encode('utf-8')).hexdigest()
        output_dir = current_dir + '/svc/static/twoDimMap/' + tag
        print('output_dir: ', output_dir)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        # chdir to where the app is.
        os.chdir(current_dir + '/svc/src/twoDimMap')

        # Instantiate the app class and call the app function.
        c1 = call_twoDimMap.call_twoDimMap(
            model, var, startT, endT, lon1, lon2, lat1, lat2, months,
            output_dir, scale)
        (message, imgFileName, dataFileName) = c1.displayTwoDimMap()

        # chdir back.
        os.chdir(current_dir)

        hostname, port = get_host_port2("host.cfg")
        if hostname == 'EC2':
            # On EC2, resolve the public IP from the instance metadata service.
            try:
                req = urllib.request.Request(
                    'http://169.254.169.254/latest/meta-data/public-ipv4')
                response = urllib.request.urlopen(req)
                hostname = response.read().decode('utf-8')
            except Exception as e:
                print('e: ', e)

        print('userId: ', userId)
        print('hostname: ', hostname)
        print('port: ', port)

        plotUrl = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + imgFileName
        print('plotUrl: ', plotUrl)
        dataUrl = 'http://' + hostname + ':' + port + '/static/twoDimMap/' + tag + '/' + dataFileName
        print('dataUrl: ', dataUrl)
        failedImgUrl = 'http://' + hostname + ':' + port + '/static/plottingFailed.png'
        print('failedImgUrl: ', failedImgUrl)

        # Fall back to the failure image if either output file is missing.
        if not imgFileName or not os.path.exists(output_dir + '/' + imgFileName):
            print('****** Error: %s does not exist' % imgFileName)
            plotUrl = failedImgUrl
        if not dataFileName or not os.path.exists(output_dir + '/' + dataFileName):
            print('****** Error: %s does not exist' % dataFileName)
            dataUrl = failedImgUrl

        print('message: ', message)
        if not message or 'Error' in message or 'error:' in message:
            success = False
            plotUrl = ''
            dataUrl = ''
    except Exception as e:
        # chdir back to current_dir in case the dir was changed inside the try block.
        os.chdir(current_dir)
        print('change dir back to: ', current_dir)
        success = False
        message = str(e)

    executionEndTime = int(time.time())

    # Provenance logging (CMU integration).
    urlLink = request.query_string.decode('utf-8')
    print('urlLink: ', urlLink)
    post_json = json.dumps({
        'source': 'JPL',
        'parameters': urlLink,
        'frontend_url': frontend_url,
        'backend_url': backend_url,
        'userId': userId,
        'executionStartTime': executionStartTime * 1000,
        'executionEndTime': executionEndTime * 1000,
    })
    if USE_CMU:
        try:
            print(requests.post(CMU_PROVENANCE_URL, data=post_json, headers=HEADERS).text)
            print(requests.post(CMU_PROVENANCE_URL_2, data=post_json, headers=HEADERS).text)
        except Exception:
            print("Something went wrong with Wei's stuff")

    return jsonify({
        'success': success,
        'message': message,
        'url': plotUrl,
        'dataUrl': dataUrl,
    })
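# A minimal client sketch for the endpoint above. The route path, host, and
# port are assumptions (the @app.route decorator is not part of this snippet),
# and the parameter values are made up; only the JSON field names come from
# the handler itself.
import requests

payload = {
    'model': 'NASA_MODIS', 'var': 'clt',
    'start_time': '200401', 'end_time': '200412',
    'lon1': '0', 'lon2': '360', 'lat1': '-90', 'lat2': '90',
    'months': '1', 'scale': '0',
}
resp = requests.post('http://localhost:8090/svc/twoDimMap?userid=0', json=payload)
print(resp.json())  # {'success': ..., 'message': ..., 'url': ..., 'dataUrl': ...}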
26,483
def encrypt(msg, hexPubkey):
    """Encrypts message with hex public key"""
    return pyelliptic.ECC(curve='secp256k1').encrypt(
        msg, hexToPubkey(hexPubkey))
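# Usage sketch for encrypt(): the recipient's public key arrives as a hex
# string and the ciphertext can only be opened with the matching private key.
# hex_pubkey is assumed to already be in whatever format hexToPubkey()
# expects; that helper is defined elsewhere in this module.
ciphertext = encrypt(b'hello, world', hex_pubkey)
# Decryption happens on the receiving side, along the lines of:
#   pyelliptic.ECC(curve='secp256k1', privkey=..., pubkey=...).decrypt(ciphertext)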
26,484
def indexing(zDatagridLeft, zDatagridRight, zModelgridLeft, zModelgridRight):
    """
    Searches for the closest distances between actual and theoretical points.

    zDatagridLeft   = float - tiled matrix (same values column-wise) of z coordinates
                      of the droplet on the left side, size = [len(zModel), len(zActualLeft)]
    zDatagridRight  = float - tiled matrix (same values column-wise) of z coordinates
                      of the droplet on the right side, size = [len(zModel), len(zActualRight)]
    zModelgridLeft  = float - tiled matrix (same values row-wise) of theoretical z
                      coordinates of the droplet (one side), size = [len(zModel), len(zActualLeft)]
    zModelgridRight = float - tiled matrix (same values row-wise) of theoretical z
                      coordinates of the droplet (one side), size = [len(zModel), len(zActualRight)]
    """
    # Index the location of the closest model value for each actual point.
    indexLeft = np.argmin(np.abs(zModelgridLeft - zDatagridLeft), axis=0)
    indexRight = np.argmin(np.abs(zModelgridRight - zDatagridRight), axis=0)
    return indexLeft, indexRight
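# A small sketch of how the tiled inputs for indexing() can be built with
# np.tile; the variable values are made up for illustration, and the left
# grids are reused for both sides just to exercise the call.
import numpy as np

zModel = np.linspace(0.0, 1.0, 100)          # theoretical profile, len 100
zActualLeft = np.array([0.12, 0.48, 0.91])   # measured points, left side
zModelgridLeft = np.tile(zModel, (len(zActualLeft), 1)).T  # (100, 3), same values row-wise
zDatagridLeft = np.tile(zActualLeft, (len(zModel), 1))     # (100, 3), same values column-wise
idxLeft, idxRight = indexing(zDatagridLeft, zDatagridLeft,
                             zModelgridLeft, zModelgridLeft)
print(zModel[idxLeft])  # model values closest to each measured point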
26,485
def torch2np(tensor):
    """
    Convert from torch tensor to numpy convention.
    If 4D -> [b, c, h, w] to [b, h, w, c]
    If 3D -> [c, h, w] to [h, w, c]
    :param tensor: Torch tensor
    :return: Numpy array
    """
    array, d = tensor.detach().cpu().numpy(), tensor.dim()
    perm = [0, 2, 3, 1] if d == 4 else [1, 2, 0] if d == 3 else None
    return array.transpose(perm) if perm else array
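# Quick shape check for torch2np() on dummy data; torch is assumed to be
# imported at module level.
import torch

batch = torch.zeros(8, 3, 64, 64)         # [b, c, h, w]
print(torch2np(batch).shape)              # (8, 64, 64, 3)
print(torch2np(batch[0]).shape)           # (64, 64, 3)
print(torch2np(torch.zeros(5)).shape)     # (5,) -- other ranks pass through unchanged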
26,486
def test_api_fetch_single_row_fail(client):
    """Test fetching a nonexistent single row from the API."""
    fetch = client.get("/api/resolution/10")
    response = fetch.get_json()
    response_status = response.get("status")
    response_message = response.get("message")
    assert response_status is False
    assert response_message == "can't find resolution with the given id"
26,487
def get_filenames(feature_folder, glob_pattern, sample_size=None):
    """
    Finds all the files in the given feature folder which match the glob pattern.

    :param feature_folder: The folder to search for files.
    :param glob_pattern: The glob pattern to use for finding files.
    :param sample_size: If given, restrict the number of files loaded to a sample of this size.
    :return: A list of files matching the glob pattern in the feature folder.
    """
    files = glob.glob(os.path.join(feature_folder, glob_pattern))
    if sample_size is not None and sample_size < len(files):
        files = random.sample(files, sample_size)
    return files
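# Usage sketch for get_filenames(); the folder and pattern are made up for
# illustration. Seeding random makes the subsample reproducible across runs.
import random

random.seed(42)
feature_files = get_filenames('features/mfcc', '*.npy', sample_size=100)
print(len(feature_files))  # at most 100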
26,488
def test_3_best():
    """s0 iterates; s1~3 are all [[1,0,0],[1,2,1],[2,2,0]]"""
    most_successes = 0
    # Player 0's strategy is enumerated exhaustively: every 3x3 table mapping
    # the two visible hats to a guess. Players 1-3 keep the fixed module-level
    # strategy table `bs` quoted in the docstring.
    for raw_strat in itertools.product(range(3), repeat=9):
        strat = list(chunks(raw_strat, 3))
        successes = 0
        for hats in itertools.product(range(3), repeat=4):
            guesses = [strat[hats[3]][hats[1]],
                       bs[hats[0]][hats[2]],
                       bs[hats[1]][hats[3]],
                       bs[hats[2]][hats[0]]]
            # The group succeeds if at least one player guesses their own hat.
            if (hats[0] == guesses[0] or hats[1] == guesses[1] or
                    hats[2] == guesses[2] or hats[3] == guesses[3]):
                successes += 1
        if successes > most_successes:
            most_successes = successes
            print(strat, successes)
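# The test above relies on two module-level names that are not shown in this
# snippet. Plausible definitions, stated as assumptions: `chunks` splits a
# flat sequence into fixed-size pieces, and `bs` is the fixed strategy table
# quoted in the docstring.
def chunks(seq, n):
    """Yield successive n-sized chunks from seq."""
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

bs = [[1, 0, 0], [1, 2, 1], [2, 2, 0]]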
26,489
def message_to_csv(msg: Any, truncate_length: int = None,
                   no_arr: bool = False, no_str: bool = False) -> str:
    """
    Convert a ROS message to string of comma-separated values.

    :param msg: The ROS message to convert.
    :param truncate_length: Truncate values for all message fields to this length.
        This does not truncate the list of message fields.
    :param no_arr: Exclude array fields of the message.
    :param no_str: Exclude string fields of the message.
    :returns: A string of comma-separated values representing the input message.
    """
    def to_string(val, field_type=None):
        nonlocal truncate_length, no_arr, no_str
        r = ''
        if any(isinstance(val, t) for t in [list, tuple, array.array, numpy.ndarray]):
            if no_arr is True and field_type is not None:
                r = __abbreviate_array_info(val, field_type)
            else:
                for i, v in enumerate(val):
                    if r:
                        r += ','
                    if truncate_length is not None and i >= truncate_length:
                        r += '...'
                        break
                    r += to_string(v)
        elif any(isinstance(val, t) for t in [bool, bytes, float, int, str, numpy.number]):
            if no_str is True and isinstance(val, str):
                val = '<string length: <{0}>>'.format(len(val))
            elif any(isinstance(val, t) for t in [bytes, str]):
                if truncate_length is not None and len(val) > truncate_length:
                    val = val[:truncate_length]
                    if isinstance(val, bytes):
                        val += b'...'
                    else:
                        val += '...'
            r = str(val)
        else:
            # Nested message: recurse into its fields.
            r = message_to_csv(val, truncate_length, no_arr, no_str)
        return r

    result = ''
    # We rely on __slots__ retaining the order of the fields in the .msg file.
    for field_name, field_type in zip(msg.__slots__, msg.SLOT_TYPES):
        value = getattr(msg, field_name)
        if result:
            result += ','
        result += to_string(value, field_type)
    return result
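# Usage sketch, assuming a ROS 2 environment with std_msgs available; field
# order follows the .msg definition, with nested messages flattened in place.
from std_msgs.msg import Header

h = Header()
h.frame_id = 'base_link'
print(message_to_csv(h))               # e.g. "0,0,base_link" (stamp.sec, stamp.nanosec, frame_id)
print(message_to_csv(h, no_str=True))  # string fields replaced by a length placeholder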
26,490
def create_message(username, message):
    """
    Creates a standard message from a given user with the message.
    Replaces newline with html break.
    """
    message = message.replace('\n', '<br/>')
    return '{{"service":1, "data":{{"message":"{mes}", "username":"{user}"}} }}'.format(
        mes=message, user=username)
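# Example output of create_message(). Note that because the JSON is built by
# string formatting, quotes inside the message are not escaped; json.dumps
# would be the safer construction if the inputs are untrusted.
print(create_message('alice', 'hello\nworld'))
# {"service":1, "data":{"message":"hello<br/>world", "username":"alice"} }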
26,491
def overlaps(sdf, other):
    """
    Indicates if the intersection of the two geometries has the same shape
    type as one of the input geometries and is not equivalent to either of
    the input geometries.

    ========================= =========================================================
    **Argument**              **Description**
    ------------------------- ---------------------------------------------------------
    sdf                       Required Spatially Enabled DataFrame. The dataframe to
                              have the operation performed on.
    ------------------------- ---------------------------------------------------------
    other                     Required Spatially Enabled DataFrame or arcgis.Geometry.
                              This is the selecting data.
    ========================= =========================================================

    :returns: pd.DataFrame (Spatially enabled DataFrame)
    """
    global _HASARCPY, _HASSHAPELY
    if not _HASARCPY and not _HASSHAPELY:
        return None
    ud = pd.Series([False] * len(sdf))
    if isinstance(other, (Point, Polygon, Polyline, MultiPoint)):
        # Single geometry: narrow candidates with the spatial index, then test.
        sindex = sdf.spatial.sindex()
        q1 = sindex.intersect(bbox=other.extent)
        sub = sdf.iloc[q1]
        dj = sub[sdf.spatial.name].geom.overlaps(other)
        dj.index = sub.index
        ud = ud | dj
        return sdf[ud]
    elif _is_geoenabled(other):
        # Spatially enabled DataFrame: accumulate matches over every geometry.
        sindex = sdf.spatial.sindex()
        name = other.spatial.name
        for index, seg in other.iterrows():
            g = seg[name]
            q1 = sindex.intersect(bbox=g.extent)
            sub = sdf.iloc[q1]
            if len(sub) > 0:
                dj = sub[sdf.spatial.name].geom.overlaps(g)
                dj.index = sub.index
                ud = ud | dj
        return sdf[ud]
    else:
        raise ValueError("Invalid input, please verify that `other` "
                         "is a Point, Polygon, Polyline, MultiPoint, "
                         "or Spatially enabled DataFrame")
26,492
def squarelimit(picture, depth, qstart=1, qstop=4):
    """
    Squarelimit is drawn in a triangular style.
    For the whole picture, 4 triangles forming a square are drawn.
    """
    # no of quarters
    n = 4
    # advance in degrees
    q = 90
    transform(CORNER)
    scale(1, 1)
    for i in range(n):
        push()
        rotate(i * q)
        if dbg_offset:
            translate(picture.size[0] / offsetpart, picture.size[1] / offsetpart)
        draw_squarellimit_quarter(picture, depth, True, True, True)
        pop()
26,493
def render_page(request, page):
    """Render the page."""
    if page.registration_required and not request.user.is_authenticated:
        from django.contrib.auth.views import redirect_to_login
        return redirect_to_login(request.path)
    return render(request, page.template, {"page": page})
26,494
def latex_table(report: dict, file_path: str) -> None:
    """Convert an sklearn style classification report into a LaTeX table and save the result.

    Args:
        report (dict): sklearn style classification report.
        file_path (str): File destination.
    """
    df = prepare_report_df(report)
    table_string = generate_table(df)
    save_table(table_string, file_path)
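# Usage sketch; classification_report(..., output_dict=True) produces the
# dict shape this function expects. prepare_report_df, generate_table, and
# save_table are helpers from the same module and are not reproduced here.
from sklearn.metrics import classification_report

y_true = [0, 1, 1, 0, 1]
y_pred = [0, 1, 0, 0, 1]
report = classification_report(y_true, y_pred, output_dict=True)
latex_table(report, 'report_table.tex')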
26,495
def _whctrs(anchor):
    """
    Return width, height, x center, and y center for an anchor (window).
    """
    w = anchor[2] - anchor[0] + 1
    h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (w - 1)
    y_ctr = anchor[1] + 0.5 * (h - 1)
    # For the base anchor this yields 16, 16, 7.5, 7.5.
    return w, h, x_ctr, y_ctr
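# Worked example for the base anchor used in Faster R-CNN style code:
# [0, 0, 15, 15] spans 16 pixels per side (coordinates are inclusive, hence
# the +1), centered halfway between pixel 7 and pixel 8.
w, h, x_ctr, y_ctr = _whctrs([0, 0, 15, 15])
print(w, h, x_ctr, y_ctr)  # 16 16 7.5 7.5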
26,496
def obtener_atletas_pais(atletas: list, pais_interes: str) -> list:
    """
    Builds a list with the information of the athletes from the given
    country, regardless of the year in which they participated.

    Parameters:
        atletas: list of dictionaries with each athlete's information.
        pais_interes: str.

    Returns:
        atletas_pais: list with the dictionaries of the country's athletes.
        Each athlete's dictionary: {'nombre': str, 'evento': str, 'anio': int}.
    """
    # Initialize the list of athletes from the country.
    atletas_pais = list()
    # Walk the full list of athletes.
    for cada_atleta in atletas:
        # Unpack the current athlete's fields.
        anio_actual = cada_atleta['anio']
        nombre_actual = cada_atleta['nombre']
        evento_actual = cada_atleta['evento']
        pais_actual = cada_atleta['pais']
        # Keep the athlete if the country matches.
        if pais_actual == pais_interes:
            atletas_pais.append({'nombre': nombre_actual,
                                 'evento': evento_actual,
                                 'anio': anio_actual})
    return atletas_pais
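# Usage sketch with made-up records; only athletes whose 'pais' matches the
# requested country are returned, and the 'pais' key is dropped on the way out.
atletas = [
    {'nombre': 'Ana', 'evento': '100m', 'anio': 2016, 'pais': 'Colombia'},
    {'nombre': 'Luis', 'evento': 'Judo', 'anio': 2012, 'pais': 'Mexico'},
]
print(obtener_atletas_pais(atletas, 'Colombia'))
# [{'nombre': 'Ana', 'evento': '100m', 'anio': 2016}]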
26,497
def kl_div_mixture_app(m1, v1, m2, v2,
                       return_approximations=False,
                       return_upper_bound=False):
    """Approximate KL divergence between Gaussian and mixture of Gaussians

    See Durrieu et al, 2012: "Lower and upper bounds for approximation of the
    Kullback-Leibler divergence between Gaussian Mixture Models"
    https://serval.unil.ch/resource/serval:BIB_513DF4E21898.P001/REF

    Both the variational and the product approximation are simplified here
    compared to the paper, as we assume to have a single Gaussian as the
    first argument.

    m1: ([batch_dims], data_dims)
    v1: ([batch_dims], data_dims)
    m2: ([batch_dims], mixtures, data_dims)
    v2: ([batch_dims], mixtures, data_dims)
    """
    assert m1.ndim + 1 == m2.ndim

    if return_upper_bound:
        res = _kl_div_mixture_app_with_upper_bound(m1, v1, m2, v2)
        if return_approximations:
            return res
        else:
            return res[0], res[3]
    else:
        kls_app, kls_var, kls_prod = _kl_div_mixture_app(m1, v1, m2, v2)
        if return_approximations:
            return kls_app, kls_var, kls_prod
        else:
            return kls_app
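# Shape sketch for kl_div_mixture_app(); the helper functions it delegates to
# live in the same module and are not reproduced here. A batch of 8 Gaussians
# is compared against 8 mixtures of 5 components each over 2-dim data.
import numpy as np

m1 = np.zeros((8, 2))
v1 = np.ones((8, 2))
m2 = np.zeros((8, 5, 2))
v2 = np.ones((8, 5, 2))
kls = kl_div_mixture_app(m1, v1, m2, v2)  # one approximate KL per batch element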
26,498
def wilson_primality_test(n: int) -> bool:
    """
    https://en.wikipedia.org/wiki/Wilson%27s_theorem

    >>> assert all(wilson_primality_test(i) for i in [2, 3, 5, 7, 11])
    >>> assert not all(wilson_primality_test(i) for i in [4, 6, 8, 9, 10])
    """
    return ((factorial_lru(n - 1) + 1) % n) == 0
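# factorial_lru is not defined in this snippet; a plausible definition,
# stated as an assumption, is a cached factorial so repeated tests stay cheap.
from functools import lru_cache
from math import factorial

@lru_cache(maxsize=None)
def factorial_lru(n: int) -> int:
    return factorial(n)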
26,499