def test_include_via_python_module_name(tmpdir):
    """Check if an include can be a Python module name"""
    settings_file = tmpdir.join("settings.toml")
    settings_file.write(
        """
        [default]
        default_var = 'default'
        """
    )
    dummy_folder = tmpdir.mkdir("dummy")
    dummy_folder.join("dummy_module.py").write('FOO = "164110"')
    dummy_folder.join("__init__.py").write('print("initing dummy...")')

    settings = LazySettings(
        SETTINGS_FILE_FOR_DYNACONF=str(settings_file),
        INCLUDES_FOR_DYNACONF=["dummy.dummy_module"],
    )
    assert settings.DEFAULT_VAR == "default"
    assert settings.FOO == "164110"
33,300
def download_table_dbf(file_name, cache=True):
    """
    Downloads an auxiliary SINAN data file in "dbf" format, or the "zip"
    folder that contains it (if the "zip" folder has not already been
    downloaded), then reads it as a pandas DataFrame and finally deletes it.

    Parameters
    ----------
    file_name: str object
        Name of the "dbf" file

    Returns
    -------
    df: pandas DataFrame object
        DataFrame containing the data of an auxiliary data file originally
        in "dbf" format
    """
    ftp = FTP('ftp.datasus.gov.br')
    ftp.login()
    fname = file_name + '.DBF'
    try:
        if file_name == 'CADMUN':
            fname = file_name + '.DBF'
            ftp.cwd('/dissemin/publicos/SIM/CID10/TABELAS/')
            ftp.retrbinary(f'RETR {fname}', open(fname, 'wb').write)
        elif file_name == 'rl_municip_regsaud':
            folder = 'base_territorial.zip'
            ftp.cwd('/territorio/tabelas/')
            ftp.retrbinary(f'RETR {folder}', open(folder, 'wb').write)
            zip = ZipFile(folder, 'r')
            fname = file_name + '.dbf'
            zip.extract(fname)
    except Exception:
        # use file_name here: fname may not be bound yet if the FTP call failed early
        raise Exception('Could not download {}'.format(file_name))
    dbf = DBF(fname)
    df = pd.DataFrame(iter(dbf))
    os.unlink(fname)
    return df
33,301
def test_one_core_fail(cache_mode):
    """
    title: Test if OpenCAS correctly handles failure of one of multiple core devices.
    description: |
        When one core device fails in a single cache instance all blocks previously occupied
        should be available to other core devices. Test is without pass through mode.
    pass_criteria:
      - No system crash.
      - Second core is able to use OpenCAS.
    """
    with TestRun.step("Prepare one cache and two core devices."):
        cache_dev = TestRun.disks['cache']
        cache_dev.create_partitions([Size(1, Unit.GibiByte)] * 2)
        cache_part = cache_dev.partitions[0]
        core_dev1 = TestRun.disks['core1']  # This device would be unplugged.
        core_dev1.create_partitions([Size(2, Unit.GibiByte)])
        core_part1 = core_dev1.partitions[0]
        core_dev2 = TestRun.disks['core2']
        core_dev2.create_partitions([Size(2, Unit.GibiByte)])
        core_part2 = core_dev2.partitions[0]
        Udev.disable()

    with TestRun.step("Start cache"):
        cache = casadm.start_cache(cache_part, cache_mode, force=True)
        caches_count = len(casadm_parser.get_caches())
        if caches_count != 1:
            TestRun.fail(f"Expected caches count: 1; Actual caches count: {caches_count}.")

    with TestRun.step("Add both core devices to cache."):
        core1 = cache.add_core(core_part1)
        core2 = cache.add_core(core_part2)
        cores_count = len(casadm_parser.get_cores(cache.cache_id))
        if cores_count != 2:
            TestRun.fail(f"Expected cores count: 2; Actual cores count: {cores_count}.")

    with TestRun.step("Change sequential cutoff policy."):
        cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)

    with TestRun.step("Fill cache with pages from the first core."):
        dd_builder(cache_mode, core1, cache.size).run()
        cache_occupied_blocks_before = cache.get_occupancy()

    with TestRun.step("Unplug the first core device."):
        core_dev1.unplug()

    with TestRun.step("Check if core device is really out of cache."):
        output = str(casadm.list_caches().stdout.splitlines())
        if core_part1.path in output:
            TestRun.fail("The first core device should be unplugged!")

    with TestRun.step("Check if the remaining core is able to use cache."):
        dd_builder(cache_mode, core2, Size(100, Unit.MebiByte)).run()
        if not float(core2.get_occupancy().get_value()) > 0:
            TestRun.LOGGER.error("The remaining core is not able to use cache.")

    with TestRun.step("Check if occupancy from the first core is removed from cache."):
        # Cache occupancy cannot be lower than before the first core fails and after that
        # should be equal to the sum of occupancy of the first and the remaining core
        cache_occupied_blocks_after = cache.get_occupancy()
        if cache_occupied_blocks_before > cache_occupied_blocks_after \
                or cache_occupied_blocks_after != core2.get_occupancy() + core1.get_occupancy():
            TestRun.fail("Blocks previously occupied by the first core "
                         "aren't released after this core failure.")

    with TestRun.step("Stop cache."):
        casadm.stop_all_caches()

    with TestRun.step("Plug back the first core."):
        core_dev1.plug()
33,302
def aq_name(path_to_shp_file):
    """
    Computes the name of a given aquifer given its shape file
    :param path_to_shp_file: path to the .shp file for the given aquifer
    :return: a string (name of the aquifer)
    """
    str_ags = path_to_shp_file.split('/')
    str_aq = ""
    if len(str_ags) >= 2:
        str_aq = str(str_ags[1])
    print(str_aq)
    return str_aq
33,303
def check_dbtable(dbname, tablename):
    """Check that tablename exists in database"""
    LOGGER.info(f"Checking {tablename} exists in: {dbname}")
    # Connect to DB
    con = sqlite3.connect(dbname)
    # Create the table if it does not already exist
    create_table = """
        CREATE TABLE IF NOT EXISTS {tablename} (
            {statement}
        )
    """.format(**{'tablename': tablename, 'statement': _table_statement})
    LOGGER.debug(create_table)
    cur = con.cursor()
    cur.execute(create_table)
    con.commit()
    con.close()
    return
33,304
def execution_start():
    """Code to execute after the config files and command line flags
    have been parsed. This setuptools hook is the earliest that will be
    able to use custom command line flags.
    """
    # Add to the search patterns used by modules
    if "ALFA" not in config.sp:
        config.update_dict(config.sp, {"ALFA": {"fn": "*ALFA_feature_counts.tsv"}})
    if "tin-score" not in config.sp:
        config.update_dict(config.sp, {"tin-score": {"fn": "TIN_score.tsv"}})
    if "zpca/pca" not in config.sp:
        config.update_dict(config.sp, {"zpca/pca": {"fn": "PCA.tsv"}})
    if "zpca/scree" not in config.sp:
        config.update_dict(config.sp, {"zpca/scree": {"fn": "scree.tsv"}})
33,305
def clear_old_snapshots():
    """ Remove any old snapshots to minimize disk space usage locally. """
    logging.info('Removing old Cassandra snapshots...')
    try:
        subprocess.check_call([NODE_TOOL, 'clearsnapshot'])
    except CalledProcessError as error:
        logging.error('Error while deleting old Cassandra snapshots. Error: {0}'.
                      format(str(error)))
33,306
def enable_host_logger():
    """Enable host logger for logging stdout from remote commands
    as it becomes available.
    """
    enable_logger(host_logger)
33,307
def validate_json():
    """validates if input is in JSON format"""
    if not request.is_json:
        abort(400, "request should be in JSON format")
33,308
def gini(arr, mode='all'):
    r"""Calculate the Gini coefficient(s) of a matrix or vector.

    Parameters
    ----------
    arr : array-like
        Array or matrix on which to compute the Gini coefficient(s).
    mode : string, optional
        One of ['row-wise', 'col-wise', 'all']. Default is 'all'.

    Returns
    -------
    coeffs : array-like
        Array of Gini coefficients.

    Note
    ----
    If arr is a transition matrix A, such that Aij = P(S_k=j|S_{k-1}=i),
    then 'row-wise' is equivalent to 'tmat_departure' and 'col-wise' is
    equivalent to 'tmat_arrival'.

    Similarly, if arr is the observation (lambda) matrix of an HMM such that
    lambda \in \mathcal{C}^{n_states \times n_units}, then 'row-wise' is
    equivalent to 'lambda_across_units' and 'col-wise' is equivalent to
    'lambda_across_states'.

    If mode = 'all', then the matrix is unwrapped into a numel-dimensional
    array before computing the Gini coefficient.
    """
    if mode is None:
        mode = 'row-wise'
    if mode not in ['row-wise', 'col-wise', 'all']:
        raise ValueError("mode '{}' not supported!".format(mode))

    gini_coeffs = None
    if mode == 'all':
        arr = np.atleast_1d(arr).astype(float)
        gini_coeffs = _gini(arr)
    elif mode == 'row-wise':
        arr = np.atleast_2d(arr).astype(float)
        gini_coeffs = [_gini(row) for row in arr]
    elif mode == 'col-wise':
        arr = np.atleast_2d(arr).astype(float)
        gini_coeffs = [_gini(col) for col in arr.T]
    return gini_coeffs
33,309
def build_future_index():
    """
    Build index data: futures weighted indices, the dominant-contract index,
    the far-month dominant contract and the delivery-month dominant contract.
    Corresponding symbols -- xx00: open-interest weighted, xx11: volume
    weighted, xx88: dominant contract, xx99: far-month contract,
    xx77: delivery-month contract.
    Contracts traded on the same day are ranked by volume and the top three
    are kept; the one with the largest volume is the dominant contract.
    The contract closest to the current month is the delivery-month dominant
    contract; the one delivered after the dominant contract is the far-month
    dominant contract.
    :return:
    """
    # Updating quote data in the database runs independently; data is not updated here
    # insert_hq_to_mongo()

    # Connect to the database
    # conn = connect_mongo(db='quote')
    index_cursor = conn['index']
    hq_cursor = conn['future']

    # Extract the products traded within the last 360 days from the future collection
    filter_dict = {'datetime': {"$gt": datetime.today() - timedelta(360)}}
    codes = hq_cursor.distinct('code', filter_dict)
    if not isinstance(codes, list) or len(codes) == 0:
        print("Don't find any trading code in future collection!")
        return

    # Build the index for each product separately
    for code in codes:
        # Get the most recent dominant-contract record from the index data;
        # the criterion is the previous day's open interest
        last_doc = index_cursor.find_one({'symbol': code + '88'},
                                         sort=[('datetime', DESCENDING)])
        if last_doc:
            filter_dict = {'code': code, 'datetime': {'$gte': last_doc['datetime']}}
            # Products that have already been renamed: ['GN', 'WS', 'WT', 'RO', 'ER', 'ME', 'TC']
            # old contract              new contract    last trading day of old contract
            # Methanol      ME/50t      MA/10t          2015-5-15
            # Thermal coal  TC/200t     ZC/100t         2016-4-8
            # Strong gluten wheat WS/10t WH/20t         2013-05-23
            # Hard white wheat WT/10t   PM/50t          2012-11-22
            # Early indica rice ER/10t  RI/20t          2013-5-23
            # Mung bean     GN                          2010-3-23
            # Rapeseed oil  RO/5t       OI/10t          2013-5-15
            # if code in ['GN', 'WS', 'WT', 'RO', 'ER', 'ME', 'TC']:
            #     print('{} is the {} last trading day.'.format(
            #         last_doc['datetime'].strftime('%Y-%m-%d'), code))
            #     continue
            # else:
            #     print("Build {} future index from {}".format(code, last_doc['datetime']))
        else:
            # For testing a specific date
            # filter_dict = {'code': code, 'datetime': {'$lte': datetime(2003, 1, 1)}}
            filter_dict = {'code': code}
            print("Build {} future index from trade beginning.".format(code))

        # Read the required data from the database
        hq = hq_cursor.find(filter_dict, {'_id': 0}).sort([("datetime", ASCENDING)])
        hq_df = pd.DataFrame(list(hq))
        if hq_df.empty:
            print('{} index data have been updated before!'.format(code))
            continue
        hq_df.set_index(['datetime', 'symbol'], inplace=True)  # must be sorted by index
        date_index = hq_df.index.levels[0]
        if len(date_index) < 2:  # no new data
            print('{} index data have been updated before!'.format(code))
            continue

        index_names = ['domain', 'near', 'next']
        contract_df = pd.DataFrame(index=date_index, columns=index_names)
        for date in date_index:
            # hq.py:493: PerformanceWarning: indexing past lexsort depth may impact performance.
            s = hq_df.loc[date, 'openInt'].copy()
            s.sort_values(ascending=False, inplace=True)
            # Guard against fewer than 3 contracts, to avoid the delivery-month and
            # dominant contracts, or the dominant and next-month contracts, coinciding
            s = s[:min(3, len(s))]
            domain = s.index[0]
            contract_df.loc[date, 'domain'] = domain
            contract_df.loc[date, 'near'] = s.index.min()
            try:
                if s.index[1] > domain:
                    contract_df.loc[date, 'next'] = s.index[1]
                elif s.index[2] > domain:
                    contract_df.loc[date, 'next'] = s.index[2]
                else:
                    contract_df.loc[date, 'next'] = domain
            except IndexError:
                print("{} domain contract is the last contract {}".format(code, domain))
                contract_df.loc[date, 'next'] = domain

        pre_contract_df = contract_df.shift(1).dropna()
        # length = len(contract_df)
        pre_no_index_df = pre_contract_df.reset_index()
        # Futures index data starts from the second trading day
        hq_df = hq_df.loc[pre_no_index_df.datetime[0]:]

        frames = []
        index_symbol = [code + x for x in ['00', '11', '88', '77', '99']]
        multi_index_names = ['datetime', 'symbol']
        # Dominant, delivery-month and far-month contract data
        for name, symbol in zip(index_names, index_symbol[-3:]):
            multi_index = pd.MultiIndex.from_frame(
                pre_no_index_df[['datetime', name]], names=multi_index_names)
            index_diff = multi_index.difference(hq_df.index)
            # Handle the case where a contract still has delivery positions one
            # day and disappears the next
            if not index_diff.empty:
                date_index = index_diff.get_level_values(level=0)
                pre_contract_df.loc[date_index, name] = contract_df.loc[date_index, name]
                pre_no_index_df = pre_contract_df.reset_index()
                multi_index = pd.MultiIndex.from_frame(
                    pre_no_index_df[['datetime', name]], names=multi_index_names)
                print('{} use {} current day contract'.format(symbol, len(index_diff)))
            index_df = hq_df.loc[multi_index]
            index_df.reset_index(inplace=True)
            index_df['contract'] = index_df['symbol']
            index_df['symbol'] = symbol
            frames += index_df.to_dict('records')

        # Weighted indices
        for symbol, weight_name in zip(index_symbol[:2], ['openInt', 'volume']):
            index_df = build_weighted_index(hq_df, weight=weight_name)
            index_df.reset_index(inplace=True)
            index_df['code'] = code
            index_df['market'] = hq_df.market[0]
            index_df['symbol'] = symbol
            frames += index_df.to_dict('records')

        result = index_cursor.insert_many(frames)
        if result.acknowledged:
            print('{} index data insert success.'.format(code))
        else:
            print('{} index data insert failure.'.format(code))
33,310
def getIndex():
    """ Retrieves index value. """
    headers = {
        'accept': 'application/json',
    }
    indexData = requests.get(
        APIUrls.lnapi + APIUrls.indexUrl,
        headers=headers,
    )
    if indexData.status_code == 200:
        return indexData.json()
    else:
        raise RuntimeError(
            'Unable to fetch index data:\n'
            f'{indexData.text}'
        )
33,311
def add_formflow_to_graph(graph, formflow):
    """Add a formflow object and its edges to the graph

    Also iterates through tasks (formflow steps) and adds form displays,
    formflow jump, run command rules as well as any referenced conditions
    """
    graph.add_node(formflow.guid, formflow.map())
    FORMFLOW_LOOKUP[formflow.name] = formflow.guid
    if formflow.image:
        i_dict = {
            "type": "link",
            "link_type": "formflow icon"
        }
        graph.add_edge(formflow.guid, formflow.image.lower(), attr_dict=i_dict)
    if formflow.conditions:
        for condition in formflow.conditions:
            c_dict = {
                "type": "link",
                "link_type": "formflow condition",
                "condition": condition["VWT_ConditionId"].lower(),
                "guid": condition["VWT_PK"].lower()
            }
            graph.add_edge(formflow.guid, c_dict["condition"], attr_dict=c_dict)
    if formflow.tasks:
        for task in formflow.tasks:
            go_task = GlowObject(settings["task"], task)
            add_task_edge_to_graph(graph, formflow, go_task)
    if formflow.data:
        xml_parser = XMLParser(formflow.data)
        for template in xml_parser.iterfind("ShowFormActivity"):
            template_id = template["template"].lower()
            FORMSTEP_LOOKUP[template["name"]] = template_id
            graph.add_edge(formflow.guid, template_id, attr_dict=template)
        for ff in xml_parser.iterfind("JumpToActivity"):
            graph.add_edge(formflow.guid, ff["formflow"].lower(), attr_dict=ff)
        for condition_type in ("ConditionalIfActivity",
                               "ConditionalWhileActivity",
                               "NativeTransitionInfo"):
            for condition in xml_parser.iterfind(condition_type):
                if condition:
                    graph.add_edge(formflow.guid, condition["condition"].lower(),
                                   attr_dict=condition)
        for sound in xml_parser.iterfind("PlayAudioActivity"):
            graph.add_edge(formflow.guid, sound["sound"].lower(), attr_dict=sound)
        for command in xml_parser.iterfind("RunCommandActivity"):
            command_name = command["command"]
            entity = get_command_entity(command_name, formflow.entity)
            command_full_name = "{}-{}".format(command_name, entity)
            graph.add_edge(formflow.guid, command_full_name, attr_dict=command)
33,312
def getDataFile(fname):
    """Return complete path to datafile fname.

    Data files are in the directory skeleton/skeleton/data
    """
    return os.path.join(getDataPath(), fname)
33,313
def input_as_string(filename: str) -> str:
    """returns the content of the input file as a string"""
    with open(filename, encoding="utf-8") as file:
        return file.read().rstrip("\n")
33,314
def channame_to_python_format_string(node, succgen=None):
    """See channame_str_to_python_format_string

    @succgen is optional; if given, will check that identifiers can be found.
    """
    if not node:  # empty AST
        return (True, "")
    if node.type == 'Identifier':  # and len(node.children) >= 1:
        # if no succgen, assume it's a channel
        if not succgen or node.children[0] in succgen.channel_identifiers:
            # Of the form "channame[x][x]...[x]"
            static = True
            if node.leaf:  # Have IndexList?
                idxs = []
                for c in node.leaf.children:
                    assert c.type == 'Index'
                    (exprstatic, expr) = _expression_to_python_format_string(c.leaf, succgen)
                    static = static and exprstatic
                    idxs += [expr]
                idxs = "".join(["[" + x + "]" for x in idxs])
                return (static, node.children[0] + idxs)
            else:
                return (True, node.children[0])
        else:
            print(node, succgen.channel_identifiers)
            raise IllegalExpressionException('Unknown channel ' + node.children[0])
    else:
        raise IllegalExpressionException('Illegal expression type for channame: ' + node.type)
33,315
def write_colorama(typ: str, name: str, msg: str, encoding: str) -> None:
    """if we're writing to the screen"""
    try:
        _write_colorama_screen(typ, name + msg)
    except IOError:
        sys.stdout.write(f'error writing line...encoding={encoding!r}\n')
        sys.stdout.write(msg)
33,316
def clean_import():
    """Create a somewhat clean import base for lazy import tests"""
    du_modules = {
        mod_name: mod for mod_name, mod in sys.modules.items()
        if mod_name.startswith("bs_dateutil")
    }
    other_modules = {
        mod_name for mod_name in sys.modules
        if mod_name not in du_modules
    }
    for mod_name in du_modules:
        del sys.modules[mod_name]

    yield

    # Delete anything that wasn't in the original sys.modules list
    for mod_name in list(sys.modules):
        if mod_name not in other_modules:
            del sys.modules[mod_name]
    # Restore original modules
    for mod_name, mod in du_modules.items():
        sys.modules[mod_name] = mod
33,317
def is_bad(x):
    """for numeric vector x, return logical vector of positions that are
    null, NaN, or infinite"""
    if can_convert_v_to_numeric(x):
        x = safe_to_numeric_array(x)
        return numpy.logical_or(
            pandas.isnull(x),
            numpy.logical_or(numpy.isnan(x), numpy.isinf(x))
        )
    return pandas.isnull(x)
33,318
def psf_generator(cmap='hot', savebin=False, savetif=False, savevol=False,
                  plot=False, display=False, psfvol=False, psftype=0,
                  expsf=False, empsf=False, realshape=(0, 0), **kwargs):
    """Calculate and save point spread functions."""
    args = {
        'shape': (50, 50),       # number of samples in z and r direction
        'dims': (5.0, 5.0),      # size in z and r direction in micrometers
        'ex_wavelen': 488.0,     # excitation wavelength in nanometers
        'em_wavelen': 520.0,     # emission wavelength in nanometers
        'num_aperture': 1.2,
        'refr_index': 1.333,
        'magnification': 1.0,
        'pinhole_radius': 0.05,  # in micrometers
        'pinhole_shape': 'round',
    }
    args.update(kwargs)

    # Map psftype codes to the corresponding psf flag combinations
    psf_types = {
        0: (psf.ISOTROPIC | psf.EXCITATION, 'psf.ISOTROPIC | psf.EXCITATION'),
        1: (psf.ISOTROPIC | psf.EMISSION, 'psf.ISOTROPIC | psf.EMISSION'),
        2: (psf.ISOTROPIC | psf.WIDEFIELD, 'psf.ISOTROPIC | psf.WIDEFIELD'),
        3: (psf.ISOTROPIC | psf.CONFOCAL, 'psf.ISOTROPIC | psf.CONFOCAL'),
        4: (psf.ISOTROPIC | psf.TWOPHOTON, 'psf.ISOTROPIC | psf.TWOPHOTON'),
        5: (psf.GAUSSIAN | psf.EXCITATION, 'psf.GAUSSIAN | psf.EXCITATION'),
        6: (psf.GAUSSIAN | psf.EMISSION, 'psf.GAUSSIAN | psf.EMISSION'),
        7: (psf.GAUSSIAN | psf.WIDEFIELD, 'psf.GAUSSIAN | psf.WIDEFIELD'),
        8: (psf.GAUSSIAN | psf.CONFOCAL, 'psf.GAUSSIAN | psf.CONFOCAL'),
        9: (psf.GAUSSIAN | psf.TWOPHOTON, 'psf.GAUSSIAN | psf.TWOPHOTON'),
        10: (psf.GAUSSIAN | psf.EXCITATION | psf.PARAXIAL,
             'psf.GAUSSIAN | psf.EXCITATION | psf.PARAXIAL'),
        11: (psf.GAUSSIAN | psf.EMISSION | psf.PARAXIAL,
             'psf.GAUSSIAN | psf.EMISSION | psf.PARAXIAL'),
        12: (psf.GAUSSIAN | psf.WIDEFIELD | psf.PARAXIAL,
             'psf.GAUSSIAN | psf.WIDEFIELD | psf.PARAXIAL'),
        13: (psf.GAUSSIAN | psf.CONFOCAL | psf.PARAXIAL,
             'psf.GAUSSIAN | psf.CONFOCAL | psf.PARAXIAL'),
        14: (psf.GAUSSIAN | psf.TWOPHOTON | psf.PARAXIAL,
             'psf.GAUSSIAN | psf.TWOPHOTON | psf.PARAXIAL'),
    }
    flags, label = psf_types[psftype]
    psf_matrix = psf.PSF(flags, **args)
    print(f'{label} generated')

    # NOTE: the original code assigned .expsf when `empsf` was set and vice
    # versa; the flags and attributes are matched up here.
    if expsf:
        psf_matrix = psf_matrix.expsf
    if empsf:
        psf_matrix = psf_matrix.empsf

    if psfvol:
        # psf_matrix = normalize_matrix(psf_matrix.volume())
        psf_matrix = psf_matrix.volume()
        psf_matrix = psf_matrix[:realshape[0], :, :]
        psf_matrix = psf_matrix[:, :realshape[1], :realshape[1]]
    else:
        # psf_matrix = normalize_matrix(psf.mirror_symmetry(psf_matrix.data))
        psf_matrix = psf.mirror_symmetry(psf_matrix.data)
        psf_matrix = psf_matrix[:realshape[1], :realshape[1]]

    if plot:
        import matplotlib.pyplot as plt
        plt.imshow(psf_matrix, cmap=cmap)
        plt.show()
    if display:
        import cv2
        cv2.imshow('PSF', psf_matrix)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    if savetif:
        # save zr slices to TIFF files
        from tifffile import imsave
        imsave('psf_matrix.tif', psf_matrix,
               metadata={'axes': 'TZCYX'}, imagej=True)
    if savevol:
        # save xyz volumes to files
        from tifffile import imsave
        imsave('psf_matrix_vol.tif', psf_matrix,
               metadata={'axes': 'TZCYX'}, imagej=True)
    print('PSF shape: ', psf_matrix.shape)
    return psf_matrix
33,319
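A hypothetical call of psf_generator, assuming the `psf` package (C. Gohlke) is installed; the parameter values and expected shape here are illustrative only:

# Generate a Gaussian confocal PSF slice (psftype=8) and plot it.
m = psf_generator(psftype=8, plot=True, realshape=(50, 50),
                  ex_wavelen=488.0, em_wavelen=520.0)
print(m.shape)  # cropped to realshape, e.g. (50, 50)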
def test_populate_extra_data_square_day():
    """
    If we have found a 'square' day, the description and square value is added
    """
    value = 7
    week_data = {'mon': {'value': value}}
    description = '__DESCRIPTION__'

    populate_extra_data(week_data, description)

    assert week_data == {
        'mon': {
            'value': value,
            'square': value ** 2,
            'description': '{} {}'.format(description, value ** 2)
        }
    }
33,320
def evaluate_model(model, X_test, y_test, category_names):
    """Predict data based on the test set of the input variables

    Args:
        model (sklearn.model_selection): Model
        X_test (numpy.ndarray): Test set of the input variables
        y_test (pandas.DataFrame): Test set of the output variables
        category_names (numpy.ndarray): List of category names
    """
    y_pred = model.predict(X_test)
    for i, col in enumerate(category_names):
        if i > 0:
            print('column {}, index {}: '.format(col, i))
            print(classification_report(y_test.iloc[:, i], y_pred[:, i]))
33,321
def test_errors():
    """
    Test error.
    :return:
    """
    with raises(PyboticsError):
        raise PyboticsError()

    assert str(PyboticsError()) == PyboticsError().message
    assert str(PyboticsError("test")) == "test"
33,322
def save_team_images(column='team_wordmark'):
    """Function will loop through a dataframe column and save URL images locally"""
    df = pd.read_csv(r'https://github.com/guga31bb/nflfastR-data/raw/master/teams_colors_logos.csv')
    my_series = df[column]
    my_list = my_series.to_list()
    for im_url in my_list:
        image_url = im_url
        filename = image_url.split("/")[-1]
        local_path = r'..\figures'
        file_path = path.join(local_path, filename)
        urllib.request.urlretrieve(image_url, file_path)
33,323
def correct_format():
    """
    This method will be called by iolite when the user selects a file
    to import. Typically, it uses the provided name (stored in
    plugin.fileName) and parses as much of it as necessary to determine
    if this importer is appropriate to import the data. For example,
    although X Series II and Agilent data are both comma separated
    value files, they can be distinguished by the characteristic
    formatting in each. In our implementation, distinguishing the two
    is done with 'regular expressions' (QRegularExpression) parsing of
    the first several lines of the file.

    Keep in mind that there is nothing stopping you from just returning
    True (thus supporting all files!) or simply checking the file
    extension, but such generic checks can yield unexpected results.
    You cannot be sure which order the various importer plugins will be
    checked for compatibility.

    This method must return either True or False.
    """
    IoLog.debug("correct_format called on file = %s" % (importer.fileName))
    if importer.fileName.endswith('ioe'):
        return True
    return False
33,324
def move_channel_down(midiout):
    """Move the selected channel down in the pattern window by sending
    an Alt+Down keystroke."""
    activate_pattern_window(midiout)
    keyboard.send('alt', True, False)
    keyboard.send('down', True, False)
    keyboard.send('down', False, True)
    keyboard.send('alt', False, True)
33,325
def T0_T0star(M, gamma):
    """Total temperature ratio for flow with heat addition (eq. 3.89)

    :param <float> M: Initial Mach #
    :param <float> gamma: Specific heat ratio
    :return <float> Total temperature ratio T0/T0star
    """
    t1 = (gamma + 1) * M ** 2
    t2 = (1.0 + gamma * M ** 2) ** 2
    t3 = 2.0 + (gamma - 1.0) * M ** 2
    return t1 / t2 * t3
33,326
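A quick sanity check of T0_T0star against a tabulated Rayleigh-flow value (the expected number is the standard gamma = 1.4 table entry):

# Rayleigh flow, air (gamma = 1.4) at Mach 0.5.
ratio = T0_T0star(0.5, 1.4)
print(round(ratio, 4))  # 0.6914, matching standard Rayleigh flow tables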
def remount_as(
    ip: Optional[str] = None, writeable: bool = False, folder: str = "/system"
) -> bool:
    """
    Mount/Remount file-system. Requires root

    :param folder: folder to mount
    :param writeable: mount as writeable or readable-only
    :param ip: device ip
    :rtype: true on success
    """
    mode = "rw" if writeable else "ro"
    return (
        shell(f"mount -o {mode},remount {folder}", ip=ip).code
        == ADBCommandResult.RESULT_OK
    )
33,327
def cli(ctx):
    """list create modify addtovolumeaccessgroup removefromvolumeaccessgroup delete"""
33,328
def flip_channels(img):
    """Flips the order of channels in an image; eg, BGR <-> RGB.

    This function assumes the image is a numpy.array (what's returned by cv2
    function calls) and uses the numpy re-ordering methods. The number of
    channels does not matter.

    If the image array is strictly 2D, no re-ordering is possible and the
    original data is returned untouched.
    """
    if len(img.shape) == 2:
        return img
    return img[:, :, ::-1]
33,329
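A minimal round-trip check of flip_channels, assuming only numpy (no cv2 is needed, since the function just reverses the last axis):

import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[..., 0] = 255                       # blue plane set in BGR order
rgb = flip_channels(bgr)
assert (rgb[..., 2] == 255).all()       # blue is now the last channel
assert np.array_equal(flip_channels(rgb), bgr)  # flipping twice restores the input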
def autodiscover_datafiles(varmap):
    """Return list of (dist directory, data file list) 2-tuples.

    The ``data_dirs`` setup var is used to give a list of subdirectories
    in your source distro that contain data files. It is assumed that all
    such files will go in the ``share`` subdirectory of the prefix where
    distutils is installing your distro (see the distutils docs); within
    that directory, a subdirectory with the same name as your program
    (i.e., the ``name`` setup var) will be created, and each directory in
    ``data_dirs`` will be a subdirectory of that. So, for example, if you
    have example programs using your distro in the ``"examples"``
    directory in your distro, you would declare ``data_dirs = "examples"``
    in your setup vars, and everything under that source directory would
    be installed into ``share/myprog/examples``.
    """
    result = []
    try:
        datadirs = varmap['data_dirs']
    except KeyError:
        pass
    else:
        pathprefix = "share/{}".format(varmap['name'])
        for datadir in datadirs:
            for dirname, subdirs, filenames in os.walk(datadir):
                if filenames and ("." not in dirname):
                    distdir = dirname.replace(os.sep, '/')
                    distfiles = [
                        "{}/{}".format(distdir, filename)
                        for filename in filenames
                        if not filename.startswith(".")
                    ]
                    if distfiles:
                        result.append(
                            ("{}/{}".format(pathprefix, distdir), distfiles)
                        )
    return result
33,330
def dict_to_hdf5(dic, endpoint):
    """Dump a dict to an HDF5 file."""
    filename = gen_filename(endpoint)
    with h5py.File(filename, 'w') as handler:
        walk_dict_to_hdf5(dic, handler)
    print('dumped to', filename)
33,331
def parse_main(index):
    """Parse a main function containing block items.

    Ex: int main() { return 4; }
    """
    err = "expected main function starting"
    index = match_token(index, token_kinds.int_kw, ParserError.AT, err)
    index = match_token(index, token_kinds.main, ParserError.AT, err)
    index = match_token(index, token_kinds.open_paren, ParserError.AT, err)
    index = match_token(index, token_kinds.close_paren, ParserError.AT, err)

    node, index = parse_compound_statement(index)
    return nodes.Main(node), index
33,332
def elist2tensor(elist, idtype):
    """Function to convert an edge list to edge tensors.

    Parameters
    ----------
    elist : iterable of int pairs
        List of (src, dst) node ID pairs.
    idtype : int32, int64, optional
        Integer ID type. Must be int32 or int64.

    Returns
    -------
    (Tensor, Tensor)
        Edge tensors.
    """
    if len(elist) == 0:
        u, v = [], []
    else:
        u, v = zip(*elist)
        u = list(u)
        v = list(v)
    return F.tensor(u, idtype), F.tensor(v, idtype)
33,333
def _term_to_xapian_value(term, field_type):
    """
    Converts a term to a serialized Xapian value based on the field_type.
    """
    assert field_type in FIELD_TYPES

    def strf(dt):
        """
        Equivalent to datetime.datetime.strptime(dt, DATETIME_FORMAT)
        but accepts years below 1900
        (see http://stackoverflow.com/q/10263956/931303)
        """
        return '%04d%02d%02d%02d%02d%02d' % (
            dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)

    if field_type == 'boolean':
        assert isinstance(term, bool)
        if term:
            value = 't'
        else:
            value = 'f'
    elif field_type == 'integer':
        value = INTEGER_FORMAT % term
    elif field_type == 'float':
        value = xapian.sortable_serialise(term)
    elif field_type == 'date' or field_type == 'datetime':
        if field_type == 'date':
            # http://stackoverflow.com/a/1937636/931303 and comments
            term = datetime.datetime.combine(term, datetime.time())
        value = strf(term)
    else:  # field_type == 'text'
        value = _to_xapian_term(term)
    return value
33,334
def printname(name):
    """
    Print whatever is given for name

    Parameters
    ----------
    name : object
        Any printable object

    Returns
    -------
    no return : print statement
    """
    print(name)
33,335
def pop_stl1(osurls, radiourls, splitos):
    """
    Replace STL100-1 links in 10.3.3+.

    :param osurls: List of OS platforms.
    :type osurls: list(str)

    :param radiourls: List of radio platforms.
    :type radiourls: list(str)

    :param splitos: OS version, split and cast to int: [10, 3, 3, 2205]
    :type splitos: list(int)
    """
    if newer_103(splitos, 3):
        osurls = osurls[1:]
        radiourls = radiourls[1:]
    return osurls, radiourls
33,336
def process_map_model_validation_error(self, exception_map, exception):
    """
    Processes the exception map for the given exception. This process
    method includes the specific information of this exception into the
    exception map.

    :type exception_map: Dictionary
    :param exception_map: The map containing the exception information.
    :type exception: Exception
    :param exception: The exception to be processed.
    """
    # retrieves the model in the exception and then uses it to retrieve
    # both the validation map and the validation errors map, these values
    # are going to be used to return some debugging information
    exception_model = exception.model
    validation_map = exception_model.validation_map
    validation_errors_map = exception_model.validation_errors_map

    # sets both the validation and the validation errors map in the map
    # so that the exception may contain some debugging information
    exception_map["validation_map"] = validation_map
    exception_map["validation_errors_map"] = validation_errors_map
33,337
def processAlert(p_json):
    """
    Creates a pyFireEyeAlert instance from the json and performs all the mapping

    :param p_json:
    :type p_json:
    """
    logger.debug(p_json)
    fireinstance = pyFireEyeAlert(p_json)
    # This comment will be added to every attribute for reference
    auto_comment = "Auto generated by FireMisp " + (fireinstance.alert_id)
    # create a MISP event
    logger.debug("alert %s ", fireinstance.alert_id)
    has_previous_event = True
    event = check_for_previous_events(fireinstance)
    map_alert_to_event(auto_comment, event, fireinstance)
33,338
def empty_search():
    """
    :return: json response of empty list, meaning empty search result
    """
    return jsonify(results=[])
33,339
def clip_raster_mean(raster_path, feature, var_nam):
    """
    Opens a raster file from raster_path and applies a mask based on a
    polygon (feature). It then computes the mean of the pixel values
    contained in the mask.

    :param raster_path: raster path (raster must contain classes)
    :param feature: polygon feature (extracted from a shapefile or geojson)
    :param var_nam: variable name; negative values are masked out for 'PTED'
    :return: mean of the pixel values contained in the mask
    """
    with rasterio.open(raster_path) as src:
        # Apply mask to raster and crop
        out_image, out_transform = rasterio.mask.mask(src, [feature["geometry"]], crop=True)
        if var_nam == 'PTED':
            out_image[out_image < 0] = np.nan
        return np.nanmean(out_image)
33,340
def problem475():
    """
    12n musicians participate at a music festival. On the first day, they
    form 3n quartets and practice all day.

    It is a disaster. At the end of the day, all musicians decide they will
    never again agree to play with any member of their quartet.

    On the second day, they form 4n trios, each musician avoiding his
    previous quartet partners.

    Let f(12n) be the number of ways to organize the trios amongst the 12n
    musicians.

    You are given f(12) = 576 and f(24) mod 1 000 000 007 = 509089824.

    Find f(600) mod 1 000 000 007.
    """
    pass
33,341
def load_data(in_file):
    """load json file from seqcluster cluster"""
    with open(in_file) as in_handle:
        return json.load(in_handle)
33,342
def main(inargs):
    """Run the program."""
    cube = iris.load_cube(inargs.infile, 'sea_water_salinity')
    cube = gio.salinity_unit_check(cube)

    outfile_metadata = {inargs.infile: cube.attributes['history']}
    cube.attributes['history'] = gio.write_metadata(file_info=outfile_metadata)
    iris.save(cube, inargs.outfile)
33,343
def test_setitem(atom_dict):
    """Test setting items."""
    atom_dict.untyped[''] = 1
    atom_dict.keytyped[1] = ''
    with pytest.raises(TypeError):
        atom_dict.keytyped[''] = 1

    atom_dict.valuetyped[1] = 1
    with pytest.raises(TypeError):
        atom_dict.valuetyped[''] = ''

    atom_dict.fullytyped[1] = 1
    with pytest.raises(TypeError):
        atom_dict.fullytyped[''] = 1
    with pytest.raises(TypeError):
        atom_dict.fullytyped[1] = ''
33,344
def test_multichannel():
    """Test adding multichannel image."""
    viewer = ViewerModel()
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    viewer.add_image(data, channel_axis=-1)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
        assert viewer.layers[i].colormap[0] == base_colormaps[i]
33,345
def applyPatch(sourceDir, f, patchLevel='0'):
    """apply single patch"""
    if os.path.isdir(f):
        # apply a whole dir of patches
        out = True
        with os.scandir(f) as scan:
            for patch in scan:
                if patch.is_file() and not patch.name.startswith("."):
                    out = applyPatch(sourceDir, os.path.join(f, patch), patchLevel) and out
        return out
    cmd = ["patch", "--ignore-whitespace", "-d", sourceDir, "-p", str(patchLevel), "-i", f]
    result = system(cmd)
    if not result:
        CraftCore.log.warning(f"applying {f} failed!")
    return result
33,346
def run_script(script: Script, options: CliOptions) -> None:
    """Run the script with the given (command line) options."""
    template_ctx = build_template_context(script, options)
    verbose = RunContext().get('verbose')
    pretend = RunContext().get('pretend')

    if verbose >= 3:
        log.info('Compiling script <35>{name}\n{script}'.format(
            name=script.name,
            script=shell.highlight(script.command, 'jinja')
        ))
        yaml_str = yaml.dump(template_ctx, default_flow_style=False)
        log.info('with context:\n{}\n'.format(shell.highlight(yaml_str, 'yaml')))

    # Command is either specified directly in pelconf.yaml or lives in a
    # separate file.
    command = script.command
    if script.command_file:
        with open(conf.proj_path(script.command_file)) as fp:
            command = fp.read()

    if not command:
        raise ValueError("Scripts must have 'command' or 'command_file' specified.")

    cmd = templates.Engine().render(command, template_ctx)
    retcode = exec_script_command(cmd, pretend)

    if verbose:
        log.info("Script exited with code: <33>{}", retcode)

    if retcode not in script.success_exit_codes:
        sys.exit(retcode)
33,347
def expand_at_linestart(P, tablen):
    """Only expand tab characters at the start of each line"""
    import re

    def exp(m):
        return m.group().expandtabs(tablen)

    return ''.join([re.sub(r'^\s+', exp, s) for s in P.splitlines(True)])
33,348
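A small usage sketch of expand_at_linestart: leading tabs become spaces while tabs inside the line are left alone.

text = "\tindented\tkeeps inner tab\nno leading tab\there"
print(expand_at_linestart(text, 4))
# "    indented\tkeeps inner tab" -- only the line-initial tab was expanded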
def print_full_dataframe(df):
    """
    Helper function to print a pandas dataframe with NO truncation of rows/columns

    This will reset the defaults after, so it can be useful for inspection
    without annoying side effects in a script (pulled from a stack overflow
    example).
    """
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', 2000)
    pd.set_option('display.max_colwidth', None)
    print(df)
    pd.reset_option('display.max_rows')
    pd.reset_option('display.max_columns')
    pd.reset_option('display.width')
    pd.reset_option('display.max_colwidth')
33,349
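The same set/print/reset pattern can be expressed more robustly with pandas' own context manager, which restores every option on exit even if printing raises; a sketch of the equivalent helper:

import pandas as pd

def print_full_dataframe_ctx(df):
    # pd.option_context restores all options when the block exits
    with pd.option_context('display.max_rows', None,
                           'display.max_columns', None,
                           'display.width', 2000,
                           'display.max_colwidth', None):
        print(df)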
def configs():
    """Create a mock Configuration object with sentinel values

    Eg. Configuration(
        base_jar=sentinel.base_jar,
        config_file=sentinel.config_file,
        ...
    )
    """
    return Configuration(**dict(
        (k, getattr(sentinel, k)) for k in DEFAULTS._asdict().keys()
    ))
33,350
def svn_fs_revision_root_revision(root):
    """svn_fs_revision_root_revision(svn_fs_root_t * root) -> svn_revnum_t"""
    return _fs.svn_fs_revision_root_revision(root)
33,351
def config_ospf_interface(
    tgen, topo=None, input_dict=None, build=False, load_config=True
):
    """
    API to configure ospf on router.

    Parameters
    ----------
    * `tgen` : Topogen object
    * `topo` : json file data
    * `input_dict` : Input dict data, required when configuring from testcase
    * `build` : Only for initial setup phase this is set as True.
    * `load_config` : Loading the config to router this is set as True.

    Usage
    -----
    r1_ospf_auth = {
        "r1": {
            "links": {
                "r2": {
                    "ospf": {
                        "authentication": "message-digest",
                        "authentication-key": "ospf",
                        "message-digest-key": "10"
                    }
                }
            }
        }
    }
    result = config_ospf_interface(tgen, topo, r1_ospf_auth)

    Returns
    -------
    True or False
    """
    logger.debug("Enter lib config_ospf_interface")
    result = False
    if topo is None:
        topo = tgen.json_topo
    if not input_dict:
        input_dict = deepcopy(topo)
    else:
        input_dict = deepcopy(input_dict)

    config_data_dict = {}

    for router in input_dict.keys():
        config_data = []
        for lnk in input_dict[router]["links"].keys():
            if "ospf" not in input_dict[router]["links"][lnk]:
                logger.debug(
                    "Router %s: ospf config is not present in input_dict", router
                )
                continue
            ospf_data = input_dict[router]["links"][lnk]["ospf"]
            data_ospf_area = ospf_data.setdefault("area", None)
            data_ospf_auth = ospf_data.setdefault("authentication", None)
            data_ospf_dr_priority = ospf_data.setdefault("priority", None)
            data_ospf_cost = ospf_data.setdefault("cost", None)
            data_ospf_mtu = ospf_data.setdefault("mtu_ignore", None)

            try:
                intf = topo["routers"][router]["links"][lnk]["interface"]
            except KeyError:
                intf = topo["switches"][router]["links"][lnk]["interface"]

            # interface
            cmd = "interface {}".format(intf)
            config_data.append(cmd)

            # interface area config
            if data_ospf_area:
                cmd = "ip ospf area {}".format(data_ospf_area)
                config_data.append(cmd)

            # interface ospf auth
            if data_ospf_auth:
                if data_ospf_auth == "null":
                    cmd = "ip ospf authentication null"
                elif data_ospf_auth == "message-digest":
                    cmd = "ip ospf authentication message-digest"
                else:
                    cmd = "ip ospf authentication"
                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

                if "message-digest-key" in ospf_data:
                    cmd = "ip ospf message-digest-key {} md5 {}".format(
                        ospf_data["message-digest-key"], ospf_data["authentication-key"]
                    )
                    if "del_action" in ospf_data:
                        cmd = "no {}".format(cmd)
                    config_data.append(cmd)

                if (
                    "authentication-key" in ospf_data
                    and "message-digest-key" not in ospf_data
                ):
                    cmd = "ip ospf authentication-key {}".format(
                        ospf_data["authentication-key"]
                    )
                    if "del_action" in ospf_data:
                        cmd = "no {}".format(cmd)
                    config_data.append(cmd)

            # interface ospf dr priority
            if data_ospf_dr_priority:
                cmd = "ip ospf priority {}".format(ospf_data["priority"])
                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

            # interface ospf cost
            if data_ospf_cost:
                cmd = "ip ospf cost {}".format(ospf_data["cost"])
                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

            # interface ospf mtu
            if data_ospf_mtu:
                cmd = "ip ospf mtu-ignore"
                if "del_action" in ospf_data:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

        if build:
            return config_data

        if config_data:
            config_data_dict[router] = config_data

    result = create_common_configurations(
        tgen, config_data_dict, "interface_config", build=build
    )

    logger.debug("Exiting lib API: config_ospf_interface()")
    return result
33,352
def is_floatscalar(x: Any) -> bool:
    """Check whether `x` is a float scalar.

    Parameters:
    ----------
    x: A python object to check.

    Returns:
    ----------
    `True` iff `x` is a float scalar (built-in or Numpy float).
    """
    return isinstance(x, (
        float,
        np.float16,
        np.float32,
        np.float64,
    ))
33,353
async def start_handler(event, channel_layer, msg_data):
    """Handler for "/start" """
    current_bot = await bot_client.get_me()
    user_details = await bot_client.get_entity(event.message.peer_id.user_id)
    bot = Bot.objects.get(id=current_bot.id)
    individual, i_created = IndividualChat.objects.get_or_create(
        id=user_details.id,
        defaults={
            USERNAME: user_details.username,
            FIRST_NAME: user_details.first_name,
            LAST_NAME: user_details.last_name,
            "access_hash": user_details.access_hash,
        },
    )
    bot_individual, bi_created = BotIndividual.objects.get_or_create(
        bot=bot, individual=individual
    )
    if bi_created:
        bot_individual.save()

    await save_send_message(msg_data, channel_layer)
    await event.respond(MSG_WELCOME)
    msg_data = message_data(
        event.chat_id, event.message.id, current_bot.id,
        current_bot.first_name, MSG_WELCOME
    )
    msg_data[FROM_GROUP] = False
    await save_send_message(msg_data, channel_layer)
33,354
def http_header_control_cache(request):
    """Type of cache control

    url: address of the web page"""
    print("--------------- Getting cache control -------------------")
    try:
        cabecera = request.headers
        cache_control = cabecera.get("cache-control")
    except Exception:
        cache_control = "NA"
        print("Unexpected error: no cache-control header was found")
    return cache_control
33,355
def summation(limit):
    """
    Returns the summation of all natural numbers from 0 to limit

    Uses the closed-form formula for the sum of the first n natural numbers
    :param limit: {int}
    :return: {int}
    """
    return (limit * (limit + 1)) // 2 if limit >= 0 else 0
33,356
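A quick check of summation against the closed form n(n+1)/2:

assert summation(10) == 55               # 0 + 1 + ... + 10 = 10 * 11 / 2
assert summation(0) == 0
assert summation(-5) == 0                # negative limits clamp to 0
assert summation(100) == sum(range(101))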
def reportoutputfiles(tasklistlist):
    """
    report on what files will be output by the tasks
    input: list of list of tasks
    output: none (prints to screen)
    """
    print()
    print("listing output files")
    pathlist = []
    for tasklist in tasklistlist:
        for task in tasklist:
            pathlist.extend(task.outputfilepaths())
    pathlist.sort()
    for path in pathlist:
        print(path)
    print("%d total output files" % len(pathlist))
    # end reportoutputfiles()
33,357
def main():
    """Main operational flow"""
    # Set target locations and specific filename
    local_data_folder = './../input-data'
    target_def_blob_store_path = '/blob-input-data/'
    input_filename = 'HPI_master_cleansed.csv'

    # Get input data files from local
    data_file_paths = data_filepaths(data_folder=local_data_folder)

    # Get the default blob store
    def_blob_store = ws.get_default_datastore()

    # Upload files to blob store
    def_blob_store.upload_files(
        files=data_file_paths,
        target_path=target_def_blob_store_path,
        overwrite=True,
        show_progress=True
    )

    # Create File Dataset
    datastore_paths = [(def_blob_store, str(target_def_blob_store_path + input_filename))]
    fd = Dataset.File.from_files(path=datastore_paths)

    # Register the dataset
    register_dataset(dataset=fd, workspace=ws, name='HPI_file_dataset')
33,358
def load_and_initialize_hub_module(module_path, signature='default'):
    """Loads graph of a TF-Hub module and initializes it into a session.

    Args:
      module_path: string Path to TF-Hub module.
      signature: string Signature to use when creating the apply graph.

    Return:
      graph: tf.Graph Graph of the module.
      session: tf.Session Session with initialized variables and tables.
      inputs: dict Dictionary of input tensors.
      outputs: dict Dictionary of output tensors.

    Raises:
      ValueError: If signature contains a SparseTensor on input or output.
    """
    graph = tf.Graph()
    with graph.as_default():
        tf.compat.v1.logging.info('Importing %s', module_path)
        module = hub.Module(module_path)

        signature_inputs = module.get_input_info_dict(signature)
        signature_outputs = module.get_output_info_dict(signature)
        # First check there are no SparseTensors in input or output.
        for key, info in list(signature_inputs.items()) + list(
                signature_outputs.items()):
            if info.is_sparse:
                raise ValueError(
                    'Signature "%s" has a SparseTensor on input/output "%s".'
                    ' SparseTensors are not supported.' % (signature, key))

        # Create placeholders to represent the input of the provided signature.
        inputs = {}
        for input_key, input_info in signature_inputs.items():
            inputs[input_key] = tf.compat.v1.placeholder(
                shape=input_info.get_shape(), dtype=input_info.dtype, name=input_key)

        outputs = module(inputs=inputs, signature=signature, as_dict=True)

    session = tf.compat.v1.Session(graph=graph)
    session.run(tf.compat.v1.global_variables_initializer())
    session.run(tf.compat.v1.tables_initializer())
    return graph, session, inputs, outputs
33,359
def application():
    """An application for testing"""
    yield create_test_application()
33,360
def j_index(true_labels, predicts):
    """ j_index

    Computes the Jaccard Index of the given set, which is also called the
    'intersection over union' in multi-label settings. It's defined as the
    intersection between the true label's set and the prediction's set,
    divided by the sum, or union, of those two sets.

    Parameters
    ----------
    true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the true labels for all the classification tasks and for
        n_samples.

    predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the predictions for all the classification tasks and for
        n_samples.

    Returns
    -------
    float
        The J-index, or 'intersection over union', for the given sets.

    Examples
    --------
    >>> from skmultiflow.evaluation.metrics.metrics import j_index
    >>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
    >>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
    >>> j_index(true_labels, predictions)
    0.66666666666666663

    """
    if not hasattr(true_labels, 'shape'):
        true_labels = np.asarray(true_labels)
    if not hasattr(predicts, 'shape'):
        predicts = np.asarray(predicts)
    N, L = true_labels.shape
    s = 0.0
    for i in range(N):
        inter = sum((true_labels[i, :] * predicts[i, :]) > 0) * 1.
        union = sum((true_labels[i, :] + predicts[i, :]) > 0) * 1.
        if union > 0:
            s += inter / union
        elif np.sum(true_labels[i, :]) == 0:
            s += 1.
    return s * 1. / N
33,361
def mkdir_p(path):
    """Make a directory if it doesn't already exist"""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
33,362
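On Python 3.2+ the same behavior is available directly from the standard library, so the errno dance above reduces to a one-liner; the path here is purely illustrative:

import os

# exist_ok=True suppresses the error only when the existing path is a
# directory, mirroring the errno.EEXIST / isdir check in mkdir_p
os.makedirs("some/dir", exist_ok=True)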
def odefun(x, t, net, alph=[1.0, 1.0, 1.0]):
    """
    neural ODE combining the characteristics and log-determinant (see Eq. (2)),
    the transport costs (see Eq. (5)), and the HJB regularizer (see Eq. (7)).

    d_t [x ; l ; v ; r] = odefun( [x ; l ; v ; r] , t )

    x - particle position
    l - log determinant
    v - accumulated transport costs (Lagrangian)
    r - accumulates violation of HJB condition along trajectory
    """
    nex, d_extra = x.shape
    d = d_extra - 3
    z = pad(x[:, :d], (0, 1, 0, 0), value=t)  # concatenate with the time t

    gradPhi, trH = net.trHess(z)

    dx = -(1.0 / alph[0]) * gradPhi[:, 0:d]
    dl = -(1.0 / alph[0]) * trH.unsqueeze(1)
    dv = 0.5 * torch.sum(torch.pow(dx, 2), 1, keepdims=True)
    dr = torch.abs(-gradPhi[:, -1].unsqueeze(1) + alph[0] * dv)

    return torch.cat((dx, dl, dv, dr), 1)
33,363
def create_transform(num_flow_steps, param_dim, context_dim,
                     base_transform_kwargs):
    """Build a sequence of NSF transforms, which maps parameters x into the
    base distribution u (noise). Transforms are conditioned on strain data y.

    Note that the forward map is f^{-1}(x, y).

    Each step in the sequence consists of
        * A linear transform of x, which in particular permutes components
        * A NSF transform of x, conditioned on y.
    There is one final linear transform at the end.

    This function was adapted from the uci.py example in
    https://github.com/bayesiains/nsf

    Arguments:
        num_flow_steps {int} -- number of transforms in sequence
        param_dim {int} -- dimensionality of x
        context_dim {int} -- dimensionality of y
        base_transform_kwargs {dict} -- hyperparameters for NSF step

    Returns:
        Transform -- the constructed transform
    """
    transform = transforms.CompositeTransform([
        transforms.CompositeTransform([
            create_linear_transform(param_dim),
            create_base_transform(i, param_dim, context_dim=context_dim,
                                  **base_transform_kwargs)
        ]) for i in range(num_flow_steps)
    ] + [
        create_linear_transform(param_dim)
    ])
    return transform
33,364
def filterUniques(tar, to_filter, score, ns):
    """
    Filters unique psms/peptides/proteins from (multiple) Percolator output
    XML files. Takes a tarred set of XML files, a filtering query (e.g. psms),
    a score to filter on and a namespace. Outputs an ElementTree.
    """
    for tf in to_filter:
        assert tf in ['psms', 'peptides', 'proteins'], Exception(
            'filterUniques function needs a specified to_filter list of psms, peptides, proteins.')
    assert score in ['q', 'pep', 'p'], Exception(
        'filterUniques function needs a specified score to filter on of q, pep or p.')
    try:
        with tarfile.open(tar, 'r') as f:
            members = f.getmembers()
            f.extractall()
    except Exception:
        sys.stderr.write('Could not extract Percolator files from dataset: %s \n' % tar)
        return 1

    docs = []
    for fn in members:
        docs.append(etree.parse(fn.name))

    # lookup dicts
    scores = {'q': 'q_value', 'pep': 'pep', 'p': 'p_value'}
    filt_el_dict = {'psms': 'xmlns:peptide_seq', 'peptides': '@xmlns:peptide_id'}
    # result dict
    filtered = {'psms': {}, 'peptides': {}, 'proteins': {}}

    for doc in docs:
        for filt_el in to_filter:
            feattree = doc.xpath('//xmlns:%s' % filt_el, namespaces=ns)
            if feattree == []:
                sys.stdout.write('%s not found in (one of the) Percolator output'
                                 ' documents. Continuing...\n' % filt_el)
                continue
            for feat in feattree[0]:
                # It's actually faster to loop through the feat's children,
                # but this is 2-line code and still readable.
                featscore = float(feat.xpath('xmlns:%s' % scores[score],
                                             namespaces=ns)[0].text)
                seq = feat.xpath('%s' % filt_el_dict[filt_el], namespaces=ns)
                try:
                    # psm seqs are parsed here
                    seq = seq[0].attrib['seq']
                except Exception:
                    # caught when parsing peptide seqs (different format)
                    seq = str(seq[0])
                if seq not in filtered[filt_el]:
                    filtered[filt_el][seq] = feat
                elif featscore < filtered[filt_el][seq]:
                    # FIXME now it only works for LOWER-than scores (eg q-vals, pep,
                    # but not for scores that are better when higher)
                    filtered[filt_el][seq] = feat

    # make trees from filtered dicts
    for filt_el in filtered:
        outlist = []
        for feat in filtered[filt_el].values():
            outlist.append(feat)
        filtered[filt_el] = outlist
        # node = etree.Element(filt_el)
        # node.extend(filtered[filt_el].values())
        # filtered[filt_el] = node
    outdoc = refillTree(docs[0], ['psms', 'peptides', 'proteins'], filtered, ns)
    return outdoc
33,365
def test_normal_double():
    """Exercise the EndpointHandler functionality."""
    handler = TwoEndpointHandler()

    # Receiver
    handler.to_receive(('int', 1))
    assert handler.has_receive()
    received = list(handler.receive_all())
    assert len(received) == 1
    assert received[0].data == 2
    assert not handler.has_to_send()

    handler.to_receive(('float', 0.5))
    assert handler.has_receive()
    received = list(handler.receive_all())
    assert len(received) == 1
    assert received[0].data == 1.0
    assert not handler.has_to_send()

    # Sender
    handler.send(2)
    assert handler.has_to_send()
    sent = list(handler.to_send_all())
    assert len(sent) == 1
    assert sent[0].data == ('int', 1)

    handler.send(1.0)
    assert handler.has_to_send()
    sent = list(handler.to_send_all())
    assert len(sent) == 1
    assert sent[0].data == ('float', 0.5)
33,366
def for_in_pyiter(it):
    """
    >>> for_in_pyiter(Iterable(5))
    [0, 1, 2, 3, 4]
    """
    l = []
    for item in it:
        l.append(item)
    return l
33,367
def train_test_split_with_none(X, y=None, sample_weight=None, random_state=0):
    """
    Splits into train and test data even if they are None.

    @param X                X
    @param y                y
    @param sample_weight    sample weight
    @param random_state     random state
    @return                 similar to :epkg:`scikit-learn:model_selection:train_test_split`.
    """
    not_none = [_ for _ in [X, y, sample_weight] if _ is not None]
    # forward random_state so the split is actually reproducible
    res = train_test_split(*not_none, random_state=random_state)
    inc = len(not_none)
    trains = []
    tests = []
    for i in range(inc):
        trains.append(res[i * 2])
        tests.append(res[i * 2 + 1])
    while len(trains) < 3:
        trains.append(None)
        tests.append(None)
    X_train, y_train, w_train = trains
    X_test, y_test, w_test = tests
    return X_train, y_train, w_train, X_test, y_test, w_test
33,368
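A usage sketch (assuming scikit-learn is installed); note that omitted arrays come back as None while the rest are still split consistently:

import numpy as np
from sklearn.model_selection import train_test_split

X = np.arange(20).reshape(10, 2)
y = np.arange(10)

X_tr, y_tr, w_tr, X_te, y_te, w_te = train_test_split_with_none(X, y)
assert w_tr is None and w_te is None        # no sample_weight was given
assert len(X_tr) + len(X_te) == 10

X_tr, y_tr, w_tr, X_te, y_te, w_te = train_test_split_with_none(X)
assert y_tr is None and y_te is None        # only X was split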
def plot_by_gene_and_domain(name, seqs, tax, id2name):
    """plot insertions for each gene and domain"""
    for gene in set([seq[0] for seq in list(seqs.values())]):
        for domain in set([seq[1] for seq in list(seqs.values())]):
            plot_insertions(name, seqs, gene, domain, tax, id2name)
33,369
def measure_time(func, repeat=1000):
    """Repeatedly executes a function and records the lowest time."""
    def wrapper(*args, **kwargs):
        min_time = 1000
        for _ in range(repeat):
            start = timer()
            result = func(*args, **kwargs)
            curr_time = timer() - start
            if curr_time < min_time:
                min_time = curr_time
        return [min_time, result]
    return wrapper
33,370
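A usage sketch of measure_time as a decorator (assuming `timer` is `timeit.default_timer`); the wrapped call returns [best_time, last_result]:

from timeit import default_timer as timer

@measure_time
def add(a, b):
    return a + b

best, result = add(2, 3)   # runs add() 1000 times, keeping the fastest timing
print(result)              # 5
print(f"fastest run: {best:.2e} s")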
def initialise_players(frame_data, params):
    """
    initialise_players(frame_data, params)

    create a list of player objects that holds their positions and velocities
    from the tracking data dataframe

    Parameters
    -----------
    frame_data: row (i.e. instant) of either the home or away team tracking Dataframe
    params: Dictionary of model parameters (default model parameters can be
        generated using default_model_params())

    Returns
    -----------
    team_players: list of player objects for the team at a given instant
    """
    # get player ids
    player_ids = np.unique([x.split("_")[0] for x in frame_data.keys()])
    # create list
    team_players = []
    for p in player_ids:
        # create a player object for player_id 'p'
        team_player = player(p, frame_data, params)
        if team_player.inframe:
            team_players.append(team_player)
    return team_players
33,371
def filter_access_token(interaction, current_cassette):
    """Add Betamax placeholder to filter access token."""
    request_uri = interaction.data["request"]["uri"]
    response = interaction.data["response"]
    if "api/v1/access_token" not in request_uri or response["status"]["code"] != 200:
        return
    body = response["body"]["string"]
    try:
        token = json.loads(body)["access_token"]
    except (KeyError, TypeError, ValueError):
        return
    current_cassette.placeholders.append(
        betamax.cassette.cassette.Placeholder(
            placeholder="<ACCESS_TOKEN>", replace=token
        )
    )
33,372
def read_edgelist(f, directed=True, sep=r"\s+", header=None,
                  keep_default_na=False, **readcsvkwargs):
    """
    Creates a csrgraph from an edgelist.

    The edgelist should be in the form
        [source destination]  or  [source destination edge_weight]
    The first column needs to be the source, the second the destination.
    If there is a third column it's assumed to be edge weights.

    Otherwise, all arguments from pandas.read_csv can be used to read the file.

    f : str
        Filename to read
    directed : bool
        Whether the graph is directed or undirected.
        All csrgraphs are directed; undirected graphs simply add "return edges"
    sep : str
        CSV-style separator. Eg. Use "," if comma separated
    header : int or None
        pandas read_csv parameter. Use if column names are present
    keep_default_na : bool
        pandas read_csv argument to prevent casting any value to NaN
    read_csv_kwargs : keyword arguments for pd.read_csv
        Pass these kwargs as you would normally to pd.read_csv.

    Returns : csrgraph
    """
    # Read in csv correctly to each column
    elist = pd.read_csv(f, sep=sep, header=header,
                        keep_default_na=keep_default_na, **readcsvkwargs)
    if len(elist.columns) == 2:
        elist.columns = ['src', 'dst']
        elist['weight'] = np.ones(elist.shape[0])
    elif len(elist.columns) == 3:
        elist.columns = ['src', 'dst', 'weight']
    else:
        raise ValueError(f"""
            Invalid columns: {elist.columns}
            Expected 2 (source, destination) or 3 (source, destination, weight)
            Read File: \n{elist.head(5)}
        """)
    # Create name mapping to normalize node IDs
    # Somehow this is 1.5x faster than np.union1d. Shame on numpy.
    allnodes = list(
        set(elist.src.unique())
        .union(set(elist.dst.unique())))
    # Factor all nodes to unique IDs
    names = (
        pd.Series(allnodes).astype('category')
        .cat.categories
    )
    nnodes = names.shape[0]
    # Get the input data type
    if nnodes > UINT16_MAX:
        dtype = np.uint32
        if nnodes > UINT32_MAX:
            dtype = np.uint64
    else:
        dtype = np.uint16
    name_dict = dict(zip(names, np.arange(names.shape[0], dtype=dtype)))
    elist.src = elist.src.map(name_dict).astype(dtype)
    elist.dst = elist.dst.map(name_dict).astype(dtype)
    # clean up temp data
    allnodes = None
    name_dict = None
    gc.collect()
    # If undirected graph, append edgelist to reversed self
    if not directed:
        other_df = elist.copy()
        other_df.columns = ['dst', 'src', 'weight']
        elist = pd.concat([elist, other_df])
        other_df = None
        gc.collect()
    # Need to sort by src for _edgelist_to_graph
    elist = elist.sort_values(by='src')
    # extract numpy arrays and clear memory
    src = elist.src.to_numpy()
    dst = elist.dst.to_numpy()
    weight = elist.weight.to_numpy()
    elist = None
    gc.collect()
    G = methods._edgelist_to_graph(
        src, dst, weight,
        nnodes, nodenames=names
    )
    return G
33,373
def is_comment(txt_row):
    """
    Tries to determine if the current line of text is a comment line.

    Args:
        txt_row (string): text line to check.

    Returns:
        True when the text line is considered a comment line, False if not.
    """
    if (len(txt_row) < 1):
        return True
    if ((txt_row[0] == '(') and (txt_row[len(txt_row) - 1] == ')')):
        return True
    else:
        return False
33,374
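A few illustrative calls (G-code style, where comments are wrapped in parentheses):

assert is_comment("(homing cycle)")     # parenthesized line -> comment
assert is_comment("")                   # empty lines count as comments too
assert not is_comment("G1 X10 Y20")     # ordinary command line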
def updateBillingPlanPaymentDefinition(pk, paypal_payment_definition):
    """Update an existing payment definition of a billing plan

    :param pk: the primary key of the payment definition (associated with a billing plan)
    :type pk: integer
    :param paypal_payment_definition: Paypal billing plan payment definition
    :type paypal_payment_definition: object
    :returns: True for successful update or False in any other case
    :rtype: bool
    """
    try:
        try:
            frequency_interval = paypal_payment_definition['frequency_interval']
        except (KeyError, TypeError):
            frequency_interval = None
        try:
            cycles = paypal_payment_definition['cycles']
        except (KeyError, TypeError):
            cycles = None
        try:
            charge_models = paypal_payment_definition['charge_models']
        except (KeyError, TypeError):
            charge_models = dict()
        try:
            amount_value = paypal_payment_definition['amount']['value']
        except (KeyError, TypeError):
            amount_value = None
        try:
            amount_currency = paypal_payment_definition['amount']['currency']
        except (KeyError, TypeError):
            amount_currency = None

        BillingPlanPaymentDefinition.objects.filter(pk=pk).update(
            name=paypal_payment_definition['name'],
            type=paypal_payment_definition['type'],
            frequency=paypal_payment_definition['frequency'],
            frequency_interval=frequency_interval,
            cycles=cycles,
            charge_models=json.dumps(utilities.object2dict(charge_models, False)),
            amount_value=amount_value,
            amount_currency=amount_currency,
            json=json.dumps(utilities.object2dict(paypal_payment_definition, False))
        )
        return True
    except Exception as ex:
        log.error("Error in billing plan's payment definition modification (pk:=%d): %s"
                  % (pk, str(ex)))
        return False
33,375
def format_float_list(array: List[float], precision: int = 4) -> List[str]: """ Formats a list of float values to a specific precision. :param array: A list of float values to format. :param precision: The number of decimal places to use. :return: A list of strings containing the formatted floats. """ return [format_float(f, precision) for f in array]
33,376
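The format_float helper is not shown here; the stand-in below is an assumption (plain fixed-point formatting), included only so the list version can be exercised.

def format_float(value: float, precision: int = 4) -> str:
    # Hypothetical stand-in for the helper assumed above.
    return f"{value:.{precision}f}"

print(format_float_list([3.14159, 2.71828], precision=3))  # ['3.142', '2.718']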
def counts_card() -> html.Div: """Return the div that contains the overall count of patients/studies/images.""" return html.Div( className="row", children=[ html.Div( className="four columns", children=[ html.Div( className="card gold-left-border", children=html.Div( className="container", children=[ html.H4(id="patient-count", children=""), html.P(children="patients"), ], ), ) ], ), html.Div( className="four columns", children=[ html.Div( className="card green-left-border", children=html.Div( className="container", children=[ html.H4(id="study-count", children=""), html.P(children="studies"), ], ), ) ], ), html.Div( className="four columns", children=[ html.Div( className="card purple-left-border", children=html.Div( className="container", children=[ html.H4(id="image-count", children=""), html.P(children="images"), ], ), ) ], ), ], )
33,377
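A minimal sketch of mounting this card in a Dash app; the scaffolding below assumes a recent Dash (>= 2.x) and is illustrative only, since the H4 ids would normally be populated by callbacks.

import dash

app = dash.Dash(__name__)
app.layout = counts_card()

if __name__ == "__main__":
    app.run(debug=True)  # app.run_server(...) on older Dash releases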
def validate_est(est: EstData, include_elster_responses: bool = False):
    """
    Data for an Est is validated using ERiC. If the validation is successful then this should return
    a 200 HTTP response with {'success': bool, 'est': est}. Otherwise this should return a 400 response
    if the validation failed with
    {'code': int, 'message': str, 'description': str, 'validation_problems': [{'code': int, 'message': str}]},
    or a 400 response for other client errors and a 500 response for server errors with
    {'code': int, 'message': str, 'description': str}.

    :param est: the JSON input data for the ESt
    :param include_elster_responses: query parameter which indicates whether the ERiC/Server responses are returned
    """
    try:
        request = EstValidationRequestController(est, include_elster_responses)
        result = request.process()
        if "transferticket" in result:
            result["transfer_ticket"] = result.pop("transferticket")
        return result
    except EricProcessNotSuccessful as e:
        logging.getLogger().info("Could not validate est", exc_info=True)
        raise HTTPException(status_code=422, detail=e.generate_error_response(include_elster_responses))
33,378
async def list_solver_releases( solver_key: SolverKeyId, user_id: int = Depends(get_current_user_id), catalog_client: CatalogApi = Depends(get_api_client(CatalogApi)), url_for: Callable = Depends(get_reverse_url_mapper), ): """ Lists all releases of a given solver """ releases: List[Solver] = await catalog_client.list_solver_releases( user_id, solver_key ) for solver in releases: solver.url = url_for( "get_solver_release", solver_key=solver.id, version=solver.version ) return sorted(releases, key=attrgetter("pep404_version"))
33,379
def read_silicon_data(tool, target: Target): """ Reads silicon data from device :param tool: Programming/debugging tool used for communication :param target: The target object. :return: Device response """ logger.debug('Read silicon data') tool.reset(ResetType.HW) passed, response = provision_keys_and_policies(tool, None, target.register_map) return response
33,380
def offset_perimeter(geometry, offset, side='left', plot_offset=False):
    """Offsets the perimeter of a geometry of a
    :class:`~sectionproperties.pre.sections.Geometry` object by a certain distance. Note that
    the perimeter facet list must be entered in a consecutive order.

    :param geometry: Cross-section geometry object
    :type geometry: :class:`~sectionproperties.pre.sections.Geometry`
    :param float offset: Offset distance for the perimeter
    :param string side: Side of the perimeter offset, either 'left' or 'right'. E.g. 'left' for a
        counter-clockwise oriented perimeter offsets it inwards.
    :param bool plot_offset: If set to True, generates a plot comparing the old and new geometry

    The following example 'corrodes' a 200UB25 I-section by 1.5 mm and compares a few of the
    section properties::

        import sectionproperties.pre.sections as sections
        from sectionproperties.pre.offset import offset_perimeter
        from sectionproperties.analysis.cross_section import CrossSection

        # calculate original section properties
        original_geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=16)
        original_mesh = original_geometry.create_mesh(mesh_sizes=[3.0])
        original_section = CrossSection(original_geometry, original_mesh)
        original_section.calculate_geometric_properties()
        original_area = original_section.get_area()
        (original_ixx, _, _) = original_section.get_ic()

        # calculate corroded section properties
        corroded_geometry = offset_perimeter(original_geometry, 1.5, plot_offset=True)
        corroded_mesh = corroded_geometry.create_mesh(mesh_sizes=[3.0])
        corroded_section = CrossSection(corroded_geometry, corroded_mesh)
        corroded_section.calculate_geometric_properties()
        corroded_area = corroded_section.get_area()
        (corroded_ixx, _, _) = corroded_section.get_ic()

        # compare section properties
        print("Area reduction = {0:.2f}%".format(
            100 * (original_area - corroded_area) / original_area))
        print("Ixx reduction = {0:.2f}%".format(
            100 * (original_ixx - corroded_ixx) / original_ixx))

    The following plot is generated by the above example:

    ..  figure:: ../images/offset_example.png
        :align: center
        :scale: 75 %

        200UB25 with 1.5 mm corrosion.

    The following is printed to the terminal:

    ..  code-block:: text

        Area reduction = 41.97%
        Ixx reduction = 39.20%
    """
    # initialise perimeter points list
    perimeter_points = []

    # add perimeter points to the list
    for facet_idx in geometry.perimeter:
        # get the facet
        facet = geometry.facets[facet_idx]

        # get the first point on the facet
        point = geometry.points[facet[0]]

        # add the (x,y) tuple to the list
        perimeter_points.append((point[0], point[1]))

    # create LinearRing object
    perimeter = LinearRing(perimeter_points)

    # offset perimeter
    new_perimeter = perimeter.parallel_offset(
        distance=offset, side=side, resolution=0, join_style=2
    )
    (new_xcoords, new_ycoords) = new_perimeter.xy

    # create deep copy of original geometry object
    new_geometry = copy.deepcopy(geometry)

    # replace offset points in new geometry
    for (i, facet_idx) in enumerate(new_geometry.perimeter):
        # get the facet
        facet = new_geometry.facets[facet_idx]

        # get the first point on the facet
        point = new_geometry.points[facet[0]]

        # replace the point location with the offset location
        point[0] = new_xcoords[i]
        point[1] = new_ycoords[i]

    if plot_offset:
        (fig, ax) = plt.subplots()

        # plot new geometry
        for (i, f) in enumerate(new_geometry.facets):
            if i == 0:
                ax.plot([new_geometry.points[f[0]][0], new_geometry.points[f[1]][0]],
                        [new_geometry.points[f[0]][1], new_geometry.points[f[1]][1]],
                        'ko-', markersize=2, label='Offset Geometry')
            else:
                ax.plot([new_geometry.points[f[0]][0], new_geometry.points[f[1]][0]],
                        [new_geometry.points[f[0]][1], new_geometry.points[f[1]][1]],
                        'ko-', markersize=2)

        # plot the original perimeter
        for (i, facet_idx) in enumerate(geometry.perimeter):
            f = geometry.facets[facet_idx]

            if i == 0:
                ax.plot([geometry.points[f[0]][0], geometry.points[f[1]][0]],
                        [geometry.points[f[0]][1], geometry.points[f[1]][1]],
                        'r--', markersize=2, label='Original Perimeter')
            else:
                ax.plot([geometry.points[f[0]][0], geometry.points[f[1]][0]],
                        [geometry.points[f[0]][1], geometry.points[f[1]][1]],
                        'r--', markersize=2)

        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_title('Offset Geometry')
        ax.set_aspect('equal', anchor='C')
        plt.tight_layout()
        plt.show()

    return new_geometry
33,381
def WalterComposition(F, P):
    """
    Calculates the melt composition generated as a function of F and P, using the
    parameterisation of Duncan et al. (2017).

    Parameters
    -----
    F: float
        Melt fraction
    P: float
        Pressure in GPa

    Returns
    -----
    MeltComposition: series
        Major elements in wt%
    T: float
        Temperatures associated with the melt in C
    """
    if isinstance(F, float):
        F = np.array([F])
        P = np.array([P])
    if isinstance(F, list):
        F = np.array(F)
        P = np.array(P)

    comp = pd.DataFrame(np.array([P, F]).T, columns=['P', 'X'])

    F = F * 100

    if F[F.argmin()] == 0:
        F[F.argmin()] = F[F.argmin() + 1]

    comp['SiO2'] = ((-2.137e-5*P - 9.83e-4)*F**2 + (5.975e-3*P + 8.513e-2)*F
                    + (-4.236e-1*P + 4.638e1))
    comp['Al2O3'] = ((-1.908e-4*P - 1.366e-4)*F**2 + (4.589e-2*P - 1.525e-1)*F
                     + (-2.685*P + 2.087e1))
    comp['FeO'] = ((2.365e-4*P - 8.492e-4)*F**2 + (-3.723e-2*P + 1.1e-1)*F
                   + (1.366*P + 5.488))
    comp['MgO'] = ((-8.068e-5*P + 1.747e-3)*F**2 + (-1.268e-2*P + 9.761e-2)*F
                   + (2.12*P + 9.914))
    comp['CaO'] = ((4.305e-5*P - 4.513e-4)*F**2 + (1.104e-3*P - 4.948e-2)*F
                   + (-5.564e-1*P + 1.294e1))
    comp['TiO2'] = 12.370 * F**-0.917
    comp['Na2O'] = 5.339 * F**-0.654
    comp['K2O'] = 6.612 * F**-0.923

    # Cap the incompatible elements at their F = 3% values, using .loc to
    # avoid pandas chained-assignment issues.
    limTiO2 = 12.370 * 3**-0.917
    limNa2O = 5.339 * 3**-0.654
    limK2O = 6.612 * 3**-0.923
    comp.loc[comp.TiO2 > limTiO2, 'TiO2'] = limTiO2
    comp.loc[comp.Na2O > limNa2O, 'Na2O'] = limNa2O
    comp.loc[comp.K2O > limK2O, 'K2O'] = limK2O

    comp['Cr2O3'] = -7.86e-5*F**2 + 9.705e-3*F + 2.201e-1
    comp['MnO'] = -6.757e-6*F**2 - 2.04e-5*F + 2.014e-1

    T = ((8.597e-3*P - 1.963e-2)*F**2 + (-1.374*P + 7.383)*F + 9.369e1*P + 1.177e3)

    return comp, T
33,382
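An illustrative call only (values are not a validated result); assumes numpy and pandas are imported as in the function above.

import numpy as np

comp, T = WalterComposition(F=np.array([0.05, 0.20]), P=np.array([2.0, 2.0]))
print(T)                             # melt temperatures in degrees C
print(comp[['SiO2', 'MgO', 'FeO']])  # major-element oxides in wt%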
def fetch_study_metadata( data_dir: Path, version: int = 7, verbose: int = 1 ) -> pd.DataFrame: """ Download if needed the `metadata.tsv.gz` file from Neurosynth and load it into a pandas DataFrame. The metadata table contains the metadata for each study. Each study (ID) is stored on its own line. These IDs are in the same order as the id column of the associated `coordinates.tsv.gz` file, but the rows will differ because the coordinates file will contain multiple rows per study. They are also in the same order as the rows in the `features.npz` files for the same version. The metadata will therefore have N rows, N being the number of studies in the Neurosynth dataset. The columns (for version 7) are: - id - doi - space - title - authors - year - journal Parameters ---------- data_dir : Path the path for the directory where downloaded data should be saved. version : int, optional the neurosynth data version, by default 7 verbose : int, optional verbose param for nilearn's `_fetch_files`, by default 1 Returns ------- pd.DataFrame the study metadata dataframe """ metadata_filename = f"data-neurosynth_version-{version}_metadata.tsv.gz" metadata_file = _fetch_files( data_dir, [ ( metadata_filename, NS_DATA_URL + metadata_filename, {}, ), ], verbose=verbose, )[0] metadata = pd.read_table(metadata_file) return metadata
33,383
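A short usage sketch; the cache directory is a placeholder, network access is required, and the download only happens if the file is not already cached.

from pathlib import Path

metadata = fetch_study_metadata(Path.home() / "neurosynth_data")
print(metadata[["id", "title", "year"]].head())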
def proveFormula(formula: str) -> Union[int, str]: """ Implements proveFormula according to grader.py >>> proveFormula('p') 1 >>> proveFormula('(NOT (NOT (NOT (NOT not)) )\t)') 1 >>> proveFormula('(NOT (NOT (NOT (NOT not)) )') 'E' >>> proveFormula('(IF p p)') 'T' >>> proveFormula('(AND p (NOT p))') 'U' >>> proveFormula('(OR p (NOT q))') 3 """ ast = parse(formula) if ast is None: return 'E' result = determine_satisfiability(ast) if result is True: return 'T' if result is False: return 'U' return result
33,384
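parse and determine_satisfiability are not shown. The sketch below is one plausible implementation of the latter by truth-table enumeration, assuming (an assumption, not the grader's actual representation) that parse returns nested tuples like ('AND', a, b) with variables as plain strings; it reproduces the doctest outcomes above.

from itertools import product

def _variables(ast):
    # Collect variable names from the assumed tuple-shaped AST.
    if isinstance(ast, str):
        return {ast}
    return set().union(*(_variables(sub) for sub in ast[1:]))

def _eval(ast, env):
    # Evaluate the formula under a truth assignment `env`.
    if isinstance(ast, str):
        return env[ast]
    op = ast[0]
    if op == 'NOT':
        return not _eval(ast[1], env)
    if op == 'AND':
        return _eval(ast[1], env) and _eval(ast[2], env)
    if op == 'OR':
        return _eval(ast[1], env) or _eval(ast[2], env)
    if op == 'IF':
        return (not _eval(ast[1], env)) or _eval(ast[2], env)
    raise ValueError(f"unknown operator: {op}")

def determine_satisfiability(ast):
    # True for a tautology, False for an unsatisfiable formula,
    # otherwise the number of satisfying assignments.
    variables = sorted(_variables(ast))
    count = sum(_eval(ast, dict(zip(variables, bits)))
                for bits in product([False, True], repeat=len(variables)))
    if count == 2 ** len(variables):
        return True
    if count == 0:
        return False
    return count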
def read_config(): """Read configuration file.""" config_file = os.getenv('CONFIG_FILE_PATH') if not config_file: config_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'config.json') with open(config_file) as file: return json.load(file)
33,385
def add_and_check(database, table_name, episode, expected=1): """ utility to add episode and ensure there is a single episode for that podcast """ assert database.add_new_episode_data(table_name, episode) eps = database.find_episodes_to_download(table_name) assert len(eps) == expected eps = database.find_episodes_to_copy(table_name) assert len(eps) == 0
33,386
def sendSingleCommand(server, user, password, command):
    """Wrapper function to open a connection and execute a single command.

    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        command (str): The command to be executed.

    Returns:
        String: String containing the command output.
    """
    # Open SSH connection
    channel = openChannel(server, user, password)
    output = ""
    try:
        output = sendCommand(channel, command)
    finally:
        # Close ssh connection
        closeChannel(channel)
    return output
33,387
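A one-line usage sketch; the host and credentials are placeholders, and openChannel/sendCommand/closeChannel are assumed to be defined elsewhere in the same module.

print(sendSingleCommand("192.0.2.10", "admin", "s3cret", "uname -a"))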
def test_piecewise_fermidirac(precision): """Creates a Chebyshev approximation of the Fermi-Dirac distribution within the interval (-3, 3), and tests its accuracy for scalars, matrices, and distributed matrices. """ mu = 0.0 beta = 10.0 def f(x): return 1 / (np.exp(beta * (x - mu)) + 1) is_vectorized = True interval = (-3, 3) n_cheb = 200 # The first one comes from Chebyshev error, the latter from numerical. rtol = max(5e-6, 10 * testutils.eps(precision)) atol = max(5e-6, 10 * testutils.eps(precision)) test_samples = 1000 test_margin = 0 p_sz = 16 D = 128 dtype = np.float32 M = random_self_adjoint(D, dtype) # Make sure the spectrum of M is within the interval. interval_range = max(abs(i) for i in interval) M = M / (jnp.linalg.norm(M) / interval_range) v = np.random.randn(D, 1).astype(dtype) chebyshev_test( f, interval, M, v, n_cheb, is_vectorized, atol, rtol, test_samples, test_margin, p_sz, precision=precision, )
33,388
def load_img(flist):
    """
    Loads images into a list of arrays.

    Args:
        flist: list of image file paths.

    Returns:
        list of all the ndimage arrays, plus the last image read
        (whose .shape gives (height, width, channels)).
    """
    rgb_imgs = []
    for i in flist:
        rgb_imgs.append(cv2.imread(i, -1))  # flag <0 to return img as is
    print("\t> Batch import of N frames\t", len(rgb_imgs))
    size_var = cv2.imread(i)  # last image; size_var.shape == (height, width, channels)
    return rgb_imgs, size_var
33,389
def makesimpleheader(headerin, naxis=2, radesys=None, equinox=None, pywcsdirect=False):
    """
    Function to make a new 'simple header' from the WCS information in the input header.

    Parameters
    ----------
    headerin : astropy.io.fits.header
        Header object
    naxis : int
        Specifies how many axes the final header should have.  Default=2
    radesys : str
        RA/DEC system to use (valid SkyCoord frame system, e.g. 'icrs')
    equinox : str
        Equinox to use for the output header
    pywcsdirect : bool
        True to create the header directly with astropy.wcs.WCS

    Returns
    -------
    astropy.io.fits.header
        Output header
    """
    if type(headerin) == str:
        import astropy.io.fits as pyfits
        headerin = pyfits.getheader(headerin)

    if pywcsdirect is True:
        wcstemp = pywcs.WCS(header=headerin)
    else:
        wcstemp = pywcs.WCS(naxis=naxis)
        if naxis > 2:
            wcstemp.wcs.crpix = [float(headerin['CRPIX1']), float(headerin['CRPIX2']),
                                 float(headerin['CRPIX3'])]
            wcstemp.wcs.crval = [float(headerin['CRVAL1']), float(headerin['CRVAL2']),
                                 float(headerin['CRVAL3'])]
            wcstemp.wcs.ctype = [headerin['CTYPE1'], headerin['CTYPE2'], headerin['CTYPE3']]
            try: wcstemp.wcs.cunit = [headerin['CUNIT1'], headerin['CUNIT2'], headerin['CUNIT3']]
            except Exception: pass
            try: wcstemp.wcs.cdelt = list(getcdelts(headerin)) + [headerin['CDELT3']]
            except Exception: raise Exception('Invalid WCS CDELTS')
        else:
            wcstemp.wcs.crpix = [float(headerin['CRPIX1']), float(headerin['CRPIX2'])]
            wcstemp.wcs.crval = [float(headerin['CRVAL1']), float(headerin['CRVAL2'])]
            wcstemp.wcs.ctype = [headerin['CTYPE1'], headerin['CTYPE2']]
            try: wcstemp.wcs.cunit = [headerin['CUNIT1'], headerin['CUNIT2']]
            except Exception: pass
            try: wcstemp.wcs.cdelt = list(getcdelts(headerin))
            except Exception: raise Exception('Invalid WCS CDELTS')
        try: crota = getcdelts(headerin, getrot=True)[-1]  # degrees, from N
        except Exception: raise Exception('Invalid WCS params for CROTAx')
        #if crota!=0.: wcstemp.wcs.crota=[crota]*2  #Header will include PC_x cards if crota not 0

    try: wcstemp.wcs.radesys = headerin['RADESYS']
    except Exception: pass
    try: wcstemp.wcs.equinox = headerin['EQUINOX']
    except Exception: pass
    if radesys is not None:
        wcstemp.wcs.radesys = radesys  # e.g. 'FK5', 'ICRS'. For manually forcing string, not true reprojection.
    if equinox is not None:
        wcstemp.wcs.equinox = equinox  # e.g. 2000.0

    simpleheader = wcstemp.to_header()

    if pywcsdirect is False:
        if crota != 0.:
            simpleheader['CROTA2'] = crota  # Alternative method to just use (deprecated) CROTA2 card

    simpleheader['NAXIS'] = naxis
    try:
        simpleheader['NAXIS1'] = int(headerin['NAXIS1'])
        simpleheader['NAXIS2'] = int(headerin['NAXIS2'])
    except Exception: pass
    if naxis > 2:
        for card in ['NAXIS3', 'CRPIX3', 'CRVAL3', 'CDELT3', 'CTYPE3', 'CUNIT3',
                     'SPECSYS', 'ALTRVAL', 'ALTRPIX']:
            try: simpleheader[card] = headerin[card]
            except Exception: pass
    for card in ['CROTA', 'CROTA1', 'CROTA2', 'BSCALE', 'BZERO', 'ZSCALE', 'BMAJ', 'BMIN',
                 'BPA', 'JANSCALE', 'FLUXCONV', 'WAVELEN', 'FREQ', 'RESTFRQ', 'LATPOLE',
                 'LONPOLE']:
        try: simpleheader[card] = float(headerin[card])
        except Exception: pass
    for card in ['BUNIT', 'OBJECT', 'TELESCOP', 'ZUNITS', 'SPECSYS']:
        try: simpleheader[card] = headerin[card]
        except Exception: pass

    return simpleheader
33,390
def _callcatch(ui, func): """like scmutil.callcatch but handles more high-level exceptions about config parsing and commands. besides, use handlecommandexception to handle uncaught exceptions. """ detailed_exit_code = -1 try: return scmutil.callcatch(ui, func) except error.AmbiguousCommand as inst: detailed_exit_code = 10 ui.warn( _(b"hg: command '%s' is ambiguous:\n %s\n") % (inst.prefix, b" ".join(inst.matches)) ) except error.CommandError as inst: detailed_exit_code = 10 if inst.command: ui.pager(b'help') msgbytes = pycompat.bytestr(inst.message) ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes)) commands.help_(ui, inst.command, full=False, command=True) else: ui.warn(_(b"hg: %s\n") % inst.message) ui.warn(_(b"(use 'hg help -v' for a list of global options)\n")) except error.UnknownCommand as inst: detailed_exit_code = 10 nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command try: # check if the command is in a disabled extension # (but don't check for extensions themselves) formatted = help.formattedhelp( ui, commands, inst.command, unknowncmd=True ) ui.warn(nocmdmsg) ui.write(formatted) except (error.UnknownCommand, error.Abort): suggested = False if inst.all_commands: sim = error.getsimilar(inst.all_commands, inst.command) if sim: ui.warn(nocmdmsg) ui.warn(b"(%s)\n" % error.similarity_hint(sim)) suggested = True if not suggested: ui.warn(nocmdmsg) ui.warn(_(b"(use 'hg help' for a list of commands)\n")) except IOError: raise except KeyboardInterrupt: raise except: # probably re-raises if not handlecommandexception(ui): raise if ui.configbool(b'ui', b'detailed-exit-code'): return detailed_exit_code else: return -1
33,391
def _determ_estim_update(new_bit, counts):
    """Deterministic estimator update: believes only in a sequence of all
    ones or all zeros, and returns the incremental log-probability of
    appending `new_bit` to the counts seen so far.
    """
    new_counts = counts[:]
    new_counts[new_bit] += 1
    if new_counts[0] > 0 and new_counts[1] > 0:
        return LOG_ZERO
    log_p_new = _determ_log_p(new_counts)
    log_p_old = _determ_log_p(counts)
    return log_p_new - log_p_old
33,392
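_determ_log_p is not shown; the stand-in below is an assumption (a 1/2 prior on which symbol the deterministic source emits) chosen so the incremental updates behave sensibly.

import math

LOG_ZERO = float('-inf')

def _determ_log_p(counts):
    # Hypothetical stand-in: log-probability of an all-same bit sequence.
    n0, n1 = counts
    if n0 > 0 and n1 > 0:
        return LOG_ZERO
    if n0 == 0 and n1 == 0:
        return 0.0
    return math.log(0.5)

counts = [0, 0]
for bit in [1, 1, 1, 0]:
    print(_determ_estim_update(bit, counts))  # log(0.5), 0.0, 0.0, -inf
    counts[bit] += 1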
def get_projects(config):
    """Find all XNAT projects and the list of scan sites uploaded to each one.

    Args:
        config (:obj:`datman.config.config`): The config for a study

    Returns:
        dict: A map of XNAT project names to the set of scan sites uploaded
            to each one.
    """
    projects = {}
    for site in config.get_sites():
        xnat_project = config.get_key("XnatArchive", site=site)
        projects.setdefault(xnat_project, set()).add(site)
    return projects
33,393
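A minimal fake config, standing in for datman's config object (only the two methods get_projects actually uses), to show the project-to-sites grouping:

class FakeConfig:
    # Stand-in exposing only what get_projects relies on.
    def get_sites(self):
        return ["CMH", "UT1", "UT2"]

    def get_key(self, key, site=None):
        assert key == "XnatArchive"
        return {"CMH": "STUDY01", "UT1": "STUDY01", "UT2": "STUDY02"}[site]

print(get_projects(FakeConfig()))
# {'STUDY01': {'CMH', 'UT1'}, 'STUDY02': {'UT2'}}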
def load_jsonrpc_method(name):
    """Load a method based on the file naming conventions for the JSON-RPC."""
    base_path = (repo_root() / "doc" / "schemas").resolve()
    req_file = base_path / f"{name.lower()}.request.json"
    resp_file = base_path / f"{name.lower()}.schema.json"
    with open(req_file) as f:
        request = CompositeField.from_js(json.load(f), path=name)
    with open(resp_file) as f:
        response = CompositeField.from_js(json.load(f), path=name)

    # Normalize the method request and response typename so they no
    # longer conflict.
    request.typename += "Request"
    response.typename += "Response"

    return Method(
        name=method_name_override.get(name, name),
        request=request,
        response=response,
    )
33,394
def download_missing_namespace(network_id: int, namespace: str): """Output a namespace built from the missing names in the given namespace. --- tags: - network parameters: - name: network_id in: path description: The database network identifier required: true type: integer - name: namespace in: path description: The keyword of the namespace to extract required: true type: string """ graph = manager.cu_authenticated_get_graph_by_id_or_404(network_id) names = get_incorrect_names_by_namespace(graph, namespace) # TODO put into report data return _build_namespace_helper(graph, namespace, names)
33,395
def client_decrypt_hello_reply(ciphertext, iv1, key1): """ Decrypt the server's reply using the IV and key we sent to it. Returns iv2, key2, salt2 (8 bytes), and the original salt1. The pair iv2/key2 are to be used in future communications. Salt1 is returned to help confirm the integrity of the operation. """ iv1 = bytes(iv1) key1 = bytes(key1) # iv_ = ciphertext[0:AES_BLOCK_BYTES] # of no interest cipher = Cipher( algorithms.AES(key1), modes.CBC(iv1), backend=default_backend()) decryptor = cipher.decryptor() plaintext = decryptor.update(ciphertext) + decryptor.finalize() # unpadded = strip_pkcs7_padding(plaintext, AES_BLOCK_BYTES) unpadder = padding.PKCS7(AES_BLOCK_BITS).unpadder() unpadded = unpadder.update(plaintext) + unpadder.finalize() iv2 = unpadded[:AES_BLOCK_BYTES] key2 = unpadded[AES_BLOCK_BYTES: 3 * AES_BLOCK_BYTES] salt2 = unpadded[3 * AES_BLOCK_BYTES: 3 * AES_BLOCK_BYTES + 8] salt1 = unpadded[3 * AES_BLOCK_BYTES + 8: 3 * AES_BLOCK_BYTES + 16] v_bytes = unpadded[3 * AES_BLOCK_BYTES + 16: 3 * AES_BLOCK_BYTES + 20] version2 = v_bytes[0] |\ (v_bytes[1] << 8) |\ (v_bytes[2] << 16) |\ (v_bytes[3] << 24) return iv2, key2, salt2, salt1, version2
33,396
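For illustration, a hypothetical server-side counterpart that emits exactly the plaintext layout the decoder above slices apart (iv2 | key2 | salt2 | salt1 | little-endian version); this sketches the wire format and is not the project's actual server code.

import os
import struct

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

AES_BLOCK_BYTES = 16
AES_BLOCK_BITS = 128

def server_encrypt_hello_reply(iv1, key1, salt1, version2):
    # Fresh session material for future communications.
    iv2 = os.urandom(AES_BLOCK_BYTES)
    key2 = os.urandom(2 * AES_BLOCK_BYTES)
    salt2 = os.urandom(8)
    # iv2 | key2 | salt2 | salt1 | version (little-endian uint32)
    plaintext = iv2 + key2 + salt2 + salt1 + struct.pack('<I', version2)
    padder = padding.PKCS7(AES_BLOCK_BITS).padder()
    padded = padder.update(plaintext) + padder.finalize()
    cipher = Cipher(
        algorithms.AES(key1), modes.CBC(iv1), backend=default_backend())
    encryptor = cipher.encryptor()
    return encryptor.update(padded) + encryptor.finalize()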
def load_labeled_data(filename): """ Loads data from a csv, where the last column is the label of the data in that row :param filename: name of the file to load :return: data frames and labels in separate arrays """ dataframe = pandas.read_csv(filename, header=None) dataset = dataframe.values data = dataset[:, 0:-1].astype(float) labels = dataset[:, -1] return data, labels
33,397
def get_logger(name, level='debug', log_file='log.txt'):
    """
    Retrieve a logger for SWIFLow that writes to a log file in a consistent format.
    """
    # Setup logging
    log_level = level.upper()

    # Add a custom format for logging
    fmt = "%(levelname)s: %(msg)s"

    log = logging.getLogger(name)

    # Log to the given file, no screen output.
    logging.basicConfig(filename=log_file,
                        filemode='w+',
                        level=log_level,
                        format=fmt)

    return log
33,398
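Typical usage under the assumptions above (file-only logging; names are placeholders):

log = get_logger("swiflow", level="info", log_file="run.log")
log.info("pipeline started")  # written to run.log, not the screen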
def filesystem_move(
        source_path,
        source_type,
        destination_path,
        destination_type,
        backup_ending,
):
    """
    Copies a file or directory from the source to the destination (the source
    is left in place), moving any pre-existing destination to a backup first.

    Arguments
    ---------
    source_path: path to the source
    source_type: Type of the source (a directory -> 'dir' or a file -> 'file')
    destination_path: path to the destination
    destination_type: Type of the destination (place it in a directory ->
        'dir' or replace it -> 'file')
    backup_ending: the file ending for backup files

    Returns
    -------
    The hash of the copy at the destination
    """
    (
        source_path,
        source_type,
        destination_path,
        destination_type,
        backup_ending,
        _,
    ) = sh.filesystem_type_check(
        source_path,
        source_type,
        destination_path,
        destination_type,
        backup_ending,
    )

    # destination file with name of source exists
    if (source_type == 'dir' and os.path.isdir(destination_path)) or (
            source_type == 'file' and os.path.isfile(destination_path)):
        # Backup file name already exists
        backup_path = destination_path + backup_ending
        if os.path.exists(backup_path):
            raise FileExistsError(
                errno.EEXIST,
                os.strerror(errno.EEXIST),
                backup_path,
            )
        # move old file to backup
        os.rename(destination_path, backup_path)
    elif os.path.exists(destination_path):
        raise ValueError(
            "Expected a {} at `{}`, but did not find one.".format(
                "file" if source_type == "file" else "directory",
                destination_path))

    if source_type == 'dir':
        os.mkdir(destination_path)
        for root, dirs, files in os.walk(source_path):
            # set the prefix from source_path to destination_path
            dest_root = os.path.join(destination_path,
                                     root[len(source_path) + 1:])
            for directory in dirs:
                os.mkdir(os.path.join(dest_root, directory))
            for fil in files:
                shutil.copyfile(
                    os.path.join(root, fil),
                    os.path.join(dest_root, fil),
                )
        return sh.hash_directory(destination_path)
    elif source_type == 'file':
        # finally copy source to destination
        shutil.copyfile(source_path, destination_path)
        return sh.hash_file(destination_path)
33,399