Dataset columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def mtl_to_json(mtl_text):
    """ Convert Landsat MTL file to dictionary of metadata values """
    mtl = {}
    for line in mtl_text.split('\n'):
        meta = line.replace('\"', "").strip().split('=')
        if len(meta) > 1:
            key = meta[0].strip()
            item = meta[1].strip()
            if key != "GROUP" and key != "END_GROUP":
                mtl[key] = item
    return mtl
310be04e9fbf756e9cf5ead60e53aae974d2ed50
3,640,000
def endian_swap(word):
    """Given an 8-character hex string, reverse its byte order (pairs of characters) and return the result.

    :rtype: str
    """
    return "".join([word[i:i + 2] for i in [6, 4, 2, 0]])
dfca46a012602150957a0830cf30cc6b6790df80
3,640,001
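A minimal usage sketch for the endian_swap record above (assuming an 8-character hex string; the value "deadbeef" is illustrative, not from the dataset):

assert endian_swap("deadbeef") == "efbeadde"  # byte pairs reversed: de ad be ef -> ef be ad de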
import logging


def get_grundsteuer(request_id: str):
    """
    Route for retrieving job status of a grundsteuer tax declaration validation from the queue.

    :param request_id: the id of the job.
    """
    try:
        raise NotImplementedError()
    except NotImplementedError:
        logging.getLogger().info("Could not retrieve status of job " + request_id, exc_info=True)
        return JSONResponse(status_code=500, content=generate_dummy_error_response())
d92431ff1e09652d78b7beeaeabdeb2d502d0829
3,640,002
def str_to_col_grid_lists(s):
    """
    Convert a string to selected columns and selected grid ranges.

    Parameters:
        s: (str) a string representing one solution. For instance, *3**9 means 2 out of 5
            dimensions are selected; the second and the last columns are selected, and their
            corresponding grid ranges are 3 and 9. The function will return (1, 4) and (3, 9).

    Return:
        selected_cols (list): list of columns selected as indicated by the string.
        selected_ranges (list): list of grid ranges selected as indicated by the string.
    """
    selected_cols, selected_ranges = [], []
    for i in range(len(s)):
        if s[i] != "*":
            selected_cols.append(i)
            selected_ranges.append(int(s[i]))
    return selected_cols, selected_ranges
4f5c67afa0dc97070b08223acbe6764010fd213a
3,640,003
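A short usage sketch for the str_to_col_grid_lists record above, reusing the "*3**9" example from its docstring:

cols, ranges = str_to_col_grid_lists("*3**9")
assert cols == [1, 4] and ranges == [3, 9]  # the second and last dimensions are selected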
import uuid
from typing import List, Union


def get_installation_indices_by_installation_id(
    db_session: Session, installation_id: Union[str, uuid.UUID]
) -> List[SlackIndexConfiguration]:
    """
    Gets all the indices set up in an installation given the ID of that installation.
    """
    bot_installation = (
        db_session.query(SlackOAuthEvent)
        .filter(SlackOAuthEvent.id == installation_id)
        .one()
    )
    return get_installation_indices(db_session, bot_installation)
0025599259a8f23e1da462d465448f3ed9a1701f
3,640,004
import os

from osgeo import gdal, gdalconst


def convert_hdf(proj_dir, dir_list, hdf_filepath_list, hdf_filename_list):
    """Converts downloaded MODIS HDF files into geotiff file format."""
    global src_xres
    global src_yres
    geotiff_list = []
    print("Converting MODIS HDF files to geotiff format...")
    out_format = 'GTiff'
    # materialize the pairing so it can be re-iterated for every directory
    local_array = list(zip(hdf_filepath_list, hdf_filename_list))
    for dir in dir_list:
        for in_filepath, out_filename in local_array:
            # Open the LST_Day_1km dataset
            src_open = gdal.Open(in_filepath, gdalconst.GA_ReadOnly)  # open file with all sub-datasets
            src_subdatasets = src_open.GetSubDatasets()  # make a list of sub-datasets in the HDF file
            subdataset = gdal.Open(src_subdatasets[0][0])
            # Get parameters from LST dataset
            src_cols = subdataset.RasterXSize
            src_rows = subdataset.RasterYSize
            src_band_count = subdataset.RasterCount
            src_geotransform = subdataset.GetGeoTransform()
            src_xres = src_geotransform[1]
            src_yres = src_geotransform[5]
            src_proj = subdataset.GetProjection()
            # Read dataset to array
            src_band = subdataset.GetRasterBand(1)
            src_array = src_band.ReadAsArray(0, 0, src_cols, src_rows).astype(float)
            # Set up output file
            driver = gdal.GetDriverByName(out_format)
            out_file = os.path.join(dir, "%s.%s" % (out_filename, "tif"))
            out_geotiff = driver.Create(out_file, src_cols, src_rows, src_band_count, gdal.GDT_Float32)
            out_geotiff.SetGeoTransform(src_geotransform)
            out_geotiff.SetProjection(src_proj)
            out_geotiff.GetRasterBand(1).WriteArray(src_array)
            out_geotiff.FlushCache()
            # Create list of output geotiffs
            geotiff_list.append(out_file)
    return geotiff_list, src_xres, src_yres
f74b3e89b957746aaec9c04b4615bc5a3f7388e7
3,640,005
def _join_type_and_checksum(type_list, checksum_list):
    """
    Join checksums and their correlated types together into the following format:
    "checksums": [{"type": "md5", "checksum": "abcdefg"}, {"type": "sha256", "checksum": "abcd12345"}]
    """
    checksums = [
        {
            "type": c_type,
            "checksum": checksum,
        }
        for c_type, checksum in zip(type_list, checksum_list)
    ]
    return checksums
7f09ee72c6f51ad87d75a9b5e74ad8ef4776323f
3,640,006
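A brief usage sketch for the _join_type_and_checksum record above (the checksum values are illustrative):

checksums = _join_type_and_checksum(["md5", "sha256"], ["abcdefg", "abcd12345"])
assert checksums == [{"type": "md5", "checksum": "abcdefg"},
                     {"type": "sha256", "checksum": "abcd12345"}]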
import pandas as pd


def _local_groupby(df_rows, axis=0):
    """Apply a groupby on this partition for the blocks sent to it.

    Args:
        df_rows ([pd.DataFrame]): A list of dataframes for this partition. Goes
            through the Ray object store.

    Returns:
        A DataFrameGroupBy object from the resulting groupby.
    """
    concat_df = pd.concat(df_rows, axis=axis)
    return concat_df.groupby(concat_df.index)
d78cd88bac7b03136bbe8401d207ee10c2d031f9
3,640,007
def colors_terrain() -> dict: """ Age of Empires II terrain colors for minimap. Credit for a list of Age of Empires II terrain and player colors goes to: https://github.com/goto-bus-stop/recanalyst. This function has great potential for contributions from designers and other specialists. Got information what Terrain IDs are what? Got better color suggestions? Please create an issue https://github.com/Deasilsoft/a2j/issues! Pull requests would be even more awesome! :rtype: dict """ return { 0: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, # WATER 1: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, # SHORES 2: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 3: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 4: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 5: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 6: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 7: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 8: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 9: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 10: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 11: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 12: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, # FOREST 13: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 14: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 15: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, # CLIFFS 16: { 0: (128, 100, 100), 1: (128, 100, 100), 2: (128, 100, 100), }, 17: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 18: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 19: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 20: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 21: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 22: { 0: (0, 74, 161), 1: (0, 74, 161), 2: (0, 74, 161), }, 23: { 0: (0, 74, 187), 1: (0, 74, 187), 2: (0, 74, 187), }, 24: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 25: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 26: { 0: (152, 192, 240), 1: (152, 192, 240), 2: (152, 192, 240), }, 27: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 28: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, 29: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 30: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 31: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 32: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 33: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 34: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 35: { 0: (152, 192, 240), 1: (152, 192, 240), 2: (152, 192, 240), }, 36: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 37: { 0: (152, 192, 240), 1: (152, 192, 240), 2: (152, 192, 240), }, 38: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 39: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (189, 209, 253), }, 40: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 41: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 42: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 43: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 44: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 45: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 46: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 
47: { 0: (28, 28, 28), 1: (28, 28, 28), 2: (28, 28, 28), }, 48: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 49: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 50: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 51: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 52: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 53: { 0: (248, 201, 138), 1: (232, 180, 120), 2: (189, 150, 111), }, 54: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 55: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 56: { 0: (37, 116, 57), 1: (21, 118, 21), 2: (0, 114, 0), }, 57: { 0: (0, 74, 161), 1: (0, 74, 161), 2: (0, 74, 161), }, 58: { 0: (0, 84, 176), 1: (0, 84, 176), 2: (0, 84, 176), }, 59: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 60: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 61: { 0: (243, 170, 92), 1: (228, 162, 82), 2: (218, 156, 105), }, 62: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 63: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 64: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 65: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 66: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 67: { 0: (138, 139, 87), 1: (130, 136, 77), 2: (118, 130, 65), }, 68: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 69: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 70: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 71: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 72: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 73: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 74: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 75: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 76: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 77: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 78: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 79: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 80: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 81: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 82: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 83: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 84: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 85: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 86: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 87: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 88: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 89: { 0: (0, 169, 0), 1: (51, 151, 39), 2: (0, 141, 0), }, 90: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 91: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 92: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 93: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 94: { 0: (84, 146, 176), 1: (84, 146, 176), 2: (84, 146, 176), }, 95: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, 96: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, 97: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, 98: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), }, 99: { 0: (48, 93, 182), 1: (48, 93, 182), 2: (48, 93, 182), } }
8e8f00d689ce00203127a9d810b6017ee5a04e18
3,640,008
import argparse


def handle_kv_string(val):
    """This method is used as type field in --filter argument in ``buildtest buildspec find``.
    This method returns a dict of key,value pair where input is in format
    key1=val1,key2=val2,key3=val3

    Args:
        val (str): Input string in ``key1=value1,key2=value2`` format that is processed into a dictionary type

    Returns:
        dict: A dict mapping of key=value pairs
    """
    kv_dict = {}

    if "," in val:
        args = val.split(",")
        for kv in args:
            if "=" not in kv:
                raise argparse.ArgumentTypeError("Must specify k=v")

            key, value = kv.split("=")[0], kv.split("=")[1]
            kv_dict[key] = value

        return kv_dict

    if "=" not in val:
        raise argparse.ArgumentTypeError("Must specify in key=value format")

    key, value = val.split("=")[0], val.split("=")[1]
    kv_dict[key] = value
    return kv_dict
ccc51c26fe881660606c49a1b84a67a796f4083a
3,640,009
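A short usage sketch for the handle_kv_string record above (the key/value names are illustrative):

assert handle_kv_string("tags=fail") == {"tags": "fail"}
assert handle_kv_string("tags=fail,type=script") == {"tags": "fail", "type": "script"}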
from importlib import import_module


def _load_dataset(dataset_config, *args, num_batches=None, **kwargs):
    """
    Loads a dataset from configuration file

    If num_batches is None, this function will return a generator that iterates
    over the entire dataset.
    """
    dataset_module = import_module(dataset_config["module"])
    dataset_fn = getattr(dataset_module, dataset_config["name"])
    batch_size = dataset_config["batch_size"]
    framework = dataset_config.get("framework", "numpy")
    # XXX: BEGIN PATCH
    kwargs.update(dataset_config['kwargs'])
    # XXX: END PATCH
    dataset = dataset_fn(batch_size=batch_size, framework=framework, *args, **kwargs)
    if not isinstance(dataset, ArmoryDataGenerator):
        raise ValueError(f"{dataset} is not an instance of {ArmoryDataGenerator}")
    if dataset_config.get("check_run"):
        return EvalGenerator(dataset, num_eval_batches=1)
    if num_batches:
        return EvalGenerator(dataset, num_eval_batches=num_batches)
    return dataset
5a35be1cac9bf405206ebc29b24aa0c08c27a18f
3,640,010
import codecs
import os
from operator import itemgetter


def file_consolidate(filename):
    """ Consolidates duplicates and sorts by frequency for speedy lookup. """
    # TODO: Really big files should not be loaded fully into memory to sort them.
    # TODO: Make it more robust, actually checking for errors, etc.
    sorting_hat = []
    in_file = codecs.open(filename, 'r', 'utf-8')
    for data_in in in_file:
        sorting_hat.append(data_in)
    in_file.close()
    sorting_hat.sort()  # Put all the duplicate entries next to each other.

    # Consolidate duplicates while writing to an external file.
    old_ngram = ""
    old_total = 0
    out_file = codecs.open(filename + ".tmp", 'w', 'utf-8')
    for i in sorting_hat:
        line = i.split("\t")
        if old_ngram == line[0]:
            old_total += int(line[1])
        else:
            if old_ngram != "" and old_total > 0:
                out_file.write(old_ngram)
                out_file.write("\t")
                out_file.write(str(old_total))
                out_file.write("\n")
            old_ngram = line[0]
            old_total = int(line[1])
    out_file.write(old_ngram)
    out_file.write("\t")
    out_file.write(str(old_total))
    out_file.write("\n")
    out_file.close()
    os.remove(filename)
    os.rename(filename + ".tmp", filename)

    # Now sort it by frequency for high-speed lookups.
    # TODO: put this in with the consolidation loop above.
    in_file = codecs.open(filename, 'r', 'utf-8')
    sorting_hat = []
    for data_in in in_file:
        split_data = data_in.split("\t")
        if len(split_data) == 2:
            data_dict = {"name": split_data[0], "value": int(split_data[1])}
            sorting_hat.append(data_dict)
    in_file.close()
    sorting_hat.sort(key=itemgetter('value'), reverse=True)
    out_file = codecs.open(filename + ".tmp", 'w', 'utf-8')
    for item in sorting_hat:
        out_file.write(item["name"])
        out_file.write("\t")
        out_file.write(str(item["value"]))
        out_file.write("\n")
    out_file.close()
    os.remove(filename)
    os.rename(filename + ".tmp", filename)
    return len(sorting_hat)
0ebce1dca6700b19110fe045b1d9ae458ae1a9bb
3,640,011
def mock_checks_health(mocker: MockFixture):
    """Fixture for mocking checks.health."""
    return mocker.patch("website_checker.checks.health")
aa6dff915bc1559838e46cc3e486d916a2c9f117
3,640,012
import jwt
from typing import Any, Dict


def decode_jwt(jwt_string: str) -> Dict[Any, Any]:
    """
    Decodes the given JWT string without performing any verification.

    Args:
        jwt_string (str): A string of the JWT to decode.

    Returns:
        dict: A dictionary of the body of the JWT.
    """
    return jwt.decode(  # type: ignore
        jwt_string,
        algorithms=['ES256K'],
        options={"verify_signature": False}
    )
39b3e14a3eb63723b2a8df21d5252ea937b0a41b
3,640,013
import collections def _resolve_references(navigation, version, language): """ Iterates through an object (could be a dict, list, str, int, float, unicode, etc.) and if it finds a dict with `$ref`, resolves the reference by loading it from the respective JSON file. """ if isinstance(navigation, list): # navigation is type list, resolved_navigation should also be type list resolved_navigation = [] for item in navigation: resolved_navigation.append(_resolve_references(item, version, language)) return resolved_navigation elif isinstance(navigation, dict): # navigation is type dict, resolved_navigation should also be type dict resolved_navigation = collections.OrderedDict() if DEFAULT_BRANCH in navigation and version != 'doc_test': version = navigation[DEFAULT_BRANCH] for key, value in navigation.items(): if key == '$ref' and language in value: # The value is the relative path to the associated json file referenced_json = load_json_and_resolve_references(value[language], version, language) if referenced_json: resolved_navigation = referenced_json else: resolved_navigation[key] = _resolve_references(value, version, language) return resolved_navigation else: # leaf node: The type of navigation should be [string, int, float, unicode] return navigation
cb955d74844a86afc4982199ec81b18899466b0e
3,640,014
from typing import Optional from typing import Union from typing import Sequence def phq(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame: """Compute the **Patient Health Questionnaire (Depression) – 9 items (PHQ-9)**. The PHQ-9 is a measure for depression. .. note:: This implementation assumes a score range of [1, 4]. Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range beforehand. Parameters ---------- data : :class:`~pandas.DataFrame` dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or a complete dataframe if ``columns`` parameter is supplied. columns : list of str or :class:`pandas.Index`, optional list with column names in correct order. This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is passed as ``data``. Returns ------- :class:`~pandas.DataFrame` PHQ9 score Raises ------ :exc:`~biopsykit.utils.exceptions.ValidationError` if number of columns does not match :exc:`~biopsykit.utils.exceptions.ValueRangeError` if values are not within the required score range References ---------- Löwe, B., Spitzer, R.L., Zipfel, S., Herzog, W., 2002. Gesundheitsfragebogen für Patienten (PHQ-D). *Manual und Testunterlagen*. 2. Auflage """ score_name = "PHQ9" score_range = [0, 3] # create copy of data data = data.copy() if columns is not None: # if columns parameter is supplied: slice columns from dataframe _assert_has_columns(data, [columns]) data = data.loc[:, columns] _assert_num_columns(data, 9) _assert_value_range(data, score_range) return pd.DataFrame(data.sum(axis=1), columns=[score_name])
73b925b29a51b7f0575b3449b015d41d3287ca35
3,640,015
from random import randint


def mbc_choose_any_program(table_path):
    """
    Randomly select one item of MBCRadioProgramTable.

    :param table_path:
    :return:
    """
    table = playlist.MBCRadioProgramTable(table_path=table_path)
    programs = list(filter(lambda x: x.playlist_slug, table.programs))
    if programs:
        # Only draw a random index when there is at least one program,
        # otherwise randint(0, -1) would raise a ValueError.
        random_id = randint(0, len(programs) - 1)
        return programs[random_id]
397c56f4a4d79bf3cd2ede5eba13414fcb1836ae
3,640,016
def logout_view(request):
    """Logout a user."""
    logout(request)
    return redirect('users:login')
e14292c1fc78d8fb6f395129a1b77f141ce93627
3,640,017
import os


def get_file_without_path(file_name, with_extension=False):
    """ get the name of a file without its path """
    base = os.path.basename(file_name)
    if not with_extension:
        base = os.path.splitext(base)[0]
    return base
f6cf8c8003fe24a2b5ed265c3497bc866d201fb2
3,640,018
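A minimal usage sketch for the get_file_without_path record above (the path is illustrative):

assert get_file_without_path("/data/run1/sample.txt") == "sample"
assert get_file_without_path("/data/run1/sample.txt", with_extension=True) == "sample.txt"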
def _cast(vtype, value): """ Cast a table type into a python native type :param vtype: table type :type vtype: string :param value: value to cast :type value: string """ if not vtype: return None if isinstance(value, str): return_value = value.strip() if return_value == "": return_value = None elif 'float' in vtype.lower(): try: return_value = float(value) except ValueError: return_value = None elif 'int' in vtype.lower(): try: return_value = int(float(value)) except ValueError: return_value = None elif isinstance(value, (float, int, np.int, np.float, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64)): return_value = value else: print("Cannot cast {0}".format(type(value))) return_value = None return return_value
27ffdb0dac7d7e5f092a798630e6b874626a27b2
3,640,019
def L2Norm(inputs, axis=0, num_axes=-1, eps=1e-5, mode='SUM', **kwargs): """L2 Normalization, introduced by `[Liu et.al, 2015] <https://arxiv.org/abs/1506.04579>`_. Parameters ---------- inputs : Tensor The input tensor. axis : int The start axis of stats region. num_axes : int The number of axes of stats region. Default is ``-1`` (Till End). eps : float The eps. mode : str The mode on computing normalizer. ``SUM`` or ``MEAN``. Returns ------- Tensor The output tensor. """ CheckInputs(inputs, 1) arguments = ParseArguments(locals()) output = Tensor.CreateOperator(nout=1, op_type='L2Norm', **arguments) if inputs.shape is not None: output.shape = inputs.shape[:] return output
20c0a1677874adfbd6c24cb6f662d1c0dc6c93f1
3,640,020
import inspect
from typing import Sequence, Union


def has_option(obj, keywords: Union[str, Sequence[str]]) -> bool:
    """
    Return a boolean indicating whether the given callable `obj` has the `keywords` in its signature.
    """
    if not callable(obj):
        return False
    sig = inspect.signature(obj)
    return all(key in sig.parameters for key in ensure_tuple(keywords))
de2c6d4d458a8db6f0ff555d04570897e3440c10
3,640,021
import struct

import mmh3


def create_element_rand(element_id):
    """
    This function simply returns a 32 bit hash of the element id.
    The result value should be used as a random priority.

    :param element_id: The element unique identifier
    :return: a random integer
    """
    if isinstance(element_id, int):
        try:
            obj = struct.pack('i', element_id)
        except struct.error:
            # Python 3 ints are unbounded; fall back to a 64-bit pack for values
            # that do not fit in 32 bits (this replaces the old Python 2 `long` branch).
            obj = struct.pack('q', element_id)
    elif isinstance(element_id, str):
        obj = element_id
    else:
        raise TypeError('Unknown type: pack it yourself with struct')
    return int(mmh3.hash(obj))
095ced835235bec4b042a8a8b5eb3c44e967390e
3,640,022
def _ul_add_action(actions, opt, res_type, stderr):
    """Create new and append it to the actions list"""
    r = _UL_RES[opt]
    if r[0] is None:
        _ul_unsupported_opt(opt, stderr)
        return False
    # we always assume the 'show' action to be requested and eventually change it later
    actions.append(
        [
            _ul_show,
            {"res": r[0], "res_type": res_type, "desc": r[3], "unit": r[4], "opt": opt},
        ]
    )
    return True
098492f8bd875c611650fa773fd308d1097bcd18
3,640,023
import time
from typing import Any, List


def _pack(cmd_id: int, payload: List[Any], privkey: datatypes.PrivateKey) -> bytes:
    """Create and sign a UDP message to be sent to a remote node.

    See https://github.com/ethereum/devp2p/blob/master/rlpx.md#node-discovery for information on
    how UDP packets are structured.
    """
    cmd_id = to_bytes(cmd_id)
    expiration = rlp.sedes.big_endian_int.serialize(int(time.time() + EXPIRATION))
    encoded_data = cmd_id + rlp.encode(payload + [expiration])
    signature = privkey.sign_msg(encoded_data)
    message_hash = keccak(signature.to_bytes() + encoded_data)
    return message_hash + signature.to_bytes() + encoded_data
11ade65dc4ceceab509d13456845d37671b8abfb
3,640,024
import numpy as np


def clip_boxes(boxes, shape):
    """
    :param boxes: (...)x4, float
    :param shape: h, w
    """
    orig_shape = boxes.shape
    boxes = boxes.reshape([-1, 4])
    h, w = shape
    boxes[:, [0, 1]] = np.maximum(boxes[:, [0, 1]], 0)
    boxes[:, 2] = np.minimum(boxes[:, 2], w)
    boxes[:, 3] = np.minimum(boxes[:, 3], h)
    return boxes.reshape(orig_shape)
60dbdb4d3aee5a4a0f7dc076ad6d8415ddc82ba0
3,640,025
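A small numpy sketch for the clip_boxes record above (box values and image shape are illustrative):

import numpy as np

boxes = np.array([[-5.0, 10.0, 700.0, 500.0]])
clipped = clip_boxes(boxes, (480, 640))  # shape is (h, w)
assert (clipped == np.array([[0.0, 10.0, 640.0, 480.0]])).all()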
def loss_fn( models, backdoored_x, target_label, l2_factor=settings.BACKDOOR_L2_FACTOR, ): """loss function of backdoor model loss_student = softmax_with_logits(teacher(backdoor(X)), target) + softmax_with_logits(student(backdoor(X)), target) + L2_norm(mask_matrix) Args: models(Python dict): teacher, student, backdoor models x: a tf tensor of data, size = (batch_size, H, W, C) target_label: a tf tensor of target label, one-hot encoded, size = (batch_size, class_num) Returns: loss_backdoor: a tf tensor indicates loss of backdoor model """ logits_from_teacher = models["teacher"](backdoored_x) logits_from_student = models["student"](backdoored_x) loss_backdoor = tf.nn.softmax_cross_entropy_with_logits( labels=target_label, logits=logits_from_teacher ) loss_backdoor += tf.nn.softmax_cross_entropy_with_logits( labels=target_label, logits=logits_from_student ) loss_backdoor += ( tf.nn.l2_loss(models["backdoor"].get_mask() * models["backdoor"].get_trigger()) * l2_factor ) return tf.math.reduce_mean(loss_backdoor)
d13fa05f4f5ac7adbebb62a48774cfc552c3d42e
3,640,026
from .models import OneTimePassword, compute_expires_at def create_otp(slug, related_objects=None, data=None, key_generator=None, expiration=None, deactivate_old=False): """ Create new one time password. One time password must be identified with slug. Args: slug: string for OTP identification. related_objects: model instances related with OTP. data: data which will be stored with OTP in the JSON format. key_generator: OTP key generator. expiration: OTP expiration time in seconds, default expiration will be used for None value. deactivate_old: deactivate old tokens with the same slug ane related objects. Returns: OTP instance """ if deactivate_old: deactivate_otp(slug, related_objects=related_objects) key_generator = settings.OTP_DEFAULT_KEY_GENERATOR if key_generator is None else key_generator key_generator = import_string(key_generator) if isinstance(key_generator, str) else key_generator otp = OneTimePassword.objects.create( slug=slug, key_generator=key_generator, expires_at=compute_expires_at(expiration or settings.OTP_DEFAULT_AGE), data=data ) if related_objects: otp.related_objects.add(*related_objects) return otp
20cbfd88b676ff0357fa5a37a51a3ffa24b4f76b
3,640,027
def get_pod_from_dn(dn):
    """
    This parses the pod from a dn designator. They look like this:
    topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
    """
    pod = POD_REGEX.search(dn)
    if pod:
        return pod.group(1)
    else:
        return None
23b790bf7b216239916ba86829bb5bee0e346a4a
3,640,028
import trace def extend_table(rows, table): """ appends the results of the array to the existing table by an objectid """ try: dtypes = np.dtype( [ ('_ID', np.int), ('DOM_DATE', '|S48'), ('DOM_DATE_CNT', np.int32), ('DOM_DATE_PER', np.float64), ('DOM_YEAR', np.int32), ('DOM_YEAR_CNT', np.int32), ('DOM_YEAR_PER', np.float64), ('OLDEST_DATE', '|S1024'), ('NEWEST_DATE', '|S1024'), ('NO_DATE_CNT', np.int32), ('NO_DATE_PER', np.float64), ('PCT_2_YEAR', np.float64), ('PCT_5_YEAR', np.float64), ('PCT_10_YEAR', np.float64), ('PCT_15_YEAR', np.float64), ('PCT_15_PLUS_YEAR', np.float64), ('FEATURE_CNT', np.int32), ('CURRENCY_SCORE', np.int32) ] ) array = np.array(rows, dtypes) da.ExtendTable(table, "OID@", array, "_ID", False) return table except: line, filename, synerror = trace() raise FunctionError( { "function": "", "line": line, "filename": filename, "synerror": synerror, "arc" : str(arcpy.GetMessages(2)) } )
fc34b897d7e23e8833a63b0fd7ce72cd090f35ab
3,640,029
def drawblock(arr, num_class=10, fixed=False, flip=False, split=False): """ draw images in block :param arr: array of images. format='NHWC'. sequence=[cls1,cls2,cls3,...,clsN,cls1,cls2,...clsN] :param num_class: number of class. default as number of images across height. Use flip=True to set number of width as across width instead :param fixed: force number of number of width == number of height :param flip: flip :param split: set an int to split. currently only support split horizontally :return: blocks of images """ n_im = arr.shape[0] h_im = arr.shape[1] w_im = arr.shape[2] c_im = arr.shape[3] if flip: num_w = num_class num_h = np.ceil(np.float(n_im) / num_w) if not fixed else num_w if fixed and isinstance(fixed, int): num_h = fixed else: num_h = num_class num_w = np.ceil(np.float(n_im) / num_h) if not fixed else num_h if fixed and isinstance(fixed, int): num_w = fixed h_block = (h_im + 2) * num_h - 2 w_block = (w_im + 2) * num_w - 2 newarr = np.zeros((int(h_block), int(w_block), int(c_im)), dtype=np.uint8) for i in xrange(n_im): if i > num_w * num_h - 1: break if flip: wk = i % num_w hk = i // num_w else: hk = i % num_h wk = i // num_h wk = int(wk) hk = int(hk) newarr[hk*(h_im+2):hk*(h_im+2)+h_im, wk*(w_im+2):wk*(w_im+2)+w_im, :] = arr[i] if split: temp = newarr newnh = int(np.ceil(float(num_class) / split)) newh = (h_im + 2) * newnh - 2 neww = int(w_block * split + 2) newarr = np.zeros((newh, neww, int(c_im)), dtype=np.uint8) for i in range(split): if not num_class % split == 0 and i == split - 1: newarr[:-h_im-2, i * w_block+i*2:(i + 1) * w_block+(i+1)*2, :] = temp[i * newh+i*2:, :, :] else: newarr[:, i*w_block+i*2:(i+1)*w_block, :] = temp[i*newh+i*2:(i+1)*newh, :, :] return newarr
221dc90d8a674963221abe11720d23ac92af6225
3,640,030
def with_key(output_key_matcher):
    """Check does it have a key."""
    return output_key_matcher
5bcb64550ce202f66ac43325fe8876249b45c52d
3,640,031
def generatePersistenceManager(inputArgument, namespace = None): """Generates a persistence manager base on an input argument. A persistence manager is a utility object that aids in storing persistent data that must be saved after the interpreter shuts down. This function will interpret the input argument provided and will return an appropriate persistence manager object if possible. inputArgument -- if a True Bool: a generic persistence file will be used. -- if a String: the string will be interpreted as a filename for the persistence file. -- if a utilities.persistenceManager object: the object will be used directly. namespace -- a text string used to specify a namespace for the persistence manager. This allows multiple identical VMs to share a common persistence file. """ if type(inputArgument) == bool and inputArgument: #a True bool was provided as the input argument. Create a new persistence manager that uses a default file. persistenceFilename = "defaultPersistence.vmp" return persistenceManager(persistenceFilename, namespace) elif type(inputArgument) == str: #A string was provided as the persistence manager, so use that string as the filename return persistenceManager(inputArgument, namespace) elif type(inputArgument) == persistenceManager: # a persistenceManager object was provided, so use that. if namespace: inputArgument.namespace = namespace #update the namespace used by the persistence manager return inputArgument else: return None
a1042764974d1b8030c6b6dd2add444bea9e521c
3,640,032
def get_app(): """ Creates a Sanic application whose routes are documented using the `api` module. The routes and their documentation must be kept in sync with the application created by `get_benchmark_app()`, so that application can serve as a benchmark in test cases. """ app = Sanic("test_api") app.blueprint(swagger_blueprint) @MessageAPI.post(app, "/message") def message(request): data = request.json assert "message" in data return {"message": "Message received."} @app.get("/excluded") @MessageAPI(exclude=True, tag="Excluded") def excluded(request): return {"message": "Excluded."} @ExcludedMessageAPI.delete(app, "/excluded_delete") def excluded_delete(request): return {"message": "Excluded."} @ExcludedMessageAPI.get(app, "/excluded_get") def excluded_get(request): return {"message": "Excluded."} @ExcludedMessageAPI.head(app, "/excluded_head") def excluded_head(request): return {"message": "Excluded."} @ExcludedMessageAPI.options(app, "/excluded_options") def excluded_options(request): return {"message": "Excluded."} @ExcludedMessageAPI.patch(app, "/excluded_patch") def excluded_patch(request): return {"message": "Excluded."} @ExcludedMessageAPI.post(app, "/excluded_post") def excluded_post(request): return {"message": "Excluded."} @ExcludedMessageAPI.put(app, "/excluded_put") def excluded_put(request): return {"message": "Excluded."} @ExcludedMessageAPI.route(app, "/excluded_route", methods=("GET", "POST")) def excluded_route(request): return {"message": "Excluded."} return app
1f8a11ee404082dcca0c1df91910157e5c169854
3,640,033
import base64 def predict(request): """View to predict output for selected prediction model Args: request (json): prediction model input (and parameters) Returns: json: prediction output """ projects = [{"name":"Erschließung Ob den Häusern Stadt Tengen", "id":101227}, {"name":"Stadtbauamt Bräunlingen Feldweg", "id":101205}] if request.method == "GET": context = {"projects": projects} return render(request, "app/predict.html", context) elif request.method == "POST": with open(image_path, "rb") as image_file: image_data = base64.b64encode(image_file.read()).decode('utf-8') context = {"projects": projects, "image": image_data} return render(request, 'app/predict/index.html', context)
364db414d2c5811df0fe36e516868e0db76f896b
3,640,034
def is_dict(etype) -> bool:
    """ Determine whether etype is a Dict """
    return type(etype) is GenericMeta and etype.__extra__ is dict
fb0e422e08abd3b20611a8817300334d32638b49
3,640,035
import torch from typing import List def hidden_state_embedding(hidden_states: torch.Tensor, layers: List[int], use_cls: bool, reduce_mean: bool = True) -> torch.Tensor: """ Extract embeddings from hidden attention state layers. Parameters ---------- hidden_states Attention hidden states in the transformer model. layers List of layers to use for the embedding. use_cls Whether to use the next sentence token (CLS) to extract the embeddings. reduce_mean Whether to take the mean of the output tensor. Returns ------- Tensor with embeddings. """ hs = [hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer] for layer in layers] hs = torch.cat(hs, dim=1) # type: ignore y = hs.mean(dim=1) if reduce_mean else hs # type: ignore return y
f732e834f9c3437a4a7278aa6b9bfc54589b093b
3,640,036
from datetime import datetime


def is_new_user(day: datetime, first_day: datetime):
    """ Check if user has contributed results to this project before """
    if day == first_day:
        return 1
    else:
        return 0
8da8039d1c8deb5bb4414565d3c9dc19ce15adb6
3,640,037
import numpy as np
import scipy.sparse as sps


def to_ndarray(X):
    """
    Convert to numpy ndarray if not already.
    Right now, this only converts from sparse arrays.
    """
    if isinstance(X, np.ndarray):
        return X
    elif sps.issparse(X):
        print('Converting from sparse type: {}'.format(type(X)))
        return X.toarray()
    else:
        raise ValueError('Unexpected data type: {}'.format(type(X)))
337a78066316f32cf3a4f541d38c78de18750264
3,640,038
import numpy as np


def _2d_gauss(x, y, sigma=2.5 / 60.0):
    """A Gaussian beam"""
    return np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
c010989499682e4847376a162852c9f758907385
3,640,039
def attach_task_custom_attributes(queryset, as_field="task_custom_attributes_attr"): """Attach a json task custom attributes representation to each object of the queryset. :param queryset: A Django projects queryset object. :param as_field: Attach the task custom attributes as an attribute with this name. :return: Queryset object with the additional `as_field` field. """ model = queryset.model sql = """ SELECT json_agg( row_to_json(custom_attributes_taskcustomattribute) ORDER BY custom_attributes_taskcustomattribute.order ) FROM custom_attributes_taskcustomattribute WHERE custom_attributes_taskcustomattribute.project_id = {tbl}.id """ sql = sql.format(tbl=model._meta.db_table) queryset = queryset.extra(select={as_field: sql}) return queryset
584d2f918ae1844beb5cab71318691094de6d56d
3,640,040
import torch def softmax_like(env, *, trajectory_model, agent_model, log=False): """softmax_like :param env: OpenAI Gym environment :param trajectory_model: trajectory probabilistic program :param agent_model: agent's probabilistic program :param log: boolean; if True, print log info """ Qs = torch.as_tensor( [ infer_Q( env, action, trajectory_model=trajectory_model, agent_model=agent_model, log=log, ) for action in range(env.action_space.n) ] ) action_logits = args.alpha * Qs action_dist = Categorical(logits=action_logits) if log: print('policy:') print( tabulate( [action_dist.probs.tolist()], headers=env.actions, tablefmt='fancy_grid', ) ) return action_dist.sample()
7b51e0336399914e357b4dbed0490e93fb22f70a
3,640,041
def bulk_add(packages, user): """ Support bulk add by processing entries like: repo [org] """ added = 0 i = 0 packages = packages.split('\n') num = len(packages) org = None results = str() db.set(config.REDIS_KEY_USER_SLOTNUM_PACKAGE % user, num) results += "Added %s slots.\n" % num orgs_selected = db.hgetall(config.REDIS_KEY_USER_ORGS_SELECTED % user).items() for package in packages: try: # First, try: repo [org] package, org = package.split() for orgsel in orgs_selected: if org == orgsel[1]: get_package_selected(user, package=package, orgset=orgsel[0], slotset=i) results += ("Added %s to slot %s with organization %s.\n" % (package, i + 1, org)) added += 1 i += 1 except: # Next, try: repo try: package = package.split() package = package[0] get_package_selected(user, package=package, slotset=i) results += "Added %s to slot %s.\n" % ( package, i + 1) added += 1 i += 1 except: # Give up pass results += "Added %s packages" % added if added == 0: results += ", check org slots for matching org?\n" else: results += ".\n" return results
7b027b45e6e3385fc3bc3da8916b8322dde7cfda
3,640,042
def laser_heater_to_energy_spread(energy_uJ):
    """
    Returns the rms energy spread induced, in keV.

    Based on fits to measurement in SLAC-PUB-14338
    """
    return 7.15 * sqrt(energy_uJ)
59feb872f0c652e0ef28b0958d2b25c174a79152
3,640,043
def apparent_attenuation(og, fg):
    """Apparent attenuation
    """
    return 100.0 * (float(og) - float(fg)) / float(og)
e22ce07229baa4eacb7388280630d6097e21f364
3,640,044
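A worked example for the apparent_attenuation record above, assuming gravities are passed as gravity points (OG 1.050 -> 50, FG 1.010 -> 10):

assert apparent_attenuation(50, 10) == 80.0  # 100 * (50 - 10) / 50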
import numpy as np


def most_similar(W, vocab, id2word, word, n=15):
    """
    Find the `n` words most similar to the given `word`.

    The provided `W` must have unit vector rows, and must have merged main- and
    context-word vectors (i.e., `len(W) == len(word2id)`).

    Returns a list of word strings.
    """
    assert len(W) == len(vocab)

    word_id = vocab[word][0]
    dists = np.dot(W, W[word_id])
    top_ids = np.argsort(dists)[::-1][:n + 1]

    return [id2word[id] for id in top_ids if id != word_id][:n]
3e13a1e24935c7eacea9973c9af315d0a2a0fca4
3,640,045
import os import json def getRecordsFromDb(): """Return all records found in the database associated with :func:`dbFilePath()`. List of records are cached using an application configuration entry identified by ``_CACHED_RECORDS`` key. See also :func:`openDb`. """ try: records = flask.current_app.config["_CACHED_RECORDS"] except KeyError: records = None database_filepath = dbFilePath() app.logger.info("database_filepath: %s" % database_filepath) if not os.path.isfile(database_filepath): raise IOError(2, 'Database file %s does not exist', database_filepath) database_connection = openDb(database_filepath) cursor = database_connection.cursor() # get record count cursor.execute('select count(1) from _') count = int(cursor.fetchone()[0]) # load db if needed or count has changed if records is None or count != len(records): cursor.execute('select record from _ order by revision desc,build_date desc') records = [json.loads(record[0]) for record in cursor.fetchall()] flask.current_app.config["_CACHED_RECORDS"] = records database_connection.close() return records
881ad1b813019f796fe70c1795c3ad4a4d8ef303
3,640,046
def build_cell(num_units, num_layers, cell_fn, initial_state=None, copy_state=True, batch_size=None, output_dropout_rate=0., input_shape=None, attention_mechanism_fn=None, memory=None, memory_sequence_len=None, alignment_history=False, mode=tf.estimator.ModeKeys.TRAIN, name=None): """" General function to create RNN cells for decoding. Handles multi-layer cases, LSTMs and attention wrappers """ if alignment_history == True: print("a") input() cells = [] for _ in range(num_layers): cell = cell_fn(num_units, dtype=tf.float32, name=name) # build internal variables if input shape provided if input_shape is not None: cell.build(input_shape) # apply dropout if its a tensor or we are in training if ((isinstance(output_dropout_rate, tf.Tensor) or output_dropout_rate > 0 and mode == tf.estimator.ModeKeys.TRAIN)): cell = tf.contrib.rnn.DropoutWrapper( cell, output_keep_prob=1 - output_dropout_rate) cells.append(cell) if num_layers > 1: cell = tf.nn.rnn_cell.MultiRNNCell(cells) else: cell = cells[0] if initial_state is not None and not copy_state: if batch_size is None: batch_size = tf.shape(tf.contrib.framework.nest.flatten(initial_state)[0])[0] zero_state = cell.zero_state(batch_size, tf.float32) initial_state = bridge_state(initial_state, zero_state) if attention_mechanism_fn is not None: attention_mechanism = attention_mechanism_fn( num_units, memory, memory_sequence_len) cell_input_fn = None if isinstance(attention_mechanism, CoverageBahdanauAttention): cell_input_fn = ( lambda inputs, attention: tf.concat([inputs, tf.split(attention, 2, axis=-1)[0]], -1)) cell = tf.contrib.seq2seq.AttentionWrapper( cell, attention_mechanism, output_attention=not isinstance( attention_mechanism, tf.contrib.seq2seq.BahdanauAttention), attention_layer_size=num_units, initial_cell_state=initial_state, alignment_history=alignment_history) if batch_size is None: batch_size = tf.shape(tf.contrib.framework.nest.flatten(initial_state)[0])[0] initial_state = cell.zero_state(batch_size, tf.float32) return (cell, initial_state) if initial_state is not None else cell
85d284ba314bea94ba015f7a85d0ba6685103292
3,640,047
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Use config values to set up a function enabling status retrieval."""
    conf = config[DOMAIN]
    host = conf[CONF_HOST]
    port = conf[CONF_PORT]
    apcups_data = APCUPSdData(host, port)
    hass.data[DOMAIN] = apcups_data

    # It doesn't really matter why we're not able to get the status, just that
    # we can't.
    try:
        apcups_data.update(no_throttle=True)
    except Exception:  # pylint: disable=broad-except
        _LOGGER.exception("Failure while testing APCUPSd status retrieval")
        return False
    return True
ccb2061fe8c36b799e5179f113c380d379ebec9d
3,640,048
import signal def _lagged_coherence_1freq(x, f, Fs, N_cycles=3, f_step=1): """Calculate lagged coherence of x at frequency f using the hanning-taper FFT method""" # Determine number of samples to be used in each window to compute lagged coherence Nsamp = int(np.ceil(N_cycles * Fs / f)) # For each N-cycle chunk, calculate the fourier coefficient at the frequency of interest, f chunks = _nonoverlapping_chunks(x, Nsamp) C = len(chunks) hann_window = signal.hanning(Nsamp) fourier_f = np.fft.fftfreq(Nsamp, 1 / float(Fs)) fourier_f_idx = np.argmin(np.abs(fourier_f - f)) fourier_coefsoi = np.zeros(C, dtype=complex) for i2, c in enumerate(chunks): fourier_coef = np.fft.fft(c * hann_window) fourier_coefsoi[i2] = fourier_coef[fourier_f_idx] # Compute the lagged coherence value lcs_num = 0 for i2 in range(C - 1): lcs_num += fourier_coefsoi[i2] * np.conj(fourier_coefsoi[i2 + 1]) lcs_denom = np.sqrt(np.sum( np.abs(fourier_coefsoi[:-1])**2) * np.sum(np.abs(fourier_coefsoi[1:])**2)) return np.abs(lcs_num / lcs_denom)
8a1cefe6fa2ef87dbc71f3f4449afc4406fa2c5f
3,640,049
from hashlib import md5


def program_hash(p: Program) -> Hash:
    """ Calculate the hash of a program """
    string = ";".join([f'{nm}({str(args)})' for nm, args in p.ops if nm[0] != '_'])
    return md5(string.encode('utf-8')).hexdigest()
f12ed910bc94070f64fe673ddd81925a704c700a
3,640,050
async def get_events(user_creds, client_creds, list_args, filter_func=None): """List events from all calendars according to the parameters given. The supplied credentials dict may be updated if tokens are refreshed. :param user_creds: User credentials from `obtain_user_permission`. :param client_creds: Client credentials from configuration. :param list_args: Arguments to pass to the calendar API's event list function. :param filter_func: Callable that can filter out individual events. The function should return True to include, False to exclude. :raise CredentialsError: if the credentials have not been set up, or if they have expired. """ filter_func = filter_func or no_filter if "access_token" not in user_creds: raise CredentialsError("No access token in user credentials.") async with Aiogoogle(user_creds=user_creds, client_creds=client_creds) as aiogoogle: # Is there a way to cache service discovery? service = await aiogoogle.discover("calendar", "v3") try: calendar_list = await aiogoogle.as_user( service.calendarList.list(), timeout=30 ) _update_user_creds(user_creds, aiogoogle.user_creds) events = [] for calendar_list_entry in calendar_list["items"]: events += await _get_calendar_events( aiogoogle, service, list_args, calendar_list_entry, filter_func, ) return dict(items=sorted(events, key=_event_sort_key_function)) except HTTPError as ex: if "invalid_grant" in str(ex): raise CredentialsError("User credentials rejected.") from ex raise
00a99194c993c5155a03b985ba46fec84fd82ad7
3,640,051
import logging import pickle def process_file(input_file, input_type, index, is_parallel): """ Process an individual SAM/BAM file. How we want to process the file depends on the input type and whether we are operating in parallel. If in parallel the index must be loaded for each input file. If the input is a BAM file it needs to be read using Pysam, if SAM it can be read directly as a text file. Args: input_file: Path to the input file. input_type: Whether the file is 'bam' or 'sam'. index: If operating in parallel a string to the index file, if not the loaded GTF index dictionary. is_parallel: Whether to operate in parallel. Returns: Dictionary containing alignment statistics for the input file. """ sample_name = input_file.split("/")[-1] logger = logging.getLogger("stats." + sample_name[0:10]) logger.info("Processing " + sample_name + "...") if is_parallel: logger.info("Loading index...") with open(index, "rb") as index_file: loaded_index = pickle.load(index_file) logger.info("Loaded.") else: loaded_index = index if input_type == "sam": logger.info("Parsing SAM file...") with open(input_file) as sam: output_table = gen_stats(sam, input_type, sample_name, loaded_index) elif input_type == "bam": logger.info("Parsing BAM file...") bam = pysam.AlignmentFile(input_file, "rb") output_table = gen_stats(bam, input_type, sample_name, loaded_index) logger.info("Finished " + sample_name) return output_table
a10c6b520fb586f4320f538b91adf7e7add4ace3
3,640,052
def add_dictionaries(coefficients, representatives, p): """ Computes a dictionary that is the linear combination of `coefficients` on `representatives` Parameters ---------- coefficients : :obj:`Numpy Array` 1D array with the same number of elements as `representatives`. Each entry is an integer mod p. representatives : :obj:`list(dict)` List where each entry is a dictionary. The keys on each dictionary are integers, and these might coincide with dictionaries on other entries. p : int(prime) Returns ------- rep_sum : :obj:`dict` Result of adding the dictionaries on `representatives` with `coefficients`. Example ------- >>> import numpy as np >>> p=5 >>> coefficients = np.array([1,2,3]) >>> representatives = [ ... {0:np.array([1,3]), 3:np.array([0,0,1])}, ... {0:np.array([4,3]),2:np.array([4,5])}, ... {3:np.array([0,4,0])}] >>> add_dictionaries(coefficients, representatives, p) {0: array([4, 4]), 3: array([0, 2, 1]), 2: array([3, 0])} """ rep_sum = {} for i, rep in enumerate(representatives): for spx_idx in iter(rep): if spx_idx not in rep_sum: rep_sum[spx_idx] = (coefficients[i] * rep[spx_idx]) % p else: rep_sum[spx_idx] = (rep_sum[spx_idx] + coefficients[i] * rep[ spx_idx]) % p # end else # end for # end for # Find simplices where expression is zero zero_simplices = [] for spx_idx in iter(rep_sum): if not np.any(rep_sum[spx_idx]): zero_simplices.append(spx_idx) # end if # end for # If an entry is zero, delete it for spx_idx in zero_simplices: del rep_sum[spx_idx] # end for return rep_sum
ffdb894b11509a72bc6baadc4c8c0d0d15f98110
3,640,053
def dropsRowsWithMatchClassAndDeptRemainderIsZero(df, Col, RemainderInt, classToShrink):
    """
    Takes as input a dataframe, a column, a remainder integer, and a class within the column.
    Returns the dataframe minus the rows that match the classToShrink in the Col and have a
    depth from the DEPT col with a remainder of zero.
    """
    print("original length of dataframe = ", len(df))
    df_new = df.drop(df[(df[Col] == classToShrink) & (df.index % 10 != 0)].index)
    print("length of new dataframe after dropping rows = ", len(df_new))
    print("number of rows dropped = ", len(df) - len(df_new))
    print("length of 0 class is :", len(df_new[df_new[Col] == classToShrink]))
    return df_new
f88ec5e8293d753defe0a6d31f083e52218011ba
3,640,054
import sys

from meerschaum.config._paths import (
    PLUGINS_RESOURCES_PATH, PLUGINS_ARCHIVES_RESOURCES_PATH, PLUGINS_INIT_PATH
)
from meerschaum.utils.warnings import error, warn as _warn
from meerschaum.utils.packages import attempt_import


def import_plugins(
    plugins_to_import: Union[str, List[str], None] = None,
    warn: bool = True,
) -> Union['ModuleType', Tuple['ModuleType', None]]:
    """
    Import the Meerschaum plugins directory.

    :param plugins_to_import:
        If provided, only import the specified plugins.
        Otherwise import the entire plugins module. May be a string, list, or `None`.
        Defaults to `None`.
    """
    global __path__
    PLUGINS_RESOURCES_PATH.mkdir(parents=True, exist_ok=True)
    PLUGINS_INIT_PATH.touch()
    _locks['__path__'].acquire()
    _locks['sys.path'].acquire()

    if isinstance(plugins_to_import, str):
        plugins_to_import = [plugins_to_import]

    if str(PLUGINS_RESOURCES_PATH.parent) not in sys.path:
        sys.path.insert(0, str(PLUGINS_RESOURCES_PATH.parent))
    if str(PLUGINS_RESOURCES_PATH.parent) not in __path__:
        __path__.append(str(PLUGINS_RESOURCES_PATH.parent))

    if not plugins_to_import:
        try:
            import plugins
        except ImportError as e:
            warn(e)
            plugins = None
    else:
        plugins = attempt_import(
            *[('plugins.' + p) for p in plugins_to_import],
            install=False,
            warn=True,
            lazy=False,
            venv=None,
        )
    if plugins is None and warn:
        _warn(f"Failed to import plugins.", stacklevel=3)

    if str(PLUGINS_RESOURCES_PATH.parent) in sys.path:
        sys.path.remove(str(PLUGINS_RESOURCES_PATH.parent))

    _locks['__path__'].release()
    _locks['sys.path'].release()
    return plugins
10e434c64c9f32a857cf074bef2e8bce821f00d0
3,640,055
import re from datetime import datetime def _opendata_to_section_meeting(data, term_year): """Converts OpenData class section info to a SectionMeeting instance. Args: data: An object from the `classes` field returned by OpenData. term_year: The year this term is in. """ date = data['date'] days = [] if date['weekdays']: days = re.findall(r'[A-Z][a-z]?', date['weekdays'].replace('U', 'Su')) # TODO(david): Actually use the term begin/end dates when we get nulls date_format = '%m/%d/%Y' start_date = None end_date = None if date['start_date']: start_date = date['start_date'] + '/' + str(term_year) start_date = datetime.strptime(start_date, date_format) if date['end_date']: end_date = date['end_date'] + '/' + str(term_year) end_date = datetime.strptime(end_date, date_format) time_format = '%H:%M' # TODO(david): DRY-up start_seconds = None if date['start_time']: start_time = datetime.strptime(date['start_time'], time_format) start_seconds = (start_time - start_time.replace(hour=0, minute=0, second=0)).seconds end_seconds = None if date['end_time']: end_time = datetime.strptime(date['end_time'], time_format) end_seconds = (end_time - end_time.replace(hour=0, minute=0, second=0)).seconds meeting = m.SectionMeeting( start_seconds=start_seconds, end_seconds=end_seconds, days=days, start_date=start_date, end_date=end_date, building=data['location']['building'], room=data['location']['room'], is_tba=date['is_tba'], is_cancelled=date['is_cancelled'], is_closed=date['is_closed'], ) if data['instructors']: last_name, first_name = data['instructors'][0].split(',') prof_id = m.Professor.get_id_from_name(first_name, last_name) if not m.Professor.objects.with_id(prof_id): m.Professor(id=prof_id, first_name=first_name, last_name=last_name).save() meeting.prof_id = prof_id return meeting
bdbd2160d61732e3d33357f3f65489ae004fd1aa
3,640,056
import json

import requests


def get_token():
    """ returns a session token from the internal API. """
    auth_url = '%s/sessions' % local_config['INTERNAL_API_BASE_URL']
    auth_credentials = {'eppn': 'worker@pebbles',
                        'password': local_config['SECRET_KEY']}
    try:
        r = requests.post(auth_url, auth_credentials, verify=local_config['SSL_VERIFY'])
        return json.loads(r.text).get('token')
    except:
        return None
da875c11dd887a895fe6c133cba3d30e3b73082c
3,640,057
def setlist(L):
    """
    list[alpha] -> set[alpha]
    """
    # E : set[alpha]
    E = set()
    # e : alpha
    for e in L:
        E.add(e)
    return E
7607d3d47ea5634773298afaea12d03759c0f1d4
3,640,058
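A trivial usage sketch for the setlist record above:

assert setlist([1, 2, 2, 3, 3, 3]) == {1, 2, 3}  # duplicates collapse into the set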
from typing import List import re def ek_8_fix(alts: List[str]) -> List[str]: """ Replace ek, 8 patterns in text. This is google ASR specifc. Google gets confused between 1 and 8. Therefore if alternatives only contain 8 and 1, we change everything to 8 pm. TODO: Another really structurally bad piece of logic. """ count_ek = 0 count_8 = 0 count_other = 0 rule = r"\b(?P<num>\d+) p(.| )?m(.)?" for text in alts: match = re.search(rule, text, flags=re.I | re.U) if match: if match.group("num") == "1": count_ek += 1 elif match.group("num") == "8": count_8 += 1 else: count_other += 1 if count_8 and count_ek and not count_other: output_alts = [] substitute = "8 pm" for text in alts: text = re.sub(rule, substitute, text, flags=re.I | re.U) output_alts.append(text) return output_alts return alts
a1cbfda0db1d049fac703ecf771d6d7b0ae008d6
3,640,059
def _pixel_at(x, y):
    """
    Returns (r, g, b) color code for a pixel with given coordinates (each value is
    in 0..256 limits)
    """
    screen = QtGui.QGuiApplication.primaryScreen()
    color = screen.grabWindow(0, x, y, 1, 1).toImage().pixel(0, 0)
    return ((color >> 16) & 0xFF), ((color >> 8) & 0xFF), (color & 0xFF)
62341d5d7edc3529b5184babddf475bc35f407bf
3,640,060
import time
from calendar import timegm
from datetime import datetime, timedelta


def parse_tibia_time(tibia_time: str) -> datetime:
    """Gets a time object from a time string from tibia.com"""
    tibia_time = tibia_time.replace(",", "").replace("&#160;", " ")
    # Getting local time and GMT
    t = time.localtime()
    u = time.gmtime(time.mktime(t))
    # UTC Offset
    local_utc_offset = ((timegm(t) - timegm(u)) / 60 / 60)
    # Extracting timezone
    tz = tibia_time[-4:].strip()
    try:
        # Convert time string to time object
        # Removing timezone cause CEST and CET are not supported
        t = datetime.strptime(tibia_time[:-4].strip(), "%b %d %Y %H:%M:%S")
    except ValueError:
        log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time))
        return None
    # Getting the offset
    if tz == "CET":
        utc_offset = 1
    elif tz == "CEST":
        utc_offset = 2
    else:
        log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time))
        return None
    # Add/subtract hours to get the real time
    return t + timedelta(hours=(local_utc_offset - utc_offset))
da9e8f4a9b8a94161d215ff1119d8510de57b434
3,640,061
import numpy as np


def a3v(V: Vector3) -> np.ndarray:
    """Converts vector3 to numpy array.

    Arguments:
        V {Vector3} -- Vector3 class containing x, y, and z.

    Returns:
        np.ndarray -- Numpy array with the same contents as the vector3.
    """
    return np.array([V.x, V.y, V.z])
f32476c613a8032bf7119d5b99a89e72c56628d2
3,640,062
def _p_value_color_format(pval):
    """Auxiliary function to set p-value color -- green or red."""
    color = "green" if pval < 0.05 else "red"
    return "color: %s" % color
ae58986dd586a1e6cd6b6281ff444f18175d1d32
3,640,063
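A minimal usage sketch for the _p_value_color_format record above:

assert _p_value_color_format(0.01) == "color: green"  # significant at the 0.05 level
assert _p_value_color_format(0.20) == "color: red"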
import xarray as xr


def rms(da, dim=None, dask='parallelized', keep_attrs=True):
    """
    Reduces a dataarray by calculating the root mean square along the dimension dim.
    """
    # TODO If dim is None then take the root mean square along all dimensions?
    if dim is None:
        raise ValueError('Must supply a dimension along which to calculate rms')

    rms = xr.apply_ufunc(_rms_gufunc, da, input_core_dims=[[dim]],
                         dask=dask, output_dtypes=[da.dtype],
                         keep_attrs=keep_attrs)

    # Return the name of the da as variable_rms
    rms.name = str(da.name) + '_rms'
    return rms
c34575469fffb3ad1099a05b66acb31320e8f7c4
3,640,064
def generator(seed): """ build the generator network. """ weights_initializer = tf.truncated_normal_initializer(stddev=0.02) # fully connected layer to upscale the seed for the input of # convolutional net. target = tf.contrib.layers.fully_connected( inputs=seed, num_outputs=4 * 4 * 256, activation_fn=tf.nn.relu, normalizer_fn=None, weights_initializer=weights_initializer, scope='g_project') # reshape to images target = tf.reshape(target, [-1, 4, 4, 256]) # transpose convolution to upscale for layer_idx in range(4): if layer_idx == 3: num_outputs = 1 kernel_size = 32 stride = 1 # arXiv:1511.06434v2 # use tanh in output layer activation_fn = tf.nn.tanh # arXiv:1511.06434v2 # use batch norm except the output layer normalizer_fn = None else: num_outputs = 2 ** (6 - layer_idx) kernel_size = 5 stride = 2 # arXiv:1511.06434v2 # use ReLU activation_fn = tf.nn.relu # arXiv:1511.06434v2 # use batch norm normalizer_fn = tf.contrib.layers.batch_norm target = tf.contrib.layers.convolution2d_transpose( inputs=target, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, padding='SAME', activation_fn=activation_fn, normalizer_fn=normalizer_fn, weights_initializer=weights_initializer, scope='g_conv_t_{}'.format(layer_idx)) return target
93258f49ba0fc7d7d03507bdc7dc413b2a9e23d5
3,640,065
from typing import Callable
import logging


def solve_fxdocc_root(iws, e_onsite, concentration,
                      hilbert_trafo: Callable[[complex], complex],
                      beta: float, occ: float = None,
                      self_cpa_iw0=None, mu0: float = 0,
                      weights=1, n_fit=0, restricted=True,
                      **root_kwds) -> RootFxdocc:
    """Determine the CPA self-energy by solving the root problem for fixed `occ`.

    Parameters
    ----------
    iws : (N_iw) complex array_like
        Positive fermionic Matsubara frequencies.
    e_onsite : (N_cmpt) float or (..., N_iw, N_cmpt) complex np.ndarray
        On-site energy of the components. This can also include a local
        frequency dependent self-energy of the component sites.
        If multiple non-frequency dependent on-site energies should be
        considered simultaneously, pass an on-site energy with `N_z=1`:
        `e_onsite[..., np.newaxis, :]`.
    concentration : (..., N_cmpt) float array_like
        Concentration of the different components used for the average.
    hilbert_trafo : Callable[[complex], complex]
        Hilbert transformation of the lattice to calculate the coherent
        Green's function.
    beta : float
        Inverse temperature.
    occ : float
        Total occupation.
    self_cpa_iw0, mu0 : (..., N_iw) complex np.ndarray and float, optional
        Starting guess for CPA self-energy and chemical potential.
        `self_cpa_iw0` implicitly contains the chemical potential `mu0`,
        thus they should match.

    Returns
    -------
    root.self_cpa : (..., N_iw) complex np.ndarray
        The CPA self-energy as the root of `self_root_eq`.
    root.mu : float
        Chemical potential for the given occupation `occ`.

    Other Parameters
    ----------------
    weights : (N_iw) float np.ndarray, optional
        Passed to `gftool.density_iw`.
        Residues of the frequencies with respect to the residues of the
        Matsubara frequencies `1/beta`. (default: 1.)
        For Padé frequencies this needs to be provided.
    n_fit : int, optional
        Passed to `gftool.density_iw`.
        Number of additionally fitted moments. If Padé frequencies are used,
        this is typically not necessary. (default: 0)
    restricted : bool, optional
        Whether `self_cpa_z` is restricted to `self_cpa_z.imag <= 0`.
        (default: True)
        Note, that even if `restricted=True`, the imaginary part can get
        negative within tolerance. This should be removed by hand if necessary.
    root_kwds
        Additional arguments passed to `scipy.optimize.root`.
        `method` can be used to choose a solver. `options=dict(fatol=tol)` can
        be specified to set the desired tolerance `tol`.

    Raises
    ------
    RuntimeError
        If unable to find a solution.

    See Also
    --------
    solve_root

    Examples
    --------
    >>> from functools import partial
    >>> beta = 30
    >>> e_onsite = [-0.3, 0.3]
    >>> conc = [0.3, 0.7]
    >>> hilbert = partial(gt.bethe_gf_z, half_bandwidth=1)
    >>> occ = 0.5,

    >>> iws = gt.matsubara_frequencies(range(1024), beta=30)
    >>> self_cpa_iw, mu = gt.cpa.solve_fxdocc_root(iws, e_onsite, conc,
    ...                                            hilbert, occ=occ, beta=beta)

    >>> import matplotlib.pyplot as plt
    >>> __ = plt.plot(iws.imag, self_cpa_iw.imag, '+--')
    >>> __ = plt.axhline(np.average(e_onsite, weights=conc) - mu)
    >>> __ = plt.plot(iws.imag, self_cpa_iw.real, 'x--')
    >>> plt.show()

    check occupation

    >>> gf_coher_iw = hilbert(iws - self_cpa_iw)
    >>> gt.density_iw(iws, gf_coher_iw, beta=beta, moments=[1, self_cpa_iw[-1].real])
    0.499999...

    check CPA

    >>> self_compare = gt.cpa.solve_root(iws, np.array(e_onsite)-mu, conc,
    ...                                  hilbert_trafo=hilbert)
    >>> np.allclose(self_cpa_iw, self_compare, atol=1e-5)
    True

    """
    concentration = np.asarray(concentration)[..., np.newaxis, :]
    e_onsite = np.asarray(e_onsite)
    if self_cpa_iw0 is None:  # static average + 0j to make it complex array
        self_cpa_iw0 = np.sum(e_onsite * concentration, axis=-1) - mu0 + 0j
    self_cpa_iw0, __ = np.broadcast_arrays(self_cpa_iw0, iws)
    self_cpa_nomu = self_cpa_iw0 + mu0  # strip contribution of mu
    # TODO: use on-site energy to estimate m2+mu, which only has to be adjusted by mu
    m1 = np.ones_like(self_cpa_iw0[..., -1].real)

    def _occ_diff(x):
        gf_coher_iw = hilbert_trafo(iws - x)
        m2 = x[..., -1].real  # for large iws, real part should static part
        occ_root = density_iw(iws, gf_iw=gf_coher_iw, beta=beta, weights=weights,
                              moments=np.stack([m1, m2], axis=-1), n_fit=n_fit).sum()
        return occ_root - occ

    mu = chemical_potential(lambda mu: _occ_diff(self_cpa_nomu - mu), mu0=mu0)
    LOGGER.debug("VCA chemical potential: %s", mu)
    # one iteration gives the ATA: average t-matrix approximation
    self_cpa_nomu = self_fxdpnt_eq(self_cpa_nomu - mu, iws, e_onsite - mu,
                                   concentration, hilbert_trafo) + mu
    mu = chemical_potential(lambda mu: _occ_diff(self_cpa_nomu - mu), mu0=mu)
    LOGGER.debug("ATA chemical potential: %s", mu)

    x0, shapes = _join([mu], self_cpa_nomu.real, self_cpa_nomu.imag)
    self_root_eq_ = partial(restrict_self_root_eq if restricted else self_root_eq,
                            z=iws, concentration=concentration,
                            hilbert_trafo=hilbert_trafo)

    def root_eq(mu_selfcpa):
        mu, self_cpa_re, self_cpa_im = _split(mu_selfcpa, shapes)
        self_cpa = self_cpa_re + 1j*self_cpa_im - mu  # add contribution of mu
        self_root = self_root_eq_(self_cpa, e_onsite=e_onsite - mu)
        occ_root = _occ_diff(self_cpa)
        return _join([self_root.size*occ_root], self_root.real, self_root.imag)[0]

    root_kwds.setdefault("method", "krylov")
    LOGGER.debug('Search CPA self-energy root')
    if 'callback' not in root_kwds and LOGGER.isEnabledFor(logging.DEBUG):
        # setup LOGGER if no 'callback' is provided
        root_kwds['callback'] = lambda x, f: LOGGER.debug(
            'Residue: mu=%+6g cpa=%6g', f[0], np.linalg.norm(f[1:])
        )

    sol = optimize.root(root_eq, x0=x0, **root_kwds)
    LOGGER.debug("CPA self-energy root found after %s iterations.", sol.nit)
    if not sol.success:
        raise RuntimeError(sol.message)
    mu, self_cpa_re, self_cpa_im = _split(sol.x, shapes)
    self_cpa = self_cpa_re - mu + 1j*self_cpa_im  # add contribution of mu
    LOGGER.debug("CPA chemical potential: %s", mu.item())
    return RootFxdocc(self_cpa, mu=mu.item())
e0550d50d7d1b69e26982b42f44a540bf408881f
3,640,066
def getn_hidden_area(*args):
    """getn_hidden_area(int n) -> hidden_area_t"""
    return _idaapi.getn_hidden_area(*args)
3265d4258ce6717e8ca23bd10754e1b1648d4217
3,640,067
def cdist(X: DNDarray, Y: DNDarray = None, quadratic_expansion: bool = False) -> DNDarray:
    """
    Calculate Euclidian distance between two DNDarrays:

    .. math:: d(x,y) = \\sqrt{(|x-y|^2)}

    Returns 2D DNDarray of size :math: `m \\times n`

    Parameters
    ----------
    X : DNDarray
        2D array of size :math: `m \\times f`
    Y : DNDarray
        2D array of size :math: `n \\times f`
    quadratic_expansion : bool
        Whether to use quadratic expansion for :math:`\\sqrt{(|x-y|^2)}` (Might yield speed-up)
    """
    if quadratic_expansion:
        return _dist(X, Y, _euclidian_fast)
    else:
        return _dist(X, Y, _euclidian)
14a2368ff0717ff04e0477699ff13d20f359ba0d
3,640,068
def popcount_u8(x: np.ndarray):
    """Return the total bit count of a uint8 array"""
    if x.dtype != np.uint8:
        raise ValueError("input dtype must be uint8")
    count = 0
    # for each item look-up the number of bits in the LUT
    for elem in x.flat:
        count += u8_count_lut[elem]
    return count
e85c07b3df7dcd993c0f1cc7f9dbecd97e8be317
3,640,069
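popcount_u8 above assumes a module-level lookup table u8_count_lut that is not shown in the record. A plausible, self-contained way to build such a 256-entry table and use it (an assumption for illustration, not the original module's code):

import numpy as np

# One entry per possible uint8 value; bin(i).count("1") is the number of set bits.
u8_count_lut = np.array([bin(i).count("1") for i in range(256)], dtype=np.uint8)

x = np.array([0, 1, 3, 255], dtype=np.uint8)
# 0 + 1 + 2 + 8 = 11 set bits in total
assert int(u8_count_lut[x].sum()) == 11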
from scipy import stats


def split_errorRC(tr, t1, t2, q, Emat, maxdt, ddt, dphi):
    """
    Calculates error bars based on an F-test and a given confidence interval q.

    Note
    ----
    This version uses a Fisher transformation for correlation-type misfit.

    Parameters
    ----------
    tr : :class:`~obspy.core.Trace`
        Seismogram
    t1 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        Start time of picking window
    t2 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        End time of picking window
    q : float
        Confidence level
    Emat : :class:`~numpy.ndarray`
        Energy minimization matrix

    Returns
    -------
    err_dtt : float
        Error in dt estimate (sec)
    err_phi : float
        Error in phi estimate (degrees)
    err_contour : :class:`~numpy.ndarray`
        Error contour for plotting
    """
    phi = np.arange(-90.0, 90.0, dphi)*np.pi/180.
    dtt = np.arange(0., maxdt, ddt)

    # Copy trace to avoid overriding
    tr_tmp = tr.copy()
    tr_tmp.trim(t1, t2)

    # Get degrees of freedom
    dof = split_dof(tr_tmp)
    if dof <= 3:
        dof = 3.01
        print(
            "Degrees of freedom < 3. Fixing to DOF = 3, which may " +
            "result in inaccurate errors")
    n_par = 2

    # Fisher transformation
    vmin = np.arctanh(Emat.min())

    # Error contour
    zrr_contour = vmin + (vmin*np.sign(vmin)*n_par/(dof - n_par) *
                          stats.f.ppf(1. - q, n_par, dof - n_par)) *\
        np.sqrt(1./(dof-3))

    # Back transformation
    err_contour = np.tanh(zrr_contour)

    # Estimate uncertainty (q confidence interval)
    err = np.where(Emat < err_contour)
    err_phi = max(
        0.25*(phi[max(err[0])] - phi[min(err[0])])*180./np.pi,
        0.25*dphi)
    err_dtt = max(0.25*(dtt[max(err[1])] - dtt[min(err[1])]), 0.25*ddt)

    return err_dtt, err_phi, err_contour
3155031382c881a15a8a300d6656cae1fc0fee64
3,640,070
import copy


def filter_parts(settings):
    """ Remove grouped components and glyphs that have been deleted or split. """
    parts = []
    temp = copy.copy(settings['glyphs'])
    for glyph in settings['glyphs']:
        name = glyph['class_name']
        if name.startswith("_split") or name.startswith("_group") or name.startswith("_delete"):
            parts.append(glyph)
            temp.remove(glyph)
    settings['glyphs'] = temp

    # Remove from the training glyphs as well
    temp2 = copy.copy(settings['training_glyphs'])
    for glyph in settings['training_glyphs']:
        name = glyph['class_name']
        if name.startswith("_split") or name.startswith("_group") or name.startswith("_delete"):
            temp2.remove(glyph)
    settings['training_glyphs'] = temp2

    return parts
f8d6a59eeeb314619fd4c332e2594dee3543ee9c
3,640,071
def kernel_zz(Y, X, Z):
    """
    Kernel zz for second derivative of the potential generated by a sphere
    """
    radius = np.sqrt(Y ** 2 + X ** 2 + Z ** 2)
    r2 = radius*radius
    r5 = r2*r2*radius
    kernel = (3*Z**2 - r2)/r5
    return kernel
14f36fe23531994cd40c74d26b91477d266ca21c
3,640,072
def getAccentedVocal(vocal, acc_type="g"):
    """ Return the given vowel with a grave or acute accent """
    vocals = {'a': {'g': u'\xe0', 'a': u'\xe1'},
              'e': {'g': u'\xe8', 'a': u'\xe9'},
              'i': {'g': u'\xec', 'a': u'\xed'},
              'o': {'g': u'\xf2', 'a': u'\xf3'},
              'u': {'g': u'\xf9', 'a': u'\xfa'}}
    return vocals[vocal][acc_type]
cfec276dac32e6ff092eee4f1fc84b412c5c915c
3,640,073
def env_initialize(env, train_mode=True, brain_idx=0, idx=0, verbose=False):
    """ Setup environment and return info """
    # get the default brain
    brain_name = env.brain_names[brain_idx]
    brain = env.brains[brain_name]

    # reset the environment
    env_info = env.reset(train_mode=train_mode)[brain_name]

    # examine the state space and action space
    state = env_info.vector_observations[idx]
    state_size = len(state)
    action_size = brain.vector_action_space_size

    if verbose:
        # number of agents in the environment
        print(f'Number of agents: {len(env_info.agents)}')
        print(f'Number of actions: {action_size}')
        print(f'States have length: {state_size}')
        print(f'States look like: {state}')

    return (brain, brain_name, state, action_size, state_size)
3c951a77009cca8c876c36965ec33781dd2c08dd
3,640,074
def lorentzianfit(x, y, parent=None, name=None):
    """Compute Lorentzian fit

    Returns (yfit, params), where yfit is the fitted curve and params are
    the fitting parameters"""
    dx = np.max(x) - np.min(x)
    dy = np.max(y) - np.min(y)
    sigma = dx * 0.1
    amp = fit.LorentzianModel.get_amp_from_amplitude(dy, sigma)
    a = FitParam(_("Amplitude"), amp, 0.0, amp * 1.2)
    b = FitParam(_("Base line"), np.min(y), np.min(y) - 0.1 * dy, np.max(y))
    sigma = FitParam(_("Std-dev") + " (σ)", sigma, sigma * 0.2, sigma * 10)
    mu = FitParam(_("Mean") + " (μ)", xpeak(x, y), np.min(x), np.max(x))
    params = [a, sigma, mu, b]

    def fitfunc(x, params):
        return fit.LorentzianModel.func(x, *params)

    values = guifit(
        x, y, fitfunc, params, parent=parent, wintitle=_("Lorentzian fit"), name=name
    )
    if values:
        return fitfunc(x, values), params
cd221c3483ee7f54ac49baaeaf617ef8ec2b7fa7
3,640,075
def tf_quat(T):
    """ Return quaternion from 4x4 homogeneous transform """
    assert T.shape == (4, 4)
    return rot2quat(tf_rot(T))
7fb2a7b136201ec0e6a92faf2cc030830df46fa5
3,640,076
def solve2(lines):
    """Solve the problem."""
    result = 0
    for group in parse_answers2(lines):
        result += len(group)
    return result
5990b61e713733ba855937b8191b8a8a4f503873
3,640,077
def get_contract_type(timestamp: int, due_timestamp: int) -> str:
    """Get the contract_type

    Given a timestamp and the contract's due_timestamp, return which
    contract_type applies.

    Args:
        timestamp: The target timestamp you want to classify.
        due_timestamp: The due timestamp of the contract.

    Returns:
        The contract_type name.

    Raises:
        RuntimeError: Raised if timestamp is greater than due_timestamp.
    """
    minus = due_timestamp - timestamp
    if minus < 0:
        raise RuntimeError("the timestamp is greater than due_timestamp")
    if minus < 7 * 24 * 60 * 60 * 1000:
        return CONTRACT_TYPE_THIS_WEEK
    elif minus < 14 * 24 * 60 * 60 * 1000:
        return CONTRACT_TYPE_NEXT_WEEK
    else:
        return CONTRACT_TYPE_QUARTER
3b3a084f786c82a5fc1b2a7a051e9005b3df5f0a
3,640,078
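The classification above works purely on millisecond differences: under one week means the current-week contract, under two weeks the next-week contract, otherwise the quarterly one. A quick arithmetic check of those thresholds (illustrative values only; the CONTRACT_TYPE_* constants live elsewhere in the source project):

WEEK_MS = 7 * 24 * 60 * 60 * 1000       # 604_800_000 ms in one week

now = 1_600_000_000_000                 # an arbitrary timestamp in ms
due_this_week = now + WEEK_MS - 1       # less than one week away
due_next_week = now + WEEK_MS + 1       # between one and two weeks away

assert due_this_week - now < WEEK_MS
assert WEEK_MS <= due_next_week - now < 2 * WEEK_MS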
from typing import Any
from typing import Optional
from typing import Union
from typing import Type
from typing import Tuple
from typing import Sequence
from typing import cast


def is_sequence_of(obj: Any,
                   types: Optional[Union[Type[object], Tuple[Type[object], ...]]] = None,
                   depth: Optional[int] = None,
                   shape: Optional[Sequence[int]] = None
                   ) -> bool:
    """
    Test if object is a sequence of entirely certain class(es).

    Args:
        obj: The object to test.
        types: Allowed type(s). If omitted, we just test the depth/shape.
        depth: Level of nesting, ie if ``depth=2`` we expect a sequence of
            sequences. Default 1 unless ``shape`` is supplied.
        shape: The shape of the sequence, ie its length in each dimension.
            If ``depth`` is omitted, but ``shape`` included, we set
            ``depth = len(shape)``.

    Returns:
        bool: ``True`` if every item in ``obj`` matches ``types``.
    """
    if not is_sequence(obj):
        return False

    if shape is None or shape == ():
        next_shape: Optional[Tuple[int]] = None
        if depth is None:
            depth = 1
    else:
        if depth is None:
            depth = len(shape)
        elif depth != len(shape):
            raise ValueError('inconsistent depth and shape')

        if len(obj) != shape[0]:
            return False

        next_shape = cast(Tuple[int], shape[1:])

    for item in obj:
        if depth > 1:
            if not is_sequence_of(item, types, depth=depth - 1,
                                  shape=next_shape):
                return False
        elif types is not None and not isinstance(item, types):
            return False
    return True
3762454785563c7787451efad143547f97ae8994
3,640,079
import os


def anonymize_dicom(dicom_file, patient_name='anonymous',
                    fields_to_anonymize=ANONYMIZATION_FIELDS,
                    fields_to_return=None, path_to_save='.',
                    new_dicom_name='anonymous.dcm'):
    """
    Given a dicom file, alter the given fields, anonymizing the patient
    name separately. Save a new dicom in the given directory with the
    given name
    """
    # having lots of issues with the character encoding
    # changed to python 3, now having more fun
    try:
        # im = dicom.read_file(unicode(dicom_file,'utf-8'))
        im = dicom.read_file(dicom_file)
    except UnicodeDecodeError:
        print("utf-8 codec can't decode byte...filename {}".format(dicom_file))
    except dicom.errors.InvalidDicomError:
        # im = dicom.read_file(unicode(dicom_file,'utf-8'),force=True)
        im = dicom.read_file(dicom_file, force=True)

    if fields_to_return:
        # create dictionary to hold returned fields
        returned_fields = {}.fromkeys(fields_to_return)
        # collect fields to retrieve
        for attr in returned_fields:
            try:
                # expect the field not to exist
                returned_fields[attr] = getattr(im, attr)
            except AttributeError:
                continue

    # now replace fields to anonymize with ''
    for attr in fields_to_anonymize:
        if attr == 'PatientsName':
            set_attr = patient_name
        else:
            set_attr = ''
        try:
            setattr(im, attr, set_attr)
            # print "{} has been set to {}".format(attr, set_attr)
        except AttributeError:
            print("The following attribute not found: {}".format(set_attr))
        except UnboundLocalError:
            print("Can't set attribute: utf-8 codec can't decode byte...filename {}".format(dicom_file))

    # now save the new dicom
    new_name = os.path.join(path_to_save, new_dicom_name)
    im.save_as(new_name)

    if fields_to_return:
        return returned_fields
74b743a49fdde11befe8e5bf43da4d824cd70dba
3,640,080
def _parse_tree_height(sent):
    """
    Gets the height of the parse tree for a sentence.
    """
    children = list(sent._.children)
    if not children:
        return 0
    else:
        return max(_parse_tree_height(child) for child in children) + 1
d6de5c1078701eeeb370c917478d93e7653d7f4f
3,640,081
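_parse_tree_height reads children from a custom spaCy extension (sent._.children); the underlying idea is the usual recursive tree height: a leaf has height 0, any other node is one more than its tallest child. A spaCy-free sketch of the same recursion over a plain adjacency mapping (names are illustrative, not from the source):

def tree_height(children_of, node):
    """Height of `node` in a tree given as a dict of node -> list of children."""
    children = children_of.get(node, [])
    if not children:
        return 0
    return max(tree_height(children_of, child) for child in children) + 1

tree = {"root": ["a", "b"], "a": ["c"]}   # root -> a -> c, root -> b
assert tree_height(tree, "root") == 2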
def pandas_loss_p_g_i_t(c_m, lgd, ead, new):
    """ Distribution of losses at time t. long format (N_MC, G, K, T)."""
    mat_4D = loss_g_i_t(c_m, lgd, ead, new)
    names = ['paths', 'group_ID', 'credit_rating_rank', 'time_steps']
    index = pds.MultiIndex.from_product([range(s) for s in mat_4D.shape], names=names)
    df = pds.DataFrame({'loss_p_g_i_t': mat_4D.flatten()}, index=index)['loss_p_g_i_t']
    df = pds.Series.to_frame(df)
    df['loss_p_g_i_t_ID'] = np.arange(len(df))
    df.insert(0, 'portfolio_ID', 'pilot 1 Bank A', allow_duplicates=False)
    return df
65e9db48eab0a40596b205a7304bd225eb5c93d0
3,640,082
from glob import glob
import os
import sys


def get_file_if_unique(location, ext):
    """Find file if unique for the provided extension."""
    files = glob(os.path.join(location, ext))
    if len(files) == 1:
        return files[0]
    else:
        print("Multiple/No " + ext[1:] + " files found in the working "
              "directory. Specify one please.")
        sys.exit()
38689006199fdedcc5a9d3a2c69fff716d5345a2
3,640,083
def find_available_pacs(pacs, pac_to_unstuck=None, pac_to_super=None, pac_to_normal=None):
    """ Finds the available pacs that are not assigned """
    available_pacs = pacs['mine']
    if pac_to_unstuck is not None:
        available_pacs = [x for x in available_pacs if x['id'] not in pac_to_unstuck.keys()]
    if pac_to_super is not None:
        available_pacs = [x for x in available_pacs if x['id'] not in pac_to_super.keys()]
    if pac_to_normal is not None:
        available_pacs = [x for x in available_pacs if x['id'] not in pac_to_normal.keys()]
    return available_pacs
4b6674fd87db2127d5fffa781431ccc9a9ff775a
3,640,084
async def login_for_access_token(
    form_data: OAuth2PasswordRequestForm = Depends(),
):
    """
    Log in to your account using oauth2 authorization.
    In response we get a JWT authorization token which is used for
    granting access to data
    """
    is_auth, scope = await authenticate_authority(
        form_data.username, form_data.password
    )
    if not is_auth:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
        )
    access_token_expires = timedelta(
        minutes=security_config.ACCESS_TOKEN_EXPIRE_MINUTES
    )
    access_token = create_access_token(
        data={"sub": form_data.username, "scopes": [scope]},
        expires_time=access_token_expires,
    )
    return {"access_token": access_token, "token_type": "bearer"}
441326317f0f13275ad33e369efe419a605ac4eb
3,640,085
def get_plain_expressions(s):
    """Return a list of plain, non-nested shell expressions found in the
    shell string s.

    These are shell expressions that do not further contain a nested
    expression and can therefore be resolved independently.

    For example::

        >>> get_plain_expressions("${_pyname%${_pyname#?}}")
        ['${_pyname#?}']
    """
    return _get_non_nested_expressions(s)
a3b0f6812ffe361e291b28c4273ca7cc975eb1e7
3,640,086
def create_indices(dims):
    """Create lists of indices"""
    return [range(1, dim+1) for dim in dims]
1a83b59eb1ca2b24b9db3c9eec05db7335938cae
3,640,087
def observed_property(property_name, default, cast=None):
    """Default must be immutable."""
    hidden_property_name = "_" + property_name
    # Normalize cast: default to the type of `default`; cast=False disables casting.
    if cast is None:
        cast = type(default)
    elif cast is False:
        cast = lambda x: x

    def getter(self):
        try:
            return getattr(self, hidden_property_name)
        except AttributeError:
            return default

    def deleter(self):
        try:
            delattr(self, hidden_property_name)
        except AttributeError:
            pass

    def setter(self, value):
        value = cast(value)
        if value == default:
            try:
                delattr(self, hidden_property_name)
            except AttributeError:
                pass
        else:
            setattr(self, hidden_property_name, value)

    return property(getter, observed(setter), observed(deleter))
7358557b221b5d4fa18fbd29cd02b47823cfdfe0
3,640,088
from typing import Callable
from io import StringIO


def query_helper(
    source: S3Ref, query: str, dest: S3Ref = None, transform: Callable = None
) -> StringIO:
    """
    query_helper runs the given s3_select query on the given object.
    - The results are saved in an in-memory file (StringIO) and returned.
    - If dest is specified, the file is copied to the provided S3Ref
    - If transform callable is specified, transform is called first with the
      temp file before uploading to the destination s3.
    """
    event_stream = s3.select_object_content(
        Bucket=source.bucket,
        Key=source.key,
        ExpressionType="SQL",
        Expression=query,
        InputSerialization={"JSON": {"Type": "LINES"}},
        OutputSerialization={"JSON": {}},
    )
    # Iterate over events in the event stream as they come
    output = StringIO()
    for s3_select_event in event_stream["Payload"]:
        if "Records" in s3_select_event:
            data = s3_select_event["Records"]["Payload"]
            output.write(data.decode("utf-8"))

    if transform:
        output.seek(0)
        output = transform(output)

    if dest is not None:
        upload(output, dest)

    output.seek(0)
    return output
3670734c76f615fe6deb3dfed8305cfc1740b124
3,640,089
def indicator_selector(row, indicator, begin, end):
    """Return Tons of biomass loss."""
    dasy = {}
    if indicator == 4:
        return row[2]['value']

    for i in range(len(row)):
        if row[i]['indicator_id'] == indicator and row[i]['year'] >= int(begin) and row[i]['year'] <= int(end):
            dasy[str(row[i]['year'])] = row[i]['value']

    return dasy
329411837633f4e28bea4b2b261b6f4149b92fb1
3,640,090
import math


def xy_from_range_bearing(range: float, bearing: float) -> map_funcs.Point:
    """Given a range in metres and a bearing from the camera this returns the
    x, y position in metres relative to the runway start."""
    theta_deg = bearing - google_earth.RUNWAY_HEADING_DEG
    x = CAMERA_POSITION_XY.x + range * math.cos(math.radians(theta_deg))
    y = CAMERA_POSITION_XY.y + range * math.sin(math.radians(theta_deg))
    return map_funcs.Point(x, y)
a2575437b52003660d83b241da13f10687fa4241
3,640,091
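The function above is a polar-to-Cartesian conversion: rotate the bearing into the runway's reference frame, then offset from the camera position. A self-contained sketch with the module constants (CAMERA_POSITION_XY, RUNWAY_HEADING_DEG) replaced by illustrative values:

import math

def xy_from_range_bearing_demo(rng, bearing, cam_xy=(0.0, 0.0), heading_deg=90.0):
    """Polar (range, bearing) to Cartesian (x, y), relative to a reference heading."""
    theta = math.radians(bearing - heading_deg)
    return cam_xy[0] + rng * math.cos(theta), cam_xy[1] + rng * math.sin(theta)

# A bearing equal to the reference heading points straight down the +x axis.
x, y = xy_from_range_bearing_demo(100.0, 90.0)
assert abs(x - 100.0) < 1e-9 and abs(y) < 1e-9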
def flask_get_modules():
    """Return the list of all modules
    ---
    tags:
      - Modules
    responses:
      200:
        description: A list of modules
    """
    db_list = db.session.query(Module).all()
    return jsonify(db_list)
21352458773143f785658488e34f9e486c7f818d
3,640,092
def create_user(username, password):
    """Register a new user if not already registered"""
    if User.query.filter_by(username=username).first():
        raise RuntimeError(f'{username} ja esta cadastrado')
    user = User(username=username, password=generate_password_hash(password))
    db.session.add(user)
    db.session.commit()
    return user
1a50d31b764cce10d0db78141041deafc15f7c40
3,640,093
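create_user stores only a Werkzeug password hash, never the plain password. The standard counterpart for later verification looks like this (Werkzeug's public API; the User model itself is assumed to be defined elsewhere in the source project):

from werkzeug.security import generate_password_hash, check_password_hash

# Hash once at registration time, verify on every login attempt.
hashed = generate_password_hash("s3cret")
assert check_password_hash(hashed, "s3cret")
assert not check_password_hash(hashed, "wrong")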
import matplotlib.colors
import numpy


def _get_mesh_colour_scheme():
    """Returns colour scheme for MESH (maximum estimated size of hail).

    :return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
    :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
    """
    colour_list = [
        [152, 152, 152], [152, 203, 254], [0, 152, 254], [0, 45, 254],
        [0, 101, 0], [0, 152, 0], [0, 203, 0], [254, 254, 50],
        [254, 203, 0], [254, 152, 0], [254, 0, 0], [254, 0, 152],
        [152, 50, 203]
    ]

    for i in range(len(colour_list)):
        colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255

    colour_map_object = matplotlib.colors.ListedColormap(colour_list)
    colour_map_object.set_under(numpy.full(3, 1))

    colour_bounds_mm = numpy.array([
        0.1, 15.9, 22.2, 28.6, 34.9, 41.3, 47.6, 54, 60.3, 65, 70, 75, 80, 85
    ])
    colour_norm_object = matplotlib.colors.BoundaryNorm(
        colour_bounds_mm, colour_map_object.N)

    return colour_map_object, colour_norm_object
4301822297d069a6cc289e72b5bf388ffae01cf4
3,640,094
def index():
    """ Serve index page. """
    try:
        data = get_latest_covid_stats()
    except FailedRequestError as err:
        # Log error response to logger
        logger.debug(
            f"Request to Public Health England COVID-19 API failed: {err}.")
        flash("An error occurred obtaining latest COVID-19 stats from the Public Health England API.")
        return redirect(url_for("error"))
    return render_template("index.html", data=data)
bca737abaeb6891072f64b5a6caa6cf739da4ee2
3,640,095
import os
from functools import reduce


def get_files(directory, include_hidden, include_empty):
    """Returns all FILES in the directory which apply to the filter rules."""
    return (os.path.join(dir_path, filename)
            for dir_path, _, file_names in os.walk(directory)
            for filename in file_names
            if not os.path.islink(os.path.join(dir_path, filename))
            and (include_hidden or
                 reduce(lambda r, d: r and not d.startswith("."),
                        os.path.abspath(os.path.join(dir_path, filename)).split(os.sep),
                        True))
            and (include_empty or os.path.getsize(os.path.join(dir_path, filename)) > 0))
439e74215f284492bd0505af3b49fd285a94e5f0
3,640,096
def _manually_create_user(username, pw):
    """
    Create an *active* user, its server directory, and return its userdata dictionary.
    :param username: str
    :param pw: str
    :return: dict
    """
    enc_pass = server._encrypt_password(pw)
    # Create user directory with default structure (use the server function)
    user_dir_state = server.init_user_directory(username)
    single_user_data = user_dir_state
    single_user_data[server.USER_IS_ACTIVE] = True
    single_user_data[server.PWD] = enc_pass
    single_user_data[server.USER_CREATION_TIME] = server.now_timestamp()
    single_user_data['shared_with_me'] = {}
    single_user_data['shared_with_others'] = {}
    single_user_data['shared_files'] = {}
    server.userdata[username] = single_user_data
    return single_user_data
21d523ae29121697e63460302d8027499b4d896d
3,640,097
def update_geoscale(df, to_scale):
    """
    Updates df['Location'] based on specified to_scale
    :param df: df, requires Location column
    :param to_scale: str, target geoscale
    :return: df, with 5 digit fips
    """
    # code for when the "Location" is a FIPS based system
    if to_scale == 'state':
        df.loc[:, 'Location'] = df['Location'].apply(lambda x: str(x[0:2]))
        # pad zeros
        df.loc[:, 'Location'] = df['Location'].apply(
            lambda x: x.ljust(3 + len(x), '0') if len(x) < 5 else x)
    elif to_scale == 'national':
        df.loc[:, 'Location'] = US_FIPS
    return df
e62083f176cd749a88b2e73774e70140c6c5b9ac
3,640,098
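The state-level branch above keeps the first two digits of a county FIPS code and right-pads back to five digits with zeros. A small illustrative check of that transformation on a throwaway DataFrame (example values only):

import pandas as pd

df = pd.DataFrame({"Location": ["06037", "36061"]})   # Los Angeles, New York counties
df["Location"] = df["Location"].apply(lambda x: str(x[0:2]))
df["Location"] = df["Location"].apply(lambda x: x.ljust(3 + len(x), "0") if len(x) < 5 else x)
assert df["Location"].tolist() == ["06000", "36000"]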
import json
import urllib2
from urllib import urlencode


def translate(text, from_lang="auto", to_lang="zh-CN"):
    """translate text, return the result as json"""
    url = 'https://translate.googleapis.com/translate_a/single?'
    params = []
    params.append('client=gtx')
    params.append('sl=' + from_lang)
    params.append('tl=' + to_lang)
    params.append('hl=en-US')
    params.append('dt=t')
    params.append('dt=bd')
    params.append('dj=1')
    params.append('source=input')
    params.append(urlencode({'q': text}))
    url += '&'.join(params)

    request = urllib2.Request(url)
    browser = "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"
    request.add_header('User-Agent', browser)
    response = urllib2.urlopen(request)
    return json.loads(response.read().decode('utf8'))
944a5a90f60d8e54c402100e512bbce2bbb407c5
3,640,099