Columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
from typing import List


def readOneLineFileWithCommas(filepath: str) -> List[str]:
    """Reads a file that is one line long, separated by commas"""
    try:
        with open(filepath) as fp:
            s: str = fp.readline()
            return s.split(",")
    except OSError as exc:
        # Catch I/O errors only; a bare `except:` would also swallow
        # KeyboardInterrupt and SystemExit.
        raise Exception(f"Failed to open {filepath}") from exc
4c181523192fab0ea01ae5da0883c543565119c6
23,400
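A quick usage sketch for the reader above; the file name and contents are illustrative only. Note that readline() keeps the trailing newline, so the last field carries it unless the caller strips it.

with open("demo.txt", "w") as fp:
    fp.write("a,b,c\n")

print(readOneLineFileWithCommas("demo.txt"))  # ['a', 'b', 'c\n'] - note the trailing newline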
import sqlalchemy as sa
from sqlalchemy import or_  # SQLAlchemy's or_, not operator.or_: or_(*conditions) takes many args
from sqlalchemy.orm import attributes


def package_search(filters, context, limit=None, catalog=False):
    """Search packages with different filters

    Catalog param controls the base query creation. Catalog queries only
    search packages a user can deploy. Non-catalog queries search packages
    a user can edit.

    * Admin is allowed to browse all the packages
    * Regular user is allowed to browse all packages belonging to the user
      tenant and all other packages marked is_public. Also all packages
      should be enabled.
    * Use marker (inside filters param) and limit for pagination:
      The typical pattern of limit and marker is to make an initial limited
      request and then to use the ID of the last package from the response
      as the marker parameter in a subsequent limited request.
    """
    session = db_session.get_session()
    pkg = models.Package
    query = session.query(pkg)

    if catalog:
        # Only show packages one can deploy, i.e. own + public
        query = query.filter(or_(
            pkg.owner_id == context.tenant, pkg.is_public
        ))
    else:
        # Show packages one can edit.
        if not context.is_admin:
            query = query.filter(pkg.owner_id == context.tenant)
        # No else here: admin can edit everything.

    if not filters.get('include_disabled', '').lower() == 'true':
        query = query.filter(pkg.enabled)

    if filters.get('owned', '').lower() == 'true':
        query = query.filter(pkg.owner_id == context.tenant)

    if 'type' in filters.keys():
        query = query.filter(pkg.type == filters['type'].title())

    if 'category' in filters.keys():
        query = query.filter(pkg.categories.any(
            models.Category.name.in_(filters['category'])))

    if 'tag' in filters.keys():
        query = query.filter(pkg.tags.any(
            models.Tag.name.in_(filters['tag'])))

    if 'class_name' in filters.keys():
        query = query.filter(pkg.class_definitions.any(
            models.Class.name == filters['class_name']))

    if 'fqn' in filters.keys():
        query = query.filter(pkg.fully_qualified_name == filters['fqn'])

    if 'search' in filters.keys():
        fk_fields = {'categories': 'Category',
                     'tags': 'Tag',
                     'class_definitions': 'Class'}
        conditions = []

        for attr in dir(pkg):
            if attr.startswith('_'):
                continue
            if isinstance(getattr(pkg, attr),
                          attributes.InstrumentedAttribute):
                search_str = filters['search']
                for delim in ',;':
                    search_str = search_str.replace(delim, ' ')
                for key_word in search_str.split():
                    _word = '%{value}%'.format(value=key_word)
                    if attr in fk_fields.keys():
                        condition = getattr(pkg, attr).any(
                            getattr(models, fk_fields[attr]).name.like(_word))
                        conditions.append(condition)
                    elif isinstance(getattr(pkg, attr)
                                    .property.columns[0].type, sa.String):
                        conditions.append(getattr(pkg, attr).like(_word))
        query = query.filter(or_(*conditions))

    sort_keys = [SEARCH_MAPPING[sort_key] for sort_key in
                 filters.get('order_by', ['name'])]
    marker = filters.get('marker')
    sort_dir = filters.get('sort_dir')

    if marker is not None:  # set marker to real object instead of its id
        marker = _package_get(marker, session)

    query = utils.paginate_query(
        query, pkg, limit, sort_keys, marker, sort_dir)
    return query.all()
0d15d2936f713437e3d9dad794cd07faf1ca3090
23,401
def is_valid(listener_tuple):
    """
    There are a few rules that AWS has when creating listeners; this function
    ensures those rules are met before we try to create or update a listener.

    While these could be caught with boto exception handling, I would rather
    be nice and catch them early before we send them out to AWS. It also
    gives us an opportunity to create nice user warnings.

    This validity check should also happen in the frontend but must also be
    enforced by the server.

    :param listener_tuple:
    """
    current_app.logger.debug(listener_tuple)
    lb_port, i_port, lb_protocol, arn = listener_tuple
    current_app.logger.debug(lb_protocol)
    if lb_protocol.lower() in ['ssl', 'https']:
        if not arn:
            raise InvalidListener
    return listener_tuple
d95db075e302753a373ed4e0efd7a3667dc2ecf3
23,402
import numpy as np


def _jitter_boxes(gt_boxes, jitter=0.05):
    """Randomly jitter ground-truth boxes by up to `jitter` of their width/height."""
    jittered_boxes = gt_boxes.copy()
    ws = jittered_boxes[:, 2] - jittered_boxes[:, 0] + 1.0
    hs = jittered_boxes[:, 3] - jittered_boxes[:, 1] + 1.0
    width_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * ws
    height_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * hs
    jittered_boxes[:, 0] += width_offset
    jittered_boxes[:, 2] += width_offset
    jittered_boxes[:, 1] += height_offset
    jittered_boxes[:, 3] += height_offset

    return jittered_boxes
570fa7a6bd2f898ce1d64dd9f6e666e50251fcf5
23,403
import os


def last_model_path(exp_name):
    """Get the path of the last model in the exp."""
    model_path = os.path.join(constants.ET_LOGS, exp_name, "latest.pth")
    assert os.path.islink(model_path)
    return model_path
8948eb304047540fc988a55a9682c841f356eb72
23,404
def lcm_gcd(a, b):
    """Finds the least common multiple of two integers

    Args:
        a, b: integers greater than or equal to 1
    """
    return a * b // greatest_common_divisor(a, b)
3b23d04164c8e69eee26e48ab2b1a60e8e99fd14
23,405
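A sanity check for lcm_gcd above; `greatest_common_divisor` is not defined in the record, so this sketch assumes it behaves like math.gcd and substitutes that in.

from math import gcd as greatest_common_divisor  # assumed stand-in for the undefined helper

assert lcm_gcd(4, 6) == 12   # gcd(4, 6) = 2, so 4 * 6 // 2 = 12
assert lcm_gcd(7, 5) == 35   # coprime inputs: the lcm is the product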
import ahocorasick_rs


def test_ahocorasick_rs_overlapping(benchmark, test_data):
    """ahocorasick_rs overlapping matches."""
    patterns, haystacks = test_data
    ac = ahocorasick_rs.AhoCorasick(patterns)

    def run():
        for haystack in haystacks:
            x = ac.find_matches_as_strings(haystack, overlapping=True)
        return x

    print(benchmark(run))
3c53369e8006502a5071fb73a75ace4705421a84
23,406
import warnings

import numpy as np


def merge_frames(frames):
    """
    Merge the multiple data files downloaded from the M2M system or the Gold
    Copy THREDDS server into a single xarray data set. Keep track of how many
    files fail to merge.

    :param frames: The data frames to concatenate/merge into a single data set
    :return data: The final, merged data set
    """
    # merge the list of processed data frames into a single data set
    nfiles = len(frames)
    nframes = nfiles
    bad_files = 0
    if nframes > 1:
        # try merging all of the frames into a single data set (some frames
        # may be corrupted, and will be skipped)
        data, fail = _frame_merger(frames[0], frames)

        # if all of the files, except for the first one, failed that would
        # suggest the first file is the problem. try the merge again,
        # resetting the starting frame to skip the first one.
        if nframes - fail == 1:
            data, fail = _frame_merger(frames[1], frames[1:])
            nframes -= 1

        # if we still can't merge the frames, then there probably is something
        # more fundamentally wrong, and trying to account for it here is not
        # going to be possible
        if nframes - 1 - fail == 1:
            message = f"Unable to merge the {len(frames)} files downloaded from the Gold Copy THREDDS server."
            warnings.warn(message)
            return None
        else:
            bad_files = nfiles - nframes + fail
    else:
        # there is just the one
        data = frames[0]

    if bad_files > 0:
        message = "{} of the {} downloaded files failed to merge.".format(bad_files, nfiles)
        warnings.warn(message)

    data = data.sortby(['deployment', 'time'])
    data.attrs['time_coverage_start'] = ('%sZ' % data.time.min().values)
    data.attrs['time_coverage_end'] = ('%sZ' % data.time.max().values)
    data.attrs['time_coverage_resolution'] = ('P%.2fS' % (np.mean(data.time.diff('time').values).astype(float) / 1e9))

    return data
b8b083d8f0e9360df325fbeb812b64fffc8d1d0f
23,407
import re


def sorted_nicely(l):
    """
    Sorts the given iterable in the way humans expect ("natural sort").
    Obtained from:
    https://arcpy.wordpress.com/2012/05/11/sorting-alphanumeric-strings-in-python/

    :param l: The iterable to be sorted
    :return: Sorted iterable
    """
    convert = lambda text: int(text) if text.isdigit() else text
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    return sorted(l, key=alphanum_key)
c2e398e7a654a1a1ec7cc113fcad500beefd876a
23,408
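A minimal demonstration of the natural-sort behavior of sorted_nicely above; the file names are made up.

names = ['file10', 'file2', 'file1']
print(sorted(names))          # ['file1', 'file10', 'file2'] - plain lexicographic order
print(sorted_nicely(names))   # ['file1', 'file2', 'file10'] - numeric runs compare as numbers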
import numpy as np


def run_board(effects: list, audio: np.array, sample_rate: float) -> np.array:
    """Run board on input audio data.

    Args:
        effects (list): List of Pedalboard effects.
        audio (np.array): Input audio data.
        sample_rate (float): Audio sample rate in Hz.

    Returns:
        Output (effected) audio data
    """
    board = Pedalboard(effects, sample_rate=sample_rate)
    return board(audio)
062f7d34aa7eadad5401e64df1e96857606cbcf6
23,409
def html_escape(s):
    """Escape the HTML special characters &, < and > in `s`."""
    s = s.replace('&', '&amp;')  # must run first, or the other entities get double-escaped
    s = s.replace('<', '&lt;')
    s = s.replace('>', '&gt;')
    return s
eb47ba4d4651763cb74f081095b78d53ee9bebc1
23,410
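With the entity semicolons restored (the original emitted '&amp' etc., which is invalid entity syntax), the function behaves as expected; for reference, the standard library's html.escape covers the same characters plus quotes.

import html

assert html_escape('a < b & c') == 'a &lt; b &amp; c'
print(html.escape('a < b & "c"'))  # a &lt; b &amp; &quot;c&quot; - stdlib also escapes quotes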
def model_query(context, model, *args, **kwargs):
    """Query helper.

    :param context: context to query under
    :param session: if present, the session to use
    """
    session = kwargs.get('session') or object_sqla.get_session()
    query = session.query(model, *args)
    return filter_by_project(context, query)
c6e5fb09b7e9a4d85ab6c6abc1e03e227010591f
23,411
import tensorflow as tf


def bert_dropout_model(num_classes,
                       bert_config,
                       use_mc_dropout_mha=False,
                       use_mc_dropout_att=False,
                       use_mc_dropout_ffn=False,
                       use_mc_dropout_output=False,
                       channel_wise_dropout_mha=False,
                       channel_wise_dropout_att=False,
                       channel_wise_dropout_ffn=False):
    """Creates a BERT classifier model with MC dropout."""
    last_layer_initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)

    # Build encoder model.
    mc_dropout_bert_encoder = get_mc_dropout_transformer_encoder(
        bert_config,
        use_mc_dropout_mha=use_mc_dropout_mha,
        use_mc_dropout_att=use_mc_dropout_att,
        use_mc_dropout_ffn=use_mc_dropout_ffn,
        channel_wise_dropout_mha=channel_wise_dropout_mha,
        channel_wise_dropout_att=channel_wise_dropout_att,
        channel_wise_dropout_ffn=channel_wise_dropout_ffn)

    # Build classification model.
    mc_dropout_bert_model = DropoutBertClassifier(
        mc_dropout_bert_encoder,
        num_classes=num_classes,
        dropout_rate=bert_config.hidden_dropout_prob,
        use_mc_dropout=use_mc_dropout_output,
        initializer=last_layer_initializer)

    return mc_dropout_bert_model, mc_dropout_bert_encoder
6ee1d09b2070e54ba631bd6e1b8e3e453960073a
23,412
def calculate_monthly_sales(year: int, month: int, beer_style: str) -> int:
    """Calculates the sales of a particular type of beer in a given month.

    :param year: the year to look up
    :param month: an int ranging from 1 to 12
    :param beer_style: the style of beer to total
    :return: total_sales
    """
    total_sales = 0
    for item in data:
        if item[2].year == year and item[2].month == month and item[3] == beer_style:
            total_sales += int(item[5])
    return total_sales
fa448a8e9dfb7186652a6dc3000d3a8465320994
23,413
def check_canopy_height(region_info, regional_lookup):
    """Check the regional canopy height."""
    mean_canopy_height = region_info['mean_canopy_height']
    if mean_canopy_height == 'no data':
        mean_canopy_height = 0
    return mean_canopy_height
5f04ad71df7f0b1c9ef73e97bbe99bea1916ae5e
23,414
def annotated_var(prs):
    """Parser for an annotated variable in parentheses.

    The annotation is parsed with `prs`. Parser output is a var token; the
    annotation is stored in attribute 'annotation' of the var token.

    Sample input to parser:
        (x : A)
    """
    def trt(acc):
        v, ann = acc
        if len(ann) > 0:
            return c.copy_token(v, {'annotation': ann[0]})
        return v
    return c.paren(var() + colon_annotation(prs)).treat(trt)
42acdf6eb09952701d17fab73a2ee8fc20c7dc5e
23,415
def action_from_json(project, value):
    """Return an action from the given JSON."""
    json_type = value.get('type')
    for class_ in sftoolbox.engine.action_classes_register:
        if json_type == class_.json_type:
            return class_.from_json(project, value)
    return DummyAction.from_json(project, value)
69658b53e839c7d112b7509e3ecdf57a82de817a
23,416
def get_springer_doi(node):
    """Return the DOI recorded in a Springer 'occurrence' entry of `node`.

    :param node: nested dict/list structure to search
    :return: the DOI string, or '' if none is found
    """
    for elem in find_key(node, 'occurrence'):
        if isinstance(elem, list):
            for sub_elem in elem:
                if isinstance(sub_elem, dict):
                    # dict_values is not subscriptable in Python 3, so materialize it
                    values = list(sub_elem.values())
                    if len(values) == 2 and values[0] == 'DOI':
                        return values[1]
    return ''
ca8773f10e6fed6b41064a5a5ad6717afd540bb5
23,417
def check_versions(versions=None):
    """Check that there are versions to build the changelog from."""
    # A mutable default argument ([]) is shared across calls; use None as the
    # sentinel instead.
    if versions is None:
        versions = []
    if len(versions) == 0:
        raise NotEnoughVersionsError()
    return True
f9c7f81c02f08a867f27f329554ed85eddc34243
23,418
import torch.nn as nn
import torch.nn.init as init


def create_fnet(widths, nfeat, nfeato, orthoinit, llbias):
    """Creates feature-generating network, a multi-layer perceptron.

    Parameters:
        widths: list of widths of hidden layers
        nfeat, nfeato: # input and output channels of the convolution
        orthoinit: whether to use orthogonal weight initialization
        llbias: whether to use bias in the last layer
    """
    fnet_modules = []
    for k in range(len(widths) - 1):
        fnet_modules.append(nn.Linear(widths[k], widths[k + 1]))
        if orthoinit:
            init.orthogonal_(fnet_modules[-1].weight, gain=init.calculate_gain('relu'))
        fnet_modules.append(nn.ReLU(True))
    fnet_modules.append(nn.Linear(widths[-1], nfeat * nfeato, bias=llbias))
    if orthoinit:
        init.orthogonal_(fnet_modules[-1].weight)
    return nn.Sequential(*fnet_modules)
3bdfdd89d77b6ba172e2ac85df191b11e78ab049
23,419
def pytorch_array_setitem(op):
    """Implementation of array_setitem for pytorch."""
    def _impl(array, begin, end, strides, value):
        idx = tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))
        ret = array.clone()
        ret[idx] = value
        return (ret,)
    return _impl, op.inputs[1:]
b0c6504b2c0d1971ec16e5fdf198b20a911d4946
23,420
import numpy as np
import pandas as pd
from statsmodels.tsa.stattools import acf  # assumed source of `acf` used below


def time_series_seasonal_test(x: pd.Series, expected_lags: list):
    """
    Use the autocorrelation coefficients at different lags to estimate the
    seasonal period of the time series. Note: a list of candidate lag values
    must be supplied.

    :param x: time series data, type: Series
    :param expected_lags: candidate lag values to choose from
    :return: the candidate lags sorted by autocorrelation strength
    """
    acf_scores = []
    for lag in expected_lags:
        acf_score = acf(x.values, nlags=lag, fft=False)[-1]
        acf_scores.append(abs(acf_score))
    sorted_idx = np.argsort(acf_scores)
    return [expected_lags[i] for i in sorted_idx]
5c0614b986eb8dfe576821245e80ef0244c70c69
23,421
def comment_like():
    """
    - 1. Check whether the user is logged in
    - 2. Get the request parameters
    - 3. Validate the parameters (non-empty check)
    - 4. Validate the action type
    - 5. Fetch the comment object by comment id
    - 6. Check that the comment object exists
    - 7. According to the action type, like or unlike
    - 8. Return the response
    :return:
    """
    # - 1. Check whether the user is logged in
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="User not logged in")

    # - 2. Get the request parameters
    comment_id = request.json.get("comment_id")
    action = request.json.get("action")

    # - 3. Validate the parameters (non-empty check)
    if not all([comment_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="Missing parameters")

    # - 4. Validate the action type
    if action not in ["add", "remove"]:
        return jsonify(errno=RET.DATAERR, errmsg="Invalid action type")

    # - 5. Fetch the comment object by comment id
    try:
        comment = Comment.query.get(comment_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="Failed to fetch comment")

    # - 6. Check that the comment object exists
    if not comment:
        return jsonify(errno=RET.NODATA, errmsg="Comment does not exist")

    try:
        # - 7. According to the action type, like or unlike
        if action == "add":
            # Check whether the user has already liked the comment
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if not comment_like:
                # Create the like object
                comment_like = CommentLike()
                comment_like.user_id = g.user.id
                comment_like.comment_id = comment_id

                # Save the like object to the database
                db.session.add(comment_like)
                db.session.commit()

                # Increment the like count
                comment.like_count += 1
        else:
            # Check whether the user has already liked the comment
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if comment_like:
                # Remove the like object
                db.session.delete(comment_like)
                db.session.commit()

                # Decrement the like count
                if comment.like_count > 0:
                    comment.like_count -= 1
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="Operation failed")

    # - 8. Return the response
    return jsonify(errno=RET.OK, errmsg="Operation succeeded")
09564653f3d843c7d82e16946507c8a081374ce6
23,422
import os


def open_fits(subject, field, wavelength, size='2x2'):
    """Opens a FITS image of a subject. Can be used as a context handler.

    subject: RGZ subject dict, from the ATLAS survey.
    field: 'elais-s1' or 'cdfs'
    wavelength: 'ir' or 'radio'
    size: Optional. '2x2' or '5x5'.
    -> FITS image file.
    """
    if field not in {'elais-s1', 'cdfs'}:
        raise ValueError('field must be either "elais-s1" or "cdfs".')
    if wavelength not in {'ir', 'radio'}:
        raise ValueError('wavelength must be either "ir" or "radio".')

    cid = subject['metadata']['source']
    filename = '{}_{}.fits'.format(cid, wavelength)
    path = os.path.join(config['data_sources']['{}_fits'.format(field)],
                        size, filename)

    return astropy.io.fits.open(path, ignore_blank=True)
ae101ca51d4a6687cec21d57749faf610850bbb5
23,423
def create_relationships(model_cls, data):
    """
    Create the relationship dict of the specified model class with the data

    :param model_cls:
    :param data:
    :return:
    """
    relationships = model_cls.get_relationships()
    relationship_map = {}
    for key in relationships.keys():
        relationship_cls = relationships[key].mapper.class_
        relationship_kwargs = data.get(key)
        if isinstance(relationship_kwargs, list):  # 1:n
            relationship = []
            for item in relationship_kwargs:
                r_ins = create_instance(relationship_cls, item)
                if r_ins is not None:
                    relationship.append(r_ins)
        else:  # 1:1
            relationship = create_instance(relationship_cls, relationship_kwargs)
        if relationship is not None:
            relationship_map[key] = relationship
    return relationship_map
6ed811b180141190cde5eaa20d4fca817647c970
23,424
import requests


def get_news_items_from_web(url):
    """
    Calls the Athletics News RSS API, parses the resulting response and
    returns a list of parsed news_items to be stored in DynamoDB

    :param url: Url for the RSS API for UBCO Heat
    :return: Parsed news items in a JSON formatted list
    """
    try:
        request_response = requests.get(url).text
        return feedparser.parse(request_response)["entries"]
    except RequestException as e:
        LOGGER.error("Error in network request to RSS Feed")
        detailed_exception(LOGGER)
        return []
aff75310b155475d185f15c5bbaadeda9902aae3
23,425
def get_node_model(manager, handle_id=None, node=None):
    """
    :param manager: Context manager to handle transactions
    :type manager: Neo4jDBSessionManager
    :param handle_id: Nodes handle id
    :type handle_id: str|unicode
    :param node: Node object
    :type node: neo4j.v1.types.Node
    :return: Node model
    :rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
    """
    bundle = get_node_bundle(manager, handle_id, node)
    for label in bundle.get('labels'):
        try:
            classname = '{meta_type}{base}Model'.format(
                meta_type=bundle.get('meta_type'), base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    for label in bundle.get('labels'):
        try:
            classname = '{base}Model'.format(base=label).replace('_', '')
            return getattr(models, classname)(manager).load(bundle)
        except AttributeError:
            pass
    try:
        classname = '{base}Model'.format(base=bundle.get('meta_type'))
        return getattr(models, classname)(manager).load(bundle)
    except AttributeError:
        return models.BaseNodeModel(manager).load(bundle)
a8c42b8e72b6ae96e897bd5c7f5a06b5820b4b56
23,426
from functools import reduce
from math import gcd  # assumed source of `gcd` used below

import numpy as np


def convert_hcp_plane(plane: list) -> np.ndarray:
    """
    Four-index notation to three-index notation for an hcp or rhombohedral plane.

    Args:
        plane (list): four-index notation

    Returns:
        three-index notation of the plane
    """
    u1 = plane[0]
    v1 = plane[1]
    w1 = plane[3]
    plane = [u1, v1, w1]
    if reduce(gcd, plane) != 1:
        index = reduce(gcd, plane)
        plane = [int(round(x / index)) for x in plane]
    return np.array(plane)
aa6d7527a55d8b14bd03b2f6660ed94c8cf760a8
23,427
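Two worked examples for convert_hcp_plane above, using the (h k i l) four-index convention where i = -(h + k) is the redundant index that gets dropped:

print(convert_hcp_plane([1, 1, -2, 0]))  # [1 1 0]
print(convert_hcp_plane([0, 0, 0, 2]))   # [0 0 1] - reduced by the common factor 2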
from sentry.plugins import plugins


def should_process(data):
    """Quick check if processing is needed at all."""
    for plugin in plugins.all(version=2):
        processors = safe_execute(
            plugin.get_event_preprocessors, data=data, _with_transaction=False
        )
        if processors:
            return True

    if should_process_for_stacktraces(data):
        return True

    return False
8e6f013d54ac1e3a0b77f8969a3700c45efdc673
23,428
import gzip
from typing import List, Tuple


def load_fasta_file(input_file: str) -> Tuple[str, List]:
    """
    Load a fasta file into a list of SeqRecords.

    :param input_file: The path to the input fasta file.
    :returns: A tuple of the sequence type ('protein' or 'dna'), and the list
        of SeqRecords.
    """
    if _is_gzipped(input_file):
        openfunc = gzip.open
        mode = 'rt'
    else:
        openfunc = open
        mode = 'r'
    with openfunc(input_file, mode) as handle:
        # First try to read the file as DNA.
        seqs = [x.upper() for x in SeqIO.parse(handle=handle, format='fasta',
                                               alphabet=IUPAC.ambiguous_dna)]
        if not all(_verify_alphabet(x.seq) for x in seqs):
            # Not valid DNA; rewind and retry as protein.
            handle.seek(0)
            seqs = [x.upper() for x in
                    SeqIO.parse(handle=handle, format='fasta',
                                alphabet=HasStopCodon(IUPAC.extended_protein))]
            if not all(_verify_alphabet(x.seq) for x in seqs):
                raise ValueError('Invalid input file (neither DNA nor protein FASTA).')
            return 'protein', seqs
        return 'dna', seqs
8e62e7d7002d74da7a43315785f5ce663b5ba366
23,429
# get_args() is called with no arguments below, so this is presumably
# Megatron-LM's argument helper, not typing.get_args (which the extracted
# snippet wrongly imported).
from megatron import get_args, print_rank_0


def train_valid_test_datasets_provider(train_val_test_num_samples):
    """Build train, valid, and test datasets."""
    args = get_args()

    print_rank_0('> building train, validation, and test datasets '
                 'for GPT3 ...')
    train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
        data_prefix=args.data_path,
        data_impl=args.data_impl,
        splits_string=args.split,
        train_valid_test_num_samples=train_val_test_num_samples,
        seq_length=args.seq_length,
        seed=args.seed,
        skip_warmup=(not args.mmap_warmup))
    print_rank_0("> finished creating GPT2 datasets ...")

    return train_ds, valid_ds, test_ds
06f9532c6d60a3c3858dc08a43070b8aa4d19691
23,430
import requests
from pyquery import PyQuery  # assumed source of PyQuery used below


def get(username, start):
    """
    Second level function to pull up to 50 reviews.

    start - review number to start from
    """
    r = requests.get(
        '{}/user/beers/?start={}&&ba={}&order=dateD&view=R'.format(
            BASE_URL, start, username
        )
    )

    beers = []
    pq = PyQuery(r.text)
    pq = pq('#ba-content')
    pq = pq('table')
    pq = pq('tr')
    for tr in pq[3:]:  # first 3 rows are table headers
        td = tr.getchildren()[1:]  # first column is review star icon
        beers.append(Beer.build_from_xml(td))

    return beers
7aaccda46954b629bad37e0a77f834e5b3f40c27
23,431
def isInContinent(country_name: str, continent: str):
    """Check whether a country is in a given continent.

    Parameters
    ----------
    country_name : str
        The country name
    continent : str
        The continent code (alpha2)

    Returns
    -------
    is_in_continent : int
        binary integer, positive if the country is in the continent

    Examples
    --------
    >>> isInContinent('Gladstone', 'OC')
    1
    """
    try:
        # two-letter country code
        calpha2 = country_name_to_country_alpha2(country_name.strip())
    except KeyError:
        # Some country names in our datasets do not follow the standard
        # available on Wikipedia:
        # https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
        calpha2 = cn_to_ca2[country_name.strip()]
    # e.g. 'EU'
    concode = country_alpha2_to_continent_code(calpha2)
    return int(concode == continent)
5a78e181ace8574baa00eeadd21e7ecea8529f6c
23,432
def encoder_decoder_archi(inputs, is_train):
    """
    Input is assumed to be a 4-D Tensor, with
    [batch_size, phrase_len, 1, features]
    """
    encoder_layers = []
    encoded = inputs
    encoder_layers.append(encoded)
    for i in range(config.encoder_layers):
        encoded = encoder_conv_block(encoded, i, is_train)
        encoder_layers.append(encoded)

    encoder_layers.reverse()

    decoded = encoder_layers[0]
    for i in range(config.encoder_layers):
        decoded = decoder_conv_block(decoded, encoder_layers[i + 1], i, is_train)

    return decoded
6b75ce8a31375173e01ccd7d33078c76aff6d2b8
23,433
def build_dict_conforming_to_schema(schema, **kwargs):
    """
    Given a schema object (for example, TIMESTAMP_SCHEMA from this module) and
    a set of keyword arguments, create a dictionary that conforms to the given
    schema, using the keyword arguments to define the elements of the new
    dict.

    Checks the result to make sure that it conforms to the given schema,
    raising an error if not.

    Returns the new dict conforming to the schema if there are no problems.
    """
    # Check that schema supports a check_match call.
    # Duck typing version of this check:
    if not hasattr(schema, 'check_match'):
        raise ValueError(
            'The given "schema" does not seem to be a schema. It has no '
            '"check_match" method. Given schema: ' + repr(schema))

    # # Strict typing version of this check:
    # # Check that schema_name is a SCHEMA.Object.
    # if not isinstance(schema, schema.Schema):
    #     raise ValueError(
    #         'The first argument must be a schema.Schema object, but is not. '
    #         'Given schema: ' + repr(schema))

    # The return value.
    d = {}
    for key, value in kwargs.items():
        d[key] = value

    schema.check_match(d)

    return d
8971b7c6e1df8fd16a1b0e0946c9f21a3c601512
23,434
import numpy as np
import pandas as pd


def drop_non_channels(overlaps_df, filename):
    """
    Return the overlap dataframe with all non-channel genes dropped and the
    index reset. Save the df as a csv with the filename passed to this
    function.
    """
    df = overlaps_df
    channels_df_dict = {}
    for column in df.columns:
        # For each set of overlaps, drop all the gene names that are not
        # channels. They are replaced by NaNs.
        channels_bool = df.loc[:, column].isin(IUPHAR_Channels_names)
        channels_df_dict[column] = df.loc[channels_bool, column]
    channels_df = pd.DataFrame(channels_df_dict)

    clean_channels_df = channels_df.reset_index(drop=True).copy()
    for column in channels_df.columns:
        # Set all of the rows in this column to NaN so they can be replaced
        # by lists of channel names in each overlap.
        clean_channels_df.loc[:, column] = np.NaN
        channel_names = list(channels_df.loc[:, column].dropna())
        # Put the list of channels in the overlap's row.
        clean_channels_df.loc[0:len(channel_names) - 1, column] = channel_names
    # Save the df
    clean_channels_df.to_csv(filename)
    return clean_channels_df
0cfa7f1ec86328179612c46c6b5f4b787984a7fa
23,435
import os

import numpy as np


def evaluate_all_flights(model, train_flights_dict, val_flights_dict,
                         trial_folder, n_extreme_flights=10):
    """
    Arguments
        model: trained tf model to make the predictions
        train_flights_dict: a dictionary whose key is flight name and value is
            a tuple of (features, labels)
        val_flights_dict: same but for validation flights
        trial_folder: string name of the trial folder "DeepNav_results/trial_###"
        n_extreme_flights: integer number of flights to be separated as best
            or worst; for example, if n_extreme_flights=5 then the best (or
            worst) folder will contain the best (or worst) 5 flights

    return
        flights_summary: a dictionary of two elements (training & validation);
            the value is a 2D list whose columns are
            (flight_duration, max_pos_error, max_vel_error)

    Outputs
        - creates one pdf file containing plots of both prediction and ground
          truth of attitude, velocity and position for each flight. With these
          pdfs, the following folders are populated

            training
            |_ differenced
            |_ reconstructed
               |_ best - worst - other
            validation
            |_ differenced
            |_ reconstructed
               |_ best - worst - other
    """
    # loop on sets, one iteration for training and another for validation
    flights_summary = {}
    set_names = ["training", "validation"]
    for flights_dict, set_name in zip([train_flights_dict, val_flights_dict], set_names):

        # sort flights by name (shorter flight first)
        flights_list = sorted(flights_dict.items())
        total_flights = len(flights_list) - 1

        # dictionary of (flight_name : max_pos_error) pairs, used to extract
        # the best & worst flights
        flights_errors = {}

        # array of shape (time_steps, 3), columns are
        # (flight_duration, max_pos_error, max_vel_error)
        set_summary = []

        for flight_number, one_flight_data in enumerate(flights_list):

            # to speed up experimenting
            # if flight_number > 5:
            #     break

            flight_name = one_flight_data[0]
            print("flight " + str(flight_number) + "/" + str(total_flights) + " : " + flight_name)
            features = one_flight_data[1][0]
            ground_truth_diff = one_flight_data[1][1]
            predictions_diff = model.predict(features)

            # Reconstruct the original signals from differenced signals
            ground_truth_reconstructed = np.cumsum(ground_truth_diff, axis=0)
            predictions_reconstructed = np.cumsum(predictions_diff, axis=0)

            # reconstructed output csv file name
            output_csv_file_nn = os.path.join(trial_folder, set_name, "reconstructed",
                                              "nn_output_csv", flight_name + "_nn.csv")
            # differenced output csv file name
            output_csv_file_nn_diff = os.path.join(trial_folder, set_name, "differenced",
                                                   "nn_output_csv", flight_name + "_nn.csv")

            # save the reconstructed predictions (ground truth already saved
            # by create_dataset.py)
            np.savetxt(output_csv_file_nn, predictions_reconstructed, delimiter=",")

            # save the differenced predictions
            np.savetxt(output_csv_file_nn_diff, predictions_diff, delimiter=",")

            # maximum errors between prediction and ground truth
            max_velocity_error = np.max(np.linalg.norm(ground_truth_reconstructed[:, 0:3]
                                                       - predictions_reconstructed[:, 0:3], axis=1))
            max_position_error = np.max(np.linalg.norm(ground_truth_reconstructed[:, 3:6]
                                                       - predictions_reconstructed[:, 3:6], axis=1))

            # add error to the output file name
            pdf_name = flight_name + "_MPE_" f'{max_position_error:.2f}' + \
                       "_MVE_" f'{max_velocity_error:.2f}' + ".pdf"

            # create a pdf for this flight's differenced signals
            pdf_name_diff = os.path.join(trial_folder, set_name, "differenced", pdf_name)
            flight_pdf_plots(pdf_name_diff, ground_truth_diff, predictions_diff)

            # create a pdf for this flight's reconstructed signals
            pdf_name_recon = os.path.join(trial_folder, set_name, "reconstructed", "other", pdf_name)
            flight_pdf_plots(pdf_name_recon, ground_truth_reconstructed, predictions_reconstructed)

            flights_errors[pdf_name] = max_position_error
            flight_duration = ground_truth_reconstructed.shape[0] * 0.2 / 60
            set_summary.append([int(flight_name[0:4]), flight_duration,
                                max_position_error, max_velocity_error])

        flights_summary[set_name] = set_summary

        # sort the flights by position error (min error first)
        sorted_flights = sorted(flights_errors.items(), key=lambda x: x[1])

        # move the pdfs of best & worst flights of this set to their
        # respective folders
        old_name_base = os.path.join(trial_folder, set_name, "reconstructed", "other")
        best_name_base = os.path.join(trial_folder, set_name, "reconstructed", "best")
        worst_name_base = os.path.join(trial_folder, set_name, "reconstructed", "worst")

        for i in range(n_extreme_flights):
            pdf_name = sorted_flights[i][0]
            old_name = os.path.join(old_name_base, pdf_name)
            new_name = os.path.join(best_name_base, pdf_name)
            os.rename(old_name, new_name)

        for i in range(-n_extreme_flights, 0):
            pdf_name = sorted_flights[i][0]
            old_name = os.path.join(old_name_base, pdf_name)
            new_name = os.path.join(worst_name_base, pdf_name)
            os.rename(old_name, new_name)

    return flights_summary
eafbd5d7276ef503a2d609d3593e50751971af6e
23,436
def _REOM(y, t, pot, l2):
    """
    NAME:
        _REOM
    PURPOSE:
        implements the EOM, i.e., the right-hand side of the differential
        equation
    INPUT:
        y - current phase-space position
        t - current time
        pot - (list of) Potential instance(s)
        l2 - angular momentum squared
    OUTPUT:
        dy/dt
    HISTORY:
        2010-07-20 - Written - Bovy (NYU)
    """
    return [y[1],
            l2 / y[0]**3. + _evaluateplanarRforces(pot, y[0], t=t)]
427393c1eeb89214603dc8363a9b39084e9030d4
23,437
def optimize_inst(module, inst):
    """Simplify one instruction."""
    for operand in inst.operands:
        if isinstance(operand, ir.Id):
            if operand.inst.op_name not in ir.CONSTANT_INSTRUCTIONS:
                return inst

    if inst.op_name == 'OpCompositeConstruct':
        inst = optimize_OpCompositeConstruct(module, inst)
    elif inst.op_name == 'OpCompositeExtract':
        inst = optimize_OpCompositeExtract(inst)
    elif inst.op_name == 'OpIAdd':
        inst = optimize_OpIAdd(module, inst)
    elif inst.op_name == 'OpIMul':
        inst = optimize_OpIMul(module, inst)
    elif inst.op_name == 'OpLogicalAnd':
        inst = optimize_OpLogicalAnd(module, inst)
    elif inst.op_name == 'OpLogicalEqual':
        inst = optimize_OpLogicalEqual(module, inst)
    elif inst.op_name == 'OpLogicalNot':
        inst = optimize_OpLogicalNot(module, inst)
    elif inst.op_name == 'OpLogicalNotEqual':
        inst = optimize_OpLogicalNotEqual(module, inst)
    elif inst.op_name == 'OpLogicalOr':
        inst = optimize_OpLogicalOr(module, inst)
    elif inst.op_name == 'OpNot':
        inst = optimize_OpNot(module, inst)
    elif inst.op_name == 'OpSNegate':
        inst = optimize_OpSNegate(module, inst)
    elif inst.op_name == 'OpVectorShuffle':
        inst = optimize_OpVectorShuffle(module, inst)

    return inst
1de61b914bdac4076be4ffb27823ad9384504814
23,438
import numpy as np


def table_3_3(M, lambd_nos, lambd_cil):
    """
    Outputs Cy for an ogive nose cone.

    arguments: Mach number, relative elongation of the nose and of the
        cylindrical part
    return: the Cy value of the nose cone
    """
    cy1iz_alf_0 = [0.0350, 0.0350, 0.0350, 0.0350, 0.0362, 0.0375, 0.0380, 0.0378,
                   0.0374, 0.0364, 0.0350, 0.0337, 0.0325, 0.0315, 0.0305, 0.0300]
    cy1iz_alf_05 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0445, 0.0472,
                    0.0480, 0.0475, 0.0460, 0.0435, 0.0420, 0.0385, 0.0375, 0.0365]
    cy1iz_alf_1 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0455, 0.0495,
                   0.0515, 0.0520, 0.0515, 0.0485, 0.0465, 0.0445, 0.0425, 0.0410]
    cy1iz_alf_2 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0380, 0.0430, 0.0455, 0.0515,
                   0.0540, 0.0555, 0.0552, 0.0535, 0.0515, 0.0485, 0.0470, 0.0455]
    cy1iz_alf_4 = [0.0350, 0.0350, 0.0350, 0.0358, 0.0375, 0.0410, 0.0455, 0.0515,
                   0.0549, 0.0565, 0.0565, 0.0505, 0.0545, 0.0524, 0.0502, 0.0480]
    razm = [-0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.4, 1.6, 1.8, 2, 2.2]

    if (M**2 - 1) >= 0:
        razmm = np.sqrt(M**2 - 1) / lambd_nos
    else:
        razmm = -np.sqrt(1 - M**2) / lambd_nos
    otnos = lambd_cil / lambd_nos

    if otnos == 0:
        cy1 = np.interp(razmm, razm, cy1iz_alf_0)
    elif (otnos <= 0.5) and (otnos > 0):
        cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_0),
                       np.interp(razmm, razm, cy1iz_alf_05), otnos / 0.5)
    elif (otnos <= 1) and (otnos > 0.5):
        cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_05),
                       np.interp(razmm, razm, cy1iz_alf_1), (otnos - 0.5) / 0.5)
    elif (otnos <= 2) and (otnos > 1):
        cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_1),
                       np.interp(razmm, razm, cy1iz_alf_2), otnos - 1)
    elif (otnos <= 4) and (otnos > 2):
        cy1 = interpol(np.interp(razmm, razm, cy1iz_alf_2),
                       np.interp(razmm, razm, cy1iz_alf_4), otnos - 2)
    else:
        cy1 = np.interp(razmm, razm, cy1iz_alf_4)
    return cy1
d0d4b2e1fa65f3e8ad2cd39bee1d0d4878293090
23,439
def ms_to_timestamp(ms):
    """Convert ms to 'HH:MM:SS,mmm'"""
    # XXX throw on overflow/underflow?
    if ms < 0:
        ms = 0
    if ms > MAX_REPRESENTABLE_TIME:
        ms = MAX_REPRESENTABLE_TIME
    h, m, s, ms = ms_to_times(ms)
    return "%02d:%02d:%02d,%03d" % (h, m, s, ms)
514773d94f4e3b78594bed4f232f34bcd2956f4d
23,440
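A usage sketch for ms_to_timestamp above; ms_to_times and MAX_REPRESENTABLE_TIME are module globals not shown in the record, so minimal stand-ins are assumed here.

MAX_REPRESENTABLE_TIME = 99 * 3600000  # assumed cap; the real constant lives in the module

def ms_to_times(ms):  # assumed helper: split milliseconds into (h, m, s, ms)
    h, rem = divmod(ms, 3600000)
    m, rem = divmod(rem, 60000)
    s, ms = divmod(rem, 1000)
    return h, m, s, ms

print(ms_to_timestamp(3723042))  # 01:02:03,042 - the SRT subtitle timestamp format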
import torch
from torch.autograd import Variable


def _lovasz_softmax_flat(y_pred, y_true, classes="present"):
    """
    Multi-class Lovasz-Softmax loss

    y_pred: [P, C] Variable, class probabilities at each prediction
        (between 0 and 1)
    y_true: [P] Tensor, ground truth y_true (between 0 and C - 1)
    classes: 'all' for all, 'present' for classes present in y_true, or a
        list of classes to average.
    """
    if y_pred.numel() == 0:
        # only void pixels, the gradients should be 0
        return y_pred * 0.0
    C = y_pred.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
    for c in class_to_sum:
        fg = (y_true == c).float()  # foreground for class c
        # `==`, not `is`: identity comparison on string literals is unreliable
        if classes == "present" and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError("Sigmoid output possible only with 1 class")
            class_pred = y_pred[:, 0]
        else:
            class_pred = y_pred[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(_lovasz_grad(fg_sorted))))
    return mean(losses)
9cdbab2873e198750079e560a559b1f4eb8f256c
23,441
import cirq


def quantum_state_encoding_circuit(bits):
    """Build and return the quantum-state encoding circuit for `bits`."""
    circuit = cirq.Circuit()
    circuit.append(cirq.H.on_each(bits))
    return circuit
75734a349187af7ac32683d5faf6aec331f25713
23,442
# Import the module, not the class: the call below is
# datetime.datetime.strptime, so `from datetime import datetime` would
# have raised an AttributeError.
import datetime


def parse_mov_date(date_str):
    """converts string to date"""
    try:
        return datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S%z")
    except (TypeError, ValueError):
        pass
    return None
6d4f1ad566f3e3914eeed7f9c29d914f1ced96df
23,443
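Behavior sketch for parse_mov_date above: ISO-8601 input with a UTC offset yields an aware datetime, and anything unparsable falls through to None.

print(parse_mov_date("2021-03-05T12:30:00+0000"))  # 2021-03-05 12:30:00+00:00
print(parse_mov_date("not a date"))                # None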
def get_settable_attr(attr):
    """
    If attr is not settable, navigate up the connection hierarchy until we
    find the settable attribute. For example, in RigSqueeze, the ikFk state
    attribute will be redirected to the root ctrl.

    Note that in some cases the attribute might have been piped through a
    utility node; if necessary we'll try to follow the connections through
    the utility node.
    """
    def is_attr_interesting(attr):
        if not attr:
            return True
        if not attr.isSettable() or not attr.isKeyable():
            return False
        classification = pymel.getClassification(attr.node().type())
        if any(True for token in classification if 'utility' in token):
            return False
        return True

    while not is_attr_interesting(attr):
        attr = get_input_attr_from_output_attr(attr)
    return attr
aca71e6e7f9e1312beaf1c4dcba897073ae3b3ea
23,444
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _(b"adds requires a pattern"))
    return checkstatus(repo, subset, pat, 'added')
6d9d1879c77f64bb68d43483cc2d3095328fd26f
23,445
def data_context_topology_context_topologyuuid_linklink_uuid_available_capacity_bandwidth_profile_committed_information_rate_get(uuid, link_uuid):  # noqa: E501
    """Returns tapi.common.CapacityValue  # noqa: E501

    :param uuid: Id of topology
    :type uuid: str
    :param link_uuid: Id of link
    :type link_uuid: str

    :rtype: TapiCommonCapacityValue
    """
    return 'do some magic!'
b44e48aa0fff6b01da22576fc73352deba812636
23,446
import os


def CreateMD5ChecksumFile(filename, mangled_filename=None):
    """Create an MD5 checksum file for filename and return its path."""
    if not mangled_filename:
        mangled_filename = os.path.basename(filename)

    checksum = CalculateMD5Checksum(filename)
    checksum_filename = '%s.md5sum' % filename

    with open(checksum_filename, 'w') as f:
        f.write('%s *%s' % (checksum, mangled_filename))

    return checksum_filename
8de75d2ab9e82ca663f29a6ca377bfe44576932d
23,447
import argparse


def parse_arguments() -> argparse.Namespace:
    """Parse the arguments."""
    parser = argparse.ArgumentParser(
        description="Panoptic segmentation evaluation."
    )
    parser.add_argument(
        "--gt", "-g", required=True, help="path to panseg ground truth"
    )
    parser.add_argument(
        "--result", "-r", required=True, help="path to panseg results"
    )
    parser.add_argument(
        "--config",
        "-c",
        default=None,
        help="Path to config toml file. Contains definition of categories, "
        "and optionally attributes and resolution. For an example "
        "see scalabel/label/testcases/configs.toml",
    )
    parser.add_argument(
        "--out-file",
        default="",
        help="Output file for panseg evaluation results.",
    )
    parser.add_argument(
        "--ignore-unknown-cats",
        action="store_true",
        help="ignore unknown categories for panseg evaluation",
    )
    parser.add_argument(
        "--nproc",
        "-p",
        type=int,
        default=NPROC,
        help="number of processes for panseg evaluation",
    )
    return parser.parse_args()
5d9b968015282340973fb0b631f0fe9539d08f50
23,448
import os
from typing import Union

import networkx as nx  # assumed: nx.read_edgelist is used below
import pybel
from pybel import BELGraph
from tqdm import tqdm  # the callable, not the module


def get_similarity_graph(
    *,
    fullgraph: Union[str, BELGraph] = DEFAULT_FULLGRAPH_WITHOUT_CHEMSIM_PICKLE,
    rebuild: bool = False,
    mapping_file: str = DEFAULT_CHEMICALS_MAPPING_PATH,
    chemsim_graph_path=None,
    clustered: bool = True,
    weighted: bool = False,
    minimum_similarity: float = 0.7,
    name: str = 'Chemical Similarity Graph',
    version: str = '1.1.0',
):
    """Create a BELGraph with chemicals as nodes and similarity as edges.

    :param minimum_similarity: the percent in which the chemicals are similar
    :param mapping_file: an existing dataframe with pubchemIDs and SMILES
    """
    if not rebuild and weighted and os.path.exists(DEFAULT_CHEMSIM_WEIGHTED_PICKLE):
        return nx.read_edgelist(DEFAULT_CHEMSIM_WEIGHTED_PICKLE)
    elif not rebuild and not weighted and os.path.exists(DEFAULT_CHEMSIM_PICKLE):
        return nx.read_edgelist(DEFAULT_CHEMSIM_PICKLE)

    if isinstance(fullgraph, BELGraph):
        fullgraph_without_chemsim = fullgraph
    else:
        fullgraph_without_chemsim = pybel.from_pickle(fullgraph)

    pubchem_ids = []
    for node in fullgraph_without_chemsim:
        if node.namespace != PUBCHEM_NAMESPACE:
            continue
        pubchem_ids.append(node.identifier)

    if os.path.exists(mapping_file):
        pubchem_id_to_smiles = parse_chemical_mapping(mapping_file, pubchem_ids)
    else:
        pubchem_id_to_smiles = get_smiles(pubchem_ids)

    pubchem_id_to_fingerprint = get_fingerprints(pubchem_id_to_smiles)

    chemsim_graph = pybel.BELGraph(name=name, version=version)

    if clustered:
        chemsim_graph = create_clustered_chemsim_graph(
            pubchem_id_to_fingerprint=pubchem_id_to_fingerprint,
            chemsim_graph=chemsim_graph,
            weighted=weighted,
        )
    else:
        similarities = get_similarity(pubchem_id_to_fingerprint)
        similarities_it = tqdm(similarities.items(), desc='Creating similarity BELGraph')
        for (source_pubchem_id, target_pubchem_id), similarity in similarities_it:
            if similarity < minimum_similarity:
                continue
            source = pybel.dsl.Abundance(namespace=PUBCHEM_NAMESPACE, identifier=source_pubchem_id)
            target = pybel.dsl.Abundance(namespace=PUBCHEM_NAMESPACE, identifier=target_pubchem_id)
            chemsim_graph.add_unqualified_edge(source, target, 'association')
            if weighted:
                for key in chemsim_graph[source][target]:
                    chemsim_graph[source][target][key]['weight'] = similarity

    if chemsim_graph_path is not None:
        pybel.to_pickle(chemsim_graph, chemsim_graph_path)
    elif weighted:
        pybel.to_pickle(chemsim_graph, DEFAULT_CHEMSIM_WEIGHTED_PICKLE)
    else:
        pybel.to_pickle(chemsim_graph, DEFAULT_CHEMSIM_PICKLE)
    return chemsim_graph
768ffa099af70e6a0fc12416dded6776a048d3f2
23,449
from typing import Dict, List

# SQLAlchemy's and_, not operator.and_: it is called with three clauses below,
# which operator.and_ (binary only) cannot accept.
from sqlalchemy import and_, bindparam


def update_mlwh_with_cog_uk_ids(samples: List[Dict[str, str]]) -> None:
    """Update the MLWH to write the COG UK barcode for each sample.

    Arguments:
        samples {List[Dict[str, str]]} -- list of samples to be updated
    """
    if len(samples) == 0:
        return None

    # assign db_connection to avoid UnboundLocalError in 'finally' block, in
    # case of exception
    db_connection = None
    try:
        data = []
        for sample in samples:
            # using 'b_' prefix for the keys because bindparam() doesn't allow
            # you to use the real column names
            data.append(
                {
                    "b_root_sample_id": sample[FIELD_ROOT_SAMPLE_ID],
                    "b_rna_id": sample[FIELD_RNA_ID],
                    "b_result": sample[FIELD_RESULT],
                    "b_cog_uk_id": sample[FIELD_COG_BARCODE],
                }
            )

        sql_engine = create_mysql_connection_engine(
            app.config["WAREHOUSES_RW_CONN_STRING"], app.config["ML_WH_DB"]
        )
        table = get_table(sql_engine, app.config["MLWH_LIGHTHOUSE_SAMPLE_TABLE"])

        stmt = (
            table.update()
            .where(
                and_(
                    table.c.root_sample_id == bindparam("b_root_sample_id"),
                    table.c.rna_id == bindparam("b_rna_id"),
                    table.c.result == bindparam("b_result"),
                )
            )
            .values(cog_uk_id=bindparam("b_cog_uk_id"))
        )
        db_connection = sql_engine.connect()

        results = db_connection.execute(stmt, data)

        rows_matched = results.rowcount
        if rows_matched != len(samples):
            msg = f"""
            Updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK ids was only partially successful.
            Only {rows_matched} of the {len(samples)} samples had matches in the MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table.
            """
            logger.error(msg)
            raise UnmatchedSampleError(msg)
    except Exception as e:
        msg = f"""
        Error while updating MLWH {app.config['MLWH_LIGHTHOUSE_SAMPLE_TABLE']} table with COG UK ids.
        {type(e).__name__}: {str(e)}
        """
        logger.error(msg)
        raise
    finally:
        if db_connection is not None:
            db_connection.close()
b4d6dfaec4bb40a59cbdfef619f7f4542e55e2a9
23,450
def make_09f9():
    """Warehouse inventory footer."""
    return ""
91d21aeb58fc004865db91846d73f978f48f9be4
23,451
def get_last_successful_hour_or_start_hour():
    """Get the last hour that ran successfully or the start hour."""
    last_hour = crash_stats.get_last_successful_hour()
    if last_hour:
        return last_hour

    return get_start_hour()
86518100bafe3296d63a8ac3612de1fa2c2ed8d4
23,452
import copy
# Import the module, not the class: the body uses both datetime.datetime and
# datetime.timedelta.
import datetime

import jwt


def encode_jwt(payload, secret):
    """
    Return ``payload`` as a JWT encoded with ``secret``.

    Return a JWT whose payload is ``payload`` and that is signed using
    ``secret``.

    :arg payload: the payload to encode
    :type payload: dict

    :arg secret: the secret to sign the JWT with
    :type secret: str

    :return: the JWT string
    :rtype: str
    """
    payload = copy.deepcopy(payload)
    payload["exp"] = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    jwt_bytes = jwt.encode(payload, secret, algorithm="HS256")

    # PyJWT returns JWTs as UTF8-encoded byte strings (this isn't documented,
    # but see
    # https://github.com/jpadilla/pyjwt/blob/ed28e495f937f50165a252fd5696a82942cd83a7/jwt/api_jwt.py#L62).
    # We need a unicode string, so decode it.
    jwt_str = jwt_bytes.decode("utf-8")

    return jwt_str
497d5180e8956a737ad6edbd1113d73aeb915e80
23,453
import numpy as np
import torch.nn as nn
from torchvision import models


def make_model():
    """Loads pretrained torchvision model and redefines fc layer for car classification."""
    # uses about 1 GiB of GPU memory
    model = models.vgg19(pretrained=True)
    # model = models.resnet50(pretrained=True)
    in_feat_num = model.classifier[3].in_features
    mid_feat_num = int(np.sqrt(in_feat_num))
    out_feat_num = 2

    # redefine the last two layers of the classifier for car classification
    model.classifier[3] = nn.Linear(in_feat_num, mid_feat_num)
    model.classifier[6] = nn.Linear(mid_feat_num, out_feat_num)

    return model
cd189f4b4d4dcadf6dd686aad08e2e494e0c2200
23,454
def empty_call_false(*args, **kwargs) -> bool:
    """Do nothing and return False."""
    return False
3b3964c859a47698f0000e1b26963953980fad51
23,455
def cookie_is_encoded(data):
    """
    Tests whether or not a cookie is encoded / HMAC signed

    -> #bool True if encoded

    ..
        from vital.security import cookie_is_encoded

        cookie_is_encoded(
            "!YuOoKwDp8GhrwwojdjTxSCj1c2Z+7yz7r6cC7E3hBWo=?IkhlbGxvLCB3b3JsZC4i")
        # -> True
    ..
    """
    return data.startswith('!') and '?' in data
baf2a05b516a23cacca4985944974112019abfda
23,456
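The check in cookie_is_encoded is purely syntactic: a '!' prefix marking the HMAC signature and a '?' separating signature from payload. The cookie values below are made up.

assert cookie_is_encoded("!c2ln?cGF5bG9hZA==") is True
assert cookie_is_encoded("plain-session-id") is False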
import torch


def l2_normalize(x: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
    """Normalizes the input tensor using L2-norm.

    Args:
        x: Tensor to be normalized.
        eps: Small value to avoid division by zero.

    Returns:
        Normalized tensor.
    """
    return x / (torch.norm(x, p=2, dim=1, keepdim=True) + eps).expand_as(x)
22273bbbda7bece511d31d517790bfa14427d76f
23,457
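A quick check that l2_normalize above produces (approximately) unit-norm rows:

import torch

x = torch.randn(4, 8)
y = l2_normalize(x)
print(torch.linalg.norm(y, dim=1))  # ~tensor([1., 1., 1., 1.])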
# `S` below is ssqueezepy's internal array-backend helper (provides
# `is_tensor`, `is_dtype`); the extracted snippet's `from re import S` was
# wrong. The exact module path is an assumption here; the other helper names
# (cwt, ssqueeze, Wavelet, phase_cwt, ...) come from ssqueezepy's internals.
import numpy as np
from ssqueezepy.utils import backend as S


def ssq_cwt(x, wavelet='gmw', scales='log-piecewise', nv=None, fs=None, t=None,
            ssq_freqs=None, padtype='reflect', squeezing='sum',
            maprange='peak', difftype='trig', difforder=None, gamma=None,
            vectorized=True, preserve_transform=None, astensor=True,
            order=0, patience=0, flipud=True, cache_wavelet=None,
            get_w=False, get_dWx=False):
    """Synchrosqueezed Continuous Wavelet Transform.
    Implements the algorithm described in Sec. III of [1].

    Uses `wavelet.dtype` precision.

    # Arguments:
        x: np.ndarray
            Input vector(s), 1D or 2D. See `help(cwt)`.

        wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
            Wavelet sampled in Fourier frequency domain. See `help(cwt)`.

        scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
            CWT scales. See `help(cwt)`.

        nv: int / None
            Number of voices (wavelets per octave). Suggested >= 16.

        fs, t
            See `help(_cwt.cwt)`.

        ssq_freqs: str['log', 'linear'] / np.ndarray / None
            Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
            mapping is only approximate and wavelet-dependent.
            If None, will infer from and set to same distribution as `scales`.

        padtype: str / None
            Pad scheme to apply on input. See `help(utils.padsignal)`.
            `None` -> no padding.

        squeezing: str['sum', 'lebesgue'] / function
            See `help(ssqueezing.ssqueeze)`.

        maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
            Kind of frequency mapping used, determining the range of
            frequencies spanned (fm to fM, min to max).

                - 'maximal': fm=1/dT, fM=1/(2*dt), always. Data's fundamental
                  and Nyquist frequencies, determined from `fs` (or `t`).
                  Other mappings can never span outside this range.
                - ('peak', 'energy'): sets fm and fM based on center frequency
                  associated with `wavelet` at maximum and minimum scale,
                  respectively. See `help(wavelets.center_frequency)`.
                - 'peak': the frequency-domain trimmed bell will have its peak
                  at Nyquist, meaning all other frequencies are beneath, so
                  each scale is still correctly resolved but with downscaled
                  energies. With sufficiently-spanned `scales`, coincides with
                  'maximal'.
                - 'energy': however, the bell's spectral energy is centered
                  elsewhere, as right-half of bell is partly or entirely
                  trimmed (left-half can be trimmed too). Use for
                  energy-centric mapping, which for sufficiently-spanned
                  `scales` will always have lesser fM (but ~same fm).
                - tuple: sets `ssq_freqrange` directly.

        difftype: str['trig', 'phase', 'numeric']
            Method by which to differentiate Wx (default='trig') to obtain
            instantaneous frequencies:
                w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )

                - 'trig': use `dWx`, obtained via trigonometric
                  (frequency-domain interpolant) differentiation
                  (see `cwt`, `phase_cwt`).
                - 'phase': differentiate by taking forward finite-difference
                  of unwrapped angle of `Wx` (see `phase_cwt`).
                - 'numeric': first-, second-, or fourth-order (set by
                  `difforder`) numeric differentiation (see `phase_cwt_num`).

        difforder: int[1, 2, 4]
            Order of differentiation for difftype='numeric' (default=4).

        gamma: float / None
            CWT phase threshold. Sets `w=inf` for small values of `Wx` where
            phase computation is unstable and inaccurate (like in DFT):
                w[abs(Wx) < beta] = inf
            This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
            contributions from points with indeterminate phase.
            Default = sqrt(machine epsilon) = np.sqrt(np.finfo(np.float64).eps)

        vectorized: bool (default True)
            Whether to vectorize CWT, i.e. compute quantities for all scales
            at once, which is faster but uses more memory.

        preserve_transform: bool (default None) / None
            Whether to return `Wx` as directly output from `cwt` (it might be
            altered by `ssqueeze` or `phase_transform`). Uses more memory per
            storing extra copy of `Wx`.
                - Defaults to True if `'SSQ_GPU' == '0'`, else False.

        astensor: bool (default True)
            If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
            or move them back to CPU & convert to Numpy arrays.

        order: int (default 0) / tuple[int]
            `order > 0` computes ssq of `cwt` taken with higher-order GMWs.
            If tuple, computes ssq of average of `cwt`s taken at each
            specified order. See `help(_cwt.cwt_higher_order)`.

        patience: int / tuple[int, int]
            pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.

        flipud: bool (default True)
            See `help(ssqueeze)`.

        cache_wavelet: bool (default None) / None
            See `help(cwt)`.

        get_w, get_dWx: bool (default False)
            `get_w`:
                True: will compute phase transform separately, assign it to
                array `w` and return it.
                False: will compute synchrosqueezing directly from `Wx` and
                `dWx` without assigning to intermediate array, which is
                faster (by 20-30%) and takes less memory.
            `get_dWx`:
                True: will return dWx
                False: discards dWx after computing `w` or synchrosqueezing.
            `get_dWx=True` with `get_w=True` uses most memory.
            These options do not affect `Tx`.

    # Returns:
        Tx: np.ndarray [nf x n]
            Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
            (nf = len(ssq_freqs); n = len(x))
            `nf = na` by default, where `na = len(scales)`.
        Wx: np.ndarray [na x n]
            Continuous Wavelet Transform of `x`, L1-normed (see `cwt`).
        ssq_freqs: np.ndarray [nf]
            Frequencies associated with rows of `Tx`.
        scales: np.ndarray [na]
            Scales associated with rows of `Wx`.
        w: np.ndarray [na x n]  (if `get_w=True`)
            Phase transform for each element of `Wx`.
        dWx: [na x n] np.ndarray  (if `get_dWx=True`)
            See `help(_cwt.cwt)`.

    # References:
        1. The Synchrosqueezing algorithm for time-varying spectral analysis:
           robustness properties and new paleoclimate applications.
           G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
           https://arxiv.org/abs/1105.0010

        2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
           I. Daubechies, S. Maes.
           https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf

        3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
           Decomposition. I. Daubechies, J. Lu, H.T. Wu.
           https://arxiv.org/pdf/0912.2437.pdf

        4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
           Nonuniform Samples. G. Thakur and H.-T. Wu.
           https://arxiv.org/abs/1006.2533

        5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
           https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/synsq_cwt_fw.m
    """
    def _process_args(x, scales, fs, t, nv, difftype, difforder, squeezing,
                      maprange, wavelet, get_w):
        if x.ndim == 2 and get_w:
            raise NotImplementedError("`get_w=True` unsupported with batched "
                                      "input.")
        difforder = _check_ssqueezing_args(squeezing, maprange, wavelet,
                                           difftype, difforder, get_w,
                                           transform='cwt')
        if nv is None and not isinstance(scales, np.ndarray):
            nv = 32

        N = x.shape[-1]
        dt, fs, t = _process_fs_and_t(fs, t, N)
        return N, dt, fs, difforder, nv

    def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
        if difftype == 'trig':
            # calculate instantaneous frequency directly from the
            # frequency-domain derivative
            w = phase_cwt(Wx, dWx, difftype, gamma)
        elif difftype == 'phase':
            # !!! bad; yields negatives, and forcing abs(w) doesn't help
            # calculate inst. freq. from unwrapped phase of CWT
            w = phase_cwt(Wx, None, difftype, gamma)
        elif difftype == 'numeric':
            # !!! tested to be very inaccurate for small scales
            # calculate derivative numerically
            _, n1, _ = p2up(N)
            Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
            w = phase_cwt_num(Wx, dt, difforder, gamma)
        return Wx, w

    N, dt, fs, difforder, nv = _process_args(x, scales, fs, t, nv, difftype,
                                             difforder, squeezing, maprange,
                                             wavelet, get_w)

    wavelet = Wavelet._init_if_not_isinstance(wavelet, N=N)

    # CWT with higher-order GMWs
    if isinstance(order, (tuple, list, range)) or order > 0:
        # keep padding for `trigdiff`
        kw = dict(wavelet=wavelet, scales=scales, fs=fs, t=t, nv=nv,
                  l1_norm=True, derivative=False, padtype=padtype,
                  rpadded=True, vectorized=vectorized,
                  cache_wavelet=cache_wavelet)
        _, n1, _ = p2up(N)
        average = isinstance(order, (tuple, list, range))

        Wx, scales = cwt(x, order=order, average=average, **kw)
        dWx = trigdiff(Wx, fs, rpadded=True, N=N, n1=n1)
        Wx = Wx[:, n1:n1 + N]
        if S.is_tensor(Wx):
            Wx = Wx.contiguous()

    scales, cwt_scaletype, *_ = process_scales(scales, N, wavelet, nv=nv,
                                               get_params=True)

    # regular CWT
    if order == 0:
        # l1_norm=True to spare a multiplication; for SSQ_CWT L1 & L2 are
        # exactly same anyway since we're inverting CWT over time-frequency
        # plane
        rpadded = (difftype == 'numeric')
        Wx, scales, dWx = cwt(x, wavelet, scales=scales, fs=fs, nv=nv,
                              l1_norm=True, derivative=True, padtype=padtype,
                              rpadded=rpadded, vectorized=vectorized,
                              patience=patience, cache_wavelet=cache_wavelet)

    # make copy of `Wx` if specified
    if preserve_transform is None:
        preserve_transform = not S.is_tensor(Wx)
    if preserve_transform:
        _Wx = (Wx.copy() if not S.is_tensor(Wx) else
               Wx.detach().clone())
    else:
        _Wx = Wx

    # gamma
    if gamma is None:
        gamma = np.sqrt(EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)

    # compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
    if get_w:
        _Wx, w = _phase_transform(_Wx, dWx, N, dt, gamma, difftype, difforder)
        _dWx = None  # don't use in `ssqueeze`
        if not get_dWx:
            dWx = None
    else:
        w = None
        _dWx = dWx

    # default to same scheme used by `scales`
    if ssq_freqs is None:
        ssq_freqs = cwt_scaletype

    # affects `maprange` computation if non-tuple
    was_padded = bool(padtype is not None)
    # synchrosqueeze
    Tx, ssq_freqs = ssqueeze(_Wx, w, ssq_freqs, scales, fs=fs, t=t,
                             squeezing=squeezing, maprange=maprange,
                             wavelet=wavelet, gamma=gamma,
                             was_padded=was_padded, flipud=flipud,
                             dWx=_dWx, transform='cwt')

    if difftype == 'numeric':
        Wx = Wx[:, 4:-4]
        Tx = Tx[:, 4:-4]
        w = w[:, 4:-4] if w is not None else None

    if not astensor and S.is_tensor(Tx):
        Tx, Wx, w, dWx = [g.cpu().numpy() if S.is_tensor(g) else g
                          for g in (Tx, Wx, w, dWx)]

    if get_w and get_dWx:
        return Tx, Wx, ssq_freqs, scales, w, dWx
    elif get_w:
        return Tx, Wx, ssq_freqs, scales, w
    elif get_dWx:
        return Tx, Wx, ssq_freqs, scales, dWx
    else:
        return Tx, Wx, ssq_freqs, scales
2776e85dde171b1c47fdce028bf9c845298b3a93
23,458
import torch
import torch.nn as nn
import torch.nn.functional as F


def predict_image_classification(model: nn.Module, input_: torch.Tensor):
    """Predict using an image classification model.

    Args:
        model (`nn.Module`): Pytorch model.
        input_ (`Tensor`): Input image tensor.

    Returns:
        (`tuple`) Prediction score which max is 1, and label idx.
    """
    output = model(input_)
    output = F.softmax(output, dim=1)
    prediction_score, pred_label_idx = torch.topk(output, 1)
    if isinstance(pred_label_idx, torch.Tensor):
        pred_label_idx = pred_label_idx.squeeze().item()
        prediction_score = prediction_score.squeeze().detach().item()
    return prediction_score, pred_label_idx
2343d4db9b93910337e0e55b9935783714710330
23,459
def _id_to_box(id_, dim):
    """Convert a cell id to its box ID."""
    row = id_ // (dim ** 3)
    col = (id_ % (dim ** 2)) // dim
    return row * dim + col
8e6c4779872fff5cdc5a6ca6b4143a1519d8aaf2
23,460
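Worked example for _id_to_box above on a standard 9x9 Sudoku grid (dim=3): cells 0..80 in row-major order map to boxes 0..8, also in row-major order.

assert _id_to_box(0, 3) == 0    # top-left cell -> box 0
assert _id_to_box(8, 3) == 2    # top-right cell -> box 2
assert _id_to_box(40, 3) == 4   # center cell -> box 4
assert _id_to_box(80, 3) == 8   # bottom-right cell -> box 8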
import string


def _load_hex(instream):
    """Load font from a .hex file."""
    global_comment = []
    glyphs = []
    comment = []
    for line in instream:
        line = line.rstrip('\r\n')
        if ':' in line:
            # parse code line
            key, value = line.rsplit(':', 1)
            value = value.strip()
        if (
                # preserve empty lines if they separate comments
                (not line and comment and comment[-1] != '')
                # marked as comment
                or line[0] == '#'
                # pass through lines without : as comments -
                # allows e.g. to convert diffs, like hexdraw
                or (':' not in line)
                # not a valid line, treat as comment
                or set(value) - set(string.hexdigits + ',')
        ):
            comment.append(line)
        else:
            # when first glyph is found, split comment lines between global
            # and glyph
            if not glyphs and comment:
                global_comment, comment = split_global_comment(comment)
            glyphs.append(_convert_glyph(key, value, comment))
            comment = []
    # preserve any comment at end of file as part of global comment
    global_comment = '\n'.join([*_clean_comment(global_comment),
                                *_clean_comment(comment)])
    return Font(glyphs, comments=global_comment,
                properties=dict(encoding='unicode'))
6e5980e53ee598d813f10bdbcb775e8d47102fa8
23,461
from networkx.exception import NetworkXError
from networkx.generators.classic import empty_graph


def make_small_graph(graph_description, create_using=None):
    """
    Return the small graph described by graph_description.

    graph_description is a list of the form [ltype, name, n, xlist].

    Here ltype is one of "adjacencylist" or "edgelist",
    name is the name of the graph and n the number of nodes.
    This constructs a graph of n nodes with integer labels 0,..,n-1.

    If ltype="adjacencylist" then xlist is an adjacency list with exactly
    n entries, in which the j'th entry (which can be empty) specifies the
    nodes connected to vertex j.
    e.g. the "square" graph C_4 can be obtained by

    >>> G = nx.make_small_graph(
    ...     ["adjacencylist", "C_4", 4, [[2, 4], [1, 3], [2, 4], [1, 3]]]
    ... )

    or, since we do not need to add edges twice,

    >>> G = nx.make_small_graph(["adjacencylist", "C_4", 4, [[2, 4], [3], [4], []]])

    If ltype="edgelist" then xlist is an edge list written as
    [[v1,w1],[v2,w2],...,[vk,wk]], where vj and wj are integers in the
    range 1,..,n
    e.g. the "square" graph C_4 can be obtained by

    >>> G = nx.make_small_graph(
    ...     ["edgelist", "C_4", 4, [[1, 2], [3, 4], [2, 3], [4, 1]]]
    ... )

    Use the create_using argument to choose the graph class/type.
    """
    if graph_description[0] not in ("adjacencylist", "edgelist"):
        raise NetworkXError("ltype must be either adjacencylist or edgelist")

    ltype = graph_description[0]
    name = graph_description[1]
    n = graph_description[2]

    G = empty_graph(n, create_using)
    nodes = G.nodes()

    if ltype == "adjacencylist":
        adjlist = graph_description[3]
        if len(adjlist) != n:
            raise NetworkXError("invalid graph_description")
        # xlist entries are 1-indexed, nodes are 0-indexed
        G.add_edges_from([(u - 1, v) for v in nodes for u in adjlist[v]])
    elif ltype == "edgelist":
        edgelist = graph_description[3]
        for e in edgelist:
            v1 = e[0] - 1
            v2 = e[1] - 1
            if v1 < 0 or v1 > n - 1 or v2 < 0 or v2 > n - 1:
                raise NetworkXError("invalid graph_description")
            else:
                G.add_edge(v1, v2)

    G.name = name
    return G
deb1cf0d08bba91a538c7d2c47c1d89e2c2a28da
23,462
import numpy as np


def get_masksize(mask, labelnum=None):
    """
    Compute mask size in surface space

    Parameters:
    ----------
    mask: label image (mask)
    labelnum: mask's label number, used to pad the output for group analysis

    Return:
    --------
    masksize: mask size of each roi

    Example:
    --------
    >>> masksize = get_masksize(mask)
    """
    if mask.ndim == 3:
        mask = mask[:, 0, 0]
    labels = np.unique(mask)[1:]
    masksize = []
    if len(labels) != 0:
        if labelnum is None:
            labelnum = int(np.max(labels))
        for i in range(labelnum):
            masksize.append(len(mask[mask == i + 1]))
    else:
        masksize.append(0)
    return np.array(masksize)
c8ccd82d9887f923e3d2581f97dd2a8f016cc182
23,463
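A small demonstration of get_masksize on a synthetic 1-D label array (label 0 is treated as background):

import numpy as np

mask = np.array([0, 0, 1, 1, 1, 2, 2, 0, 3])
print(get_masksize(mask))              # [3 2 1] -> sizes of labels 1, 2, 3
print(get_masksize(mask, labelnum=5))  # [3 2 1 0 0] -> padded for group analysis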
# `parse` is assumed to be packaging.version.parse, consistent with how
# `_version.release`, `public` and `is_prerelease` are used below.
from packaging.version import parse


def _context_py2rpmversion(context):
    """get a python PEP 440 compatible version and translate it to
    an RPM version"""
    # the context needs a variable set via {% set upstream_version = 'ver' %}
    _context_check_variable(context, CONTEXT_VAR_UPSTREAM_VERSION,
                            'py2rpmversion')
    version = context.vars[CONTEXT_VAR_UPSTREAM_VERSION]
    v_python = parse(version)
    # fedora does not allow '~' in versions but uses a combination of
    # Version and Release
    # https://fedoraproject.org/wiki/Packaging:Versioning#Pre-Release_packages
    if context['spec_style'] == 'fedora':
        if len(v_python._version.release) >= 4:
            return "%d.%d.%d" % v_python._version.release[0:3]
        else:
            return v_python.base_version
    else:
        v_rpm = v_python.public
        if v_python.is_prerelease:
            # we need to add the 'x' in front of alpha/beta releases because
            # in the python world, "1.1a10" > "1.1.dev10"
            # but in the rpm world, "1.1~a10" < "1.1~dev10"
            v_rpm = v_rpm.replace('a', '~xalpha')
            v_rpm = v_rpm.replace('b', '~xbeta')
            v_rpm = v_rpm.replace('rc', '~xrc')
            v_rpm = v_rpm.replace('.dev', '~dev')
        return v_rpm
3f9110dff377a6c819e6b87ab5fd9c81a7532694
23,464
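A standalone illustration of the pre-release translation rules, assuming `parse` is packaging.version.parse as in the import above:

from packaging.version import parse

for version in ('1.1a10', '2.0b3', '3.1rc2', '1.1.dev10'):
    v = parse(version)
    v_rpm = v.public
    if v.is_prerelease:
        v_rpm = (v_rpm.replace('a', '~xalpha').replace('b', '~xbeta')
                      .replace('rc', '~xrc').replace('.dev', '~dev'))
    print(version, '->', v_rpm)  # e.g. 1.1a10 -> 1.1~xalpha10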
# to_checksum_address is assumed to come from the eth_utils package,
# which provides EIP-55 checksum formatting.
from eth_utils import to_checksum_address


def check_and_format_address(address):
    """ Validate an address and return its checksummed form """
    try:
        formatted_address = to_checksum_address(address)
        return formatted_address
    except Exception as e:
        raise ArgumentsError("invalid address {}, reason: {}"
                             .format(address, e))
1b0c88aede34386d1ccd5facd1bdbd4724538ab7
23,465
from typing import Optional def get_cache_name(cache_type: str, tag: Optional[str] = None) -> str: """ Get the canonical cache name (e.g., "tmp.cache.mem.tag") for a type of cache. :param cache_type: type of a cache :param tag: optional unique tag of the cache, empty by default :return: name of the folder for a cache """ _check_valid_cache_type(cache_type) cache_name = "tmp.cache" cache_name += f".{cache_type}" if tag is not None: cache_name += f".{tag}" return cache_name
ff933829314dd1794406ca4282eaf4efdf860b39
23,466
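Usage sketch; `_check_valid_cache_type` is a validator defined elsewhere in the module, so these calls assume "mem" is an accepted cache type:

print(get_cache_name("mem"))               # tmp.cache.mem
print(get_cache_name("mem", tag="unit1"))  # tmp.cache.mem.unit1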
import sys

from configobj import ConfigObj


def _aves2_cfg():
    """ Read the aipctl configuration """
    config = ConfigObj()
    # The result is a merge of all the files as they appear in the list
    f_list = cfg_files()
    if not f_list:
        print("error: configuration file not found")
        sys.exit(1)
    for f in f_list:
        _cfg = ConfigObj(f, encoding='UTF8')
        config.merge(_cfg)
    return config
527f1e94d5ec2c5cd13aa1a886d4c56914828f4d
23,467
import os


def create_readme(top_dir, package_name, description="", docs=False):
    """
    README requires the name of the package and the directory in which to
    write the file. Optionally, give a description and whether or not to
    create a 'docs' directory.
    """
    readme_str = """
# {package}

## Description
{description}

## Examples

## Repo Structure
{package}:<br/>
┣━ README.md<br/>
┣━ LICENSE<br/>
┣━ setup.py<br/>
┣━ {package}:<br/>
┃   ┗━ __init__.py<br/>
"""
    if docs:
        readme_str = readme_str + \
"""┣━ tests:<br/>
┃   ┗━ test_basic.py<br/>
┗━ docs:<br/>
    ┗━
"""
    else:
        readme_str = readme_str + \
"""┗━ tests:<br/>
    ┗━ test_basic.py<br/>
"""
    readme_str = readme_str.format(package=package_name,
                                   description=description)
    # Write to file
    with open(os.path.join(top_dir, 'README.md'), 'w') as f:
        f.write(readme_str)
    return readme_str
70f7221536078a5d5c13eb97b28c394b12621941
23,468
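Usage sketch for create_readme, writing into a throwaway directory:

import tempfile

with tempfile.TemporaryDirectory() as tmp:
    text = create_readme(tmp, "mypackage", description="A demo package.",
                         docs=True)
    print(text.splitlines()[1])  # '# mypackage'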
def estimate_responsivity(mis_MU, norm_MU):
    """From the estimated base intensities, return only the users who have
    zero base intensity for misinformation and a base intensity greater
    than zero for normal content.
    """
    no_bad_intentions_ids = []
    for id_ in range(len(mis_MU)):
        if mis_MU[id_] == 0 and norm_MU[id_] != 0:
            no_bad_intentions_ids.append(id_)
    return no_bad_intentions_ids
4d944478694f1be1474eea963fad284079d5fe57
23,469
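Worked example: users 1 and 3 have zero misinformation intensity but non-zero normal-content intensity, so only their ids are returned:

mis_MU = [0.5, 0.0, 0.2, 0.0, 0.0]
norm_MU = [1.0, 0.3, 0.0, 0.7, 0.0]
print(estimate_responsivity(mis_MU, norm_MU))  # [1, 3]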
import datetime
from typing import Any, Union

import pandas as pd


def parse_field_constraint(
    x: Union[str, int, float, bool, list],
    constraint: str,
    type: str = "string",
    **field: Any,
) -> Union[str, int, float, bool, list, datetime.datetime, ConstraintTypeError]:
    """
    Parse field constraint.

    Arguments:
        x: Constraint value.
        constraint: Constraint type.
        type: Field type.
        field: Additional field attributes
            (https://specs.frictionlessdata.io/table-schema/#field-descriptors).

    Returns:
        Parsed field constraint.
    """
    is_list = isinstance(x, list)
    X = pd.Series(x)
    is_str = X.apply(lambda xi: isinstance(xi, str))
    if not is_str.any():
        return x
    result = parse_field(X[is_str], type=type, **field)
    if isinstance(result, ValueTypeError):
        return ConstraintTypeError(
            fieldName=field.get("name", ""),
            constraintName=constraint,
            constraintValue=X[is_str].unique().tolist() if is_list else x,
            fieldType=type,
            fieldFormat=result["fieldFormat"],
        )
    X[is_str] = result
    return X.tolist() if is_list else X[0]
531e33a1bc79e8a232032ebe9d340f829a3f513c
23,470
def compute_ab_cycles(c_cycles, linear_combinations, g, tretkoff_graph):
    """
    Returns the a- and b-cycles of the Riemann surface given the
    intermediate 'c-cycles' and the linear combinations matrix.

    Input:

    - c_cycles: the intermediate cycles of the surface

    - linear_combinations: output of the Frobenius transform of the
      intersection matrix of the c-cycles

    - g: the genus of the surface

    - tretkoff_graph: the Tretkoff graph, used to compress the cycles
    """
    lincomb = linear_combinations
    M, N = lincomb.shape

    a_cycles = []
    b_cycles = []

    for i in range(g):
        a = []
        b = []
        for j in range(N):
            cij = lincomb[i, j]
            c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])
            a.extend(abs(cij) * c[:-1])

            cij = lincomb[i + g, j]
            c = c_cycles[j] if cij >= 0 else reverse_cycle(c_cycles[j])
            b.extend(abs(cij) * c[:-1])

        a = a + [0]
        b = b + [0]
        a = compress_cycle(a, tretkoff_graph)
        b = compress_cycle(b, tretkoff_graph)

        a_cycles.append(a)
        b_cycles.append(b)

    return a_cycles, b_cycles
645d569ee06cb87161b12158603b1b6dcfb92077
23,471
import pathlib
import pickle

import numpy as np
import pmlb


def pmlb_multiclass_classification_dataset_names():
    """Returns list of multiclass classification datasets in PMLB."""
    try:
        with open(".pmlb/mcdn.pkl", "rb") as f:
            name = pickle.load(f)
    except FileNotFoundError:
        pathlib.Path(".pmlb").mkdir(parents=True, exist_ok=True)
        name = []
        for dataset in pmlb.classification_dataset_names:
            X, y = pmlb.fetch_data(dataset, return_X_y=True,
                                   local_cache_dir=".pmlb")
            if np.unique(y).size != 2:
                name.append(dataset)
        with open(".pmlb/mcdn.pkl", "wb") as f:
            pickle.dump(name, f)
    return name
d3030441c119de0c96c9d83df026b7f922fe21e6
23,472
from losses.loss_functions import (BalancedCrossEntropyLoss, DepthLoss,
                                   NormalsLoss, SoftMaxwithLoss)


def get_loss(p, task=None):
    """ Return loss function for a specific task """

    if task == 'edge':
        criterion = BalancedCrossEntropyLoss(size_average=True,
                                             pos_weight=p['edge_w'])
    elif task == 'semseg' or task == 'human_parts':
        criterion = SoftMaxwithLoss()
    elif task == 'normals':
        criterion = NormalsLoss(normalize=True, size_average=True,
                                norm=p['normloss'])
    elif task == 'sal':
        criterion = BalancedCrossEntropyLoss(size_average=True)
    elif task == 'depth':
        criterion = DepthLoss(p['depthloss'])
    else:
        raise NotImplementedError('Undefined Loss: Choose a task among '
                                  'edge, semseg, human_parts, sal, depth, '
                                  'or normals')
    return criterion
6284d2e40fc8aa220c153307fc7199a47549d15d
23,473
def compute_embeddings(image): """A mock function for a call to a deep learning model or a web service.""" del image # this is just a mock and doesn't do anything with the input return 42
31536d4a2371140e962aadb63b8645685328b3df
23,474
from pathlib import Path import os def dir(path: str) -> Path: """Get equivalent directory in cache""" _, filename = os.path.split(path) if filename: # TODO fix; this won't work as intended # If file return __get_cache_filepath(path).parent else: # If directory return __get_cache_filepath(os.path.join(path, 'tmp')).parent
203ebec92b19e82fe92d0cc4043db3aad2278a2f
23,475
def text_to_string(filename): """Read a text file and return a string.""" with open(filename) as infile: return infile.read()
dbd79e78c84c3374c0252544086885b909ae9bd9
23,476
def lgsvlToScenicElevation(pos): """Convert LGSVL positions to Scenic elevations.""" return pos.y
d90f7509285b08c791eac56c1a119f91120cf556
23,477
from typing import Dict

import jinja2


def render_to_string(backend, filename, context):
    # type: (str, str, Dict) -> str
    """
    Render a template using the specified context

    :param backend: The backend for which the template is rendered
    :param filename: The template name
    :param context: The data to use when rendering the template
    :return: The rendered template as a string
    """
    template_directory = "./swagger_django_generator/templates/{}".format(backend)
    loaders = [jinja2.FileSystemLoader(template_directory)]
    try:
        loaders.append(jinja2.PackageLoader("swagger_django_generator",
                                            "templates/{}".format(backend)))
    except ImportError:
        pass
    environment = jinja2.Environment(
        loader=jinja2.ChoiceLoader(loaders),
        trim_blocks=True,
        lstrip_blocks=True,
    )
    environment.filters["clean_schema"] = clean_schema
    environment.filters["parse_array"] = parse_array
    environment.filters["capitalize_splitter"] = capitalize_splitter
    return environment.get_template(filename).render(context)
c645a9867acdb50236a5604144a104cb38e841f9
23,478
def customfield_by_name(self, name): """ Get the value of a customfield by name """ # Get all fields from Jira. This is expensive, so only do it once if not hasattr(self, '_fields'): response = self._session.get( self._base_url.format( server=self._options['server'], rest_path=self._options['rest_path'], rest_api_version=self._options['rest_api_version'], path='field', ), auth=self._session.auth, ) if response.status_code != 200: raise JIRAError(response.text) else: self._fields = response.json() for field in self._fields: if field.get('name') == name: break else: raise JIRAError('Could not find customfield') return getattr(self.fields, field.get('id'))
35f7ee1e88029201086fc75bbc280beb386cca44
23,479
from pathlib import Path def download_images(imgs): """Save any images on page to local directory""" had_download_issue = False for img in imgs: image_url = 'https://projecteuler.net/{}'.format(img.get('src')) logger.info(f'downloading image {image_url}') image_name = Path(image_url).name image = get_the_response(image_url) if image: (LOCAL_IMAGES_DIR / image_name).write_bytes(image.content) else: had_download_issue = True return not had_download_issue
7d39dff40797a698215a589f9ff65f3df4a85e9f
23,480
import weasyprint
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string

from .models import Order  # assumed location of the Order model


def admin_order_pdf(request, order_id):
    """
    Render an order as a downloadable PDF:

    1. Get the order and render it with the HTML template.
    2. Set the response content type to PDF (so the browser downloads it).
    3. Let weasyprint build the PDF, applying the print stylesheet.
    """
    order = get_object_or_404(Order, id=order_id)
    html = render_to_string('orders/order/pdf.html', {
        'order': order
    })
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = (
        'filename=order_{}.pdf'.format(order.id)
    )
    weasyprint.HTML(string=html).write_pdf(
        response,
        stylesheets=[
            weasyprint.CSS(
                settings.STATIC_ROOT + 'css/pdf.css'
            )
        ]
    )
    return response
c4cf5a38743f573ef8dfa704cfe2d12bb47a679c
23,481
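One plausible way to expose this view (a sketch; the URL pattern, module path and name are illustrative, not taken from the original project):

# urls.py
from django.urls import path

from .views import admin_order_pdf  # hypothetical module layout

urlpatterns = [
    path('admin/order/<int:order_id>/pdf/', admin_order_pdf,
         name='admin_order_pdf'),
]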
import traceback


def delete_container(request, container):
    """ Deletes a container """
    auth_token = request.session.get('auth_token', '')
    project_id = request.session.get('project_id', '')

    try:
        conn = EncSwiftclientAPI(auth_token, project_id)
        conn.delete_container(container)
        messages.add_message(request, messages.INFO, _("Container deleted."))
    except client.ClientException:
        traceback.print_exc()
        messages.add_message(request, messages.ERROR,
                             _("Access denied. If there are files in %s, "
                               "delete them first!" % container))

    return redirect(containerview)
ce205d6112239905707064f0357b6c19fe3bd688
23,482
import tensorflow as tf  # uses the TensorFlow 1.x tf.layers API


def dense_encoder(X, params):
    """Dense model encoder subgraph that produces latent matrix.

    Given data matrix tensor X and dictionary of parameters, process through
    dense model encoder subgraph and return encoder latent vector for each
    example in batch.

    Args:
        X: tf.float64 matrix tensor of input data.
        params: Dictionary of parameters.

    Returns:
        tf.float64 matrix tensor encoder latent vector for each example
        in batch.
    """
    # Create the input layer to our DNN
    network = X

    # Add hidden layers with the given number of units/neurons per layer
    for units in params["enc_dnn_hidden_units"]:
        network = tf.layers.dense(
            inputs=network,
            units=units,
            activation=tf.nn.relu)

    return tf.layers.dense(
        inputs=network,
        units=params["latent_vector_size"],
        activation=tf.nn.relu)
1dfe2b876cb32b5d8b89e70e451a732730762a14
23,483
def __asset_inventory_espanol(asset):
    """
    Renames the headers of the Colombia Open Data dataset inventory
    to Spanish terms.

    :param asset: (pandas.DataFrame) - Inventory table from the Colombian
        open data portal (https://www.datos.gov.co).
    :return: dataset as a dataframe.
    """
    lista_columnas = list(DIC_RENAME.keys())
    asset = asset[lista_columnas].rename(columns=DIC_RENAME)

    # Reformat the dates
    asset["fecha_creacion"] = asset["fecha_creacion"].apply(lambda x: x[0:10])
    asset["fecha_actualizacion"] = asset["fecha_actualizacion"].apply(
        lambda x: x[0:10])

    # Cast rows and columns to float
    asset["filas"] = asset["filas"].astype(float)
    asset["columnas"] = asset["columnas"].astype(float)

    # Translate the 'base_publica' categories
    asset["base_publica"] = asset["base_publica"].map(
        {"published": "Si", "unpublished": "No"})

    # Translate the 'tipo' categories
    asset["tipo"] = asset["tipo"].map({
        "dataset": "conjunto de datos",
        "federatet_href": "enlace externo",
        "href": "enlace externo",
        "map": "mapa",
        "chart": "grafico",
        "filter": "vista filtrada",
        "file": "archivo o documento",
        "visualization": "visualizacion",
        "story": "historia",
        "datalens": "lente de datos",
        "form": "formulario",
        "calendar": "calendario",
        "invalid_datatype": "tipo_invalido"})

    return asset
dfb508cec458ecb63c371849d84cb3b3d79335ba
23,484
def end_of_sign_found(token: str, preceding_token: str): """ This function receives a token and its preceding token and returns whether that token ends an Akkadian sign. """ if not preceding_token: return False if '-' in token or '.' in token: return True if not preceding_token.endswith('-') and not token.startswith('##'): return True return False
30024ddad31c3149d1d2363842b085d2923c1387
23,485
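Worked examples with illustrative transliteration tokens:

print(end_of_sign_found('ma', ''))         # False - no preceding token
print(end_of_sign_found('an-', 'lugal'))   # True  - token contains '-'
print(end_of_sign_found('ma', 'lugal'))    # True  - previous token did not end with '-'
print(end_of_sign_found('##ma', 'lugal'))  # False - continuation marker
print(end_of_sign_found('ma', 'lu-'))      # False - previous token continues the sign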
import sys
from os import curdir


def main(argv=None):
    """Execute the application from CLI."""
    if argv is None:
        argv = sys.argv[1:]
    if not argv:
        argv = [curdir]

    args = _parse_args(argv)
    data = csft2data(args.path)

    if args.top:
        data = data.head(args.top)
    if args.with_raw:
        data['raw'] = data[column.SIZE]
    data[column.SIZE] = data[column.SIZE].map(format_size)

    print(data)
    return 0
c6a0200c26373ca6f7ea2544608b5a9382ea9d96
23,486
import os


def get_base_path(node=None):
    """ get the base path for the system """
    if node is None:
        node = get_system()
    # Base path
    try:
        path = os.environ['sdss_catl_path']
        assert os.path.exists(path)
    except (KeyError, AssertionError):
        proj_dict = cookiecutter_paths(__file__)
        # Path to `base`
        path = proj_dict['base_dir']

    return path
35f321f93e1bfcbe32cb3dd0fd3497da19bfa264
23,487
from typing import Dict, Optional

from torch.utils.data import DataLoader

# `datasets` is assumed to be a module-level registry (a dict mapping dataset
# names to their dataset class and transforms), and
# `SelfSupervisedDatasetWrapper` is assumed to come from the catalyst
# library; both are defined outside this snippet.


def get_loaders(
    dataset: str, batch_size: int, num_workers: Optional[int]
) -> Dict[str, DataLoader]:
    """Init loaders based on parsed parameters.

    Args:
        dataset: dataset for the experiment
        batch_size: batch size for loaders
        num_workers: number of workers to process loaders

    Returns:
        {"train":..., "valid":...}
    """
    transforms = datasets[dataset]["train_transform"]
    transform_original = datasets[dataset]["valid_transform"]

    train_data = SelfSupervisedDatasetWrapper(
        datasets[dataset]["dataset"](root="data", train=True,
                                     transform=None, download=True),
        transforms=transforms,
        transform_original=transform_original,
    )
    valid_data = SelfSupervisedDatasetWrapper(
        datasets[dataset]["dataset"](root="data", train=False,
                                     transform=None, download=True),
        transforms=transforms,
        transform_original=transform_original,
    )

    train_loader = DataLoader(train_data, batch_size=batch_size,
                              num_workers=num_workers)
    valid_loader = DataLoader(valid_data, batch_size=batch_size,
                              num_workers=num_workers)

    return {"train": train_loader, "valid": valid_loader}
2340d05f69057bcb034a8ec4ad5515055d0bde71
23,488
def pipeline(): """ Creates a pipeline configured to use a given model with a specified configuration. Notes ----- Pipeline can be executed only if its config contains the following parameters: model_class : TFModel Architecture of model. List of available models is defined at 'AVAILABLE_MODELS'. model_config : Config Model parameters. Returns ------- Pipeline A pipeline that contains model initialization and training with a given config. """ test_pipeline = (Pipeline() .init_variable('current_loss') .init_model('dynamic', C('model_class'), 'model', C('model_config')) .to_array() .train_model('model', fetches='loss', images=B('images'), labels=B('labels'), save_to=V('current_loss')) ) return test_pipeline
f8fbbe3898b58b1b1621d742e4acdf80f17ba11c
23,489
import copy

import numpy as np


def get_screen_point_array(width: float, height: float):
    """Get screen points (corners) in pixels from the normalized
    points_in_square.

    :param width: screen width
    :param height: screen height
    :return: float32 array of triangle points
    """
    points = copy.deepcopy(points_in_square)
    for i in range(len(points_in_square)):
        points[i] = points[i][0] * width, points[i][1] * height
    result = list_points_to_triangle(points)
    return np.array(result, dtype=np.float32)
34d88ddb1a24e4e3ebc81f0c7e99530548ed8a8b
23,490
import scipy.sparse as sp

# `lu` is assumed to be cvxpy's internal lin_utils module, which provides
# create_const for wrapping constants as LinOps.
import cvxpy.lin_ops.lin_utils as lu


def get_spacing_matrix(size, spacing, offset):
    """Returns a sparse matrix LinOp that spaces out an expression.

    Parameters
    ----------
    size : tuple
        (rows in matrix, columns in matrix)
    spacing : int
        The number of rows between each non-zero.
    offset : int
        The number of zero rows at the beginning of the matrix.

    Returns
    -------
    LinOp
        A sparse matrix constant LinOp.
    """
    val_arr = []
    row_arr = []
    col_arr = []
    # Selects from each column.
    for var_row in range(size[1]):
        val_arr.append(1.0)
        row_arr.append(spacing * var_row + offset)
        col_arr.append(var_row)

    mat = sp.coo_matrix((val_arr, (row_arr, col_arr)), size).tocsc()
    return lu.create_const(mat, size, sparse=True)
5871385bcdcb9ce538fe1e4525c947c2cfa582c9
23,491
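The construction can be previewed without the cvxpy LinOp wrapper; for size=(7, 3), spacing=2, offset=1 the selector rows land at 1, 3 and 5:

import scipy.sparse as sp

size, spacing, offset = (7, 3), 2, 1
rows = [spacing * j + offset for j in range(size[1])]  # [1, 3, 5]
cols = list(range(size[1]))
mat = sp.coo_matrix(([1.0] * size[1], (rows, cols)), size).tocsc()
print(mat.toarray())  # row 2*j+1 has a 1.0 in column j; all other rows are zero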
import numpy as np


def next_power2(x):
    """
    :param x: an integer number
    :return: the smallest power of 2 that is greater than or equal to x

    >>> result = next_power2(5)
    >>> np.testing.assert_equal(result, 8)
    """
    return 2 ** np.ceil(np.log2(x)).astype(int)
379c2170d0dbd25ee01a47eb0765f4dfd143efbb
23,492
from flask import render_template


def category_induced_page():
    """Form to compute the Category induced."""
    return render_template('category-induced.html')
176af8bbbb67afce78c11483f66b3d5ac15f6d76
23,493
# The type classes (UInt, SInt, Bits, Array, Digital) and the helpers
# (uint, sint, bits, array, concat, is_int) are assumed to be provided by
# the surrounding magma HDL package; they are not the stdlib `array` module
# or `operator.concat`.


def zext(value, n):
    """Extend `value` by `n` zeros"""
    assert (isinstance(value, (UInt, SInt, Bits)) or
            (isinstance(value, Array) and issubclass(value.T, Digital)))
    if not is_int(n) or n < 0:
        raise TypeError(f"Expected non-negative integer, got '{n}'")
    if n == 0:
        return value
    if isinstance(value, UInt):
        zeros = uint(0, n)
    elif isinstance(value, SInt):
        zeros = sint(0, n)
    elif isinstance(value, Bits):
        zeros = bits(0, n)
    elif isinstance(value, Array):
        zeros = array(0, n)
    result = concat(value, zeros)
    if isinstance(value, UInt):
        return uint(result)
    elif isinstance(value, SInt):
        return sint(result)
    elif isinstance(value, Bits):
        return bits(result)
    return result
dfd666446f1b93ebdeeb94b932d8de7b243f6a4e
23,494
import math def _distance(point0, point1, point2, seg_len): """Compute distance between point0 and segment [point1, point2]. Based on Mark McClure's PolylineEncoder.js.""" if (point1[0] == point2[0]) and (point1[1] == point2[1]): out = _dist(point0, point2) else: uuu = ((point0[0] - point1[0]) * (point2[0] - point1[0]) + (point0[1] - point1[1]) * (point2[1] - point1[1])) / seg_len if uuu <= 0: out = _dist(point0, point1) elif uuu >= 1: out = _dist(point0, point2) else: out = math.sqrt(math.pow((point0[0] - point1[0]) - (uuu * (point2[0] - point1[0])), 2) + math.pow((point0[1] - point1[1]) - (uuu * (point2[1] - point1[1])), 2)) return out
1927a5fe46dcb0245031b395aade67ec01270930
23,495
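A demo of the point-to-segment distance, with a Euclidean `_dist` stand-in for the helper the snippet assumes; note `seg_len` is the squared segment length, as the projection formula requires:

import math

def _dist(p, q):  # stand-in for the module's helper
    return math.hypot(p[0] - q[0], p[1] - q[1])

p1, p2 = (0.0, 0.0), (4.0, 0.0)
seg_len = _dist(p1, p2) ** 2  # squared length of the segment

print(_distance((2.0, 3.0), p1, p2, seg_len))   # 3.0 - perpendicular distance
print(_distance((-2.0, 0.0), p1, p2, seg_len))  # 2.0 - clamped to endpoint p1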
from onnx import onnx_ml_pb2 as xpb2


def delete_node(
        graph: xpb2.GraphProto,
        node_name: str = "",
        **kwargs):
    """ Delete a node from graph g and return the modified graph.

    Prints a message and returns False if it fails.

    Args:
        graph: A graph, onnx.onnx_ml_pb2.GraphProto.
        node_name: Name of the node to remove.
        **kwargs

    Returns:
        The modified graph.
    """
    if type(graph) is not xpb2.GraphProto:
        _print("The graph is not a valid ONNX graph.")
        return False
    if not node_name:
        _print("Please specify a node name.")
        return False

    found = False
    try:
        for elem in graph.node:
            if elem.name == node_name:
                graph.node.remove(elem)
                found = True
    except Exception as e:
        _print("Unable to iterate the nodes. " + str(e))
        return False
    if not found:
        _print("Unable to find the node by name.")
        return False

    return graph
620e325a0ea9da7cd83e897fee49fb6ef9183da4
23,496
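A minimal round trip for delete_node, assuming it lives in a module (e.g. sclblonnx) where the `_print` helper is defined; onnx.helper builds the GraphProto:

from onnx import TensorProto, helper

inp = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
node = helper.make_node('Identity', ['x'], ['y'], name='identity-1')
graph = helper.make_graph([node], 'demo', [inp], [out])

graph = delete_node(graph, node_name='identity-1')
print(len(graph.node))  # 0 -> the node was removed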
def image_to_term256(pil_image):
    """Convert image to a string that resembles it when printed on a terminal

    Needs a PIL image as input and a 256-color xterm for output.
    """
    result = []
    im = pil_image.convert('RGBA')
    try:
        from PIL import Image
    except ImportError:
        im.thumbnail((80, 80))
    else:
        im.thumbnail((80, 80), Image.ANTIALIAS)
    width, height = im.size
    for y in range(height // 2):
        try:
            for x in range(width):
                # set background to the top pixel, foreground to the bottom
                # pixel, then draw a lower-half block to show both
                result.append('\033[48;5;%dm\033[38;5;%dm' % (
                    term256color(*im.getpixel((x, y * 2))),
                    term256color(*im.getpixel((x, y * 2 + 1)))))
                result.append('\N{LOWER HALF BLOCK}')
        finally:
            result.append('\033[0m\n')
    return ''.join(result)
482f6c868adf5f302d88898abeff426d9ed000e7
23,497
def false_discovery(alpha,beta,rho): """The false discovery rate. The false discovery rate is the probability that an observed edge is incorrectly identified, namely that is doesn't exist in the 'true' network. This is one measure of how reliable the results are. Parameters ---------- alpha : float The estimate of the true-positive rate. beta : float The estimate of the false-positive rate. rho : float The estimate of network density. Returns ------- float The false discovery rate (probability). References ---------- .. [1] Newman, M.E.J. 2018. “Network structure from rich but noisy data.” Nature Physics 14 6 (June 1): 542–545. doi:10.1038/s41567-018-0076-1. """ return (1-rho)*beta/(rho*alpha + (1-rho)*beta)
849c236157070c5d1becfec3e4e5f46a63d232d2
23,498
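Worked example: with a true-positive rate of 0.9, a false-positive rate of 0.05 and density 0.1, a third of observed edges are expected to be spurious:

fdr = false_discovery(alpha=0.9, beta=0.05, rho=0.1)
print(round(fdr, 3))  # 0.333 = 0.045 / (0.09 + 0.045)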
from matplotlib.lines import Line2D


def add_default_legend(axes, subplots, traces):
    """ Add legend to the axes of the plot.

    This needs to be done with matplotlib shapes rather than the built-in
    matplotlib legend because otherwise the animation will add a legend at
    each time step rather than just once.

    Parameters
    ----------
    axes: axes object
        the axes of the matplotlib figure
    subplots: int
        number of subplots in the figure
    traces: list of dictionaries
        a list of dictionaries where each dictionary corresponds to one of
        the passed in filenames or dataframes, the keys of the dictionaries
        are subplots (0-indexed), and the values are a list of values for
        that subplot from that filename
        (ex. traces = [{0: ["bg", "bg_sensor"], 1: ["iob"], 2: ["sbr"]}])

    Returns
    -------
    axes: axes object
        the updated axes with legends attached
    """
    # Add the corresponding shape and label for each field in the plot
    # to the legend
    for subplot in range(subplots):
        legend_items = []
        for trace_dict in traces:
            if subplot in trace_dict.keys():
                for field in trace_dict[subplot]:
                    features = get_features_dictionary(field)
                    legend_items.append(
                        Line2D(
                            [0],
                            [0],
                            color=features["color"],
                            label=features["legend_label"],
                            marker=features["marker"],
                            markersize=3,
                            linestyle=features["linestyle"],
                        )
                    )
        # Syntax is slightly different if there is only 1 subplot
        if subplots < 2:
            add_to = axes
        else:
            add_to = axes[subplot]
        add_to.legend(handles=legend_items, loc="upper right")

    # Return the updated axes
    return axes
d352c1d90dac882f687be426d63dea35dca4ba46
23,499