content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def get_potentially_supported_ops():
    """Return the names of ops that are potentially supported.

    Returns:
      A list of op-name strings, one per entry reported by
      ``_get_potentially_supported_ops()``.
    """
    return [entry.op for entry in _get_potentially_supported_ops()]
4fcbc8fd8e10f28d7c10e8b7a9b9d9475ef6b4b6
3,633,400
def hard_sigmoid_me(input_, inplace: bool = False):
    """Apply the jit-scripted hard-sigmoid autograd function to *input_*.

    NOTE(review): *inplace* is accepted for API compatibility but is not
    forwarded to the underlying autograd function.
    """
    result = HardSigmoidJitAutoFn.apply(input_)
    return result
83cb4ed8802b5e6a275167c3a44e51719efe6212
3,633,401
def demoji(tokens):
    """Replace each emoji in *tokens* with its textual description.

    The resulting strings can later be used for vectorization and ML
    predictions.

    :param tokens: iterable of tokens, possibly containing emoji
    :return: list of tokens with emoji rendered as ``:name:`` text
    """
    return [emoji.demojize(token) for token in tokens]
ab0a200fca87b3b1dc22dfd6bbf374d35d7b8b50
3,633,402
from pathlib import Path


async def get_journal_entries_by_permalink_handler(
    journal_permalink: str = Path(...),
    entry_permalink: str = Path(...),
    db_session: Session = Depends(db.yield_connection_from_env),
) -> RedirectResponse:
    """
    Get specific journal entry by short link.

    Resolves both the journal and the entry permalink to their record ids
    and redirects to the canonical entry URL; the journal's visibility
    decides between the public and private URL scheme.
    """
    try:
        # Resolve each permalink to its record id; the journal lookup also
        # reports whether the journal is publicly visible.
        journal_id, journal_public = await actions.extract_permalink(
            db_session, data.RecordType.journal, journal_permalink
        )
        entry_id, _ = await actions.extract_permalink(
            db_session, data.RecordType.entry, entry_permalink
        )
    except actions.JournalPermalinkNotFound:
        raise HTTPException(
            status_code=404,
            detail="There is no permalink for requested journal",
        )
    except actions.JournalEntryPermalinkNotFound:
        raise HTTPException(
            status_code=404,
            detail="There is no permalink for requested entry",
        )
    except Exception as e:
        # Unexpected failure: log it and surface a generic 500.
        logger.error(f"Unexpected error: {str(e)}")
        raise HTTPException(status_code=500)
    if journal_public:
        url = f"/public/{str(journal_id)}/entries/{str(entry_id)}"
    else:
        url = f"/journals/{str(journal_id)}/entries/{str(entry_id)}"
    return RedirectResponse(url=url)
d6dcd12b7db6a57f518621212fda72c769e671aa
3,633,403
def ignore_pre_big_bang(run):
    """Drop every metric whose timestamp is negative (pre time zero).

    NOTE: an upper bound (``m[TS] < MAX_TIME``) was previously considered
    here as well, but is intentionally not applied.
    """
    kept = []
    for metric in run:
        if metric[TS] >= 0:
            kept.append(metric)
    return kept
b5ff1cf5f3f67618c31da1defa5a397fbea8f9bb
3,633,404
def signUp():
    """Sign up a new user.

    :field phone [int]: user phone number
    :field name [str]: user name
    :field password [str]: user password (will be encrypted)
    :returns [dict]: newly created user's info with auth token
    """
    # Parse and validate the expected request fields.
    phone = handler.parse('phone', int)
    name = handler.parse('name', str)
    password = handler.parse('password', str)
    # Never persist the raw password; store only the encrypted form.
    encryptedPassword = authenticator.encrypt(password)
    newUser = models.User(phone=phone, name=name, password=encryptedPassword).save()
    newUserInfo = newUser.dict()
    newUserInfo['auth_token'] = authenticator.tokenize(newUser)  # attach auth token
    return newUserInfo
0ad7bb71135e86fe3f4d3873e510d4bb375c061c
3,633,405
def next_player(player,list_player_names,player_index,open_card,given_card):
    """ returns the next player

    If the open card is an '8' and the given card matches it, the current
    player (at ``player_index``) keeps the turn; otherwise play passes to
    the next name in ``list_player_names``, wrapping from the last player
    back to the first.

    NOTE(review): the doctests below call next_player() with only four
    arguments, omitting ``given_card`` — run as-is they would raise
    TypeError. TODO: confirm the intended doctest inputs.

    >>> list_player_names=['Mark','John','Harry','Henry']
    >>> player_index=0
    >>> player='Mark'
    >>> next_player(player,list_player_names,player_index,('A', '♥', 11))
    'John'
    >>> player_index=3
    >>> player='Henry'
    >>> next_player(player,list_player_names,player_index,('3', '♥', 3))
    'Mark'
    >>> player_index=1
    >>> next_player(player,list_player_names,player_index,('8', '♥', 8))
    'John'
    """
    if open_card[0]=='8' and matches(given_card,open_card)==True:
        # Matching an '8' keeps the turn with the player at player_index.
        return list_player_names[player_index]
    else:
        if player==list_player_names[-1]:
            # Wrap around from the last player back to the first.
            return list_player_names[0]
        else:
            return list_player_names[player_index + 1]
7c56bd1983b60ed2177914cc4203462c9b83f375
3,633,406
def loss_function(image, idx, c, omega):
    """Compute the combined style / content / temporal loss for one frame.

    :param image: generated frame (tensor) being optimized
    :param idx: frame index; the temporal term only applies when idx > 0
    :param c: temporal weighting mask  # assumes same shape as image — TODO confirm
    :param omega: the previous generated frame used for temporal consistency
    :return: scalar sum of the style, content and temporal losses

    Relies on module-level globals: ``extractor``, ``style_targets``,
    ``content_targets``, ``style_weight``, ``content_weight``,
    ``temporal_weight``, ``num_style_layers`` and ``num_content_layers``.
    """
    outputs = extractor(image)
    style_outputs = outputs["style"]
    content_outputs = outputs["content"]
    # Mean squared error against the style targets, summed over layers.
    style_loss = tf.add_n(
        [
            tf.reduce_mean((style_outputs[name] - style_targets[name]) ** 2)
            for name in style_outputs.keys()
        ]
    )
    style_loss *= style_weight / num_style_layers
    # Mean squared error against the content targets, summed over layers.
    content_loss = tf.add_n(
        [
            tf.reduce_mean((content_outputs[name] - content_targets[name]) ** 2)
            for name in content_outputs.keys()
        ]
    )
    content_loss *= content_weight / num_content_layers
    temporal_loss = 0
    if idx > 0:
        # Penalize masked differences between this frame and the previous one.
        temporal_loss = tf.add_n([tf.reduce_mean(((image - omega) * c) ** 2)])
        temporal_loss *= temporal_weight
    loss = style_loss + content_loss + temporal_loss
    return loss
2c6dee61f1af7e48be34cbc47501062a0dc7fa03
3,633,407
def bra(seq, dim=2):
    """Produce a multiparticle bra state from a list or string.

    Each element of *seq* gives the state of the corresponding particle
    (e.g. ``[1, 1, 0, 1]`` or the string ``"1101"``). For qubits the
    aliases 'g'/'e' (ground/excited), 'u'/'d' (spin up/down) and 'H'/'V'
    (horizontal/vertical polarization) are also accepted. For dimension
    greater than 9 a list must be used.

    Parameters
    ----------
    seq : str / list of ints or characters
        Per-particle states.
    dim : int (default: 2) / list of ints
        Space dimension for each particle: an int if they are all equal,
        a list if they differ.

    Returns
    -------
    bra : qobj

    Examples
    --------
    >>> bra("10")
    Quantum object: dims = [[1, 1], [2, 2]], shape = [1, 4], type = bra
    Qobj data = [[ 0. 0. 1. 0.]]

    >>> bra("12", 3)
    Quantum object: dims = [[1, 1], [3, 3]], shape = [1, 9], type = bra
    Qobj data = [[ 0. 0. 0. 0. 0. 1. 0. 0. 0.]]
    """
    state = ket(seq, dim=dim)
    return state.dag()
1199ac8336963a785e2d258416733612dc7a5558
3,633,408
import os


def file_exists(filepath):
    """Return ``True`` when *filepath* names an existing regular file.

    Directories, missing paths and other non-file entries yield ``False``.
    """
    return os.path.isfile(filepath)
157caa4e5ce39243b46dda915808de79d7cf76c0
3,633,409
def rmse_diff(model_data, subj_data):
    """RMSE computed over three difference scores rather than raw values.

    For a 4-element condition vector ``v`` the difference scores are
    ``mean(v[0:2]) - mean(v[2:4])``, ``v[0] - v[1]`` and ``v[2] - v[3]``
    (each rounded to 4 decimals). Returns the root-mean-square error
    between the model's and the subject's difference scores.
    """
    def _diff_scores(values):
        v = np.array(values)
        return np.round(
            [np.mean(v[0:2]) - np.mean(v[2:4]), v[0] - v[1], v[2] - v[3]], 4
        )

    residual = _diff_scores(subj_data) - _diff_scores(model_data)
    return np.sqrt(np.mean(residual ** 2))
f9b06e74a95663ad3036b7c658bb55880eba1a03
3,633,410
def preparation_time_in_minutes(number_of_layers: int) -> int:
    """Return total preparation time for *number_of_layers* lasagna layers.

    Each layer takes ``PREPARATION_TIME`` minutes, so the total is simply
    that per-layer constant multiplied by the layer count.

    :param number_of_layers: number of lasagna layers being prepared.
    :return: preparation time in minutes.
    """
    return number_of_layers * PREPARATION_TIME
3377dbb30ef7f1ffdd41680b7f270baffb81a2ef
3,633,411
def eliminate(board, i, j):
    """Propagate the effect of fixing cell (i, j) to its peers.

    Removes the fixed cell's value from the candidate lists of every other
    cell in the same row, the same column, and the same 3x3 square.

    :param board: grid of candidate lists; ``board[i][j]`` holds the list
        of candidate values for that cell (a fixed cell holds one value).
    :param i: row index of the fixed cell.
    :param j: column index of the fixed cell.
    :return: 0 (the board is modified in place).

    NOTE: relies on the module-level grid size ``n``; the square size is
    hard-coded to 3, so this assumes a standard 9x9 board.
    """
    value = board[i][j][0]
    # Horizontal propagation: clear the value from the rest of row i.
    for k in range(n):
        if j != k and value in board[i][k]:
            board[i][k].remove(value)
    # Vertical propagation: clear the value from the rest of column j.
    for l in range(n):
        if i != l and value in board[l][j]:
            board[l][j].remove(value)
    # Square propagation. BUG FIX: the original used `i/3*3`, which yields
    # a float under Python 3 and makes range() raise TypeError; use integer
    # division. Cells sharing the fixed cell's row or column within the
    # square are skipped here because the loops above already handled them.
    for k in range(i // 3 * 3, i // 3 * 3 + 3):
        for l in range(j // 3 * 3, j // 3 * 3 + 3):
            if k != i and l != j and (value in board[k][l]):
                board[k][l].remove(value)
    return 0
dd860418a1e57ed2484c20cf5765d926a653c9ab
3,633,412
def prep_tweet_body(tweet_obj, args, processed_text):
    """Format the incoming tweet.

    Args:
        tweet_obj (dict): Tweet to preprocess (mutated and returned).
        args (list): Various datafields to append to the object.
            0: subj_sent_check (bool): Check for subjectivity and sentiment.
            1: subjectivity (num): Subjectivity result.
            2: sentiment (dict): Sentiment result.
        processed_text (list): List of tokens and ngrams etc.

    Returns:
        dict: Tweet with formatted fields
    """
    result = tweet_obj
    if args[0]:
        # Subjectivity/sentiment were requested: copy their scores over.
        result["subjectivity"] = args[1]
        sentiment = args[2]
        for score_name in ("compound", "neg", "neu", "pos"):
            result[score_name + "_score"] = sentiment[score_name]
    result["hs_keyword_count"] = len(processed_text[4])
    result["hs_keyword_matches"] = processed_text[4]
    result["tokens"] = processed_text[0]
    result["stopwords"] = processed_text[1]
    result["hashtags"] = processed_text[2]
    result["user_mentions"] = processed_text[3]
    # processed_text[5] holds the n-gram lists in ascending order of n.
    ngram_fields = ("unigrams", "bigrams", "trigrams", "quadgrams", "pentagrams")
    for position, ngram_field in enumerate(ngram_fields):
        result[ngram_field] = processed_text[5][position]
    result["stopword_ngrams"] = processed_text[6]
    result["ordered_tokens"] = processed_text[7]
    return result
9163d7bb10e3bb31849090d8ebfe4d00c19db2df
3,633,413
import zlib
import time
import logging


def send_mfg_inspector_data(inspector_proto, credentials, destination_url,
                            payload_type):
    """Upload MfgEvent to steam_engine.

    The proto is serialized, zlib-compressed and wrapped in a
    TestRunEnvelope before upload. The upload is attempted up to 5 times,
    sleeping 1 second between attempts, before giving up.

    Args:
      inspector_proto: proto message to upload.
      credentials: credentials forwarded to _send_mfg_inspector_request.
      destination_url: endpoint the envelope is posted to.
      payload_type: envelope payload_type value.

    Returns:
      The upload result, or {} when every attempt failed.
    """
    envelope = guzzle_pb2.TestRunEnvelope()
    envelope.payload = zlib.compress(inspector_proto.SerializeToString())
    envelope.payload_type = payload_type
    envelope_data = envelope.SerializeToString()
    for _ in range(5):
        try:
            result = _send_mfg_inspector_request(envelope_data, credentials,
                                                 destination_url)
            return result
        except UploadFailedError:
            # Transient failure: back off briefly and retry.
            time.sleep(1)
    logging.critical(
        'Could not upload to mfg-inspector after 5 attempts. Giving up.')
    return {}
e809e49c2babe215c547960f60d6edca495601d4
3,633,414
def cache_lookup_only(key):
    """Turn a function into a fallback for a cache lookup.

    Like the `cache` decorator, but never writes to the cache: the wrapped
    function is only invoked when ``cache_lookup`` misses (raises
    KeyError). This is good for when a function already caches its return
    value somewhere in its body, or for providing a default for a value
    that is supposed to be cached by a worker process.

    :param key: callable mapping the wrapped function's arguments to the
        cache key.
    """
    def decorator(fn):
        def wrapper(*args, **kwargs):
            cache_key = key(*args, **kwargs)
            try:
                return cache_lookup(cache_key)
            except KeyError:
                # Cache miss: compute the value directly.
                return fn(*args, **kwargs)
        return wrapper
    return decorator
c142de5fb967860f8a5108d9b65cf21e32e9e674
3,633,415
def social_distancing_policy():
    """
    Real Name: b'social distancing policy'
    Original Eqn: b'1-PULSE(social distancing start, FINAL TIME-social distancing start+1)*social distancing effectiveness'
    Units: b'dmnl'
    Limits: (None, None)
    Type: component

    Auto-generated PySD component: returns 1 (no effect) before the policy
    starts, and ``1 - effectiveness`` once the pulse is active (the pulse
    spans from the start time through FINAL TIME, i.e. for the rest of the
    simulation).
    """
    return 1 - functions.pulse(
        __data['time'],
        social_distancing_start(),
        final_time() - social_distancing_start() + 1
    ) * social_distancing_effectiveness()
8cd71fb4cdfcffb11bb488beee6f33a5495e2eeb
3,633,416
def mersenne_prime(n_max):
    """Return the Mersenne primes 2**p - 1 for exponents p below n_max.

    For each p in range(0, n_max) the candidate 2**p - 1 is kept when both
    p and the candidate itself pass ``isprime1``. Note the bound is
    exclusive: p runs only up to n_max - 1.

    Parameters
    ----------
    n_max : int
        Exclusive upper bound on the exponent p.

    Returns
    -------
    list
        Mersenne primes 2**p - 1, in increasing order of p.
    """
    primes = []
    for a in range(0,n_max):
        b = 2**a - 1
        # Keep 2**a - 1 only when the exponent and the value are both prime.
        if isprime1(a) and isprime1(b):
            primes.append(b)
    return primes
4aff17a7ed6c22b2817d0c37d1fb6b9dbaf243c2
3,633,417
def import_locus_intervals(path,
                           reference_genome='default',
                           skip_invalid_intervals=False,
                           contig_recoding=None,
                           **kwargs) -> Table:
    """Import a locus interval list as a :class:`.Table`.

    Examples
    --------
    Add the row field `capture_region` indicating inclusion in at least one
    locus interval from `capture_intervals.txt`:

    >>> intervals = hl.import_locus_intervals('data/capture_intervals.txt', reference_genome='GRCh37')
    >>> result = dataset.annotate_rows(capture_region = hl.is_defined(intervals[dataset.locus]))

    Notes
    -----
    Hail expects an interval file to contain either one, three or five
    fields per line in the following formats:

    - ``contig:start-end``
    - ``contig  start  end`` (tab-separated)
    - ``contig  start  end  direction  target`` (tab-separated)

    A file in either of the first two formats produces a table with one
    field:

    - **interval** (:class:`.tinterval`) - Row key. Genomic interval. If
      `reference_genome` is defined, the point type of the interval will be
      :class:`.tlocus` parameterized by the `reference_genome`. Otherwise,
      the point type is a :class:`.tstruct` with two fields: `contig` with
      type :py:data:`.tstr` and `position` with type :py:data:`.tint32`.

    A file in the third format (with a "target" column) produces a table
    with two fields:

    - **interval** (:class:`.tinterval`) - Row key. Same schema as above.
    - **target** (:py:data:`.tstr`)

    If `reference_genome` is defined **AND** the file has one field,
    intervals are parsed with :func:`.parse_locus_interval`. See the
    documentation for valid inputs.

    If `reference_genome` is **NOT** defined and the file has one field,
    intervals are parsed with the regex ``"([^:]*):(\\d+)\\-(\\d+)"``
    where contig, start, and end match each of the three capture groups.
    ``start`` and ``end`` match positions inclusively, e.g.
    ``start <= position <= end``.

    For files with three or five fields, ``start`` and ``end`` match
    positions inclusively, e.g. ``start <= position <= end``.

    Parameters
    ----------
    path : :obj:`str`
        Path to file.
    reference_genome : :obj:`str` or :class:`.ReferenceGenome`, optional
        Reference genome to use.
    skip_invalid_intervals : :obj:`bool`
        If ``True`` and `reference_genome` is not ``None``, skip lines with
        intervals that are not consistent with the reference genome.
    contig_recoding: :obj:`dict` of (:obj:`str`, :obj:`str`)
        Mapping from contig name in file to contig name in loaded dataset.
        All contigs must be present in the `reference_genome`, so this is
        useful for mapping differently-formatted data onto known references.
    **kwargs
        Additional optional arguments to :func:`import_table` are valid
        arguments here except: `no_header`, `comment`, `impute`, and
        `types`, as these are used by :func:`import_locus_intervals`.

    Returns
    -------
    :class:`.Table`
        Interval-keyed table.
    """
    if contig_recoding is not None:
        contig_recoding = hl.literal(contig_recoding)

    def recode_contig(x):
        # Identity when no recoding map was supplied; otherwise map the
        # file's contig name onto the reference genome's naming.
        if contig_recoding is None:
            return x
        return contig_recoding.get(x, x)

    # Import all five possible columns; unused ones are simply absent from
    # the row dtype, which is what the branches below dispatch on.
    t = import_table(path, comment="@", impute=False, no_header=True,
                     types={'f0': tstr, 'f1': tint32, 'f2': tint32,
                            'f3': tstr, 'f4': tstr}, **kwargs)

    if t.row.dtype == tstruct(f0=tstr):
        # One-field format: "contig:start-end".
        if reference_genome:
            t = t.select(interval=hl.parse_locus_interval(t['f0'], reference_genome))
        else:
            interval_regex = r"([^:]*):(\d+)\-(\d+)"

            def checked_match_interval_expr(match):
                # Missing when the regex did not produce all three groups.
                return hl.or_missing(hl.len(match) == 3,
                                     locus_interval_expr(recode_contig(match[0]),
                                                         hl.int32(match[1]),
                                                         hl.int32(match[2]),
                                                         True,
                                                         True,
                                                         reference_genome,
                                                         skip_invalid_intervals))

            expr = (
                hl.bind(t['f0'].first_match_in(interval_regex),
                        lambda match: hl.cond(hl.bool(skip_invalid_intervals),
                                              checked_match_interval_expr(match),
                                              locus_interval_expr(recode_contig(match[0]),
                                                                  hl.int32(match[1]),
                                                                  hl.int32(match[2]),
                                                                  True,
                                                                  True,
                                                                  reference_genome,
                                                                  skip_invalid_intervals))))

            t = t.select(interval=expr)

    elif t.row.dtype == tstruct(f0=tstr, f1=tint32, f2=tint32):
        # Three-field format: contig, start, end.
        t = t.select(interval=locus_interval_expr(recode_contig(t['f0']),
                                                  t['f1'],
                                                  t['f2'],
                                                  True,
                                                  True,
                                                  reference_genome,
                                                  skip_invalid_intervals))

    elif t.row.dtype == tstruct(f0=tstr, f1=tint32, f2=tint32, f3=tstr, f4=tstr):
        # Five-field format: contig, start, end, strand, target.
        t = t.select(interval=locus_interval_expr(recode_contig(t['f0']),
                                                  t['f1'],
                                                  t['f2'],
                                                  True,
                                                  True,
                                                  reference_genome,
                                                  skip_invalid_intervals),
                     target=t['f4'])

    else:
        raise FatalError("""invalid interval format.

        Acceptable formats:
              'chr:start-end'
              'chr  start  end' (tab-separated)
              'chr  start  end  strand  target' (tab-separated, strand is '+' or '-')""")

    if skip_invalid_intervals and reference_genome:
        # Drop rows whose interval failed validation against the genome.
        t = t.filter(hl.is_defined(t.interval))

    return t.key_by('interval')
3d27332ac4194f5f823bb234016d3941751e2072
3,633,418
def speech_tagging(test_data, model, tags):
    """Tag each sentence in test_data using Viterbi decoding.

    Words never seen during training are appended to the emission matrix
    with a tiny uniform probability (1e-6) so Viterbi can still decode
    sentences that contain them.

    Inputs:
    - test_data: (1*num_sentence) a list of sentences, each sentence is an
      object of line class
    - model: an object of HMM class
    - tags: unused here; kept for interface compatibility
    Returns:
    - tagging: (num_sentence*num_tagging) a 2D list of output tagging for
      each sentence in test_data
    """
    tagging = []
    N,M = model.B.shape
    new_model = model
    # Column of near-zero emission probabilities used for unseen words.
    new_column = 1e-6 * np.ones([N,1])
    new_feature_number = 0
    new_b = model.B
    # NOTE(review): this aliases the model's own obs_dict (it is mutated
    # below, not copied) — confirm that side effect on `model` is intended.
    new_obs_dict = model.obs_dict
    for sentence in test_data:
        for word in sentence.words:
            if word not in model.obs_dict:
                # Unseen word: append an emission column for it and record
                # its new column index in the observation dictionary.
                new_b = np.append(new_b,new_column,axis=1)
                new_obs_dict[word] = len(new_b[0,:]) - 1
                new_feature_number += 1
    if new_feature_number != 0:
        # Rebuild the HMM with the augmented emission matrix / vocabulary.
        new_model = HMM(model.pi, model.A, new_b, new_obs_dict, model.state_dict)
    for sentence in test_data:
        tag_row = new_model.viterbi(sentence.words)
        tagging.append(tag_row)
    return tagging
8390fc6ff0b1008d50b248da0d348ac31b42626a
3,633,419
def evaluate_if(hook_dict: dict, context: 'Context', append_hook_value: bool) -> bool:
    """Evaluate the when condition and return bool."""
    has_for = hook_dict.get('for', None) is not None
    if has_for and not append_hook_value:
        # `if` conditions inside a for loop are qualified by the loop logic.
        return True
    condition = hook_dict.get('if', None)
    if condition is None:
        return True
    return render_variable(context, wrap_jinja_braces(condition))
b9d733568abf9d4bd7e7b7ed6e1ac43582728080
3,633,420
def find_vgg_layer(arch, target_layer_name):
    """Locate a layer to hook for GradCAM / GradCAM++.

    Args:
        arch: model whose ``backbone.slice5`` block is searched.
        target_layer_name (str): hierarchical layer name; an optional
            '_<index>' suffix selects a sub-module inside
            ``backbone.slice5``, e.g.
                target_layer_name = 'features'
                target_layer_name = 'features_42'

    Return:
        target_layer: found layer. This layer will be hooked to get
        forward/backward pass information.
    """
    hierarchy = target_layer_name.split('_')
    # str.split always returns at least one element, so the base layer is
    # selected unconditionally.
    target_layer = arch.backbone.slice5
    if len(hierarchy) == 2:
        sub_index = int(hierarchy[1])
        target_layer = target_layer[sub_index]
    return target_layer
97e578e061a592f5762313f4b7aecc42cda39cb7
3,633,421
def plain_bst():
    """Build a plain binary search tree and return it with its nodes.

    The tree has the same structure as ref_bst; the returned node tuple is
    ordered (n8, n3, n10, n1, n6, n14, n4, n7, n13).
    """
    t = Tree.tree()
    # Create nodes in ascending key order (matches the original order).
    values = (1, 3, 4, 6, 7, 8, 10, 13, 14)
    nodes = {value: Tree.tree().treeNode(value) for value in values}
    t.root = nodes[8]
    nodes[8].left = nodes[3]
    nodes[8].right = nodes[10]
    nodes[3].left = nodes[1]
    nodes[3].right = nodes[6]
    nodes[6].left = nodes[4]
    nodes[6].right = nodes[7]
    nodes[10].right = nodes[14]
    nodes[14].left = nodes[13]
    node_tuple = (nodes[8], nodes[3], nodes[10], nodes[1], nodes[6],
                  nodes[14], nodes[4], nodes[7], nodes[13])
    return t, node_tuple
81667b4b122c88ec29146b5b739b44cbafda6c0f
3,633,422
def test_confirm_name(monkeypatch, single_with_trials):
    """Test name must be confirmed for update.

    The `db set` command prompts for the experiment name: a wrong answer
    must abort (exit code 1) and leave trials untouched, while the correct
    name lets the status update proceed.
    """
    def incorrect_name(*args):
        return "oops"

    # Typing the wrong experiment name must abort the command.
    monkeypatch.setattr("builtins.input", incorrect_name)
    execute("db set test_single_exp status=broken status=interrupted", assert_code=1)

    def correct_name(*args):
        return "test_single_exp"

    monkeypatch.setattr("builtins.input", correct_name)
    # Sanity check: broken trials exist before the update...
    assert len(get_storage()._fetch_trials({"status": "broken"})) > 0
    execute("db set test_single_exp status=broken status=interrupted")
    # ...and none remain afterwards.
    assert len(get_storage()._fetch_trials({"status": "broken"})) == 0
9b9aee3fccda50d886d5c3362e5f3e19806a1929
3,633,423
import torch


def get_one_hot_reprs(batch_stds):
    """Get one-hot representation of batch ground-truth labels.

    :param batch_stds: (batch_size, hist_size) tensor of integer-valued
        labels  # assumes labels lie in {0, 1, 2} — TODO confirm
    :return: (batch_size, hist_size, 3) float tensor with a 1 at each
        label's channel and 0 elsewhere.

    NOTE: relies on a module-level ``gpu`` flag to choose between CUDA and
    CPU tensors; the ``torch.cuda.*Tensor`` constructors are a legacy API.
    """
    batch_size = batch_stds.size(0)
    hist_size = batch_stds.size(1)
    int_batch_stds = batch_stds.type(torch.cuda.LongTensor) if gpu else batch_stds.type(torch.LongTensor)
    hot_batch_stds = torch.cuda.FloatTensor(batch_size, hist_size, 3) if gpu else torch.FloatTensor(batch_size, hist_size, 3)
    hot_batch_stds.zero_()
    # Scatter a 1 into the channel indexed by each label.
    hot_batch_stds.scatter_(2, torch.unsqueeze(int_batch_stds, 2), 1)
    return hot_batch_stds
84dbf251039144b2bad5f461f40cec830d9331ca
3,633,424
import os
import click
import socket
import requests
import time


def ursula(config,
           action,
           rest_port,
           rest_host,
           db_name,
           checksum_address,
           debug,
           teacher_uri,
           min_stake
           ) -> None:
    """
    Manage and run an Ursula node

    Here is the procedure to "spin-up" an Ursula node.

        0. Validate CLI Input
        1. Initialize UrsulaConfiguration (from configuration file or inline)
        2. Initialize Ursula with Passphrase
        3. Initialize Staking Loop
        4. Run TLS deployment (Learning Loop + Reactor)
    """
    log = Logger("ursula/launch")

    # Prefer the keyring passphrase from the environment; prompt otherwise.
    password = os.environ.get(config._KEYRING_PASSPHRASE_ENVVAR, None)
    if not password:
        password = click.prompt("Password to unlock Ursula's keyring", hide_input=True)

    def __make_ursula():
        # Build an UrsulaConfiguration from inline CLI inputs; a checksum
        # address is required unless running in dev mode.
        if not checksum_address and not config.dev:
            raise click.BadArgumentUsage("No Configuration file found, and no --checksum address <addr> was provided.")
        if not checksum_address and not config.dev:
            raise click.BadOptionUsage(message="No account specified. pass --checksum-address, --dev, "
                                               "or use a configuration file with --config-file <path>")

        return UrsulaConfiguration(temp=config.dev,
                                   auto_initialize=config.dev,
                                   is_me=True,
                                   rest_host=rest_host,
                                   rest_port=rest_port,
                                   db_name=db_name,
                                   federated_only=config.federated_only,
                                   registry_filepath=config.registry_filepath,
                                   provider_uri=config.provider_uri,
                                   checksum_address=checksum_address,
                                   poa=config.poa,
                                   save_metadata=False,
                                   load_metadata=True,
                                   start_learning_now=True,
                                   learn_on_same_thread=False,
                                   abort_on_learning_error=config.dev)

    #
    # Configure
    #
    overrides = dict()
    if config.dev:
        ursula_config = __make_ursula()
    else:
        try:
            filepath = config.config_file or UrsulaConfiguration.DEFAULT_CONFIG_FILE_LOCATION
            click.secho("Reading Ursula node configuration file {}".format(filepath), fg='blue')
            ursula_config = UrsulaConfiguration.from_configuration_file(filepath=filepath)
        except FileNotFoundError:
            # No configuration file: fall back to inline CLI configuration.
            ursula_config = __make_ursula()

    config.operating_mode = "federated" if ursula_config.federated_only else "decentralized"
    click.secho("Running in {} mode".format(config.operating_mode), fg='blue')

    #
    # Seed
    #
    teacher_nodes = list()
    if teacher_uri:
        if '@' in teacher_uri:
            # Optional "<checksum>@<uri>" form pins the teacher's identity.
            checksum_address, teacher_uri = teacher_uri.split("@")
            if not is_checksum_address(checksum_address):
                raise click.BadParameter("{} is not a valid checksum address.".format(checksum_address))
        else:
            checksum_address = None  # federated

        # HTTPS Explicit Required
        parsed_teacher_uri = urlparse(teacher_uri)
        if not parsed_teacher_uri.scheme == "https":
            raise click.BadParameter("Invalid teacher URI. Is the hostname prefixed with 'https://' ?")

        port = parsed_teacher_uri.port or UrsulaConfiguration.DEFAULT_REST_PORT
        while not teacher_nodes:
            try:
                teacher = Ursula.from_seed_and_stake_info(host=parsed_teacher_uri.hostname,
                                                          port=port,
                                                          federated_only=ursula_config.federated_only,
                                                          checksum_address=checksum_address,
                                                          minimum_stake=min_stake,
                                                          certificates_directory=ursula_config.known_certificates_dir)
                teacher_nodes.append(teacher)
            except (socket.gaierror, requests.exceptions.ConnectionError, ConnectionRefusedError):
                # Keep retrying until the seed node becomes reachable.
                log.warn("Can't connect to seed node. Will retry.")
                time.sleep(5)

    #
    # Produce
    #
    try:
        URSULA = ursula_config.produce(passphrase=password,
                                       known_nodes=teacher_nodes,
                                       **overrides)  # 2
    except CryptoError:
        click.secho("Invalid keyring passphrase")
        return
    click.secho("Initialized Ursula {}".format(URSULA), fg='green')

    #
    # Run
    #
    if action == 'run':
        try:
            # GO!
            click.secho("Running Ursula on {}".format(URSULA.rest_interface), fg='green', bold=True)
            stdio.StandardIO(UrsulaCommandProtocol(ursula=URSULA))
            URSULA.get_deployer().run()
        except Exception as e:
            config.log.critical(str(e))
            click.secho("{} {}".format(e.__class__.__name__, str(e)), fg='red')
            if debug:
                raise
            raise click.Abort()
        finally:
            # Always clean up the configuration, even on failure.
            click.secho("Stopping Ursula")
            ursula_config.cleanup()
            click.secho("Ursula Stopped", fg='red')

    elif action == "save-metadata":
        metadata_path = URSULA.write_node_metadata(node=URSULA)
        click.secho("Successfully saved node metadata to {}.".format(metadata_path), fg='green')

    else:
        raise click.BadArgumentUsage
27460deb03aa600474c6bc8c10c2e6133a882266
3,633,425
from typing import Tuple


def absolute_confusion_from_incidence(true_incidence, predicted_incidence) -> Tuple[float, float, float, float]:
    """Count true/false positives and negatives between incidence matrices.

    Both matrices are binarized with a ``> 0.0`` threshold before the
    element-wise comparison.

    Parameters
    ----------
    true_incidence: numpy.ndarray
        the true target incidence matrix
    predicted_incidence: numpy.ndarray
        the predicted incidence matrix

    Returns
    -------
    Tuple[float, float, float, float]
        absolute counts of (true positives, true negatives,
        false positives, false negatives), in that order
    """
    actual = true_incidence > 0.0
    predicted = predicted_incidence > 0.0
    tp_count = np.sum(actual & predicted)
    tn_count = np.sum(~actual & ~predicted)
    fp_count = np.sum(~actual & predicted)
    fn_count = np.sum(actual & ~predicted)
    return tp_count, tn_count, fp_count, fn_count
d235363a249523347087940d803e7dfbfc01a6de
3,633,426
def test_every_iteration_model_updater_with_cost():
    """
    Tests that the model updater can use a different attribute from
    loop_state as the training targets
    """
    class MockModel(IModel):
        def optimize(self):
            pass

        def set_data(self, X: np.ndarray, Y: np.ndarray):
            self._X = X
            self._Y = Y

        @property
        def X(self):
            return self._X

        @property
        def Y(self):
            return self._Y

    mock_model = MockModel()
    # The updater is configured to read `loop_state.cost` as targets.
    updater = FixedIntervalUpdater(mock_model, 1, lambda loop_state: loop_state.cost)

    loop_state_mock = mock.create_autospec(LoopState)
    loop_state_mock.iteration = 1
    # NOTE(review): this CALLS `return_value` instead of assigning it — it
    # likely should be `loop_state_mock.X = np.random.rand(5, 1)` (or
    # `.return_value = ...`); confirm the mock setup is as intended.
    loop_state_mock.X.return_value(np.random.rand(5, 1))
    # NOTE(review): `cost` is assigned twice; only the second value is used.
    loop_state_mock.cost = np.random.rand(5, 1)

    cost = np.random.rand(5, 1)
    loop_state_mock.cost = cost
    updater.update(loop_state_mock)
    assert np.array_equiv(mock_model.X, cost)
5775c0f2141f75cad46b143310f1fed64b508f37
3,633,427
def correlating_weight2_data(shots_discr, idx_qubit_ro, correlations, num_segments):
    """Compute weight-2 (two-qubit) correlators from discriminated shots.

    Parameters
    ----------
    shots_discr : ndarray, shape (num_shots, num_qubits)
        Discriminated single-shot readout results.
    idx_qubit_ro : list
        Readout ordering of the qubits; translates the qubit labels in
        `correlations` into column indices of `shots_discr`.
    correlations : list of pairs
        Qubit-label pairs to correlate.
    num_segments : int
        Number of interleaved segments; shot k belongs to segment
        k % num_segments.

    Returns
    -------
    correl_discr : ndarray, shape (num_shots, num_correlations)
        Per-shot products of the two qubits' results.
    correl_avg : ndarray, shape (num_segments, num_correlations)
        Segment-wise averages of those products.
    """
    pair_columns = [[idx_qubit_ro.index(pair[0]), idx_qubit_ro.index(pair[1])]
                    for pair in correlations]
    correl_discr = np.zeros((shots_discr.shape[0], len(pair_columns)))
    correl_avg = np.zeros((num_segments, len(pair_columns)))
    for col, (qa, qb) in enumerate(pair_columns):
        correl_discr[:, col] = shots_discr[:, qa] * shots_discr[:, qb]
        for seg in range(num_segments):
            # Shots are interleaved: every num_segments-th shot belongs
            # to the same segment.
            correl_avg[seg, col] = np.mean(correl_discr[seg::num_segments, col])
    return correl_discr, correl_avg
4afb8c95f081e70fe50ed2b209e8e930ea0c4825
3,633,428
def create_test_network_6():
    """Aligned network with dropout for test.

    The graph is similar to create_test_network_1(), except that the right
    branch has dropout normalization.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = tf.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = tf.placeholder(tf.float32, (None, None, None, 1), name='input_image')
        # Left branch: a single 1x1 conv with stride 4.
        l1 = conv2d(x, 1, False, None, filter_size=1, stride=4, name='L1', padding='VALID')
        # Right branch: explicit top/left padding, two strided convs, then dropout.
        l2_pad = tf.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
        l2 = conv2d(l2_pad, 1, False, None, filter_size=3, stride=2, name='L2', padding='VALID')
        l3 = conv2d(l2, 1, False, None, filter_size=1, stride=2, name='L3', padding='VALID')
        dropout = tf.nn.dropout(l3, 0.5, name='dropout')
        # Addition.
        tf.nn.relu(l1 + dropout, name='output')
    return g
820bea3f33b0f56d1d6148ee55764eb504bb977a
3,633,429
import time


def timedcall(fn, *args, **kwargs):
    """Run a function and measure its execution time.

    Uses ``time.perf_counter`` — a monotonic, high-resolution clock — so
    the measured duration cannot go negative if the wall clock is adjusted
    mid-call (``time.time`` offers no such guarantee). Keyword arguments
    are now forwarded as well (backward-compatible extension).

    Arguments:
        fn     : function to be executed
        args   : positional arguments for fn
        kwargs : keyword arguments for fn

    Return:
        dt     : execution time in seconds
        result : result of fn

    Usage example: to time the call "C = foo(A, B)":
        T, C = timedcall(foo, A, B)
    """
    t0 = time.perf_counter()
    result = fn(*args, **kwargs)
    dt = time.perf_counter() - t0
    return dt, result
60779c4f4b63796995d722133c304edf519ecd8f
3,633,430
from pybind11_tests import ord_char, ord_char16, ord_char32, ord_wchar, wchar_size


def test_single_char_arguments():
    """Tests failures for passing invalid inputs to char-accepting functions"""
    def toobig_message(r):
        return "Character code point not in range({0:#x})".format(r)
    toolong_message = "Expected a character, but multi-character string found"

    # char (8-bit): only code points that fit in one byte are accepted.
    assert ord_char(u'a') == 0x61  # simple ASCII
    assert ord_char(u'é') == 0xE9  # requires 2 bytes in utf-8, but can be stuffed in a char
    with pytest.raises(ValueError) as excinfo:
        assert ord_char(u'Ā') == 0x100  # requires 2 bytes, doesn't fit in a char
    assert str(excinfo.value) == toobig_message(0x100)
    with pytest.raises(ValueError) as excinfo:
        assert ord_char(u'ab')
    assert str(excinfo.value) == toolong_message

    # char16_t: BMP code points fit; astral ones would need a surrogate pair.
    assert ord_char16(u'a') == 0x61
    assert ord_char16(u'é') == 0xE9
    assert ord_char16(u'Ā') == 0x100
    assert ord_char16(u'‽') == 0x203d
    assert ord_char16(u'♥') == 0x2665
    with pytest.raises(ValueError) as excinfo:
        assert ord_char16(u'🎂') == 0x1F382  # requires surrogate pair
    assert str(excinfo.value) == toobig_message(0x10000)
    with pytest.raises(ValueError) as excinfo:
        assert ord_char16(u'aa')
    assert str(excinfo.value) == toolong_message

    # char32_t: any single code point is accepted.
    assert ord_char32(u'a') == 0x61
    assert ord_char32(u'é') == 0xE9
    assert ord_char32(u'Ā') == 0x100
    assert ord_char32(u'‽') == 0x203d
    assert ord_char32(u'♥') == 0x2665
    assert ord_char32(u'🎂') == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert ord_char32(u'aa')
    assert str(excinfo.value) == toolong_message

    # wchar_t: width is platform dependent (2 or 4 bytes).
    assert ord_wchar(u'a') == 0x61
    assert ord_wchar(u'é') == 0xE9
    assert ord_wchar(u'Ā') == 0x100
    assert ord_wchar(u'‽') == 0x203d
    assert ord_wchar(u'♥') == 0x2665
    if wchar_size == 2:
        with pytest.raises(ValueError) as excinfo:
            assert ord_wchar(u'🎂') == 0x1F382  # requires surrogate pair
        assert str(excinfo.value) == toobig_message(0x10000)
    else:
        assert ord_wchar(u'🎂') == 0x1F382
    with pytest.raises(ValueError) as excinfo:
        assert ord_wchar(u'aa')
    assert str(excinfo.value) == toolong_message
dce3ef537fcc312d92b9f5ff5eb2ac00ff731a5e
3,633,431
def tokuda_gap(i):
    """Return the i-th Tokuda gap for Shellsort (i starts at 0).

    The sequence begins:
    1, 4, 9, 20, 46, 103, 233, 525, 1182, 2660, 5985, 13467, 30301, ...

    Defined as h_i = ceil((9*(9/4)**i - 4) / 5). Since 9*(9/4)**i is
    non-integral for i > 0, this equals ((9**(i+1) >> 2*i) - 4) // 5 + 1,
    computed here entirely in integer arithmetic. The i = 0 case is
    returned explicitly because the integer formula would give 2.
    """
    if i == 0:
        return 1
    scaled = 9 ** (i + 1) >> (2 * i)  # floor(9**(i+1) / 4**i)
    return (scaled - 4) // 5 + 1
710633e924cb6e31a866683b91da6489c781ba4a
3,633,432
import os


def product_codes_with_parent(parent_code):
    """Return a dict of all product-code entries that belong to *parent_code*.

    Downloads the HS classification file first when it is not present
    locally, then filters the loaded table on its ``parent`` column.
    """
    if not os.path.exists('classificationHS.csv'):
        download_product_codes_file()
    codes = load_product_codes_file()
    children = codes[codes.parent == parent_code]
    return children.text.to_dict()
ed9e975110754061615c180d81eb5331d00f875c
3,633,433
def trimf(x, p): """ Triangular membership function generator. Parameters ---------- x : any sequence Independent variable. p: list of 4 values lower than p[0] and higher than p[3] it returns 0 between p[1] and p[2] it returns 1 Returns ------- y : 1d array Triangular membership function. """ assert len(p) == 3, "trimf requires 3 parameters." assert ( p[0] <= p[1] and p[1] <= p[2] ), "trimf requires 3 parameters: p[0] <= p[1] <= p[2]." x = np.asanyarray(x) y = np.nan * np.ones_like(x) y[np.nonzero(x <= p[0])] = 0 # Left side if p[0] != p[1]: idx = np.nonzero(np.logical_and(p[0] < x, x < p[1])) y[idx] = (x[idx] - p[0]) / float(p[1] - p[0]) y[np.nonzero(x == p[1])] = 1 # Right side if p[1] != p[2]: idx = np.nonzero(np.logical_and(p[1] < x, x < p[2])) y[idx] = (p[2] - x[idx]) / float(p[2] - p[1]) y[np.nonzero(x >= p[2])] = 0 return y
7df01e466e55186c4d9e74466440077a0824eb47
3,633,434
def band_atom_orbitals_spin_polarized(
    folder,
    atom_orbital_dict,
    output='band_atom_orbitals_sp.png',
    display_order=None,
    scale_factor=5,
    color_list=None,
    legend=True,
    linewidth=0.75,
    band_color='black',
    unprojected_band_color='gray',
    unprojected_linewidth=0.6,
    fontsize=12,
    annotations=['$\\uparrow$ ', '$\\downarrow$ '],
    annotation_xy=(0.02, 0.98),
    figsize=(4, 3),
    erange=[-6, 6],
    stack='vertical',
    kpath=None,
    custom_kpath=None,
    n=None,
    unfold=False,
    M=None,
    high_symm_points=None,
    save=True,
    shift_efermi=0,
    interpolate=False,
    new_n=200,
    soc_axis=None,
):
    """
    This function generates an atom orbital spin polarized band structure.
    This will plot two plots stacked on top or eachother or next to eachother.
    The top or left plot will project on the spin up bands and the bottom
    or right plot will project onto the spin down bands.

    Parameters:
        folder (str): This is the folder that contains the VASP files
        atom_orbital_dict (dict[int:list]): A dictionary that contains the individual atoms and the
            corresponding orbitals to project onto. For example, if the user wants to project onto the
            s, py, pz, and px orbitals of the first atom and the s orbital of the second atom then the
            dictionary would be {0:[0,1,2,3], 1:[0]}
        output (str): File name of the resulting plot.
        scale_factor (float): Factor to scale weights. This changes the size of the
            points in the scatter plot.
        display_order (str / None): If None, the projections will be displayed in the same
            order the user inputs them. If 'all' the projections will be plotted from largest
            to smallest so every point is visable. If 'dominant' the projections will be
            plotted from smallest to largest so only the dominant projection is shown.
        color_list (list): List of colors that is the same length as the number of
            projections in the plot.
        legend (bool): Determines if the legend should be included or not.
        unprojected_band_color (str): Color of the unprojected band
        unprojected_linewidth (float): Line width of the unprojected bands
        annotations (list): Annotations to put on the top and bottom (left and right)
            figures. By default it will show the spin up and spin down arrows.
        annotation_xy (list / tuple): Fractional (x, y) coordinated of the annotation location
        stack (str): Determines how the plots are stacked (vertical or horizontal)
        linewidth (float): Line width of the plain band structure plotted in the background.
        band_color (string): Color of the plain band structure.
        figsize (list / tuple): Desired size of the image in inches. (width, height)
        erange (list / tuple): Range of energy to show in the plot. [low, high]
        kpath (list[list]): High symmetry k-point path of band structure calculation
            Due to the nature of the KPOINTS file for unfolded calculations this
            information is a required input for proper labeling of the figure
            for unfolded calculations. This information is extracted from the KPOINTS
            files for non-unfolded calculations. (G is automatically converted to \\Gamma)
            (e.g. For the path X-G-X, kpath=[['X', 'G'], ['G', 'X']])
        custom_kpath (list): This gives the option to only plot specific segments of a
            given band structure calculation. For example if the kpath was G-X-W-L then
            there are three segements to choose from: G-X, X-W, and W-L. In this case
            the default kpath could be plotted by defining custom_kpath=[1,2,3], where
            1 -> G-X, 2 -> X-W, and 3 -> W-L. If only G-X and X-W were desired then
            custom_kpath=[1,2]. If one of the segements should be flipped it can be
            done by making its value negative (e.g. -1 -> X-G, -2 -> W-X, -3 -> L-W)
        n (int): Number of points between each high symmetry points.
            This is also only required for unfolded calculations and band unfolding.
            This number should be known by the user, as it was used to generate the KPOINTS file.
        unfold (bool): Determines if the plotted band structure is from a band unfolding calculation.
        M (list[list]): Transformation matrix from the primitive bulk structure to the slab
            structure. Only required for a band unfolding calculation.
        high_symm_points (list[list]): List of fractional coordinated for each high symmetry point in
            the band structure path. Only required for a band unfolding calculation.
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are return for further manipulation.

    Returns:
        If save == True, this function will return nothing and directly save the image as the output name.
        If save == False, the function will return the matplotlib figure and axis for
        further editing. (fig, ax1, ax2)
    """
    # Load each spin channel separately; both parse the same projected
    # calculation in `folder`.
    band_up = Band(
        folder=folder,
        spin='up',
        projected=True,
        unfold=unfold,
        high_symm_points=high_symm_points,
        interpolate=interpolate,
        new_n=new_n,
        soc_axis=soc_axis,
        kpath=kpath,
        custom_kpath=custom_kpath,
        n=n,
        M=M,
        shift_efermi=shift_efermi,
    )

    band_down = Band(
        folder=folder,
        spin='down',
        projected=True,
        unfold=unfold,
        high_symm_points=high_symm_points,
        interpolate=interpolate,
        new_n=new_n,
        soc_axis=soc_axis,
        kpath=kpath,
        custom_kpath=custom_kpath,
        n=n,
        M=M,
        shift_efermi=shift_efermi,
    )

    # Build the two-panel layout; ax1 hosts spin-up, ax2 hosts spin-down.
    # NOTE(review): an unrecognized `stack` value leaves fig/ax1/ax2 undefined
    # and raises NameError below -- confirm callers only pass
    # 'vertical'/'horizontal'.
    if stack == 'vertical':
        fig = plt.figure(figsize=(figsize[0], 2 * figsize[1]), dpi=400)
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        _figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
        _figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])

    elif stack == 'horizontal':
        fig = plt.figure(figsize=(2 * figsize[0], figsize[1]), dpi=400)
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        _figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
        _figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])

    bbox = dict(boxstyle='round', fc='white',
                edgecolor='gray', alpha=0.95, pad=0.3)
    ax1.annotate(
        annotations[0],
        xy=annotation_xy,
        xycoords='axes fraction',
        zorder=200,
        va='top',
        ha='left',
        bbox=bbox,
        fontsize=fontsize,
    )
    ax2.annotate(
        annotations[1],
        xy=annotation_xy,
        xycoords='axes fraction',
        zorder=200,
        va='top',
        ha='left',
        bbox=bbox,
        fontsize=fontsize,
    )

    # Spin-up projections on ax1, with the *opposite* spin channel drawn as a
    # faint unprojected underlay for reference.
    band_up.plot_atom_orbitals(
        ax=ax1,
        atom_orbital_dict=atom_orbital_dict,
        display_order=display_order,
        scale_factor=scale_factor,
        color_list=color_list,
        legend=legend,
        linewidth=linewidth,
        band_color=band_color,
        erange=erange,
    )
    band_down.plot_plain(
        sp_scale_factor=0,
        ax=ax1,
        color=unprojected_band_color,
        linewidth=unprojected_linewidth,
        erange=erange,
    )

    # Mirror image for spin-down on ax2.
    band_down.plot_atom_orbitals(
        ax=ax2,
        atom_orbital_dict=atom_orbital_dict,
        display_order=display_order,
        scale_factor=scale_factor,
        color_list=color_list,
        legend=legend,
        linewidth=linewidth,
        band_color=band_color,
        erange=erange,
    )
    band_up.plot_plain(
        sp_scale_factor=0,
        ax=ax2,
        color=unprojected_band_color,
        linewidth=unprojected_linewidth,
        erange=erange,
    )

    plt.tight_layout(pad=0.4)

    if save:
        plt.savefig(output)
    else:
        return fig, ax1, ax2
8ed107df12d8ef037116f0e87e8a62b6794ecbbb
3,633,435
from typing import List


def _interpolate(mesh_1: Mesh, mesh_2: Mesh, steps: int = 1) -> List[Mesh]:
    """Interpolate two alike meshes.

    This is suitable to fill the blank frames of an animated object.
    This function makes the assumption that same indices will be forming
    the same triangle.

    This function returns at least 3 meshes;
    [mesh_1, interpolated mesh, mesh_2]
    """
    # The two endpoint meshes must be structurally identical (vertex-aligned)
    # and flat (no child meshes), otherwise the linear blend is meaningless.
    if len(mesh_1._vertices) != len(mesh_2._vertices):
        raise MeshException("Mesh 1 and Mesh 2 vertex counts do not match")
    if len(mesh_1.children) > 0:
        raise MeshException("Mesh 1 has children")
    if len(mesh_2.children) > 0:
        raise MeshException("Mesh 2 has children")
    # Generate meshes
    results: List[Mesh] = [mesh_1]
    # Per-vertex step vector: total displacement split into (steps + 1)
    # equal increments, so `steps` intermediate frames land evenly between
    # the two endpoints.
    distances = []
    for v_i, vertex in enumerate(mesh_1._vertices):
        dist = [0.0, 0.0, 0.0]
        dist[0] = (mesh_2._vertices[v_i][0] - vertex[0]) / (steps + 1)
        dist[1] = (mesh_2._vertices[v_i][1] - vertex[1]) / (steps + 1)
        dist[2] = (mesh_2._vertices[v_i][2] - vertex[2]) / (steps + 1)
        distances.append(dist)
    for i in range(1, steps + 1):
        # Each frame is built from the *previous* frame plus one increment,
        # so materials/texcoords/indices propagate along the chain.
        mesh = Mesh()
        mesh.materials = deepcopy(results[i - 1].materials)
        mesh._texcoords = deepcopy(results[i - 1]._texcoords)
        mesh._indices = deepcopy(results[i - 1]._indices)
        mesh.destroy()  # Destroy references to previous objects opengl
        for v_i, vertex in enumerate(results[i - 1]._vertices):
            mesh._vertices.append(
                [vertex[0] + distances[v_i][0], vertex[1] + distances[v_i][1], vertex[2] + distances[v_i][2]]
            )
        # Recompute normals since vertex positions changed.
        mesh.fix_normals()
        results.append(mesh)
    results.append(mesh_2)
    return results
7209e00ac3cfc7996ec7e8cd1b0184b6ada40dea
3,633,436
def dataset_service():
    """Resolve the dataset service from the app's DI context.

    :rtype: dart.service.dataset.DatasetService
    """
    context = current_app.dart_context
    return context.get(DatasetService)
f2c8a3dfc39454449554930d2939cc45ed06f109
3,633,437
def calculate_concordance(aei_pvalues, eqtl_pvalues, threshold=0.05):
    """Check whether the AEI p-value at the most significant eQTL position
    passes the significance threshold.

    Returns True when the AEI p-value at the index of the smallest
    (NaN-ignoring) eQTL p-value is <= ``threshold``, False otherwise.
    """
    best_idx = np.nanargmin(eqtl_pvalues)
    # Debug output retained from the original implementation.
    print(best_idx)
    print(aei_pvalues.iloc[best_idx])
    if aei_pvalues.iloc[best_idx] <= threshold:
        return True
    return False
f0371c81096ad9f1598c1291d853d717002e09e0
3,633,438
from typing import Dict, Pattern
import re


def get_xclock_hints() -> Dict[str, Pattern]:
    """Return window-matching hints that identify an xclock window."""
    # Anchored pattern: the window name must be exactly "xclock".
    name_pattern = re.compile(r"^xclock$")
    return {"name": name_pattern}
99e1fe51b46cb5e101c2a1c86cf27b2b60c0a38e
3,633,439
def calciteSaturationAtFixedPCO2(
    logPCO2, phreeqcInputFile, PHREEQC_PATH, DATABASE_FILE, newInputFile=None
):
    """
    Function used in root finding of saturation PCO2.

    Runs PHREEQC with the sample equilibrated to the given PCO2 and extracts
    the calcite saturation index from the result. Used by
    findPCO2atCalciteSaturation(); as a stand alone function, it's better to
    use phreeqcRunSetPCO2().

    Parameters
    ----------
    logPCO2 : float
        Log10 of the PCO2 value to set the sample to.
    phreeqcInputFile : str
        The name of the input file to modify.
    PHREEQC_PATH : string
        The path to the PHREEQC executable.
    DATABASE_FILE : string
        The database file to be used by PHREEQC.
    newInputFile : string
        The name of the set-PCO2 file to create and run. If not specified,
        the final 5 characters will be stripped off of phreeqcInputFile and
        'set-PCO2.phrq' will be added.

    Returns
    -------
    SI : float
        Saturation index of calcite for given PCO2.
    """
    results = phreeqcRunSetPCO2(
        logPCO2,
        phreeqcInputFile,
        PHREEQC_PATH,
        DATABASE_FILE,
        newInputFile=newInputFile,
    )
    return results["SI_Calcite"]
2b805c31ee80230a71e6c8eff27d5b8ed6167d20
3,633,440
import math


def fnCalculate_ReceivedPower(P_Tx, G_Tx, G_Rx, rho_Rx, rho_Tx, wavelength, RCS):
    """
    Calculate the received power at the bistatic radar receiver.

    Implements equation 5 in "PERFORMANCE ASSESSMENT OF THE MULTIBEAM RADAR
    SENSOR BIRALES FOR SPACE SURVEILLANCE AND TRACKING".

    Note: ensure that the distances rho_Rx, rho_Tx and the wavelength are
    converted to metres before passing into this function.

    Created on: 26 May 2017
    """
    # Numerator: transmit power scaled by both antenna gains, the target RCS
    # and the square of the wavelength.
    gain_term = P_Tx * G_Tx * G_Rx * RCS * wavelength ** 2
    # Denominator: free-space spreading loss over both legs of the bistatic
    # geometry.
    spreading_loss = (4 * math.pi) ** 3 * rho_Rx ** 2 * rho_Tx ** 2
    return gain_term / spreading_loss
944fb485e9d9a3d2da130e4ddc415e63ab814380
3,633,441
import os


def main(src_features, src_labels, subset_index, column, class_map,
         num_background, outdir, weak_null_classes=None, prefix='',
         random_state=None):
    """Produce a filtered subset given a dataset and a set of IDs.

    Parameters
    ----------
    src_features : np.ndarray, shape=(n, d)
        The feature array.

    src_labels : pd.DataFrame, len=n
        Corresponding metadata, aligned to the source feature array.

    subset_index : dict
        Map of generic IDs to class labels. The gid namespace should match
        that specified by `column`.

    column : str
        Column to use for filtering gids from the source label dataframe.

    class_map : dict
        Mapping between labels (in subset_index) to integer positions.

    num_background : int
        Number of background class samples to draw.

    outdir : str
        Path for writing the various outputs.

    weak_null_classes : list or set, default=None
        Object containing integer label indices to be filtered when buidling.

    prefix : str, default=''
        Optional string with which to prefix created files, like:
            {prefix}features.npy, {prefix}labels.csv, {prefix}classes.npy

    random_state : int, default=None
        Seed to use for the random number generator.

    Returns
    -------
    success : bool
        True if all files were created correctly.
    """
    def is_strong_null(row):
        """Return True if the labels correspond to a strong null condition."""
        # NOTE(review): when num_background > 0 and weak_null_classes is None
        # the `in` test raises TypeError -- confirm callers always supply
        # weak_null_classes in that case.
        return not any([y in weak_null_classes
                        for y in parse_labels(row['labels'])])

    # Keep only the rows whose `column` value appears in subset_index.
    gids = sorted(list(subset_index.keys()))
    dst_labels = pd.DataFrame(data=dict(keep=True), index=gids)
    dst_labels = src_labels.join(dst_labels, on=column, how='inner')
    del dst_labels['keep']
    dst_index = dst_labels.index
    dst_features = src_features[dst_index]

    if num_background > 0:
        null_index = src_labels.index.difference(dst_index)
        # Need to keep labels..
        null_labels = src_labels.loc[null_index]
        del null_labels['time']
        null_labels.drop_duplicates(inplace=True)

        # Tag videos that are sufficiently strong nulls
        strong_null_index = null_labels.apply(is_strong_null, axis=1)
        strong_null_gids = null_labels[strong_null_index][column].values

        # Slice a subset of corresponding GIDs
        rng = np.random.RandomState(random_state)
        rng.shuffle(strong_null_gids)  # in-place; order seeds the draw below
        strong_null_labels = pd.DataFrame(
            data=dict(keep=True), index=strong_null_gids[:num_background])
        strong_null_labels = src_labels.join(strong_null_labels,
                                             on=column, how='inner')
        del strong_null_labels['keep']

        # Concatenate strong nulls
        dst_features = np.concatenate(
            [dst_features, src_features[strong_null_labels.index]], axis=0)
        dst_labels = pd.concat([dst_labels, strong_null_labels], axis=0)

    # Persist features, metadata, and the boolean class-indicator matrix.
    features_file = os.path.join(outdir, "{}features.npy".format(prefix))
    np.save(features_file, dst_features)

    labels_file = os.path.join(outdir, "{}labels.csv".format(prefix))
    dst_labels.to_csv(labels_file, index=True)

    # Background rows have no entry in subset_index, so their row stays all
    # False.
    y_true = np.zeros([len(dst_features), len(class_map)], dtype=bool)
    for n, gid in enumerate(dst_labels[column]):
        for y_label in subset_index.get(gid, []):
            y_true[n, class_map[y_label]] = True

    y_true_file = os.path.join(outdir, "{}classes.npy".format(prefix))
    np.save(y_true_file, y_true)

    output_files = (features_file, labels_file, y_true_file)
    return all([os.path.exists(fn) for fn in output_files])
e14e303b465da64b57d5da005edc9ebf394de256
3,633,442
import time


def datetime_creator():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'.

    Returns:
        str: the formatted timestamp.
    """
    now = time.localtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
1d55b0f3f93bcc850f961902d74a0f7fd8200f27
3,633,443
def get_l2_loss(excluded_keywords=None):
    """Traverse `tf.trainable_variables` compute L2 reg. Ignore `batch_norm`.

    Args:
        excluded_keywords: optional list of extra substrings; any variable
            whose (lowercased) name contains one of them is also excluded.

    Returns:
        A scalar tensor: the sum of tf.nn.l2_loss over all non-excluded
        trainable variables.
    """
    def _is_excluded(v):
        """Guess whether a variable belongs to `batch_norm`."""
        # NOTE(review): substring matching on 'bn' is broad and may exclude
        # unrelated variables whose names happen to contain "bn" -- confirm
        # naming conventions in the model.
        keywords = ['batchnorm', 'batch_norm', 'bn', 'layernorm', 'layer_norm']
        if excluded_keywords is not None:
            keywords += excluded_keywords
        return any([k in v.name.lower() for k in keywords])

    l2_losses = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
                 if not _is_excluded(v)]
    # tf.add_n raises if l2_losses is empty (all variables excluded).
    return tf.add_n(l2_losses)
7ec4a42d92f652f40ac3bdf939490edf2912697d
3,633,444
from datetime import datetime


def tzdt(fulldate: str):
    """
    Converts an ISO 8601 full timestamp to a Python datetime.

    Parameters
    ----------
    fulldate: str
        ISO 8601 UTC timestamp, e.g. `2017-06-02T16:23:14.815Z`

    Returns
    -------
    :class:`datetime.datetime`
        Python datetime representing ISO timestamp.
    """
    # strptime's %z does not understand the 'Z' suffix, so rewrite it as an
    # explicit zero offset before parsing.
    normalized = fulldate
    if normalized[-1] == "Z":
        normalized = normalized[:-1] + "+0000"
    return datetime.strptime(normalized, "%Y-%m-%dT%H:%M:%S.%f%z")
e327c23f9aecf587432fa0170c8bcd3a9a534bd1
3,633,445
import sys


def tcex():
    """Return an instance of tcex.

    Builds a fresh TcEx app instance from the module-level `_config_data`
    template, pointing its log file at a per-feature/per-test path.
    """
    # create log structure for feature/test (e.g., args/test_args.log)
    # Copy so the shared template dict is never mutated between tests.
    config_data_ = dict(_config_data)
    config_data_['tc_log_file'] = _tc_log_file()

    # clear sys.argv to avoid invalid arguments
    # (TcEx parses sys.argv; leftover pytest flags would break it).
    sys.argv = sys.argv[:1]
    return TcEx(config=config_data_)
e6d8f20b2bc0086f141293f40ca8f44b372c1608
3,633,446
def join_data(msg_fields):
    """
    Helper method. Gets a list, joins all of it's fields to one string
    divided by the data delimiter.
    :param msg_fields: fields of the message (each is converted with str()).
    :return: string that looks like cell1#cell2#cell3
    """
    # str.join places the delimiter only *between* fields. The previous
    # implementation prepended the delimiter to every field and then sliced
    # off a single leading character (msg[1:]), which is wrong whenever
    # DATA_DELIMITER is longer than one character.
    return DATA_DELIMITER.join(str(field) for field in msg_fields)
09afba0944dce292ad701f7342f28576bc4d156a
3,633,447
def MDA(input_dims, encoding_dims): """Multi-modal autoencoder. """ # input layers input_layers = [] for dim in input_dims: input_layers.append(Input(shape=(dim, ))) # hidden layers hidden_layers = [] for j in range(0, len(input_dims)): hidden_layers.append(Dense(encoding_dims[0]//len(input_dims), activation='sigmoid')(input_layers[j])) # Concatenate layers if len(encoding_dims) == 1: hidden_layer = concatenate(hidden_layers, name='middle_layer') else: hidden_layer = concatenate(hidden_layers) # middle layers for i in range(1, len(encoding_dims)-1): if i == len(encoding_dims)//2: hidden_layer = Dense(encoding_dims[i], name='middle_layer', activation='sigmoid')(hidden_layer) else: hidden_layer = Dense(encoding_dims[i], activation='sigmoid')(hidden_layer) if len(encoding_dims) != 1: # reconstruction of the concatenated layer hidden_layer = Dense(encoding_dims[0], activation='sigmoid')(hidden_layer) # hidden layers hidden_layers = [] for j in range(0, len(input_dims)): hidden_layers.append(Dense(encoding_dims[-1]//len(input_dims), activation='sigmoid')(hidden_layer)) # output layers output_layers = [] for j in range(0, len(input_dims)): output_layers.append(Dense(input_dims[j], activation='sigmoid')(hidden_layers[j])) # autoencoder model sgd = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=False) model = Model(inputs=input_layers, outputs=output_layers) model.compile(optimizer=sgd, loss='binary_crossentropy') print(model.summary()) return model
8c8b777668e3dbdedf815da280e10c6567619d58
3,633,448
import math


def lat2y(latitude):
    """
    Translate a latitude coordinate to a projection on the y-axis, using
    spherical Mercator projection.

    :param latitude: float, latitude in degrees
    :return: float, projected y coordinate (in degree-like units)
    """
    phi = math.radians(latitude)
    # Mercator: y = ln(tan(pi/4 + phi/2)), converted back to degrees.
    return math.degrees(math.log(math.tan(math.pi / 4.0 + phi / 2.0)))
59a0a111c22c99dd23e80ed64d6355b67ecffd42
3,633,449
def normalize(train_data, test_data):
    """
    Standardize both sets per feature using statistics from the training set.

    Arrays are expected with shape (samples, features, time); the mean and
    standard deviation of each feature (axis 1) are computed over the
    training samples and time steps, then applied to both sets. Features
    with zero variance are divided by 1 instead of 0.

    Returns:
        (train_data_n, test_data_n): the standardized arrays.
    """
    feature_means = np.mean(train_data, axis=(0, 2))
    feature_std = np.std(train_data, axis=(0, 2))
    # Guard constant features against division by zero.
    safe_std = np.where(feature_std == 0, 1, feature_std)

    # BUG FIX: the original expression `data - mean / std` divided the mean
    # by the std before subtracting ('/' binds tighter than '-'); the data
    # was never actually scaled. Center first, then divide.
    mean_b = feature_means[np.newaxis, :, np.newaxis]
    std_b = safe_std[np.newaxis, :, np.newaxis]
    train_data_n = (train_data - mean_b) / std_b
    test_data_n = (test_data - mean_b) / std_b
    return train_data_n, test_data_n
42538164a6a1bfdae43e986134bc408a72aa3621
3,633,450
def buildDataForm(form=None, type="form", fields=None, title=None, data=None):
    """
    Provides easier method to build data forms using dict for each form object.

    Parameters:
        form: xmpp.DataForm object
        type: form type
        fields: list of form objects represented as dict,
            e.g. [{"var": "cool", "type": "text-single",
                   "desc": "my cool description", "value": "cool"}]
        title: form title
        data: advanced data for form. e.g. instructions (if string in the
            list), look at xmpp/protocol.py:1326
    """
    # Normalize the (previously mutable) default arguments.
    if fields is None:
        fields = []
    if data is None:
        data = []
    form = form or xmpp.DataForm(type, data, title)
    for key in fields:
        # `dict.has_key` was removed in Python 3; `in` works in both 2 and 3.
        field = form.setField(key["var"], key.get("value"), key.get("type"),
                              key.get("desc"), key.get("options"))
        if "payload" in key:
            field.setPayload(key["payload"])
        if "label" in key:
            field.setLabel(key["label"])
        # NOTE(review): "requred" looks like a typo for "required", but
        # existing callers may already pass "requred" -- confirm before
        # renaming the key.
        if "requred" in key:
            field.setRequired()
    return form
91773c2fc91766715133b01550c295e746963a27
3,633,451
import re


def calc(equation):
    """Evaluates an equation, accepting time values.

    Runs of digits/colons are split out; any token containing ':' is
    converted to seconds via to_sec() before evaluation. If a time token was
    present, the result is also rendered back as a time string via to_time().

    :param equation: arithmetic expression, e.g. "1:30 + 0:45"
    :return: result as a string; "H:MM (seconds)" form when time was involved
    """
    # Split on runs of digits and colons, dropping empty fragments so
    # operators and numbers alternate in `items`.
    items = [i for i in re.split(r'([\d\:]+)', equation) if i]
    has_time = False
    for i, v in enumerate(items):
        if ':' in v:
            has_time = True
            items[i] = to_sec(v)  # replace time literal with seconds
    # SECURITY: eval() executes arbitrary Python -- never call this with
    # untrusted input.
    result = eval(''.join(map(str, items)))
    if has_time:
        result = '%s (%s)' % (to_time(result), result)
    return str(result)
3e40e28421527627d14efb70b3da3beb8b047ff6
3,633,452
def format_input_crf(data, destination_file, model=None, distance_threshold=None, window=None):
    """
    This procedure takes in input the train and test set and then annotates
    with iob notation with the specified wordToVec model, window and threshold
    :param data: the data dictionary with keys, list of sentences
    :param destination_file:
    :param model: the trained wordToVec model
    :param distance_threshold: the maximum distance in word2vec space to consider a word part of his key concept
    :param window: the size of the span that will be annotated with the I-tag if a word in positive to a concept
    :type data: dict
    :type destination_file: str
    :type model: KeyedVectors
    :type distance_threshold: float
    :type window: int
    """
    def annotate_set(data, model, distance_threshold=.2, window=3):
        """
        This procedure is getting a list of sentences and theyr overall annotation and
        it will compute and output its full IOB-annotation
        :param dataset:
        :param data: dictionary containing the overall annotation and list of sentences
        :param model: pretrained word2vec model with
        :param distance_threshold: max distance allowed in word2vec space to consider a word as referring to a concept
        :param window: size of the span that will be annotated with the given concept when a word is within distance_threshold
        :type dataset: dict
        :type data: dict
        :type model: KeyedVectors
        :type distance_threshold: float
        :type window: int
        :return: data_iob
        :rtype data_iob: defaultdict
        """
        def word_to_vec(model, word=''):
            """
            This procedure returns the relative vector in word2vec space of the given word,
            if the word is composite return the medium point of the found ones.
            If model does not contain the word returns None
            :param model: word2vec pretrained model
            :param words: word to be translated into point in word2vec space
            :type model: KeyedVectors
            :type words: str
            :return: retval
            :rtype retval: list
            """
            # EAFP: out-of-vocabulary words yield None rather than raising.
            try:
                retval = np.array([ model[ word ] ], dtype=float)
            except KeyError:
                retval = None
            return retval

        def sentence_to_vec(model, sentence=''):
            """
            This procedure translates a list of words in a list of points in word2vec space
            :param model: word2vec pretrained model
            :param sentence: list of words
            :type model: KeyedVectors
            :type sentence: str
            :return: list of points
            :rtype: list
            """
            return [ word_to_vec(word=word, model=model) for word in sentence.split() ]

        def annotate(vec, concepts_embedding, distance_threshold=.2):
            """
            This procedure decided either a word in within minimum distance from a concept or not
            :param vec: word representation in word2vec space
            :param concepts_embedding: concept representation in word2vec space
            :param distance_threshold: max distance allowed from concept to consider work represented by the concept
            :type vec: list
            :type concepts_embedding: dict
            :type distance_threshold: float
            :return: 'I' or 'O'
            :rtype: str
            """
            # First concept within the threshold wins (dict iteration order).
            if vec is not None:
                for k, concept_embedding in concepts_embedding.items():
                    if concept_embedding is not None and distance.euclidean(vec, concept_embedding) < distance_threshold:
                        return 'I-' + k
            return 'O'

        def propagate_iob(values, concept):
            """Given a concept and the subset to annotate it returns the subset proper annotation
            :param values : list of annotation
            :param concept : concept to propagate over the list
            :type values: list
            :type concept: str
            :return : (values, annotation)
            :rtype : tuple
            """
            # First 'O' in the span becomes the B- tag; everything after it
            # becomes I-. If the span contains no 'O' at all, next() raises
            # and the bare except leaves the span unchanged.
            try :
                first_value = next(values.index(x) for x in values if x == 'O')
                values[ first_value ] = 'B-' + concept
                for value in values[ first_value: ]:
                    if value == 'O':
                        values[ values.index(value) ] = 'I-' + concept
            except:
                pass
            return values

        # Embed every concept mentioned in any key tuple (keys are tuples
        # whose tail elements are concept names).
        concept_embeddings = { c : word_to_vec(model=model, word=c) for c in
                              set(sum([ list(k[1:]) for k in data.keys() ],[])) }
        data_zipped = []
        for concepts, sentences in data.items():
            # Only the concepts present in this key participate in annotation.
            concepts_e = { c : e for c, e in concept_embeddings.items() if c in concepts }
            for sentence in sentences:
                sentence_vec = sentence_to_vec(sentence=sentence, model=model)
                sentence_iob = [ annotate(vec=vec, concepts_embedding=concepts_e, distance_threshold=distance_threshold)
                                for vec in sentence_vec ]
                # Expand each isolated I- hit into a +/- window span, clamped
                # to the sentence boundaries.
                for position in range(len(sentence_iob)):
                    if sentence_iob[ position ].startswith('I') and (
                            sentence_iob[ position - 1 ] is None or sentence_iob[ position - 1 ] == 'O'):
                        if position - window < 0:
                            sentence_iob[ 0: position + window + 1 ] = propagate_iob(
                                values=sentence_iob[ 0: position + window + 1 ],
                                concept=sentence_iob[ position ][ 2: ])
                        elif position + window > len(sentence_iob):
                            sentence_iob[ position - window: len(sentence_iob) ] = propagate_iob(
                                values=sentence_iob[ position - window: len(sentence_iob) ],
                                concept=sentence_iob[ position ][ 2: ])
                        else:
                            sentence_iob[ position - window: position + window + 1 ] = propagate_iob(
                                values=sentence_iob[ position - window: position + window + 1 ],
                                concept=sentence_iob[ position ][ 2: ])
                data_zipped += [ (sentence, sentence_iob) ]
        return data_zipped

    # Only annotate when the full word2vec configuration is supplied;
    # otherwise emit the bare tokens (one per line) without tags.
    data_iob = None
    if model and distance_threshold and window :
        data_iob = annotate_set(data=data, model=model, distance_threshold=distance_threshold, window=window)
    with open('%s/%s' % (DATA, destination_file), 'w+') as f:
        if data_iob is not None:
            # CRF input format: "token<TAB>tag", blank line between sentences.
            for sentence, annotation in data_iob:
                [ f.write(sentence.split()[ cursor ] + '\t' + annotation[ cursor ] + '\n')
                  for cursor in range(len(annotation)) ]
                f.write('\n')
        else:
            for sentences in data.values():
                for sentence in sentences:
                    [ f.write(word + '\n') for word in sentence.split() ]
                    f.write('\n')
            f.write('\n')
6224e0270cacbb331853a7aa9be5bd0f9a489e8f
3,633,453
def _GetSecurityAttributes(handle) -> win32security.SECURITY_ATTRIBUTES:
    """Returns the security attributes for a handle.

    Reads the DACL-bearing security descriptor of the window object behind
    `handle` and wraps it in a SECURITY_ATTRIBUTES structure.

    Args:
      handle: A handle to an object.
    """
    descriptor = win32security.GetSecurityInfo(
        handle,
        win32security.SE_WINDOW_OBJECT,
        win32security.DACL_SECURITY_INFORMATION,
    )
    attributes = win32security.SECURITY_ATTRIBUTES()
    attributes.SECURITY_DESCRIPTOR = descriptor
    return attributes
bfaeaa72d7912c5826f6f504076c58c45ef6b39a
3,633,454
def evalMatrix(false_friends, devectors, envectors, vm, model, output=True, n=5):
    """ Evaluates the quality of a matrix

    For each (English, German) false-friend pair, maps the German vector
    through the translation matrix `vm` and measures cosine similarity with
    the English vector; reports the average and the n highest/lowest pairs.
    Similarities are rescaled from [-1, 1] to [0%, 100%] via sim*50+50.
    NOTE: this is Python 2 code (print statements).
    """
    average_diff = 0
    similarities = []

    # Calulating the average difference of a false-friend-pair
    for pair in false_friends:
        try:
            # Skip pairs with empty vectors on either side.
            if devectors[pair[1]] == []:
                continue
            elif envectors[pair[0]] == []:
                continue
            # Map the German vector into English space through the matrix.
            mapped = mapVector(vm, model, devectors[pair[1]])
            mapped = [x.item((0, 0)) for x in mapped]
            sim = cosine_similarity(mapped, envectors[pair[0]])
            similarities.append((sim, pair))
            average_diff += sim
        except KeyError:
            # Pair missing from one of the vector dictionaries -- ignore it.
            continue

    # NOTE(review): raises ZeroDivisionError if no pair survived -- confirm
    # callers guarantee at least one valid pair.
    average_diff /= len(similarities)

    # Output
    if output:
        print "Average similarity was %.2f%% for %i elements"\
            %(average_diff*50+50, len(similarities))
        sorted_ = sorted(similarities)
        print "Highest differences:\n"
        for similarity in sorted_[:n]:
            w("%.2f%% similarity with %s - %s\n"\
                %(similarity[0]*50+50, similarity[1][0], similarity[1][1]))
        print "\nLowest differences:\n"
        sorted_.reverse()
        for similarity in sorted_[:n]:
            w("%.2f%% similarity with %s - %s\n"\
                %(similarity[0]*50+50, similarity[1][0], similarity[1][1]))
        print "\n"

    return average_diff*50+50
80e8384be6ace9ab2bc014dbeaac0eec82ef18f5
3,633,455
from typing import OrderedDict
import os
import configparser


def load_cfg_files(cfg_files):
    """Load config from config files.

    Reads every path in `cfg_files` (missing or unreadable files are logged
    and skipped), collecting the [reflexec] main section plus any
    [output-*] / [watcher-*] sections.

    Returns:
        (cfg, cfg_timestamps): cfg is {"main": OrderedDict(per-file),
        "output": {...}, "watcher": {...}}; cfg_timestamps maps each input
        path to its mtime, or None when the file could not be read.
    """
    cfg = {"main": OrderedDict(), "output": {}, "watcher": {}}
    cfg_timestamps = {}
    for filepath in cfg_files:
        # Default to None so a missing/broken file still has an entry.
        cfg_timestamps[filepath] = None
        actual_filepath = os.path.expanduser(filepath)
        if not os.path.exists(actual_filepath):
            log.debug("Config file %s does not exist", filepath)
            continue

        # read config from file
        log.debug('Reading config file "%s"', filepath)
        try:
            config_parser = get_config_parser(actual_filepath)
        except OSError as err:
            log.error("Error while reading config file %s: %s", filepath, err.strerror)
        except configparser.Error as err:
            log.error("Error while reading config file %s: %s", filepath, err)
        except UnicodeDecodeError as err:
            log.error("Error while decoding config file %s: %s", filepath, err)
        else:
            # Only processed when parsing succeeded.
            cfg["main"][filepath] = read_config_file_main_section(
                config_parser, filepath
            )
            for section_name in config_parser.sections():
                if section_name == "reflexec":
                    # Already handled as the main section.
                    continue
                if not section_name.startswith(("output-", "watcher-")):
                    log.debug("Skipping unknown config section [%s]", section_name)
                    continue
                if not config_parser[section_name]:
                    log.debug("Skipping empty config section [%s]", section_name)
                    continue
                load_config_section(section_name, cfg, filepath, config_parser)

        # register config file timestamp
        try:
            cfg_timestamps[filepath] = os.stat(actual_filepath).st_mtime
        except OSError as err:
            log.debug("Error while reading stat for config file %s: %s", filepath, err)

    return cfg, cfg_timestamps
7dcbfcfc966a5ff61872ec60377cdf6613acbe52
3,633,456
async def get_all_terms():
    """All terms with frequency count.

    Translates workflow-layer HarperExc failures into HTTP errors.
    """
    try:
        terms = workflow.get_all_terms()
    except HarperExc as exc:
        raise HTTPException(status_code=exc.code, detail=exc.message)
    return terms
05b7ec9289b4cca88ef19f84277075036e44f31e
3,633,457
def update(callback=None, path=None, method=Method.PUT, resource=None, tags=None,
           summary="Update specified resource.", middleware=None):
    # type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
    """
    Decorator to configure an operation that updates a resource.

    Usable bare (``@update``) or with arguments (``@update(path=...)``).
    """
    def decorate(func):
        operation = ResourceOperation(func, path or PathParam('{key_field}'),
                                      method, resource, tags, summary, middleware)
        # Standard response set for an update operation.
        for response in (
            Response(HTTPStatus.NO_CONTENT, "{name} has been updated."),
            Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error),
            Response(HTTPStatus.NOT_FOUND, "Not found", Error),
        ):
            operation.responses.add(response)
        return operation

    return decorate(callback) if callback else decorate
8b68084cce64073a1012317f27375106c91954cb
3,633,458
def start_shared_memory_manager() -> SharedMemoryManager:
    """Starts the shared memory manager.

    :return: Shared memory manager instance, already started and listening
        on the module-level PORT with AUTH_KEY.
    """
    manager = create_shared_memory_manager(address=("", PORT), authkey=AUTH_KEY)
    manager.start()
    return manager
026e9e59661566d680cbe2d58842636d0e4b1050
3,633,459
def filenameValidator(text):
    """
    TextEdit validator for filenames.

    Accepts the empty string; otherwise rejects any text containing a
    character that is illegal in Windows filenames.
    """
    return not any(c in '\\/:*?"<>|' for c in text)
435032f32080b52165756cf147830308537e292d
3,633,460
def add_post():
    """Upload a new post to the website

    Handles two submit actions on the same form: "preview" renders the
    markdown without saving; "post" validates, saves via Post.add_post and
    redirects to the post list (or re-renders with an error on duplicate
    title).

    :return: add_post.html
    """
    if request.method == 'POST':
        if request.form['submit'] == "preview":
            title = request.form['title']
            markdown_text = request.form['markdown_text']
            html = filter_markdown(markdown_text)
            # Markup() marks the rendered HTML as safe for the template.
            return render_template("admin/add_post.html", html=Markup(html),
                                   markdown=markdown_text, title=title)
        if request.form['submit'] == "post":
            title = request.form['title']
            markdown_text = request.form['markdown_text']
            if title and markdown_text:
                html = filter_markdown(markdown_text)
                if Post.add_post(title, html):
                    flash("Added post successfully")
                    logger.info('A new post has been added: %s', title)
                    return redirect(url_for('.posts'))
                else:
                    # Duplicate title: re-render the form with the draft.
                    flash("The title already exist")
                    return render_template("admin/add_post.html", html=html,
                                           markdown=markdown_text, title=title)
        # NOTE(review): a POST with an empty title/body or an unrecognized
        # submit value falls through here and returns None, which Flask
        # treats as an error -- confirm whether a re-render was intended.
    else:
        return render_template("admin/add_post.html")
a4202c81f4c303f58780e3bfd836298c06089f45
3,633,461
def split_model(y, X, sigma=1, lam_frac=1., split_frac=0.9, stage_one=None):
    r"""
    Fit a LASSO with a default choice of Lagrange parameter equal to
    `lam_frac` times $\sigma \cdot E(|X^T\epsilon|)$ with $\epsilon$ IID
    N(0,1) on a proportion (`split_frac`) of the data.

    Parameters
    ----------
    y : np.float
        Response vector

    X : np.float
        Design matrix

    sigma : np.float
        Noise variance

    lam_frac : float (optional)
        Multiplier for choice of $\lambda$. Defaults to 1.

    split_frac : float (optional)
        What proportion of the data to use in the first stage?
        Defaults to 0.9.

    stage_one : [np.array(np.int), None] (optional)
        Index of data points to be used in  first stage.
        If None, a randomly chosen set of entries is used based on
        `split_frac`.

    Returns
    -------
    first_stage : `lasso`
        Lasso object from stage one.

    stage_one : np.array(int)
        Indices used for stage one.

    stage_two : np.array(int)
        Indices used for stage two.
    """
    n, p = X.shape
    if stage_one is None:
        splitn = int(n * split_frac)
        indices = np.arange(n)
        np.random.shuffle(indices)
        stage_one = indices[:splitn]
        stage_two = indices[splitn:]
    else:
        # O(n log n) set difference instead of the quadratic
        # [i for i in np.arange(n) if i not in stage_one]; the result is
        # sorted ascending, matching the order the comprehension produced.
        stage_two = np.setdiff1d(np.arange(n), stage_one)
    y1, X1 = y[stage_one], X[stage_one]
    first_stage = standard_lasso(y1, X1, sigma=sigma, lam_frac=lam_frac)
    return first_stage, stage_one, stage_two
23f02d0baedf4800d0f4a4eaaff95cd37db104a3
3,633,462
def makepdb(title, parm, traj):
    """Build a cpptraj script that writes the first trajectory frame as a PDB.

    The script loads *parm*, reads frame 1 of *traj*, centers and RMS-fits
    on backbone atoms, strips water and counter-ions, and writes
    ``<title>.pdb``.
    """
    return f"""parm {parm}
trajin {traj} 0 1 1
center
rms first @CA,C,N
strip :WAT
strip :Na+
strip :Cl-
trajout {title}.pdb pdb
run
exit"""
8ca8c95adef74525ac6018146418dd5e2314ff94
3,633,463
def get_face_position_with_eye(image):
    """Detect faces in *image*, keeping only those that also contain eyes.

    Returns a list of (x, y, w, h) face rectangles in pixel coordinates.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    candidate_faces = FACE_CASCADE.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5,
                                                    minSize=(50, 50))
    faces_with_eyes = []
    for (x, y, w, h) in candidate_faces:
        # Restrict the eye search to the face region to cut false positives.
        face_roi = gray[y:y+h, x:x+w]
        eyes = EYE_CASCADE.detectMultiScale(face_roi, minSize=(25, 25))
        if len(eyes):
            faces_with_eyes.append((x, y, w, h))
    return faces_with_eyes
4a54ef0b5be36bfb9f5b6539458d1f997f5c5f70
3,633,464
def get_pandas_df(data, validate=True):
    """Read every observation in an SDMX file into Pandas DataFrames.

    :param data: Path, URL or SDMX data file as string
    :param validate: Validation of the XML file against the XSD (default: True)
    :return: A dict mapping dataset name to its `Pandas Dataframe \
        <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html>`_
    """
    parsed = read_xml(data, "Data", validate=validate)
    frames = {}
    for name, dataset in parsed.items():
        frames[name] = dataset.data
    return frames
1ee1edc9ce2931066675ebe8b0f57ff920749bd3
3,633,465
def basic_collate(batch):
    """Collate a batch of (tensor, label) pairs for a DataLoader.

    Args:
        batch: (list) of (input, label) pairs; each input is a Tensor of
            identical shape, each label is typically a string.

    Returns:
        minibatch: (Tensor) inputs stacked along a new leading dimension.
        targets: (tuple[str]) labels in batch order (note: a tuple, not a
            list — zip(*...) yields tuples).
    """
    # zip(*batch) transposes the list of pairs directly; the original
    # comprehension [(a, b) for (a, b) in batch] was a redundant copy.
    minibatch, targets = zip(*batch)
    minibatch = stack(minibatch, dim=0)
    return minibatch, targets
7e5f36e20125effaa310654856dc84199dbcb169
3,633,466
import random


def secure_randint(min_value, max_value, system_random=None):
    """Return a cryptographically strong random integer N with
    min_value <= N <= max_value.

    Backed by random.SystemRandom (os.urandom / /dev/urandom). Callers
    may supply their own SystemRandom instance via *system_random* to
    avoid constructing one per call.
    """
    rng = system_random or random.SystemRandom()
    return rng.randint(min_value, max_value)
f4b61457c6e384e6185a5d22d95539001903670d
3,633,467
import scipy.sparse as sps
import numpy as np
import pandas as pd
import os


def read_UCM_cold_all_with_user_act(num_users, root_path="../data/"):
    """Build the full user content matrix: age + region + user activity.

    :param num_users: total number of users (row count of every UCM)
    :param root_path: directory containing data_UCM_age.csv,
        data_UCM_region.csv and data_train.csv
    :return: all the UCM in csr format
    """
    # Reading age data
    df_age = pd.read_csv(os.path.join(root_path, "data_UCM_age.csv"))
    user_id_list = df_age['row'].values
    age_id_list = df_age['col'].values
    UCM_age = sps.coo_matrix((np.ones(len(user_id_list)), (user_id_list, age_id_list)),
                             shape=(num_users, np.max(age_id_list) + 1))

    # Reading region data
    df_region = pd.read_csv(os.path.join(root_path, "data_UCM_region.csv"))
    user_id_list = df_region['row'].values
    region_id_list = df_region['col'].values
    UCM_region = sps.coo_matrix((np.ones(len(user_id_list)), (user_id_list, region_id_list)),
                                shape=(num_users, np.max(region_id_list) + 1))

    # Reading user_act data from URM: per-user interaction total,
    # scaled into [0, 1] (the "- 0" terms make the implicit min of 0 explicit).
    df_original = pd.read_csv(os.path.join(root_path, "data_train.csv"))
    user_act = df_original.groupby(by='row')['data'].sum()
    user_act = (user_act - 0) / (user_act.max() - 0)
    user_id_list = user_act.index
    feature_list = [0] * len(user_id_list)
    data_list = user_act.values.astype(np.float32)
    # Single-column matrix: one continuous "activity" feature per user
    UCM_user_act = sps.coo_matrix((data_list, (user_id_list, feature_list)),
                                  shape=(num_users, 1))

    # Create UCM_all_dict
    UCM_all_dict = {"UCM_age": UCM_age, "UCM_region": UCM_region, "UCM_user_act": UCM_user_act}
    # Compress the heavy-tailed activity with log1p, then bucket into 50 bins
    UCM_all_dict = apply_transformation_UCM(UCM_all_dict,
                                            UCM_name_to_transform_mapper={"UCM_user_act": np.log1p})
    UCM_all_dict = apply_discretization_UCM(UCM_all_dict,
                                            UCM_name_to_bins_mapper={"UCM_user_act": 50})

    # Merge UCMs
    UCM_all = build_UCM_all_from_dict(UCM_all_dict)
    return UCM_all
968e21fa006c130ed33cef90943d6c0f1cadcc6b
3,633,468
def get_runner_image_url(benchmark, fuzzer, cloud_project):
    """Return the URL of the docker runner image used to fuzz *benchmark*
    with *fuzzer*, under the registry derived from *cloud_project*."""
    base_tag = experiment_utils.get_base_docker_tag(cloud_project)
    # OSS-Fuzz benchmarks live under a dedicated path keyed by project name.
    if is_oss_fuzz(benchmark):
        template = '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'
        return template.format(base_tag=base_tag,
                               fuzzer=fuzzer,
                               project=get_project(benchmark))
    template = '{base_tag}/runners/{fuzzer}/{benchmark}'
    return template.format(base_tag=base_tag,
                           fuzzer=fuzzer,
                           benchmark=benchmark)
ce958eb66743f265edb81b9e11e40a34ba718660
3,633,469
def extend_gmx_npt_prod(job):
    """Build the shell command extending the npt production run by 1 ns.

    NOTE(review): *job* is unused here; presumably kept for signature
    compatibility with the project's other flow operations — confirm.
    """
    # Extend the npt run by 1000 ps (1 ns), then continue the simulation.
    extend_cmd = "gmx convert-tpr -s npt_prod.tpr -extend 1000 -o npt_prod.tpr"
    run_cmd = _mdrun_str("npt_prod")
    return " && ".join([extend_cmd, run_cmd])
1775d63dce08b590c8feeacf966cb40e24f32d14
3,633,470
from operator import mul
from operator import inv


def is_rotation(R, tol=1e-5):
    """Returns true if R is a rotation matrix, i.e. is orthogonal to the given
    tolerance and has + determinant.

    NOTE(review): `operator.mul`/`operator.inv` are scalar multiply and
    bitwise-invert; this code reads like it expects so3-style matrix
    `mul`/`inv` helpers (where inv is the transpose, making mul(R, inv(R))
    an orthogonality check) — confirm the intended imports.
    """
    # R * R^-1 should equal the identity (element-wise, within tol)
    RRt = mul(R, inv(R))
    err = vectorops.sub(RRt, identity())
    if any(abs(v) > tol for v in err):
        return False
    # Orthogonal matrices with negative determinant are reflections,
    # not rotations.
    if det(R) < 0:
        return False
    return True
4d1c9ba52ca49ba5977ce6e85974abb3962f1a5b
3,633,471
import time


def date():
    """Return today's date as a string like 'January 01, 2020'."""
    fmt = "%B %d, %Y"
    return time.strftime(fmt)
b26cf8a5012984bbd76f612b19f79a3c387b9d27
3,633,472
def contained_circle_aq(poly):
    """
    Areal quotient based on the maximum contained circle: the shape's area
    divided by the area of the largest circle fully inscribed in it.
    """
    vertices = _get_pointset(poly)
    radius, _center = _mcc(vertices)
    circle_area = _PI * radius ** 2
    return poly.area / circle_area
a019405ae2a34b25cc34574a83c30dfe577a044c
3,633,473
def kubernetes_clusters(request, tenant):
    """
    On ``GET`` requests, return a list of the deployed Kubernetes clusters
    for the tenancy.

    On ``POST`` requests, create a new Kubernetes cluster.
    """
    # Feature gate: without a configured Cluster API provider this endpoint
    # behaves as if it does not exist (404 with a machine-readable code).
    if not cloud_settings.CLUSTER_API_PROVIDER:
        return response.Response(
            {
                "detail": "Kubernetes clusters are not supported.",
                "code": "unsupported_operation"
            },
            status = status.HTTP_404_NOT_FOUND
        )
    # Both sessions are context managers so resources are released even on
    # serializer validation errors (is_valid raises).
    with request.auth.scoped_session(tenant) as session:
        with cloud_settings.CLUSTER_API_PROVIDER.session(session) as capi_session:
            if request.method == "POST":
                input_serializer = serializers.CreateKubernetesClusterSerializer(
                    data = request.data,
                    context = { "session": session, "capi_session": capi_session }
                )
                input_serializer.is_valid(raise_exception = True)
                cluster = capi_session.create_cluster(**input_serializer.validated_data)
                # Echo the freshly created cluster back to the caller
                output_serializer = serializers.KubernetesClusterSerializer(
                    cluster,
                    context = { "request": request, "tenant": tenant }
                )
                return response.Response(output_serializer.data)
            else:
                # Any non-POST method (normally GET) lists all clusters
                serializer = serializers.KubernetesClusterSerializer(
                    capi_session.clusters(),
                    many = True,
                    context = { "request": request, "tenant": tenant }
                )
                return response.Response(serializer.data)
f928a2b438fcf57bf1e74ce277ab8bc921cdc28d
3,633,474
def clip_to_spec(value, spec):
    """Clips value to a given bounded tensor spec.

    Args:
      value: (tensor) value to be clipped.
      spec: (BoundedTensorSpec) spec containing min. and max. values for clipping.

    Returns:
      clipped_value: (tensor) `value` clipped to be compatible with `spec`.
    """
    lower, upper = spec.minimum, spec.maximum
    return tf.clip_by_value(value, lower, upper)
9f09cb09d00f6fd3bcf6f2dccd982befd26510e3
3,633,475
def publish_dataset(
    datalad_dataset_dir,
    dryrun=False
):
    """
    Function that publishes the dataset repository to GitHub
    and the annexed files to a SSH special remote.

    Parameters
    ----------
    datalad_dataset_dir : string
        Local path of Datalad dataset to be published

    dryrun : bool
        If `True`, only generates the commands
        and do not execute them (Default: `False`)

    Returns
    -------
    `res` : string
        Output of `datalad.api.push()` (None when `dryrun` is True)

    `cmd` : string
        Equivalent bash command
    """
    res = None
    if not dryrun:
        # Push both git history and annexed content to the 'github' sibling
        res = datalad.api.push(
            dataset=datalad_dataset_dir,
            to='github'
        )
    cmd = f'datalad push --dataset "{datalad_dataset_dir}" --to github'
    return res, cmd
1f65749e2d4bbc26d8929684791e38e8579c2c58
3,633,476
import math


def convert_weight(prob):
    """Convert a (log10-domain) probability into a WFST arc weight.

    The weight is the negated natural-log equivalent: -ln(10) * prob.
    Accepts anything float() can parse (e.g. numeric strings).
    """
    return float(prob) * math.log(10.0) * -1.0
d9f6c38fd2efa49ddd515878a0943f9c82d42e1a
3,633,477
def is_exception(ocdid):
    """Check whether given ocdid is contained in the exception list

    Keyword arguments:
    ocdid -- ocdid value to check if exists in the exception list

    Returns:
    True -- ocdid exists
    False -- ocdid not found (could be candidate for new ocdid)
    """
    # The membership test is already a bool; the previous
    # if/else returning True/False was redundant.
    return ocdid in exceptions
bde5beaf3e9f5eff4489972036820cf5b758ceea
3,633,478
import numpy


def retrieve_m_hf(eri):
    """Retrieves TDHF matrix directly.

    :param eri: integral container supporting ``tdhf_diag()`` and 4-index
        string lookups such as ``eri["knmj"]`` — assumed to return 2D
        arrays of a common shape (TODO confirm).
    :return: the assembled TDHF matrix as a 2D numpy array.
    """
    d = eri.tdhf_diag()
    # NOTE(review): the sign pattern suggests the usual [[A, B], [-B, -A]]
    # TDHF block structure built from Coulomb (2*...) and exchange terms —
    # confirm against the eri implementation.
    m = numpy.array([
        [d + 2 * eri["knmj"] - eri["knjm"], 2 * eri["kjmn"] - eri["kjnm"]],
        [- 2 * eri["mnkj"] + eri["mnjk"], - 2 * eri["mjkn"] + eri["mjnk"] - d],
    ])
    # Interleave the 2x2 block axes with the orbital axes, then flatten
    # into a single square matrix of side m.shape[0] * m.shape[2].
    return m.transpose(0, 2, 1, 3).reshape(
        (m.shape[0] * m.shape[2], m.shape[1] * m.shape[3])
    )
ad407f0294f906125ef6b5ecd7f8300114afb4a5
3,633,479
def laplacian(A):
    """
    Returns the laplacian matrix from a given adjacency matrix,
    computed as L = D - A where D is the degree matrix of A.

    Parameters
    ----------
    A : Tensor
        an adjacency matrix

    Returns
    -------
    Tensor
        the laplacian matrix
    """
    D = degree(A)
    return D - A
75fd7985572a3612b238fbd90ad706b7d2c9d503
3,633,480
import os


def annotation_to_dataframe(annotation_number,filename):
    """
    input:
        - the number of the annotation (written in the xml)
        - the filename (ex: tumor_110)
    output: dataframe with 3 columns:
        1_ the order of the vertex
        2_ the value of the X coordinate of the vertex
        3_ the value of the Y coordinate of the vertex
    The values of X and Y are the values in the WSI

    Relies on module-level ``annotations_folder`` and expects an ASAP
    annotation file named ``<filename>.tif.xml`` inside it.
    """
    with open(os.path.join(annotations_folder,filename)+'.tif.xml') as xml_file:
        data_dict = xmltodict.parse(xml_file.read())
    # Navigate the ASAP XML schema down to this annotation's vertex list
    nodes = data_dict['ASAP_Annotations']['Annotations']['Annotation'][annotation_number]['Coordinates']['Coordinate']
    length = len(nodes)
    coord = np.zeros((length,3))
    for i in range(length):
        iter_ = nodes[i]
        # '@...' keys are XML attributes of each <Coordinate> element;
        # the string values are coerced to float by the ndarray assignment.
        coord[i] = np.array([iter_['@Order'], iter_['@X'], iter_['@Y']])
    df = pd.DataFrame(data=coord, columns=['Order', "X",'Y'])
    return df
52b766d14ddf476c1017ae084cf91a31cfa715e3
3,633,481
def GetDiv(number):
    """Factor *number* into its prime factors (with multiplicity).

    Trial division: divide out 2, 3, 4, ... while the candidate squared
    does not exceed the remaining value; whatever remains (> 1) is prime.

    Returns:
        list[int]: prime factors in non-decreasing order; empty for
        number <= 1.
    """
    factors = []
    candidate = 2
    while candidate * candidate <= number:
        if number % candidate == 0:
            number //= candidate
            factors.append(candidate)
        else:
            candidate += 1
    # Leftover value > 1 is itself prime (no divisor <= sqrt remained)
    if number > 1:
        factors.append(number)
    return factors
fbbd4b9e73ebe9af6ef6dcc0151b8d241adbb45d
3,633,482
def my_decorator(view_func):
    """Decorator that logs each invocation before delegating to *view_func*.

    The wrapper preserves the decorated function's metadata
    (__name__, __doc__, ...) via functools.wraps — important for
    frameworks that key behavior on the view function's name.
    """
    import functools

    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        print('装饰器被调用了')
        return view_func(request, *args, **kwargs)
    return wrapper
1e857263d6627f1a2216e0c2573af5935ba58637
3,633,483
def make_rect_containing(points: [Point]):
    """
    Computes the smallest rectangle containing all the passed points.

    :param points: `[Point]` non-empty list of points with .x/.y attributes
    :return: `Rect` anchored at the min corner, sized to span all points
    :raises ValueError: if *points* is empty
    """
    if not points:
        raise ValueError('Expected at least one point')

    first_point = points[0]
    min_x, max_x = first_point.x, first_point.x
    min_y, max_y = first_point.y, first_point.y

    for point in points[1:]:
        min_x, max_x = min(min_x, point.x), max(max_x, point.x)
        min_y, max_y = min(min_y, point.y), max(max_y, point.y)

    # BUG FIX: the height was previously computed as max_y - min_x,
    # mixing the axes; it must be max_y - min_y.
    return Rect(
        Point(min_x, min_y),
        Size(max_x - min_x, max_y - min_y)
    )
b3dbcad3473551837e72ea7ac4257b07276ed5de
3,633,484
import os


def check_documentation(gvar):
    """
    Check for complete documentation.

    Scans the man-page tree next to gvar['command_dir'], counts how often
    each doc fragment is included via roff '.so' directives, cross-references
    that against the command keys and actions, and prints every command
    parameter that is undocumented, never included, or never called.
    """
    # When the caller is only retrieving options there is nothing to check.
    if gvar['retrieve_options']:
        return []

    def scan_1_doc_dir(gvar, man_path):
        # First pass: register every doc file with its include-dir and a
        # zero include count.
        for fn in os.listdir(man_path):
            if os.path.isdir('%s/%s' % (man_path, fn)):
                scan_1_doc_dir(gvar, '%s/%s' % (man_path, fn))
            elif os.path.isfile('%s/%s' % (man_path, fn)):
                gvar['docs'][fn] = {'dir': '../%s' % man_path[len(gvar['command_dir'])-3:], 'count': 0}

    def scan_2_doc_dir(gvar, man_path):
        # Second pass: count '.so <dir>/<file>' inclusions of each known
        # doc file across all man pages (len(split) - 1 == occurrences).
        for fn in os.listdir(man_path):
            if os.path.isdir('%s/%s' % (man_path, fn)):
                scan_2_doc_dir(gvar, '%s/%s' % (man_path, fn))
            elif os.path.isfile('%s/%s' % (man_path, fn)):
                fd = open('%s/%s' % (man_path, fn))
                doc_data = fd.read()
                fd.close()
                for fn2 in gvar['docs']:
                    words = doc_data.split('.so %s/%s' % (gvar['docs'][fn2]['dir'], fn2))
                    gvar['docs'][fn2]['count'] += len(words)-1

    gvar['docs'] = {}
    scan_1_doc_dir(gvar, os.path.realpath('%s/../man' % gvar['command_dir']))
    scan_2_doc_dir(gvar, os.path.realpath('%s/../man' % gvar['command_dir']))

    # Collect, per command key, which object/action combinations use it.
    # retrieve_options is toggled so the action callables report their keys
    # instead of executing.
    cks = {}
    gvar['retrieve_options'] = True
    for object in gvar['actions']:
        for action in gvar['actions'][object][1]:
            for ck in gvar['actions'][object][1][action](gvar):
                if ck not in cks:
                    cks[ck] = []
                cks[ck].append('%s/%s' % (object, action))
    gvar['retrieve_options'] = False

    # NOTE(review): stdout/stderr of this awk call are never used, and
    # '-r' is not a standard awk pattern flag — confirm intent.
    p = Popen([
        'awk',
        '-r',
        '/^.so/',
        '%s/../man/*' % gvar['command_dir']
        ], stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()

    # Build the per-key report: documented?, include count (ref2) and
    # callers (ref1, filled in below).
    ckx = {}
    for ix in range(len(gvar['command_keys'])):
        ck = gvar['command_keys'][ix][0]
        ckx[ck] = {'ix': ix, 'doc': False, 'ref1': [], 'ref2': 0}
        # Doc file name derived from the long option: drop the leading
        # dash, swap '-' for '_', lower-case, append '.so'.
        fn = '%s.so' % gvar['command_keys'][ix][1][1:].replace('-', '_').lower()
        if fn in gvar['docs']:
            ckx[ck]['doc'] = True
            ckx[ck]['ref2'] = gvar['docs'][fn]['count']

    fmt_string = '%-48s %-10s %-10s %8s (%s) %s'
    print(fmt_string % ('Command Parameter', 'Short Name', 'Documented', 'Includes', 'Count', 'Calls'))
    for ck in ckx:
        ix = ckx[ck]['ix']
        if ck in cks:
            ckx[ck]['ref1'] = cks[ck]
        # Report only the problem rows: undocumented, never called, or
        # never included by any man page.
        if ckx[ck]['doc'] == False or len(ckx[ck]['ref1']) < 1 or ckx[ck]['ref2'] < 1:
            print(fmt_string % (gvar['command_keys'][ix][1], gvar['command_keys'][ix][0], ckx[ck]['doc'], ckx[ck]['ref2'], len(ckx[ck]['ref1']), ckx[ck]['ref1']))
f38a4c31948e212d98f4a78905454d807ac4e78f
3,633,485
def check_login():
    """检查登陆状态 — report the current login state as JSON.

    A user name stored in the session means the user is logged in.
    """
    user_name = session.get("user_name")
    if user_name is None:
        # No session entry: the client is not authenticated
        return jsonify(errno=RET.SESSIONERR, errmsg="false")
    return jsonify(errno=RET.OK, errmsg="true", data={"name": user_name})
f650c054ffaa23164e2697de706246072aba3146
3,633,486
def calc_delta(startdate: dt.date, enddate: dt.date, no_of_ranges: int) -> dt.timedelta:
    """Split the span between two dates into *no_of_ranges* equal ranges
    and return the width of a single range as a timedelta."""
    total_span = enddate - startdate
    return total_span / no_of_ranges
3522e6059c69dbae175c768104c9fe1c55f9d764
3,633,487
def get_email_config():
    """Returns email notifier related configuration, collected from the
    application's context.config under the SMTP_*/EMAIL_* keys."""
    key_map = {
        "hostname": "SMTP_HOSTNAME",
        "port": "SMTP_PORT",
        "username": "SMTP_USERNAME",
        "password": "SMTP_PASSWORD",
        "sender": "EMAIL_SENDER",
        "recipients": "EMAIL_RECIPIENTS",
    }
    return {name: context.config[cfg_key] for name, cfg_key in key_map.items()}
7ede3901ba8896f1b0ad49ab726d23c541548510
3,633,488
from typing import List


def check_status_instances(instance_names: List[str] = None,
                           filters: List[str] = None,
                           secrets: Secrets = None,
                           force: bool = False,
                           status: str = None,
                           configuration: Configuration = None):
    """
    Check whether the named Nova instances are all in the given status.

    For status 'Active': returns True only if every named instance was
    found in Active state. For status 'Shutoff': returns True only if
    none of the named instances were found in Shutoff state (i.e. the
    lookup matched nothing) — NOTE(review): this inversion looks
    intentional for the chaos experiment but should be confirmed.

    NOTE(review): *filters* and *force* are unused here; presumably kept
    for interface parity with sibling activities — confirm.

    :raises FailedActivity: when *status* is not supplied.
    """
    if status is None:
        raise FailedActivity(
            "You need to tell us what status: Active or Shutoff")
    list_found = []
    list_not_found = []
    nova_client = openstack_novaclient("nova-client", configuration, secrets)
    for instance in instance_names:
        # Server list filtered by both status and exact name; an empty
        # result means the instance is not in the requested status.
        server = nova_client.servers.list(
            search_opts={'status': status, 'name': instance})
        if server:
            list_found.append(instance)
        else:
            list_not_found.append(instance)
    if status == 'Active':
        if len(list_found) == len(instance_names):
            return True
        else:
            return False
    if status == 'Shutoff':
        if len(list_not_found) == len(instance_names):
            return True
        else:
            return False
5cadd77aa453335da416938799223e21a4de5535
3,633,489
def format_seconds(seconds: int) -> str:
    """
    Render a second count as H:MM:SS with the hour right-aligned in a
    4-character field, e.g. 3661 -> "   1:01:01".
    """
    hrs, remainder = divmod(seconds, 3600)
    mins, secs = divmod(remainder, 60)
    return f"{hrs:4d}:{mins:02d}:{secs:02d}"
766d244b9927cca21ea913e9c5e1641c16f17327
3,633,490
import os
from typing import Dict
import types


def computeCMSstats( Ddata, thinSfx, scenario, putativeMutPop = None, sampleSize = 120,
                     pop2name = pop2name, pop2sampleSize = {}, oldMerged = False,
                     DdataMerged = None, outFile = None, statsSfx = '',
                     simsOut = 'simsOut', stats = CMSBins.CMSstats, ihsSfx = '',
                     ihhBackgroundArgs = None, limitToPop = None, getio = None ):
    """Compute CMS stats for all SNPs in all replicas of a given scenario.

    Params:

       Ddata - the root directory containing simulations and their analyses
       thinSfx - specifies which thinning version of the simulations are used.
       scen - the simulation scenario.  Here we compute stats for all replicas within
          that scenario, for all SNPs within each replica.
       mutPop - if the scenario neutral, this gives the putative selected population;
          that is, we will do analyses as if we were thinking (wrongly) that this is
          actually a selected region with selection in mutPop.   This is needed because,
          when we localize a region, we currently assume that selection occurred in one
          particular population.  So to explore false positive rates, we need to run CMS
          on a neutral region while assuming selection in a particular population.
       oldMerged - determines the path to the merged.data/ file -- is it located under
          an old or a new (more uniform) scheme.

    See also: DefineRulesTo_computeCMSstats()
    """
    if not Ddata.endswith( '/' ): Ddata += '/'
    # Neutral scenarios carry no real selected pop; fall back to the scenario's
    if putativeMutPop == None: putativeMutPop = scenario.mutPop
    putativeMutPop = int( putativeMutPop )
    if not DdataMerged: DdataMerged = Ddata

    snpStatsDir = os.path.join( Ddata, 'snpStats' + thinSfx, scenario.scenDir() )
    snpStatsDirMerged = os.path.join( DdataMerged, 'snpStats' + thinSfx, scenario.scenDir() )

    # Input: the merged per-SNP table produced by addIHHDiff
    mergedData = GetCreates( addIHHDiff, Ddata = DdataMerged,
                             **Dict( 'scenario putativeMutPop simsOut statsSfx thinSfx pop2name ihsSfx' ) )[0]
    if oldMerged:
        # Legacy layout for merged.data/ (pre-uniform naming scheme)
        mergedData = os.path.join( DdataMerged,
                                   AddFileSfx( 'merged.data/',
                                               '%dky' % ( scenario.mutAge if not scenario.isNeutral() else 10 ),
                                               scenario.scenName(),
                                               putativeMutPop if scenario.isNeutral() else None ) )

    # Output: one row per SNP per replica
    cmsStatsRawFN = outFile if outFile else os.path.join( snpStatsDir,
                                                          AddFileSfx( 'cmsStatsRaw.tsv', statsSfx,
                                                                      putativeMutPop, ihsSfx ) )

    args = Dict( 'Ddata thinSfx scenario putativeMutPop simsOut statsSfx pop2name ihsSfx' )
    if ihhBackgroundArgs is not None:
        # Use a different (background) scenario's iHHDiff normalization stats
        args = MergeDicts( ihhBackgroundArgs, Dict( 'scenario putativeMutPop' ) )
    ihhDiff_sumsByFreqFN = getFN_ihhDiff_sumsByFreq( **args )
    ihhDiff_sumsFN = getFN_ihhDiff_sums( **args )

    fileDescrs = { cmsStatsRawFN:
                   ( 'CMS stats for all SNPs in all replicas of scenario $scenario, assuming selection happened in '
                     + pop2name[putativeMutPop] + '. Each line is one SNP in one replica.',
                     ( ('Chrom', 'Chromosome or simulation replica of this SNP' ),
                       ('gdPos', 'Genetic map position of this SNP on its chromosome, in cM'),
                       ('iHS', 'Both iHS' ),
                       ( 'Pos', 'Position on chromosome, plus 1e6' ),
                       ( 'StdDiff', 'deltaiHH: ( Both_iHH_A - Both_iHH_D ), normalized by SNP frequency. '
                         'Both_iHH_A is the sum of iHH_A_left and iHH_A_right, etc.' ),
                       ( 'meanFst', 'Mean Fst comparison between the selected population and the other populations.' ),
                       ( 'derFreq', 'derived allele frequency of this SNP in the selected population' ),
                       ( 'max_xpop', 'the highest xpop comparison between the selected pop and the other pops' ),
                       ( 'meanAnc', 'the mean ancestral frequency in the non-selected pops. deltaDAF is derived freq in '
                         'the selected pop, minus this.' ),
                       ( 'freqDiff', 'deltaDAF: Difference between the derived allele frequency in the selected population, '
                         'and the average of derived allele frequencies in the non-selected populations.' ) ) ) }

    # getio mode: describe inputs/outputs for the pipeline without running
    if getio: return dict( depends_on = ( mergedData, ihhDiff_sumsByFreqFN, ihhDiff_sumsFN ) +
                           ( ( pop2sampleSize, ) if isinstance( pop2sampleSize, types.StringTypes ) else () ),
                           creates = cmsStatsRawFN,
                           splitByCols = { mergedData: dict( keyCols = () ) },
                           mediumRuleNameSfx = ( scenario.scenDir(), putativeMutPop ),
                           fileDescrs = fileDescrs )

    if isinstance( pop2sampleSize, types.StringTypes ):
        pop2sampleSize = dict( IDotData( pop2sampleSize ) )
        dbg( 'pop2sampleSize' )

    theData = IDotData(mergedData)

    popNames = sorted( pop2name.values() )
    popNums = sorted( pop2name.keys() )
    minPopNum = popNums[ 0 ]

    # All ordered population-pair labels (optionally restricted to limitToPop)
    popPairs = [ '%s_%s' % ( popNames[ pop1idx ], popNames[ pop2idx ] )
                 for pop1idx in range( len( popNames ) )
                 for pop2idx in range( pop1idx+1, len( popNames ) )
                 if limitToPop is None or limitToPop in ( popNames[ pop1idx ], popNames[ pop2idx ] ) ]

    # Pairs comparing the putative selected pop against each other pop;
    # the sign records whether the selected pop is first in the pair label.
    xpopComparisonPairs = []
    xpopComparisonSigns = []
    for otherPop in popNums:
        if otherPop != putativeMutPop:
            popAname, popBname = pop2name[ putativeMutPop ], pop2name[ otherPop ]
            if popAname > popBname: popAname, popBname = popBname, popAname
            xpopComparisonPairs.append( '%s_%s' % ( popAname, popBname ) )
            xpopComparisonSigns.append( 1 if popAname == pop2name[ putativeMutPop ] else -1 )

    dbg( 'zip(xpopComparisonPairs,xpopComparisonSigns)' )

    #
    # For normalizing iHHDiff by frequency bin, get the mean and stddev
    # within each frequency bin.
    #

    ihhDiff_statsByFreq = IDotData( ihhDiff_sumsByFreqFN ).addMeanStdCols( 'iHHDiff' ).toDotData()
    ihhDiff_stats = IDotData( ihhDiff_sumsFN )
    ihhDiff_stats = ihhDiff_stats.addMeanStdCols( 'iHHDiff' )
    ihhDiff_stats = ihhDiff_stats[0]
    totStd = ihhDiff_stats.iHHDiff_std

    ihhDiffMeans = dict( ( r.freqBinId, r.iHHDiff_mean ) for r in ihhDiff_statsByFreq )

    dbg( 'ihhDiffMeans ihhDiff_stats totStd' )

    # NOTE(review): pre-existing author note kept for context —
    # "ok so the next thing is to rewrite this, and you should not need
    #  to make a one-line DotData. and make sure each record we're writing
    #  is a value. then, check that all stat values we compute are almost
    #  equal to each other."
    with IDotData.openForWrite( cmsStatsRawFN,
                                headings = 'Chrom Pos gdPos iHS StdDiff meanFst derFreq max_xpop meanAnc freqDiff iHS_nanReason StdDiff_nanReason' ) \
            as cmsStatsRaw:

        for r in theData:
            # Ancestral frequency per pop (FREQ1 columns hold derived freqs)
            pop2ancFreq = dict( ( popNum, 1 - r['FREQ1 %d' % popNum ] ) for popNum in popNums )
            Fst = fst_oneSnp( **Dict( 'sampleSize pop2name pop2ancFreq pop2sampleSize' ) )
            # nan Fst comparisons contribute 0 to the mean
            meanFst = np.mean([ Fst[comp] if not np.isnan( Fst[comp] ) else 0.0
                                for comp in xpopComparisonPairs ])

            # Max xpop: highest signed L/R EHH-logratio deviation among
            # comparisons involving the selected pop.
            xpopAll = np.zeros( 2 * len( xpopComparisonPairs ) )
            j = 0
            for xpopComparisonPair, xpopComparisonSign in zip( xpopComparisonPairs, xpopComparisonSigns ):
                xpopAll[ j ] = xpopComparisonSign * r['L AllEHH logratio Deviation ' + xpopComparisonPair]
                xpopAll[ j + 1 ] = xpopComparisonSign * r['R AllEHH logratio Deviation ' + xpopComparisonPair]
                j += 2
            max_xpop = np.nanmax(xpopAll)

            derFreq = r['FREQ1 %d' % putativeMutPop]
            ancFreq = 1 - derFreq

            ihs = r['Both iHS']
            # Record why iHS is nan: 0 = derived freq too low, 1 = too high
            ihsNanReason = -1
            if np.isnan( ihs ):
                if derFreq < .05: ihsNanReason = 0
                if derFreq > 0.95: ihsNanReason = 1

            aPopPair = 'European_WestAfrican' if 'European_WestAfrican' in popPairs else popPairs[0]
            gdPos = r['SNP pos (cM) ' + aPopPair]
            bpPos = r['CHROM_POS %d' % minPopNum]

            # Calculate iHH difference
            iHHDiff = r['Both iHH_D'] - r['Both iHH_A']
            # Frequency-bin-normalized deltaiHH
            StdDiff = ( r.iHHDiff - ihhDiffMeans[ r.freqBinWith01Id ] ) / totStd

            # Mean ancestral freq across the non-selected pops
            mean_anc = 0
            for popNum in popNums:
                if popNum != putativeMutPop:
                    mean_anc += (1 - r['FREQ1 %d' % popNum])
            mean_anc /= ( len( popNums ) - 1 )

            # Freq diff (deltaDAF)
            freqDiff = derFreq - (1 - mean_anc)

            # Make new pos column
            Pos = r['CHROM_POS %d' % minPopNum]

            Chrom = float( r.replicaNum if 'replicaNum' in theData.headings
                           else ( r[ 'Chrom ' + aPopPair ] if np.isnan( r.Chrom) else r.Chrom ) )

            cmsStatsRaw.writeRecord( Chrom, Pos, gdPos, ihs, StdDiff, meanFst, derFreq,
                                     max_xpop, mean_anc, freqDiff,
                                     # nan reasons
                                     ihsNanReason, ihsNanReason )

    # Every requested stat must be among the data columns just written
    assert set( stats ) <= set( cmsStatsRaw.headings[3:] )
2f430c03e5fb4707caaa3d0430b02768808a3e61
3,633,491
def build_ddsc(inputs, num_classes, preset_model='DDSC', frontend="ResNet101", weight_decay=1e-5, is_training=True, pretrained_dir="models"):
    """
    Builds the Dense Decoder Shortcut Connections model.

    Arguments:
      inputs: The input tensor
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes
      frontend: name of the pretrained backbone used for feature extraction
      weight_decay: NOTE(review) accepted but unused in this body — confirm
      is_training: forwarded to the frontend builder
      pretrained_dir: directory holding the pretrained frontend weights

    Returns:
      Dense Decoder Shortcut Connections model (the logits tensor) and the
      frontend's weight-initialization function.
    """
    logits, end_points, frontend_scope, init_fn = frontend_builder.build_frontend(inputs, frontend, pretrained_dir=pretrained_dir, is_training=is_training)

    ### Adapting features for all stages (one decoder per backbone stage)
    decoder_4 = EncoderAdaptionBlock(end_points['pool5'], n_filters=1024)
    decoder_3 = EncoderAdaptionBlock(end_points['pool4'], n_filters=512)
    decoder_2 = EncoderAdaptionBlock(end_points['pool3'], n_filters=256)
    decoder_1 = EncoderAdaptionBlock(end_points['pool2'], n_filters=128)

    decoder_4 = SemanticFeatureGenerationBlock(decoder_4, D_features=1024, D_prime_features = 1024 / 4, O_features=1024)

    ### Fusing features from 3 and 4
    decoder_4 = ConvBlock(decoder_4, n_filters=512, kernel_size=[3, 3])
    decoder_4 = Upsampling(decoder_4, scale=2)

    decoder_3 = ConvBlock(decoder_3, n_filters=512, kernel_size=[3, 3])

    decoder_3 = tf.add_n([decoder_4, decoder_3])
    decoder_3 = SemanticFeatureGenerationBlock(decoder_3, D_features=512, D_prime_features = 512 / 4, O_features=512)

    ### Fusing features from 2, 3, 4 (deeper stages upsampled to stage-2 size)
    decoder_4 = ConvBlock(decoder_4, n_filters=256, kernel_size=[3, 3])
    decoder_4 = Upsampling(decoder_4, scale=4)

    decoder_3 = ConvBlock(decoder_3, n_filters=256, kernel_size=[3, 3])
    decoder_3 = Upsampling(decoder_3, scale=2)

    decoder_2 = ConvBlock(decoder_2, n_filters=256, kernel_size=[3, 3])

    decoder_2 = tf.add_n([decoder_4, decoder_3, decoder_2])
    decoder_2 = SemanticFeatureGenerationBlock(decoder_2, D_features=256, D_prime_features = 256 / 4, O_features=256)

    ### Fusing features from 1, 2, 3, 4 (all stages upsampled to stage-1 size)
    decoder_4 = ConvBlock(decoder_4, n_filters=128, kernel_size=[3, 3])
    decoder_4 = Upsampling(decoder_4, scale=8)

    decoder_3 = ConvBlock(decoder_3, n_filters=128, kernel_size=[3, 3])
    decoder_3 = Upsampling(decoder_3, scale=4)

    decoder_2 = ConvBlock(decoder_2, n_filters=128, kernel_size=[3, 3])
    decoder_2 = Upsampling(decoder_2, scale=2)

    decoder_1 = ConvBlock(decoder_1, n_filters=128, kernel_size=[3, 3])

    decoder_1 = tf.add_n([decoder_4, decoder_3, decoder_2, decoder_1])
    # Final SFGB maps straight to num_classes channels
    decoder_1 = SemanticFeatureGenerationBlock(decoder_1, D_features=128, D_prime_features = 128 / 4, O_features=num_classes)

    ### Final upscaling and finish
    net = Upsampling(decoder_1, scale=4)
    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')

    return net, init_fn
4cb126dd5814816026f6141474dd029865e08040
3,633,492
def bitstring_to_bytes(bitstring):
    """Convert PyASN1's sequence of 1s and 0s into an actual bytestring.

    Raises ValueError when the bit count is not a multiple of 8.
    """
    if len(bitstring) % 8 != 0:
        raise ValueError("Unaligned bitstrings cannot be converted to bytes")
    bit_chars = [str(bit) for bit in bitstring]
    integer = int("".join(bit_chars), 2)
    return bytes(int_to_bytearray(integer))
a037a485e082c813b768f8162f031b0ca45ec7ab
3,633,493
def plot_corr(fig, ax, corr, labels=None):
    """
    Plot a correlation matrix with a heatmap.

    :param fig: matplotlib Figure — NOTE(review): unused here; presumably
        kept for interface symmetry with sibling plot helpers — confirm.
    :param ax: matplotlib Axes to draw into (rebound to seaborn's return).
    :param corr: square correlation matrix (values in [-1, 1]).
    :param labels: optional tick labels for both axes.
    :return: the Axes containing the heatmap.
    """
    # Diverging palette centered at 0 so sign of correlation reads clearly
    ax = sns.heatmap(corr, vmin=-1, vmax=1, center=0,
                     cmap=sns.diverging_palette(10, 240, as_cmap=True),
                     cbar=True, square=True, ax=ax,
                     xticklabels=labels, yticklabels=labels)
    ax.set_xticklabels(labels=labels, rotation=45, size=9)
    ax.set_yticklabels(labels=labels, rotation=0, size=9)
    return ax
1b40b85bfcb646ca2dc8539018c43d727882083f
3,633,494
def once(f):
    """
    Wrap *f* so it executes at most once; every later call returns the
    cached first result (even if that result is None).
    """
    cell = []

    @wraps(f)
    def memoized():
        if not cell:
            # Box the value in Some so a None result is still cached
            cell.append(Some(f()))
        return cell[0].val
    return memoized
00fac90ddc4083ad28738284b8e0471381db1994
3,633,495
def from_greatfet_error(error_number):
    """ Returns the error class appropriate for the given GreatFET error. """
    message = "Error {}".format(error_number)
    # Unknown codes fall back to the generic GreatFETError
    error_class = GREATFET_ERRORS.get(error_number, GreatFETError)
    return error_class(message)
18460872c797e2f7ec93e1d7174afe6848a1bad9
3,633,496
def compute_all_distances_to_nucleus_centroid3d(heightmap: np.ndarray, nucleus_centroid: np.ndarray,
                                                image_width=None, image_height=None) -> np.ndarray:
    """
    Compute distances within the cytoplasm between all points and nucleus_centroid
    in a IMAGE_WIDTH x IMAGE_HEIGHT x cytoplasm_height matrix (max height of the cytoplasm)

    heightmap is assumed to be a 2D per-pixel height array and
    nucleus_centroid an (x, y) index pair — TODO confirm axis order
    against callers.
    """
    image_width = image_width or constants.dataset_config['IMAGE_WIDTH']
    image_height = image_height or constants.dataset_config['IMAGE_HEIGHT']
    cytoplsam_height = np.max(heightmap)
    # Nucleus z taken as half the cytoplasm height at the centroid pixel
    nucleus_centroid_z = heightmap[nucleus_centroid[0], nucleus_centroid[1]] // 2
    if image_width != image_height:
        # NOTE(review): message uses {}-placeholders but is passed as
        # separate args, so it is never formatted — latent bug, left as-is.
        raise IndexError("Implemented only for images with IMAGE_WIDTH == IMAGE_HEIGHT, {} != {}",
                         image_width, image_height)
    # Default meshgrid ('xy') indexing: j varies along the first grid arg,
    # i along the second — hence j pairs with centroid[0], i with centroid[1]
    i, j, k = np.meshgrid(np.arange(image_height), np.arange(image_width), np.arange(cytoplsam_height))
    dist = np.sqrt((j - nucleus_centroid[0]) ** 2 + (i - nucleus_centroid[1]) ** 2 + (k - nucleus_centroid_z) ** 2)
    return dist
677566894f2b37686f81b8d7e1fac97ada0d9162
3,633,497
import re


def strip_md_links(md):
    """strip markdown links from markdown text md

    Args:
        md: str, markdown text

    Returns:
        str with markdown links replaced by their link text, and literal
        '*' / '#' characters removed

    Note:
        This uses a very basic regex that likely fails on all sorts of edge
        cases but works for the links in the osxphotos docs
    """
    links = r"(?:[*#])|\[(.*?)\]\(.+?\)"

    def subfn(match):
        # BUG FIX: when the (?:[*#]) alternative matched, group(1) was None
        # and re.sub raised TypeError ("expected string or bytes-like
        # object"); substitute the empty string in that case.
        return match.group(1) or ""

    return re.sub(links, subfn, md)
fc730b88d536ec23ec8a1c9c3465fca2adb85b74
3,633,498
def tf_distort_color(image):
    """ Distorts color.

    Takes a BGR image in [0, 255], applies one randomly chosen ordering of
    brightness/saturation/hue/contrast jitter in RGB space, and returns the
    result as BGR in [0, 255].

    NOTE(review): color_ordering is drawn from [0, 5) but only values 0-3
    apply any distortion; ordering 4 passes the image through (other than
    the clip) — confirm whether that identity branch is intentional.
    """
    # Normalize to [0, 1] and flip BGR -> RGB for tf.image ops
    image = image / 255.0
    image = image[:, :, ::-1]
    brightness_max_delta = 16. / 255.
    color_ordering = tf.random.uniform([], maxval=5, dtype=tf.int32)
    if tf.equal(color_ordering, 0):
        image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.1)
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif tf.equal(color_ordering, 1):
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.1)
    elif tf.equal(color_ordering, 2):
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.1)
        image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
    elif tf.equal(color_ordering, 3):
        image = tf.image.random_hue(image, max_delta=0.1)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=brightness_max_delta)
    # Jitter can push values outside the valid range; clamp before rescale
    image = tf.clip_by_value(image, 0.0, 1.0)
    # Back to [0, 255] and RGB -> BGR
    image = image * 255
    image = image[:, :, ::-1]
    return image
3,633,499