content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def get_entry_values():
    """Interactively collect one journal entry from the user.

    For each field in ``ENTRY_QUESTIONS`` the corresponding question is
    shown and the answer validated; empty answers are re-asked unless the
    field is "notes", which may stay empty. "time" answers are validated
    as ``int``, everything else as ``str``.

    :return: dict mapping each question key to the validated answer
    """
    entry = {}
    for field, question in ENTRY_QUESTIONS.items():
        # Only the "time" field must parse as an integer.
        expected_type = int if field == "time" else str
        while True:
            print_title(MAIN_MENU[1].__doc__)
            print(question)
            answer = validate(get_input(), expected_type)
            # "notes" is the only field allowed to be empty/falsy.
            if answer or field == "notes":
                entry[field] = answer
                break
    return entry
6736ac24bbbe83a0dcbd7a43cd12a1c1b1acbdab
31,600
def _create_snapshot(provider_id, machine_uuid, skip_store, wait_spawning):
    """Create a snapshot of a machine through its provider manager.

    :param provider_id: identifier of the provider hosting the machine
    :param machine_uuid: identifier of the machine to snapshot
    :param skip_store: forwarded to the machine lookup
    :param wait_spawning: whether to wait for the snapshot to spawn
    :return: result of the manager's ``create_snapshot`` call
    """
    # The return value of the lookup is discarded — presumably this call
    # only validates that the machine exists (raising otherwise); confirm.
    _retrieve_machine(provider_id, machine_uuid, skip_store)
    snapshot_manager = _retrieve_manager(provider_id)
    return snapshot_manager.create_snapshot(machine_uuid, wait_spawning)
0d35309341dd27cc41e713c4fd950fee735c866d
31,601
def get_masked_lm_output(bert_config, input_tensor, positions, label_ids,
                         label_weights):
    """Get loss and log probs for the masked LM.

    NOTE(review): despite the name, the output layer here has only 2
    classes (``output_weights`` shape is ``[2, hidden_size]``), and the
    comment below about tying output weights to the input embeddings does
    not match the freshly-created variable — this looks adapted from
    BERT's masked-LM head for a binary task; confirm intent.

    Args (TF1 graph-mode tensors):
        bert_config: BertConfig with hidden_size / hidden_act /
            initializer_range.
        input_tensor: sequence output to gather predictions from.
        positions: indices of the masked positions.
        label_ids: target class ids for each masked position.
        label_weights: 1.0 for real predictions, 0.0 for padding.

    Returns:
        (loss, per_example_loss, log_probs) tensors.
    """
    # Select only the hidden states at the masked positions.
    input_tensor = gather_indexes(input_tensor, positions)
    with tf.variable_scope("cls/predictions"):
        # We apply one more non-linear transformation before the output layer.
        # This matrix is not used after pre-training.
        with tf.variable_scope("transform"):
            input_tensor = tf.layers.dense(
                input_tensor,
                units=bert_config.hidden_size,
                activation=modeling.get_activation(bert_config.hidden_act),
                kernel_initializer=modeling.create_initializer(
                    bert_config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        output_weights = tf.get_variable(
            "output_weights",
            shape=[2, bert_config.hidden_size],
            initializer=modeling.create_initializer(bert_config.initializer_range))
        output_bias = tf.get_variable(
            "output_bias", shape=[2], initializer=tf.zeros_initializer())
        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        label_ids = tf.reshape(label_ids, [-1])
        label_weights = tf.reshape(label_weights, [-1])
        one_hot_labels = tf.one_hot(
            label_ids, depth=2, dtype=tf.float32)
        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
        # Weighted mean over real predictions; 1e-5 avoids division by zero
        # when every prediction slot is padding.
        numerator = tf.reduce_sum(label_weights * per_example_loss)
        denominator = tf.reduce_sum(label_weights) + 1e-5
        loss = numerator / denominator
    return (loss, per_example_loss, log_probs)
7668ff4c4bd18cb14ff625dc0de593250cedb794
31,602
import torch


def binary_classification_loss(logits, targets, reduction='mean'):
    """Binary cross-entropy loss on raw logits.

    :param logits: predicted classes
    :type logits: torch.autograd.Variable
    :param targets: target classes
    :type targets: torch.autograd.Variable
    :param reduction: reduction type
    :type reduction: str
    :return: error
    :rtype: torch.autograd.Variable
    """
    assert logits.size()[0] == targets.size()[0]
    assert targets.dim() == 1
    assert logits.dim() == 2
    # Expand class indices to a one-hot matrix matching the logits shape.
    targets = one_hot(targets, logits.size(1))
    if logits.size(1) <= 1:
        # Single-logit (scalar) variant not supported.
        raise NotImplementedError
    return torch.nn.functional.binary_cross_entropy_with_logits(
        logits, targets, reduction=reduction)
507f3b076f6b59a8629bf02aa69ece05f5063f45
31,603
import os


def setup_rezconfig_file(local_packages_folder, release_packages_path):
    """Write a rezconfig.py file for packages folder settings and create an
    env var to let rez read it.

    :param local_packages_folder: folder where rez installs local packages;
        the rezconfig.py is written next to it (in its parent folder)
    :param release_packages_path: folder rez-release deploys to, or None to
        skip release configuration
    :return: full path of the written rezconfig.py
    """
    rez_config_filename = os.path.join(
        os.path.split(local_packages_folder)[0], "rezconfig.py")
    # Set for this process and persist for future shells (Windows setx).
    os.environ["REZ_CONFIG_FILE"] = rez_config_filename
    run(["setx.exe", "REZ_CONFIG_FILE", rez_config_filename])
    print(f"\nREZ_CONFIG_FILE set to: {os.environ.get('REZ_CONFIG_FILE')}\n")
    # Only include the release path in packages_path when one was given;
    # previously a literal r"None" entry was written when it was missing.
    if release_packages_path is not None:
        packages_entries = (f"\n\tr\"{local_packages_folder}\","
                            f"\n\tr\"{release_packages_path}\"")
    else:
        packages_entries = f"\n\tr\"{local_packages_folder}\""
    try:
        # `with` guarantees the handle is closed even if a write fails
        # (the original left the file open).
        with open(rez_config_filename, "w+") as rez_config_file:
            rez_config_file.write(
                "# The package search path. Rez uses this to find packages. A package with the\n"
                "# same name and version in an earlier path takes precedence.\n"
                f"packages_path = [{packages_entries}]\n")
            rez_config_file.write(
                "#REZ_LOCAL_PACKAGES_PATH\n"
                "# The path that Rez will locally install packages to when rez-build is used\n"
                f"local_packages_path = r\"{local_packages_folder}\"\n")
            if release_packages_path is not None:
                rez_config_file.write(
                    "#REZ_RELEASE_PACKAGES_PATH\n"
                    "# The path that Rez will deploy packages to when rez-release is used. For\n"
                    "# production use, you will probably want to change this to a site-wide location.\n"
                    f"release_packages_path = r\"{release_packages_path}\"")
                os.environ["REZ_RELEASE_PACKAGES_PATH"] = release_packages_path
    except IOError:
        print("An error has occurred while creating rezconfig.py")
        exit()
    # Add the packages paths to current env
    os.environ["REZ_LOCAL_PACKAGES_PATH"] = local_packages_folder
    return rez_config_filename
96c4933a4a917f78e99b964d362e15e7c63abd6d
31,604
def transform_with(sample, transformers):
    """Transform a list of values using a list of functions.

    Transformers are applied cyclically (index modulo the number of
    transformers); a ``None`` transformer leaves its value unchanged.

    :param sample: list of values
    :param transformers: list of functions
    """
    assert not isinstance(sample, dict)
    assert isinstance(sample, (tuple, list))
    if not transformers:
        # Nothing to apply: hand back the sample untouched.
        return sample
    out = list(sample)
    cycle = len(transformers)
    for idx, value in enumerate(sample):
        fn = transformers[idx % cycle]
        if fn is not None:
            out[idx] = fn(value)
    return out
9a1d7741070b670e7bf8dbf88e8a23361521265f
31,605
def concat_eval(x, y):
    """Helper function to calculate multiple evaluation metrics at once.

    :param x: true labels
    :param y: predicted labels
    :return: dict with macro recall, macro precision, macro F1 and MCC
    """
    scores = {}
    scores["recall"] = recall_score(x, y, average="macro", zero_division=0)
    scores["precision"] = precision_score(x, y, average="macro", zero_division=0)
    scores["f1_score"] = f1_score(x, y, average="macro", zero_division=0)
    scores["mcc"] = mcc(x, y)
    return scores
5a0732ac5926173f12e3f0bd6d6e0ace653c7494
31,606
from typing import List


def split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:
    """Splits an array into its coherent regions.

    :param mode: 0 for orthogonal connection, 1 for full connection
    :param arr: Numpy array with shape [W, H]
    :return: A list with length #NumberOfRegions of boolean arrays with
        shape [W, H], one mask per connected region
    """
    if mode == 0:
        labeled, num_regions = label(arr)
    elif mode == 1:
        # 8-connectivity: diagonal neighbours also join a region.
        labeled, num_regions = label(arr, structure=generate_binary_structure(2, 2))
    else:
        raise Exception("Please specify a valid Neighborhood mode for split_into_regions")
    # Region labels start at 1; 0 is background.
    return [labeled == region for region in range(1, num_regions + 1)]
59e46f5877f3f4fd12a918e9aa26a67a92eb4d5b
31,607
def register_model(model_uri, name):
    """
    Create a new model version in model registry for the model files
    specified by ``model_uri``.

    Note that this method assumes the model registry backend URI is the same
    as that of the tracking backend.

    :param model_uri: URI referring to the MLmodel directory. Use a
        ``runs:/`` URI if you want to record the run ID with the model in
        model registry. ``models:/`` URIs are currently not supported.
    :param name: Name of the registered model under which to create a new
        model version. If a registered model with the given name does not
        exist, it will be created automatically.
    :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion`
        object created by backend.
    """
    client = MlflowClient()
    # Ensure the registered model exists; tolerate the already-exists error.
    try:
        registered = client.create_registered_model(name)
        eprint("Successfully registered model '%s'." % registered.name)
    except MlflowException as e:
        if e.error_code != ErrorCode.Name(RESOURCE_ALREADY_EXISTS):
            raise e
        eprint("Registered model '%s' already exists. Creating a new version of this model..." % name)
    # A runs:/ URI is resolved to its underlying artifact location so the
    # run id can be recorded with the version.
    if RunsArtifactRepository.is_runs_uri(model_uri):
        underlying_uri = RunsArtifactRepository.get_underlying_uri(model_uri)
        run_id, _ = RunsArtifactRepository.parse_runs_uri(model_uri)
        new_version = client.create_model_version(name, underlying_uri, run_id)
    else:
        new_version = client.create_model_version(name, source=model_uri, run_id=None)
    eprint("Created version '{version}' of model '{model_name}'.".format(
        version=new_version.version, model_name=new_version.get_name()))
    return new_version
7dcdaa54717e6e0ea45390a5af48b1e350574d12
31,608
def noreplace(f):
    """Method decorator to indicate that a method definition shall silently
    be ignored if it already exists in the full class.

    Marks the function with a ``__noreplace`` attribute and returns it
    unchanged.
    """
    setattr(f, "__noreplace", True)
    return f
88b6e8fdf7064ed04d9a0c310bcf1717e05e7fa8
31,609
def position_encoding(length, depth, min_timescale=1, max_timescale=1e4):
    """Create a matrix of sinusoids of different frequencies.

    Instead of interleaving SIN and COS per dimension, all SIN columns come
    first, followed by all COS columns — equivalent, since both are applied
    to the same positions.

    Args:
        length (int): Length of the Tensor to create, i.e. number of steps.
        depth (int): Dimensions of embedding (halved internally; output has
            ``2 * (depth // 2)`` columns).
        min_timescale (float): Minimum time scale.
        max_timescale (float): Maximum time scale.

    Returns:
        Array of shape (T, D)
    """
    half = depth // 2
    steps = np.arange(length, dtype=np.float32)
    # Geometric progression of timescales between min and max.
    log_increment = np.log(max_timescale / min_timescale) / (half - 1)
    inv_timescales = min_timescale * np.exp(
        np.arange(half, dtype=np.float32) * -log_increment)
    angles = steps[:, None] * inv_timescales[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
9d8c9082d82fd41ea6b6655a50b3e802a12f6694
31,610
def perform_exchange(ctx):
    """
    Attempt to exchange attached NEO for tokens
    :param ctx:GetContext() used to access contract storage
    :return:bool Whether the exchange was successful
    """
    # NOTE(review): neo-boa smart-contract dialect — restricted Python.
    attachments = get_asset_attachments()  # [receiver, sender, neo, gas]
    address = attachments[1]
    neo_amount = attachments[2]
    # calculate the amount of tokens that can be exchanged
    exchange_amount = calculate_exchange_amount(ctx, attachments, False)
    if exchange_amount == 0:
        # This should only happen in the case that there are a lot of TX on the final
        # block before the total amount is reached. An amount of TX will get through
        # the verification phase because the total amount cannot be updated during that phase
        # because of this, there should be a process in place to manually refund tokens
        if neo_amount > 0:
            # Emit a refund event so the sender's NEO can be returned off-chain.
            OnRefund(address, neo_amount)
        return False
    didMint = mint_tokens(ctx, address, exchange_amount)
    # dispatch mintTokens event
    if didMint:
        OnMintTokens(attachments[0], address, exchange_amount)
    return didMint
6c2f01a27b40a284e89da1e84de696baa1464e1d
31,611
def Pose_2_Staubli_v2(H):
    """Converts a 4x4 pose matrix to a Staubli target
    [x, y, z, rx, ry, rz] with angles in degrees (XYZ Euler convention).
    """
    x = H[0, 3]
    y = H[1, 3]
    z = H[2, 3]
    a = H[0, 0]
    b = H[0, 1]
    c = H[0, 2]
    d = H[1, 2]
    e = H[2, 2]
    if c > (1.0 - 1e-10):
        # Gimbal lock: ry = +90 deg, rx fixed to 0.
        rx = 0
        ry = pi / 2
        rz = atan2(H[1, 0], H[1, 1])
    elif c < (-1.0 + 1e-10):
        # Gimbal lock: ry = -90 deg, rx fixed to 0.
        rx = 0
        ry = -pi / 2
        rz = atan2(H[1, 0], H[1, 1])
    else:
        cy = +sqrt(1 - c * c)
        rx = atan2(-d / cy, e / cy)
        ry = atan2(c, cy)
        rz = atan2(-b / cy, a / cy)
    return [x, y, z, rx * 180.0 / pi, ry * 180.0 / pi, rz * 180.0 / pi]
9fae83e10df544b7d2c096c7a59aca60567de538
31,612
def create_gru_model(fingerprint_input, model_settings, model_size_info, is_training):
    """Builds a model with multi-layer GRUs
    model_size_info: [number of GRU layers, number of GRU cells per layer]
    Optionally, the bi-directional GRUs and/or GRU with layer-normalization can be explored.

    NOTE(review): TF1 / tf.contrib code. The ``dropout_prob`` placeholder is
    created and returned when training, but no dropout op in this graph
    consumes it — confirm it is wired in by the caller.
    """
    if is_training:
        dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
    input_frequency_size = model_settings['dct_coefficient_count']
    input_time_size = model_settings['spectrogram_length']
    # [batch, time, freq] sequence input for the RNN.
    fingerprint_4d = tf.reshape(fingerprint_input,
                                [-1, input_time_size, input_frequency_size])
    num_classes = model_settings['label_count']
    # Hard-coded variants; flip to experiment (see docstring).
    layer_norm = False
    bidirectional = False
    num_layers = model_size_info[0]
    gru_units = model_size_info[1]
    gru_cell_fw = []
    gru_cell_bw = []
    if layer_norm:
        for i in range(num_layers):
            gru_cell_fw.append(LayerNormGRUCell(gru_units))
            if bidirectional:
                gru_cell_bw.append(LayerNormGRUCell(gru_units))
    else:
        for i in range(num_layers):
            gru_cell_fw.append(tf.contrib.rnn.GRUCell(gru_units))
            if bidirectional:
                gru_cell_bw.append(tf.contrib.rnn.GRUCell(gru_units))
    if bidirectional:
        outputs, output_state_fw, output_state_bw = \
            tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                gru_cell_fw, gru_cell_bw, fingerprint_4d, dtype=tf.float32)
        # Last time step of the (concatenated fw/bw) outputs.
        flow = outputs[:, -1, :]
    else:
        cells = tf.contrib.rnn.MultiRNNCell(gru_cell_fw)
        _, last = tf.nn.dynamic_rnn(cell=cells, inputs=fingerprint_4d,
                                    dtype=tf.float32)
        # Final state of the topmost GRU layer.
        flow = last[-1]
    with tf.name_scope('Output-Layer'):
        # linear layer
        # # print(flow.get_shape()[-1])
        W = tf.get_variable('W', shape=[flow.get_shape()[-1], 128],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable('b', shape=[128])
        linear_output = tf.matmul(flow, W) + b
        # first fc
        first_fc_weights = tf.Variable(
            tf.truncated_normal([128, 256], stddev=0.01), name="first_fc_w")
        first_fc_bias = tf.Variable(tf.zeros([256]), name="first_fc_b")
        first_fc = tf.matmul(linear_output, first_fc_weights) + first_fc_bias
        first_fc = tf.nn.relu(first_fc)
        # Output projection to class logits.
        W_o = tf.get_variable('W_o', shape=[first_fc.get_shape()[-1], num_classes],
                              initializer=tf.contrib.layers.xavier_initializer())
        b_o = tf.get_variable('b_o', shape=[num_classes])
        logits = tf.matmul(first_fc, W_o) + b_o
    if is_training:
        return logits, dropout_prob
    else:
        return logits
222581216edaf6225fabe850d977d14955c66c6e
31,613
def convergence_rates(N, solver_function, num_periods=8):
    """
    Returns N-1 empirical estimates of the convergence rate based
    on N simulations, where the time step is halved for each
    simulation.
    solver_function(I, V, F, b, c, m, dt, T, damping) solves
    each problem, where T is based on simulation for num_periods
    periods.

    Note: the docstring previously omitted the damping coefficient ``b``
    that is actually passed to solver_function.
    """
    def F(t):
        """External driving force"""
        return A*np.sin(2*np.pi*f*t)

    b, c, m = 0, 1.6, 1.3      # just some chosen values
    I = 0                       # init. cond. u(0)
    V = 0                       # init. cond. u'(0)
    A = 1.0                     # amplitude of driving force
    f = 1.0                     # chosen frequency of driving force
    damping = 'zero'
    P = 1/f
    dt = P/30                   # 30 time step per period 2*pi/w
    T = P*num_periods

    dt_values = []
    E_values = []
    for i in range(N):
        u, t = solver_function(I, V, F, b, c, m, dt, T, damping)
        u_e = u_exact(t, I, V, A, f, c, m)
        # Discrete L2 error of the numerical solution.
        E = np.sqrt(dt*np.sum((u_e-u)**2))
        dt_values.append(dt)
        E_values.append(E)
        dt = dt/2
        #plt.plot(t, u, 'b--', t, u_e, 'r-'); plt.grid(); plt.show()
    # Pairwise observed rates r_i = log(E_{i-1}/E_i) / log(dt_{i-1}/dt_i).
    r = [np.log(E_values[i-1]/E_values[i])/
         np.log(dt_values[i-1]/dt_values[i])
         for i in range(1, N, 1)]
    # Fixed: was the Python-2-only statement ``print r``.
    print(r)
    return r
e66b4395557e0a254636546555d87716e4b0cc50
31,614
import cProfile
import io
import pstats


def profile(fnc):
    """A decorator that uses cProfile to profile a function.

    The wrapped function runs under the profiler, prints a
    cumulative-time-sorted stats report to stdout, and returns the
    original return value.
    """
    def inner(*args, **kwargs):
        profiler = cProfile.Profile()
        profiler.enable()
        result = fnc(*args, **kwargs)
        profiler.disable()
        report = io.StringIO()
        stats = pstats.Stats(profiler, stream=report).sort_stats('cumulative')
        stats.print_stats()
        print(report.getvalue())
        return result

    return inner
9b5d248e2bd13d792e7c3cce646aa4c0432af8db
31,615
def _decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
    """Decodes the output of a softmax via CTC decoding.

    Uses either greedy (best-path) search or beam search.

    # Arguments
        y_pred: tensor `(samples, time_steps, num_categories)` containing
            the prediction, or output of the softmax.
        input_length: tensor `(samples, )` containing the sequence length
            for each batch item in `y_pred`.
        greedy: perform much faster best-path search if `true`. This does
            not use a dictionary.
        beam_width: if `greedy` is `false`: a beam search decoder will be
            used with a beam of this width.
        top_paths: if `greedy` is `false`, how many of the most probable
            paths will be returned.

    # Returns
        Tuple:
            List: if `greedy` is `true`, a list of one element containing
                the decoded sequence; otherwise the `top_paths` most
                probable decoded sequences. Blank labels are `-1`.
            Tensor `(top_paths, )` with the log probability of each
                decoded sequence.
    """
    decode_result = K.ctc_decode(y_pred=y_pred,
                                 input_length=input_length,
                                 greedy=greedy,
                                 beam_width=beam_width,
                                 top_paths=top_paths)
    # Evaluate the symbolic decode outputs in the backend session.
    session = K.get_session()
    paths = [sequence.eval(session=session) for sequence in decode_result[0]]
    logprobs = decode_result[1].eval(session=session)
    return (paths, logprobs)
7a73aa329245136ae560e92ebe67d997e57557f9
31,616
def rand_xyz_box(image_arrays, label, n, depth, img_size):
    """Returns n number of randomly chosen boxes.

    Args:
        image_arrays: 3D np array of images, shape (Z, X, Y).
        label: label of images. normally is A or V
        n: number of random boxes generated from this function.
        depth: number of slices in Z direction.
        img_size: image size in X,Y directions.

    Returns:
        List object. Each element is [z, x, y, im_array, label], where
        im_array is a randomly chosen box of shape
        (depth, img_size, img_size, 1).
    """
    # Fixed: the X/Y bounds previously used len(image_arrays[1]) and
    # len(image_arrays[2]); both return shape[1], so for non-square
    # frames the Y offset could exceed the array and break the reshape.
    max_z = image_arrays.shape[0] - depth + 1
    max_x = image_arrays.shape[1] - img_size + 1
    max_y = image_arrays.shape[2] - img_size + 1
    zs = np.random.randint(max_z, size=n)
    xs = np.random.randint(max_x, size=n)
    ys = np.random.randint(max_y, size=n)
    n_box = []
    for z, x, y in zip(zs, xs, ys):
        box = image_arrays[z:z+depth, x:x+img_size, y:y+img_size]
        # Add a trailing channel axis for downstream models.
        box = np.reshape(box, (depth, img_size, img_size, 1))
        n_box.append([z, x, y, box, label])
    return n_box
3127522a7d08b5694fc92ab058736db1d7471676
31,617
def pageviews_by_document(start_date, end_date, verbose=False):
    """Return the number of pageviews by document in a given date range.

    * Only returns en-US documents for now since that's what we did with
      webtrends.

    Returns a dict with pageviews for each document:
        {<document_id>: <pageviews>, 1: 42, 7: 1337,...}
    """
    counts = {}
    # Google Analytics request object; `profile_id` and `retry_503` come
    # from module scope.
    request = _build_request()
    max_results = 10000
    end_date_step = end_date
    while True:  # To reduce the size of result set request 3 months at a time
        start_date_step = end_date_step - timedelta(90)
        if start_date_step < start_date:
            start_date_step = start_date
        if verbose:
            print("Fetching data for %s to %s:" % (start_date_step, end_date_step))
        start_index = 1
        while True:  # To deal with pagination
            @retry_503
            def _make_request():
                # Pageviews per page path, restricted to /en-US/kb/ URLs.
                return request.get(
                    ids="ga:" + profile_id,
                    start_date=str(start_date_step),
                    end_date=str(end_date_step),
                    metrics="ga:pageviews",
                    dimensions="ga:pagePath",
                    filters=("ga:pagePathLevel2==/kb/;"
                             "ga:pagePathLevel1==/en-US/"),
                    max_results=max_results,
                    start_index=start_index,
                ).execute()

            results = _make_request()
            if verbose:
                # Number of rows actually contained in this page of results.
                d = (
                    max_results - 1
                    if start_index + max_results - 1 < results["totalResults"]
                    else results["totalResults"] - start_index
                )
                print("- Got %s of %s results." % (start_index + d, results["totalResults"]))
            for result in results.get("rows", []):
                path = result[0]
                pageviews = int(result[1])
                doc = Document.from_url(path, id_only=True, check_host=False)
                if not doc:
                    continue
                # The same document can appear multiple times due to url params
                counts[doc.pk] = counts.get(doc.pk, 0) + pageviews
            # Move to next page of results.
            start_index += max_results
            if start_index > results.get("totalResults", 0):
                break
        # Step the 3-month window backwards until the range is covered.
        end_date_step = start_date_step - timedelta(1)
        if start_date_step == start_date or end_date_step < start_date:
            break
    return counts
c1a2c4ba2711803ca4b5e0cb8959a99b36f928ec
31,618
# NOTE(review): `from re import T` imports the regex TEMPLATE flag constant,
# not a translation function — calling T(...) below would fail. This is
# almost certainly a mis-import of a gettext-style translation helper `T`;
# confirm the intended source module.
from re import T


def format_time_string(seconds):
    """Return a formatted and translated time string
    such as "1 day 2 hours 3 min 4 seconds".

    :param seconds: duration in seconds (coerced via int_conv)
    """
    def unit(single, n):
        # Seconds and minutes are special due to historical reasons
        # (translations use the short keys "sec" / "min").
        if single == "minute" or (single == "second" and n == 1):
            single = single[:3]
        if n == 1:
            return T(single)
        return T(single + "s")

    # Format the string, size by size
    # int_conv presumably tolerates non-int input — defined elsewhere.
    seconds = int_conv(seconds)
    completestr = []
    days = seconds // 86400
    if days >= 1:
        completestr.append("%s %s" % (days, unit("day", days)))
        seconds -= days * 86400
    hours = seconds // 3600
    if hours >= 1:
        completestr.append("%s %s" % (hours, unit("hour", hours)))
        seconds -= hours * 3600
    minutes = seconds // 60
    if minutes >= 1:
        completestr.append("%s %s" % (minutes, unit("minute", minutes)))
        seconds -= minutes * 60
    if seconds > 0:
        completestr.append("%s %s" % (seconds, unit("second", seconds)))
    # Zero or invalid integer
    if not completestr:
        completestr.append("0 %s" % unit("second", 0))
    return " ".join(completestr)
27e0a084165605aa4b1a2b42c87840439686c255
31,619
import torch


def warp_grid(flow: Tensor) -> Tensor:
    """Creates a warping grid from a given optical flow map.

    The warping grid determines the coordinates of the source pixels from
    which to take the color when inverse warping.

    Args:
        flow: optical flow tensor of shape (B, H, W, 2). The flow values
            are expected to already be in normalized range, see
            :func:`normalize` for more information.

    Returns:
        The warping grid of shape (B, H, W, 2).
    """
    batch, height, width, _ = flow.shape
    # Base identity grid spanning [-1, 1] in both axes.
    xs = torch.linspace(-1.0, 1.0, width, device=flow.device)
    ys = torch.linspace(-1.0, 1.0, height, device=flow.device)
    gy, gx = torch.meshgrid(ys, xs)
    base = torch.stack((gx, gy), dim=-1).unsqueeze(0).repeat(batch, 1, 1, 1)
    # Displace the identity grid by the flow.
    return base + flow
21f5765603f8fb42d5fe70668ab6d52b60c16bfe
31,620
def FORMULATEXT(*args) -> Function:
    """
    Returns the formula as a string.

    Learn more: https://support.google.com/docs/answer/9365792.
    """
    return Function("FORMULATEXT", args)
17cb21ee8b36439395b64fd410006ff03db7fedc
31,621
def photometer_and_plot(kicid, quarter, fake=False, makeplots=True, k2=False):
    """
    Run several photometry weightings (SAP/OWL/OPW/TSA and differential /
    composite variants) on one Kepler/K2 target and optionally plot them.

    NOTE(review): Python 2 code (print statements, integer division in
    `nt / 2`).

    ## inputs:
    - `kicid` - KIC number
    - `quarter` - Kepler observing quarter (or really place in list of files)
    ## outputs:
    - [some plots]
    - `time` - times in KBJD
    - `sap_photometry` - home-built SAP equivalent photometry
    - `owl_photometry` - OWL photometry

    #3 bugs:
    - Does unnecessary reformatting galore.
    - Should split off plotting to a separate function.
    - Comment header not complete.
    """
    fsf = 2.5  # MAGIC number used to stretch plots
    if fake:
        prefix = "./plots/fake"
        title = "fake data"
        intensities, kplr_mask = get_fake_data(4700)
        # Kepler long cadence: 48 exposures per day.
        time_in_kbjd = np.arange(len(intensities)) / 24. / 2.
    elif k2:
        time_in_kbjd, intensities, kplr_mask, prefix, title = get_k2_data()
        tb_output = np.loadtxt("../data/wasp28_lc_tom.txt").T
    else:
        time_in_kbjd, intensities, kplr_mask, prefix, title = get_kepler_data(kicid, quarter, makeplots=makeplots)
    nt, ny, nx = intensities.shape
    # get SAP weights and photometry
    # kplr_mask == 3 marks pixels in the standard aperture; weights are kept
    # only for pixels inside the mask (kplr_mask > 0).
    sap_weights = np.zeros(kplr_mask.shape)
    sap_weights[kplr_mask == 3] = 1
    sap_weights = sap_weights[kplr_mask > 0]

    def reformat_as_image(bar):
        # Scatter a flat per-pixel vector back into the 2D aperture image.
        foo = np.zeros_like(intensities[0])
        foo[kplr_mask > 0] = bar
        return foo

    sap_weight_img = reformat_as_image(sap_weights)
    pixel_mask = get_pixel_mask(intensities, kplr_mask)
    epoch_mask = get_epoch_mask(pixel_mask, kplr_mask)
    # NOTE(review): this is an alias, not a copy — the next line zeroes
    # masked pixels in the caller's `intensities` array in place.
    fubar_intensities = intensities
    fubar_intensities[pixel_mask == 0] = 0.
    # Light curve = per-epoch weighted sum over all pixels.
    sap_lightcurve = np.sum(np.sum(fubar_intensities * sap_weight_img[None, :, :], axis=2), axis=1)
    print "SAP", np.min(sap_lightcurve), np.max(sap_lightcurve)
    # get OWL weights and photometry
    means, covars = get_robust_means_and_covariances(intensities, kplr_mask)
    owl_weights = get_owl_weights(means, covars)
    # Rescale so the OWL flux level matches the SAP flux level.
    owl_weights *= np.sum(sap_weights * means) / np.sum(owl_weights * means)
    owl_weight_img = reformat_as_image(owl_weights)
    owl_lightcurve = np.sum(np.sum(fubar_intensities * owl_weight_img[None, :, :], axis=2), axis=1)
    print "OWL", np.min(owl_lightcurve), np.max(owl_lightcurve)
    # get OPW weights and photometry
    opw_weights = get_opw_weights(means, covars, owl_weights=owl_weights)
    opw_weights *= np.sum(sap_weights * means) / np.sum(opw_weights * means)
    opw_weight_img = reformat_as_image(opw_weights)
    opw_lightcurve = np.sum(np.sum(fubar_intensities * opw_weight_img[None, :, :], axis=2), axis=1)
    print "OPW", np.min(opw_lightcurve), np.max(opw_lightcurve)
    if not makeplots:
        return time_in_kbjd, sap_lightcurve, owl_lightcurve, opw_lightcurve
    # fire up the TSA
    tsa_intensities, tsa_mask = get_tsa_intensities_and_mask(intensities, kplr_mask)
    tsa_means, tsa_covars = get_robust_means_and_covariances(tsa_intensities, tsa_mask)
    tsa_weights = get_opw_weights(tsa_means, tsa_covars)
    tsa_weights *= np.sum(sap_weights * means) / np.sum(tsa_weights * tsa_means)
    # TSA weight 0 applies to the whole aperture (mask == 3); the rest map
    # to individual out-of-aperture pixels (mask == 1).
    tsa_weight_img = np.zeros_like(intensities[0])
    tsa_weight_img[kplr_mask == 3] = tsa_weights[0]
    tsa_weight_img[kplr_mask == 1] = tsa_weights[1:]
    tsa_lightcurve = np.sum(np.sum(fubar_intensities * tsa_weight_img[None, :, :], axis=2), axis=1)
    print "TSA", np.min(tsa_lightcurve), np.max(tsa_lightcurve)
    # create and use differential covariances
    clip_mask = get_sigma_clip_mask(intensities, means, covars, kplr_mask)  # mask outlier cadences
    # sqrt(2) keeps the per-sample variance of the differenced data equal
    # to that of the original data (variance of a difference doubles).
    diff_intensities = np.diff(intensities, axis=0) / np.sqrt(2.)  # exercise for reader: WHY SQRT(2)?
    diff_means, diff_covars = get_robust_means_and_covariances(diff_intensities, kplr_mask, clip_mask)
    dowl_weights = get_owl_weights(means, diff_covars)
    dowl_weights *= np.sum(sap_weights * means) / np.sum(dowl_weights * means)
    dowl_weight_img = reformat_as_image(dowl_weights)
    dowl_lightcurve = np.sum(np.sum(fubar_intensities * dowl_weight_img[None, :, :], axis=2), axis=1)
    print "DOWL", np.min(dowl_lightcurve), np.max(dowl_lightcurve)
    dopw_weights = get_opw_weights(means, diff_covars, owl_weights=dowl_weights)
    dopw_weights *= np.sum(sap_weights * means) / np.sum(dopw_weights * means)
    dopw_weight_img = reformat_as_image(dopw_weights)
    dopw_lightcurve = np.sum(np.sum(fubar_intensities * dopw_weight_img[None, :, :], axis=2), axis=1)
    print "DOPW", np.min(dopw_lightcurve), np.max(dopw_lightcurve)
    # fire up the DTSA
    clip_mask = get_sigma_clip_mask(tsa_intensities, tsa_means, tsa_covars, tsa_mask)
    diff_tsa_intensities = np.diff(tsa_intensities, axis=0)
    diff_tsa_means, diff_tsa_covars = get_robust_means_and_covariances(diff_tsa_intensities, tsa_mask, clip_mask)
    dtsa_weights = get_opw_weights(tsa_means, diff_tsa_covars)
    dtsa_weights *= np.sum(sap_weights * means) / np.sum(dtsa_weights * tsa_means)
    dtsa_weight_img = np.zeros_like(intensities[0])
    dtsa_weight_img[kplr_mask == 3] = dtsa_weights[0]
    dtsa_weight_img[kplr_mask == 1] = dtsa_weights[1:]
    dtsa_lightcurve = np.sum(np.sum(fubar_intensities * dtsa_weight_img[None, :, :], axis=2), axis=1)
    print "DTSA", np.min(dtsa_lightcurve), np.max(dtsa_lightcurve)
    # create and use compound covariances
    comp_covars = get_composite_covars(covars, diff_covars, means)
    cowl_weights = get_owl_weights(means, comp_covars)
    cowl_weights *= np.sum(sap_weights * means) / np.sum(cowl_weights * means)
    cowl_weight_img = reformat_as_image(cowl_weights)
    cowl_lightcurve = np.sum(np.sum(fubar_intensities * cowl_weight_img[None, :, :], axis=2), axis=1)
    print "COWL", np.min(cowl_lightcurve), np.max(cowl_lightcurve)
    copw_weights = get_opw_weights(means, comp_covars, owl_weights=cowl_weights)
    copw_weights *= np.sum(sap_weights * means) / np.sum(copw_weights * means)
    copw_weight_img = reformat_as_image(copw_weights)
    copw_lightcurve = np.sum(np.sum(fubar_intensities * copw_weight_img[None, :, :], axis=2), axis=1)
    print "COPW", np.min(copw_lightcurve), np.max(copw_lightcurve)
    # get the eigenvalues and top eigenvector (for plotting)
    for foo, cc in [("diff-", diff_covars), ("", covars)]:
        eig = np.linalg.eig(cc)
        eigval = eig[0]
        eigvec = eig[1]
        II = (np.argsort(eigval))[::-1]
        eigval = eigval[II]
        eigvec = eigvec[:, II]
        # NOTE(review): np.linalg.eig returns eigenvectors as COLUMNS, so
        # eigvec[0] is the first *row*, not the dominant eigenvector —
        # eigvec[:, 0] is probably intended; confirm.
        eigvec0 = eigvec[0]
        plt.figure(figsize=(fsf * nx, fsf * ny))  # MAGIC
        plt.clf()
        plt.title(title)
        plt.plot(eigval, "ko")
        plt.xlabel("%s$\hat{C}$ eigenvector index" % foo)
        plt.ylabel("%s$\hat{C}$ eigenvalue (ADU$^2$)" % foo)
        plt.xlim(-0.5, len(eigval) - 0.5)
        plt.ylim(-0.1 * np.max(eigval), 1.1 * np.max(eigval))
        plt.axhline(0., color="k", alpha=0.5)
        savefig("%s_%seigenvalues.png" % (prefix, foo))
    # more reformatting
    mean_img = reformat_as_image(means)
    covar_diag_img = reformat_as_image(np.diag(covars))
    eigvec0_img = reformat_as_image(eigvec0)
    sap_frac_contribs_img = sap_weight_img * mean_img
    owl_frac_contribs_img = owl_weight_img * mean_img
    opw_frac_contribs_img = opw_weight_img * mean_img
    # make images plot
    plt.gray()
    plt.figure(figsize=(fsf * nx, fsf * ny))  # MAGIC
    plt.clf()
    plt.title(title)
    vmax = np.percentile(intensities[:, kplr_mask > 0], 99.)
    vmin = -1. * vmax
    # First, mid and last exposures on the top row.
    for ii, sp in [(0, 331), (nt / 2, 332), (nt-1, 333)]:
        plt.subplot(sp)
        plt.imshow(intensities[ii], interpolation="nearest", vmin=vmin, vmax=vmax)
        plt.title("exposure %d" % ii)
        plt.colorbar()
    plt.subplot(334)
    plt.imshow(mean_img, interpolation="nearest", vmin=vmin, vmax=vmax)
    plt.title(r"mean $\hat{\mu}$")
    plt.colorbar()
    plt.subplot(335)
    plt.imshow(np.log10(covar_diag_img), interpolation="nearest")
    plt.title(r"log diag($\hat{C})$")
    plt.colorbar()
    plt.subplot(336)
    plt.imshow(eigvec0_img, interpolation="nearest")
    plt.title(r"dominant $\hat{C}$ eigenvector")
    plt.colorbar()
    vmax = 1.2 * np.max(sap_weight_img)
    vmin = -1. * vmax
    plt.subplot(337)
    plt.imshow(sap_weight_img, interpolation="nearest", vmin=vmin, vmax=vmax)
    plt.title(r"SAP weights")
    plt.colorbar()
    plt.subplot(338)
    plt.imshow(owl_weight_img, interpolation="nearest", vmin=vmin, vmax=vmax)
    plt.title(r"OWL weights")
    plt.colorbar()
    plt.subplot(339)
    plt.imshow(owl_weight_img * mean_img, interpolation="nearest", vmin=-np.max(owl_weight_img * mean_img))
    plt.title(r"OWL mean contribs")
    plt.colorbar()
    savefig("%s_images_owl.png" % prefix)
    # Same three-panel comparison for every other weighting scheme.
    for TLA, wimg, suffix in [("OPW", opw_weight_img, "opw"),
                              ("TSA", tsa_weight_img, "tsa"),
                              ("DOWL", dowl_weight_img, "dowl"),
                              ("DOPW", dopw_weight_img, "dopw"),
                              ("DTSA", dtsa_weight_img, "dtsa"),
                              ("COWL", cowl_weight_img, "cowl"),
                              ("COPW", copw_weight_img, "copw")]:
        # make OPW plot
        plt.figure(figsize=(fsf * nx, fsf * ny / 3.))  # MAGIC
        plt.clf()
        plt.title(title)
        plt.subplot(131)
        plt.imshow(sap_weight_img, interpolation="nearest", vmin=vmin, vmax=vmax)
        plt.title(r"SAP weights")
        plt.colorbar()
        plt.subplot(132)
        plt.imshow(wimg, interpolation="nearest", vmin=vmin, vmax=vmax)
        plt.title(r"%s weights" % TLA)
        plt.colorbar()
        plt.subplot(133)
        plt.imshow(wimg * mean_img, interpolation="nearest", vmin=-np.max(wimg * mean_img))
        plt.title(r"%s mean contribs" % TLA)
        plt.colorbar()
        savefig("%s_images_%s.png" % (prefix, suffix))
    # make photometry plot
    # NOTE(review): loop variable `list` shadows the builtin for the rest of
    # this function body.
    for suffix, list in [("photometry", [(0, owl_lightcurve, "OWL"), (1, opw_lightcurve, "OPW"), (2, tsa_lightcurve, "TSA")]),
                         ("diff_photometry", [(0, dowl_lightcurve, "DOWL"), (1, dopw_lightcurve, "DOPW"), (2, dtsa_lightcurve, "DTSA")]),
                         ("comp_photometry", [(0, cowl_lightcurve, "COWL"), (1, copw_lightcurve, "COPW")])]:
        plt.figure(figsize=(fsf * nx, 0.5 * fsf * nx))
        plt.clf()
        plt.title(title)
        clip_mask = get_sigma_clip_mask(intensities, means, covars, kplr_mask)
        # Keep only good epochs that also survive sigma clipping.
        I = (epoch_mask > 0) * (clip_mask > 0)
        # tb_output only exists on the k2 branch; NameError selects the
        # SAP reference curve instead.
        try:
            tb_time = tb_output[0]
            tb_lightcurve = (tb_output[1] + 1.) * np.median(sap_lightcurve)
        except NameError:
            plt.plot(time_in_kbjd[I], sap_lightcurve[I], "k-", alpha=0.5)
            plt.text(time_in_kbjd[0], sap_lightcurve[0], "SAP-", alpha=0.5, ha="right")
            plt.text(time_in_kbjd[-1], sap_lightcurve[-1], "-SAP", alpha=0.5)
        else:
            plt.plot(tb_time, tb_lightcurve, "k-", alpha=0.5)
            plt.text(tb_time[0], tb_lightcurve[0], "TommyB-", alpha=0.5, ha="right")
            plt.text(tb_time[-1], tb_lightcurve[-1], "-TommyB", alpha=0.5)
        shift1 = 0.
        # Vertical offset between stacked light curves.
        dshift = 0.2 * (np.min(sap_lightcurve[I]) - np.max(sap_lightcurve[I]))
        for ii, lc, tla in list:
            ss = shift1 + ii * dshift
            plt.plot(time_in_kbjd[I], ss + lc[I], "k-")
            plt.text(time_in_kbjd[0], ss + lc[0], "%s-" % tla, ha="right")
            plt.text(time_in_kbjd[-1], ss + lc[-1], "-%s" % tla)
        plt.xlim(np.min(time_in_kbjd[I]) - 4., np.max(time_in_kbjd[I]) + 4.)  # MAGIC
        plt.xlabel("time (KBJD in days)")
        plt.ylabel("flux (in Kepler SAP ADU)")
        savefig("%s_%s.png" % (prefix, suffix))
    # phone home
    return time_in_kbjd, sap_lightcurve, owl_lightcurve
84dc5131317c85a5bb9be6928da53ca5af1ae76b
31,622
def num_prim_vertices(prim: hou.Prim) -> int:
    """Get the number of vertices belonging to the primitive.

    :param prim: The primitive to get the vertex count of.
    :return: The vertex count.
    """
    # Houdini exposes the count via the "vertexcount" intrinsic.
    vertex_count = prim.intrinsicValue("vertexcount")
    return vertex_count
298a4a67133fc857c129b922f7f5a0f21d6d0b40
31,623
def read_geoparquet(path: str) -> GeoDataFrame:
    """
    Load a geoparquet file into a geopandas GeoDataFrame.

    Steps: read the file as a pyarrow table, pull the geometry column name
    and CRS out of the table metadata, deserialise the WKB payload into
    shapely geometries, then wrap the result in a GeoDataFrame.
    """
    table = pq.read_table(path)

    # geopandas supports a single geometry column, so only the first
    # geometry field recorded in the metadata is used.
    geom_meta = _deserialise_metadata(table)["geometry_fields"][0]
    crs = geom_meta["crs"]
    geom_col = geom_meta["field_name"]

    frame = table.to_pandas()
    # Turn the serialised WKB column into shapely geometry objects.
    frame = frame._deserialise_geometry(geom_col)

    return GeoDataFrame(frame, crs=crs, geometry=geom_col)
0fddb5452010e5d4546b3b34e7afae93698cd953
31,624
def cmd_run_json_block_file(file):
    """Run a block file.

    `file` is a file containing a FullBlock in JSON format.  This is a thin
    command wrapper that simply delegates to ``run_json_block_file``.
    """
    result = run_json_block_file(file)
    return result
594e10a7ef4e20b130a5b39c22a834208df846a6
31,625
def collide_mask(left, right):
    """collision detection between two sprites, using masks.

    pygame.sprite.collide_mask(SpriteLeft, SpriteRight): bool

    Checks whether the bitmasks of the two sprites overlap.  A sprite's
    "mask" attribute is used when present; otherwise a mask is built on the
    fly from the sprite image.  Intended to be passed as a collided
    callback function to the *collide functions.  Sprites must have a
    "rect" and an optional "mask" attribute.

    New in pygame 1.8.0
    """
    # Offset of the right sprite relative to the left one.
    offset = (right.rect[0] - left.rect[0], right.rect[1] - left.rect[1])

    try:
        mask_a = left.mask
    except AttributeError:
        mask_a = from_surface(left.image)

    try:
        mask_b = right.mask
    except AttributeError:
        mask_b = from_surface(right.image)

    return mask_a.overlap(mask_b, offset)
fcb309e0c5ca7bc59e5b39b8fd67a45a5281d262
31,626
import requests


def fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,
                     logger=getLogger('IN-GJ')) -> list:
    """Requests the last known production mix (in MW) of a given country."""
    if target_datetime:
        raise NotImplementedError(
            'This parser is not yet able to parse past dates')

    session = session or requests.session()
    value_map = fetch_data(zone_key, session, logger=logger)

    production = {
        'biomass': None,
        'coal': value_map.get('coal', 0),
        'gas': value_map.get('gas', 0),
        'hydro': value_map.get('hydro', 0),
        'nuclear': value_map.get('nuclear', 0),
        'oil': None,
        'solar': value_map.get('solar', 0),
        'wind': value_map.get('wind', 0),
        'geothermal': None,
        'unknown': value_map.get('unknown', 0),
    }

    data = {
        'zoneKey': zone_key,
        'datetime': value_map['date'].datetime,
        'production': production,
        'storage': {'hydro': None},
        'source': 'sldcguj.com',
    }

    # Sanity-check the datapoint; floor=7000 presumably rejects implausibly
    # low totals for this zone — confirm against validate()'s contract.
    return validate(data, logger, remove_negative=True, floor=7000)
e23e409d24349e998eb9c261805a050de12ed30c
31,627
def xyz_order(coordsys, name2xyz=None):
    """Return the axis ordering that puts x, y, z first for `coordsys`.

    Parameters
    ----------
    coordsys : ``CoordinateSystem`` instance
    name2xyz : None or mapping
        Object such that ``name2xyz[ax_name]`` returns 'x', 'y' or 'z', or
        raises a KeyError for a str ``ax_name``.  None means use the module
        default.

    Returns
    -------
    xyz_order : list
        Permutation of axis indices sorting the axes into x, y, z order;
        unrecognized axes come after, keeping their original relative order.

    Raises
    ------
    AxesError : if x, y and z are not all present among the axes
    """
    if name2xyz is None:
        name2xyz = known_names
    names = coordsys.coord_names
    n_axes = len(names)
    sort_keys = np.zeros(n_axes, dtype=int)
    for idx, ax_name in enumerate(names):
        try:
            sort_keys[idx] = 'xyz'.index(name2xyz[ax_name])
        except KeyError:
            # Unrecognized axes sort after x/y/z, preserving input order.
            sort_keys[idx] = n_axes + idx
    if not set(sort_keys).issuperset(range(3)):
        raise AxesError("Not all of x, y, z recognized in coordinate map")
    return list(np.argsort(sort_keys))
983c7adc5df8f54ecc92423eed0cd744971d4ec3
31,628
def parse_item(year, draft_type, row):
    """Parses one scraped table row into a DraftPick item.

    Numeric cells fall back to -1 (round/pick/age) or 0 (honors/values)
    when absent; link cells fall back to their plain text.
    """
    def _link_or_text(stat):
        # Prefer the href of the cell's anchor, falling back to cell text
        # for entries without a page of their own.
        cell = 'td[data-stat="%s"]' % stat
        href = row.css(cell + ' a::attr(href)').get()
        return href if href else row.css(cell + '::text').get()

    team_href = row.css('td[data-stat="team"] a::attr(href)').get()
    # Drop the trailing path component to get the franchise URL.
    franchise = '/'.join(team_href.split('/')[:-1])

    return DraftPick(
        year=year,
        draft_type=draft_type,
        draft_round=parse_int(row, 'th[data-stat="draft_round"]::text', -1),
        draft_pick=parse_int(row, 'td[data-stat="draft_pick"]::text', -1),
        franchise=franchise,
        player=_link_or_text('player'),
        position=row.css('td[data-stat="pos"]::text').get(),
        age=parse_int(row, 'td[data-stat="age"]::text', -1),
        first_team_all_pros=parse_int(
            row, 'td[data-stat="all_pros_first_team"]::text', 0),
        pro_bowls=parse_int(row, 'td[data-stat="pro_bowls"]::text', 0),
        career_approx_value=parse_int(row, 'td[data-stat="career_av"]::text', 0),
        draft_approx_value=parse_int(row, 'td[data-stat="draft_av"]::text', 0),
        college=_link_or_text('college_id'),
    )
822a596e0c3e381658a853899920347b95a7ff59
31,629
def buildJointChain(prefix, suffix, startPos, endPos, jointNum, orientJoint="xyz", saoType="yup"):
    """
    Build a straight joint chain between two world-space positions.

    :param prefix: `string` prefix string in joint name
    :param suffix: `string` suffix string in joint name
    :param startPos: `list` [x,y,z] start position in the world space
    :param endPos: `list` [x,y,z] end position in the world space
    :param jointNum: number of joints in the joint chain
    :param orientJoint: `string` orient joint flag
    :param saoType: `string` secondary axis orient flag
    :return: `list` list of joint nodes in the joint chain, sorted by hierarchy.
    """
    # Clear the selection so joints parent correctly as they are created.
    pm.select(d=1)
    start = om.MVector(*startPos)
    step = (om.MVector(*endPos) - start) / (jointNum - 1.0)
    joints = []
    for idx in range(jointNum):
        pos = start + step * idx
        name = "{0}_{1:0>2d}_{2}".format(prefix, idx, suffix)
        joints.append(pm.joint(p=(pos.x, pos.y, pos.z), n=name))
    # Orient the whole chain once all joints exist.
    pm.joint(joints, e=True, oj=orientJoint, sao=saoType)
    return joints
fda63b96d2e5a1316fab9d2f9dc268ae0ff270d2
31,630
import time
import torch


def predict(model, img_load, resizeNum, is_silent, gpu=0):
    """
    Run the model over every resized copy of an image and average the scores.

    input:
        model: segmentation model
        img_load: dict with keys 'img_ori' (original numpy array) and
                  'img_data' (list of resized image tensors)
        resizeNum: number of resized copies, used to average predictions
        is_silent: when False, print the inference wall time
        gpu: CUDA device index the tensors are copied to
    output:
        averaged score tensor of shape (1, num_class, height, width)
    """
    start = time.time()
    orig = img_load['img_ori']
    seg_size = (orig.shape[0], orig.shape[1])

    with torch.no_grad():
        scores = torch.zeros(
            1, cfg.DATASET.num_class, seg_size[0], seg_size[1],
            device=torch.device("cuda", gpu))
        for img in img_load['img_data']:
            # Feed one resized copy at a time, dropping the original image.
            feed_dict = img_load.copy()
            feed_dict['img_data'] = img
            del feed_dict['img_ori']
            feed_dict = async_copy_to(feed_dict, gpu)
            # Each resized copy contributes 1/resizeNum of the final score.
            pred = model(feed_dict, segSize=seg_size)
            scores = scores + pred / resizeNum

    elapsed = time.time() - start
    if not is_silent:
        print('model inference time: {}s'.format(elapsed))
    return scores
04da68453aab79f732deb153cdcbed9ea267355c
31,631
def reverse_preorder(root):
    """
    @ input: root of lcrs tree
    @ output: list of node values in reverse preorder

    Iterative traversal of a left-child/right-sibling tree using an
    explicit stack; the child is pushed before the sibling, so the sibling
    is popped (visited) first.
    """
    values = []
    stack = [root]
    while stack:
        node = stack.pop()
        values.append(node.value)
        for nxt in (node.child, node.next):
            if nxt is not None:
                stack.append(nxt)
    return values
06a53756db0f5c990537d02de4fcaa57cc93169d
31,632
import scipy


def calc_binned_percentile(bin_edge, xaxis, data, per=75):
    """Calculate a percentile of `data` within bins of `xaxis`.

    Parameters
    ----------
    bin_edge : array of bin edges (length nbins + 1)
    xaxis : values used to assign each data point to a bin
    data : values whose percentile is taken per bin
    per : percentile to extract (default 75)

    Returns
    -------
    percen : array of length nbins; bins holding five or fewer points are
        left at 0.
    """
    n_bins = np.size(bin_edge) - 1
    percen = np.zeros(n_bins)
    # `range`, not the Python-2-only `xrange` the original used (NameError
    # on Python 3).
    for i in range(n_bins):
        # Strict inequalities: points exactly on a bin edge are excluded,
        # matching the original binning behaviour.
        ind = np.where((xaxis > bin_edge[i]) * (xaxis < bin_edge[i + 1]))
        if np.size(ind) > 5:
            # np.percentile's default linear interpolation matches the
            # deprecated scipy.stats.scoreatpercentile default.
            percen[i] = np.percentile(data[ind], per)
    return percen
798cd1e4f1070b27766f2390442fa81dfad15aaa
31,633
def run_services(container_factory, config, make_cometd_server, waiter):
    """ Returns services runner """

    def _run(service_class, responses):
        """ Spin up a testing cometd server pre-loaded with `responses`
        plus an example service hosting the tested entrypoints, wait for
        the test to finish, then tear everything down.
        """
        cometd_server = make_cometd_server(responses)
        service_container = container_factory(service_class, config)

        cometd_server.start()
        service_container.start()

        waiter.wait()

        service_container.kill()
        cometd_server.stop()

    return _run
df7d1c3fdf7e99ebf054cfc6881c8073c2cf4dee
31,634
import requests


def cleaned_request(request_type, *args, **kwargs):
    """ Perform a cleaned requests request """
    session = requests.Session()
    # Don't pick up environment-derived settings such as ~/.netrc
    # credentials.
    session.trust_env = False
    return session.request(request_type, *args, **kwargs)
b6c99c85a64e5fd78cf10cc986c9a4b1542f47d3
31,635
from typing import List
from typing import Set


def construct_speech_to_text_phrases_context(event: EventIngestionModel) -> List[str]:
    """
    Construct a list of phrases to use for Google Speech-to-Text speech adaption.

    See: https://cloud.google.com/speech-to-text/docs/speech-adaptation

    Parameters
    ----------
    event: EventIngestionModel
        The event details to pull context from.

    Returns
    -------
    phrases: List[str]
        Compiled list of strings to act as target weights for the model.

    Notes
    -----
    Phrases are added in order of importance until GCP phrase limits are met.
    The order of importance is defined as:

    1. body name
    2. event minutes item names
    3. councilmember names
    4. matter titles
    5. councilmember role titles
    """
    # Google Speech-to-Text allows max 500 phrases and a cumulative max of
    # 9900 characters across all phrases.
    phrases: Set[str] = set()
    PHRASE_LIMIT = 500
    CUM_CHAR_LIMIT = 9900

    def _try_add(phrase: str) -> None:
        # Add `phrase` only while both GCP limits would still be respected.
        # Fix: the previous implementation skipped the phrase-count check in
        # some branches (e.g. sponsor role titles); it is applied uniformly
        # here.
        current_chars = sum(len(p) for p in phrases)
        if len(phrases) < PHRASE_LIMIT and current_chars + len(phrase) < CUM_CHAR_LIMIT:
            phrases.add(phrase)

    # 1. Body name
    _try_add(event.body.name)

    if event.event_minutes_items is not None:
        # 2. Minutes item names
        for emi in event.event_minutes_items:
            _try_add(emi.minutes_item.name)

        # 3. Councilmember names from sponsors and votes
        for emi in event.event_minutes_items:
            if emi.matter is not None and emi.matter.sponsors is not None:
                for sponsor in emi.matter.sponsors:
                    _try_add(sponsor.name)
            if emi.votes is not None:
                for vote in emi.votes:
                    _try_add(vote.person.name)

        # 4. Matter titles
        for emi in event.event_minutes_items:
            if emi.matter is not None:
                _try_add(emi.matter.title)

        # 5. Councilmember role titles from sponsors and votes
        for emi in event.event_minutes_items:
            if emi.matter is not None and emi.matter.sponsors is not None:
                for sponsor in emi.matter.sponsors:
                    if sponsor.seat is not None and sponsor.seat.roles is not None:
                        for role in sponsor.seat.roles:
                            _try_add(role.title)
            if emi.votes is not None:
                for vote in emi.votes:
                    if vote.person.roles is not None:
                        for role in vote.person.roles:
                            _try_add(role.title)

    return list(phrases)
e8834afd4e53d446f2dda1fd79383a0266010e5b
31,636
def data_science_community(articles, authors):
    """
    Input: Articles and authors collections. You may use only one of them
    Output: 3-tuple reporting on subgraph of authors of data science
            articles and their co-authors:
            (number of connected components,
             size of largest connected component,
             size of smallest connected component)
    """
    graph = Graph()

    # Collect the ids of every author of a "Data science" article.
    pipeline = [
        {"$match": {"fos.name": "Data science"}},
        {"$project": {"authors.id": 1}},
        {"$unwind": "$authors"},
    ]
    data_science_authors = set()
    for doc in articles.aggregate(pipeline):
        author_id = doc["authors"]["id"]
        data_science_authors.add(author_id)
        graph.add_node(author_id)

    # Link each data-science author to all of their co-authors.
    for author_id in data_science_authors:
        for author_record in authors.find({"_id": author_id}):
            for co_author in author_record["coauthors"]:
                graph.add_edge(author_id, co_author)

    # Fix: run the component search once and reuse it.  The original
    # consumed one connected_components() generator for the sizes and then
    # recomputed all components from scratch just to count them.
    components = sorted(connected_components(graph), key=len, reverse=True)
    num_components = len(components)
    largest = len(components[0])
    smallest = len(components[-1])
    return num_components, largest, smallest
3a81fc7674a2d421ff4649759e61797a743b7aae
31,637
def breadth_first_search(G, seed):
    """Breadth First search of a graph.

    Parameters
    ----------
    G : csr_matrix, csc_matrix
        A sparse NxN matrix where each nonzero entry G[i,j] is the distance
        between nodes i and j.
    seed : int
        Index of the seed location

    Returns
    -------
    order : int array
        Breadth first ordering of the nodes
    level : int array
        BFS level of each node; -1 marks nodes never reached
    """
    G = asgraph(G)
    num_nodes = G.shape[0]

    # Output buffers filled in place by the C kernel; level starts at -1 so
    # unreached nodes are distinguishable afterwards.
    order = np.empty(num_nodes, G.indptr.dtype)
    level = np.full(num_nodes, -1, dtype=G.indptr.dtype)

    amg_core.breadth_first_search(G.indptr, G.indices, int(seed), order, level)
    return order, level
047596e378f0496189f2e164e2b7ede4a6212f19
31,638
def main_page():
    """ Render main_page.html with a table of the latest sensor readings """
    LOG.info("Main Page triggered")
    return render_template(
        'main_page.html',
        sub_title="Latest readings:",
        table=recent_readings_as_html(),
    )
6c9ac7c3306eb10d03269ca4e0cbca9c68a19644
31,639
import yaml


def load_config_file(filename):
    """Load configuration from a (possibly multi-document) YAML file.

    Documents are merged in order, so keys in later documents override
    earlier ones.

    Parameters
    ----------
    filename : path to the YAML file

    Returns
    -------
    dict mapping top-level keys to their values
    """
    config_dict = {}
    # Fix: `with` guarantees the file handle is closed; the original left
    # the handle open until garbage collection.
    with open(filename, 'r') as fh:
        # safe_load_all is shorthand for load_all(..., Loader=SafeLoader);
        # the generator is fully consumed inside the `with` block.
        for doc in yaml.safe_load_all(fh):
            config_dict.update(doc)
    return config_dict
d61bb86e605a1e744ce3f4cc03e866c61137835d
31,640
def CausalConv(x, dilation_rate, filters, kernel_size=2, scope=""):
    """Performs causal dilated 1D convolutions.

    Args:
      x : Tensor of shape (batch_size, steps, input_dim).
      dilation_rate: Dilation rate of convolution.
      filters: Number of convolution filters.
      kernel_size: Width of convolution kernel. SNAIL paper uses 2 for all
        experiments.
      scope: Variable scope for this layer.

    Returns:
      y: Tensor of shape (batch_size, new_steps, D).
    """
    with tf.variable_scope(scope):
        # Left-pad the time axis so the VALID convolution never sees future
        # steps (this is what makes the convolution causal).
        pad_amount = (kernel_size - 1) * dilation_rate
        padded = tf.pad(x, [[0, 0], [pad_amount, 0], [0, 0]])
        return layers.conv1d(
            padded,
            filters,
            kernel_size=kernel_size,
            padding="VALID",
            rate=dilation_rate)
08ffde5e4a9ae9ebdbb6ed83a22ee1987bf02b1e
31,641
import functools


def makeTable(grid):
    """Create a REST (reStructuredText) grid table from rows of strings.

    The first row is treated as the header (its bottom separator uses '=').
    Cells are padded to the width of the longest item plus two.
    """
    # Fix: a plain generator max replaces the O(n^2)
    # functools.reduce(lambda x, y: x + y, ...) list-flattening the
    # original used just to find the longest cell.
    cell_width = 2 + max(len(item) for row in grid for item in row)
    num_cols = len(grid[0])

    def _separator(header):
        # '=' under the header row, '-' everywhere else.
        fill = "=" if header else "-"
        return num_cols * ("+" + fill * cell_width) + "+\n"

    rst = _separator(False)
    header = True
    for row in grid:
        cells = "| ".join(cell.ljust(cell_width - 1) for cell in row)
        rst += "| " + cells + "|\n"
        rst += _separator(header)
        header = False
    return rst
c889a4cf505b5f0b3ef75656acb38f621c7fff31
31,642
from pathlib import Path
import os
import configparser
import io


def generate_and_validate(config: Config) -> str:
    """Validate and generate mypy config.

    Reads the module list from ``<root>/.strict-typing``, validates it
    (recording problems via ``config.add_error``), then renders a mypy ini
    file as a string.  Returns "" when any validation error was recorded.
    """
    # One module path per line; '#' lines are comments.
    config_path = config.root / ".strict-typing"
    with config_path.open() as fp:
        lines = fp.readlines()

    # Filter empty and commented lines.
    parsed_modules: list[str] = [
        line.strip()
        for line in lines
        if line.strip() != "" and not line.startswith("#")
    ]

    # Split into component modules vs. core (non-component) modules — they
    # get different strict-setting groups below.
    strict_modules: list[str] = []
    strict_core_modules: list[str] = []
    for module in parsed_modules:
        if module.startswith("homeassistant.components"):
            strict_modules.append(module)
        else:
            strict_core_modules.append(module)

    ignored_modules_set: set[str] = set(IGNORED_MODULES)
    for module in strict_modules:
        if (
            not module.startswith("homeassistant.components.")
            and module != "homeassistant.components"
        ):
            config.add_error(
                "mypy_config", f"Only components should be added: {module}"
            )
        # A module cannot be both strictly typed and ignored.
        if ignored_module := _strict_module_in_ignore_list(module, ignored_modules_set):
            config.add_error(
                "mypy_config",
                f"Module '{ignored_module}' is in ignored list in mypy_config.py",
            )

    # Validate that all modules exist.
    all_modules = (
        strict_modules
        + strict_core_modules
        + IGNORED_MODULES
        + list(NO_IMPLICIT_REEXPORT_MODULES)
    )
    for module in all_modules:
        if module.endswith(".*"):
            # Wildcard entries must point at a package directory.
            module_path = Path(module[:-2].replace(".", os.path.sep))
            if not module_path.is_dir():
                # NOTE(review): message is missing the closing quote after
                # {module} — cosmetic only.
                config.add_error("mypy_config", f"Module '{module} is not a folder")
        else:
            # Plain entries may be a module file or a package __init__.py.
            module = module.replace(".", os.path.sep)
            module_path = Path(f"{module}.py")
            if module_path.is_file():
                continue
            module_path = Path(module) / "__init__.py"
            if not module_path.is_file():
                # NOTE(review): same missing closing quote as above.
                config.add_error("mypy_config", f"Module '{module} doesn't exist")

    # Don't generate mypy.ini if there're errors found because it will likely crash.
    if any(err.plugin == "mypy_config" for err in config.errors):
        return ""

    mypy_config = configparser.ConfigParser()

    # Global [mypy] section: general settings plus all strict flags on.
    general_section = "mypy"
    mypy_config.add_section(general_section)
    for key, value in GENERAL_SETTINGS.items():
        mypy_config.set(general_section, key, value)
    for key in STRICT_SETTINGS:
        mypy_config.set(general_section, key, "true")

    # By default enable no_implicit_reexport only for homeassistant.*
    # Disable it afterwards for all components
    components_section = "mypy-homeassistant.*"
    mypy_config.add_section(components_section)
    mypy_config.set(components_section, "no_implicit_reexport", "true")

    # Core modules from .strict-typing get the core strict flag set.
    for core_module in strict_core_modules:
        core_section = f"mypy-{core_module}"
        mypy_config.add_section(core_section)
        for key in STRICT_SETTINGS_CORE:
            mypy_config.set(core_section, key, "true")

    # By default strict checks are disabled for components.
    components_section = "mypy-homeassistant.components.*"
    mypy_config.add_section(components_section)
    for key in STRICT_SETTINGS:
        mypy_config.set(components_section, key, "false")
    mypy_config.set(components_section, "no_implicit_reexport", "false")

    # Components from .strict-typing re-enable all strict flags.
    for strict_module in strict_modules:
        strict_section = f"mypy-{strict_module}"
        mypy_config.add_section(strict_section)
        for key in STRICT_SETTINGS:
            mypy_config.set(strict_section, key, "true")
        if strict_module in NO_IMPLICIT_REEXPORT_MODULES:
            mypy_config.set(strict_section, "no_implicit_reexport", "true")

    # Reexport-restricted modules not already covered above.
    for reexport_module in NO_IMPLICIT_REEXPORT_MODULES.difference(strict_modules):
        reexport_section = f"mypy-{reexport_module}"
        mypy_config.add_section(reexport_section)
        mypy_config.set(reexport_section, "no_implicit_reexport", "true")

    # Disable strict checks for tests
    tests_section = "mypy-tests.*"
    mypy_config.add_section(tests_section)
    for key in STRICT_SETTINGS:
        mypy_config.set(tests_section, key, "false")

    # Ignored modules are excluded from type checking entirely.
    for ignored_module in IGNORED_MODULES:
        ignored_section = f"mypy-{ignored_module}"
        mypy_config.add_section(ignored_section)
        mypy_config.set(ignored_section, "ignore_errors", "true")

    # Render the ini to a string, prefixed with the generated-file header.
    with io.StringIO() as fp:
        mypy_config.write(fp)
        fp.seek(0)
        return HEADER + fp.read().strip()
b3c4a94aab6404f4ab88a8dc700cebd6a484a422
31,643
def coords_to_bin(
    x: npt.NDArray,
    y: npt.NDArray,
    x_bin_width: float,
    y_bin_width: float,
) -> tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]:
    """
    Map positive planar coordinates onto integer bin indices.

    x: list of positive east-west coordinates of some sort
    y: list of positive north-south coordinates of some sort
    x_bin_width: bin width for x
    y_bin_width: bin width for y
    """
    # All coordinates and both bin widths must be strictly positive.
    assert np.all(x > 0)
    assert np.all(y > 0)
    assert x_bin_width > 0
    assert y_bin_width > 0

    # Index of the bin each coordinate falls into.
    x_bins = np.floor(x / x_bin_width).astype(int)
    y_bins = np.floor(y / y_bin_width).astype(int)
    return (x_bins, y_bins)
874950836d6d03e1dc0f39bdb53653789fe64605
31,644
from typing import Callable


def _gcs_request(func: Callable):
    """
    Wrap a GCS request function so missing objects surface as
    FileNotFoundError with the offending URL in the message.
    """

    @wraps(func)
    def _wrapped(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except NotFound:
            # Translate the cloud client's NotFound into the builtin error.
            raise FileNotFoundError("file {} not found".format(url))

    return _wrapped
a57867df668eb9b139ee8e07a405868676c9e0f2
31,645
def nllsqfunc(params: np.ndarray, qm: HessianOutput, qm_hessian: np.ndarray, mol: Molecule,
              loss: list[float]=None) -> np.ndarray:
    """Residual function for non-linear least-squares optimization based on
    the difference of MD and QM hessians.

    Keyword arguments
    -----------------
    params : np.ndarray[float](sum of n_params for each term to be fit,)
        stores all parameters for the terms
    qm : HessianOutput
        output from QM hessian file read
    qm_hessian : np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,)
        the flattened 1D QM hessian
    mol : Molecule
        the Molecule object
    loss : list[float] (default None)
        accumulates the loss value at each call when provided

    Returns
    -------
    np.ndarray[float]((3*n_atoms)(3*n_atoms + 1)/2,) of residuals
    """
    # MD hessian for the current parameters; the trailing column of each
    # element appears to hold the non-fitted contribution — confirm against
    # calc_hessian_nl.
    full_md_hessian = calc_hessian_nl(qm.coords, mol, params)

    per_term = []
    non_fit = []
    n_coords = mol.topo.n_atoms * 3
    # Walk the lower triangle, symmetrising each hessian element.
    for i in range(n_coords):
        for j in range(i + 1):
            hes = (full_md_hessian[i, j] + full_md_hessian[j, i]) / 2
            per_term.append(hes[:-1])
            non_fit.append(hes[-1])

    # Aggregate per-term contributions and compare against the QM target
    # minus the non-fitted part.
    fitted = np.sum(np.array(per_term), axis=1)
    residual = fitted - (qm_hessian - np.array(non_fit))

    if loss is not None:
        loss.append(0.5 * np.sum(residual**2))
    return residual
0debdca80de9e7ea136683de04bc838ceb2f42e2
31,646
import time


def wait_for_mongod_shutdown(mongod_control, timeout=2 * ONE_HOUR_SECS):
    """Wait for for mongod to shutdown; return 0 if shutdown occurs within 'timeout', else 1."""
    deadline = time.time() + timeout
    while True:
        status = mongod_control.status()
        if status == "stopped":
            break
        if time.time() >= deadline:
            LOGGER.error("The mongod process has not stopped, current status is %s", status)
            return 1
        LOGGER.info("Waiting for mongod process to stop, current status is %s ", status)
        time.sleep(3)
    LOGGER.info("The mongod process has stopped")
    # We wait a bit, since files could still be flushed to disk, which was causing
    # rsync "file has vanished" errors.
    time.sleep(60)
    return 0
837271069f8aa672372aec944abedbd44664a3d3
31,647
from typing import List
import re


def get_installed_antivirus_software() -> List[dict]:
    """
    Best-effort detection of installed antivirus products.

    Windows servers lack the SecurityCenter2 WMI namespace, so results from
    the SecurityCenter namespace (when available) are merged with guesses
    based on installed-software names and publishers.  Windows Defender is
    not reported since it is not an installed product.
    """
    potential_seccenter_av_engines = []
    potential_av_engines = []

    result = windows_tools.wmi_queries.query_wmi(
        "SELECT * FROM AntivirusProduct",
        namespace="SecurityCenter",
        name="windows_tools.antivirus.get_installed_antivirus_software",
    )
    try:
        for product in result:
            av_engine = {
                "name": None,
                "version": None,
                "publisher": None,
                "enabled": None,
                "is_up_to_date": None,
                "type": None,
            }
            try:
                av_engine["name"] = product["displayName"]
            except KeyError:
                pass
            try:
                # productState packs exec state, update state and type.
                state = product["productState"]
                av_engine["enabled"] = securitycenter_get_product_exec_state(state)
                av_engine["is_up_to_date"] = securitycenter_get_product_update_state(
                    state
                )
                av_engine["type"] = securitycenter_get_product_type(state)
            except KeyError:
                pass
            potential_seccenter_av_engines.append(av_engine)
    # TypeError may happen when securityCenter namespace does not exist
    except (KeyError, TypeError):
        pass

    for product in windows_tools.installed_software.get_installed_software():
        product["enabled"] = None
        product["is_up_to_date"] = None
        product["type"] = None
        try:
            if re.search(
                r"anti.*(virus|viral)|malware", product["name"], re.IGNORECASE
            ):
                potential_av_engines.append(product)
                continue
            if re.search(
                r"|".join(KNOWN_ANTIVIRUS_PRODUCTS_REGEX),
                product["publisher"],
                re.IGNORECASE,
            ):
                potential_av_engines.append(product)
        # Specific case where name is unknown
        except KeyError:
            pass

    # SecurityCenter seems to be less precise than registry search, so keep
    # a SecurityCenter entry only when NO registry entry covers the same
    # product.
    # Fix: the previous loop appended the entry once per non-matching
    # registry engine (duplicating it) and still added it when a match
    # existed.
    av_engines = [
        sec_engine
        for sec_engine in potential_seccenter_av_engines
        if not any(
            sec_engine["name"]
            and engine.get("name")
            and sec_engine["name"] in engine["name"]
            for engine in potential_av_engines
        )
    ]
    return av_engines + potential_av_engines
b122960b48edfb0e193c354293b28bc1ead0a936
31,648
def mi(x,y,k=3,base=2):
    """ Mutual information of x and y
        x,y should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
        if x is a one-dimensional scalar and we have four samples

        NOTE(review): appears to implement a k-nearest-neighbour (KSG-style)
        MI estimator — confirm against the sibling helpers zip2/avgdigamma.
        `k` is the neighbour count; `base` sets the logarithm base of the
        returned value.
    """
    # Wrap each sample in its own list so inputs become lists of vectors.
    x = [[entry] for entry in x]
    y = [[entry] for entry in y]
    assert len(x)==len(y), "Lists should have same length"
    assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
    intens = 1e-10 #small noise to break degeneracy, see doc.
    # Jitter every coordinate slightly so no two points coincide exactly
    # (ties would break the k-NN distance computation).
    x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
    y = [list(p + intens*nr.rand(len(y[0]))) for p in y]
    # Joint-space points; zip2 is a sibling helper — presumably concatenates
    # the paired vectors.
    points = zip2(x,y)
    #Find nearest neighbors in joint space, p=inf means max-norm
    tree = ss.cKDTree(points)
    # Distance to the k-th nearest neighbour of each point (index k skips
    # the point itself, which query returns at index 0).
    dvec = [tree.query(point,k+1,p=float('inf'))[0][k] for point in points]
    # Combine marginal neighbour counts (avgdigamma) with the digamma terms.
    a,b,c,d = avgdigamma(x,dvec), avgdigamma(y,dvec), digamma(k), digamma(len(x))
    # Convert from nats to the requested logarithm base.
    return (-a-b+c+d)/log(base)
960501be5134dcfe99ca29b50622dbfc0b403b78
31,649
def _xls_cc_ir_impl_wrapper(ctx):
    """The implementation of the 'xls_cc_ir' rule.

    Wrapper for xls_cc_ir_impl. See: xls_cc_ir_impl.

    Args:
      ctx: The current rule's context object.

    Returns:
      ConvIRInfo provider
      DefaultInfo provider
    """
    # Delegate the real work; the helper returns the conversion provider,
    # the files built by this rule, and the runfiles they require.
    ir_conv_info, built_files, runfiles = _xls_cc_ir_impl(ctx)
    return [
        ir_conv_info,
        DefaultInfo(
            # Expose both this rule's direct outputs and the outputs built
            # by its transitive xls_cc_ir dependencies.
            files = depset(
                direct = built_files,
                transitive = _get_transitive_built_files_for_xls_cc_ir(ctx),
            ),
            runfiles = runfiles,
        ),
    ]
c76bddc8b05322b2df4af67415f783aa1f2635bb
31,650
from typing import List
from typing import Tuple
from typing import DefaultDict


def create_dataset(message_sizes: List[int],
                   labels: List[int],
                   window_size: int,
                   num_samples: int,
                   rand: np.random.RandomState) -> Tuple[np.ndarray, np.ndarray]:
    """
    Builds the attack dataset by sampling message-size windows per label.

    Args:
        message_sizes: The size of each message (in bytes)
        labels: The true label for each message
        window_size: The size of each sampled window (D)
        num_samples: The number of samples to create
        rand: The random state used to create samples in a reproducible manner
    Returns:
        A tuple of two elements.
        (1) A [N, D'] array of summary features per sampled window
        (2) A [N] array of labels for each input
    """
    assert len(message_sizes) == len(labels), 'Must provide the same number of message sizes and labels'

    num_messages = len(message_sizes)

    # Bucket the observed message sizes by their label.
    sizes_by_label: DefaultDict[int, List[int]] = defaultdict(list)
    for label, size in zip(labels, message_sizes):
        sizes_by_label[label].append(size)

    feature_rows: List[np.ndarray] = []
    sample_labels: List[int] = []

    for label, sizes in sizes_by_label.items():
        # Each label gets a share of samples proportional to its frequency.
        num_to_create = int(round(num_samples * (len(sizes) / num_messages)))

        for _ in range(num_to_create):
            window = rand.choice(sizes, size=window_size)  # [D]
            # Summary statistics over the sampled window.
            iqr = np.percentile(window, 75) - np.percentile(window, 25)
            features = [np.average(window),
                        np.std(window),
                        np.median(window),
                        np.max(window),
                        np.min(window),
                        iqr,
                        geometric_mean(window)]
            feature_rows.append(np.expand_dims(features, axis=0))
            sample_labels.append(label)

    return np.vstack(feature_rows), np.vstack(sample_labels).reshape(-1)
081e0c6ddc18988d8e24a08ec4a4e565f318d23a
31,651
def infer_Tmap_from_clonal_info_alone_private(
    adata_orig, method="naive", clonal_time_points=None, selected_fates=None
):
    """
    Compute transition map using only the lineage information.

    Here, we compute the transition map between neighboring time points.

    We simply average transitions across all clones (or selected clones when method='Weinreb'),
    assuming that the intra-clone transition is uniform within the same clone.

    Parameters
    ----------
    adata_orig: :class:`~anndata.AnnData` object
    method: `str`, optional (default: 'naive')
        Method used to compute the transition map. Choice: {'naive',
        'weinreb'}. For the naive method, we simply average transitions
        across all clones, assuming that the intra-clone transitions are
        uniform within the same clone. For the 'weinreb' method, we first
        find uni-potent clones, then compute the transition map by simply
        averaging across all clonal transitions as the naive method.
    selected_fates: `list`, optional (default: all selected)
        List of targeted fate clusters to define uni-potent clones for the
        weinreb method, which are used to compute the transition map.
    clonal_time_points: `list` of `str`, optional (default: all time points)
        List of time points to be included for analysis.
        We assume that each selected time point has clonal measurements.

    Returns
    -------
    adata: :class:`~anndata.AnnData` object
        The transition map is stored at adata.uns['clonal_transition_map']
    """
    # Restrict the data to the requested clonal time points; the extended
    # Tmap space keeps cells from all selected time points addressable.
    adata_1 = tmap_util.select_time_points(
        adata_orig, time_point=clonal_time_points, extend_Tmap_space=True
    )
    if method not in ["naive", "weinreb"]:
        logg.warn("method not in ['naive','weinreb']; set to be 'weinreb'")
        method = "weinreb"

    cell_id_t2_all = adata_1.uns["Tmap_cell_id_t2"]
    cell_id_t1_all = adata_1.uns["Tmap_cell_id_t1"]

    # Dense accumulator over the full (t1, t2) cell space; filled per time-point pair.
    T_map = np.zeros((len(cell_id_t1_all), len(cell_id_t2_all)))
    clone_annot = adata_1.obsm["X_clone"]
    N_points = len(adata_1.uns["multiTime_cell_id_t1"])

    # One iteration per pair of neighboring time points.
    for k in range(N_points):
        cell_id_t1_temp = adata_1.uns["multiTime_cell_id_t1"][k]
        cell_id_t2_temp = adata_1.uns["multiTime_cell_id_t2"][k]
        if method == "naive":
            logg.info("Use all clones (naive method)")
            # Clone co-membership matrix: entry (i, j) counts shared clones.
            T_map_temp = clone_annot[cell_id_t1_temp] * clone_annot[cell_id_t2_temp].T
        else:
            logg.info("Use only uni-potent clones (weinreb et al., 2020)")
            state_annote = np.array(adata_1.obs["state_info"])
            if selected_fates == None:
                selected_fates = list(set(state_annote))
            potential_vector_clone, fate_entropy_clone = tl.compute_state_potential(
                clone_annot[cell_id_t2_temp].T,
                state_annote[cell_id_t2_temp],
                selected_fates,
                fate_count=True,
            )
            # Uni-potent clones are those whose fate count is exactly 1.
            sel_unipotent_clone_id = np.array(
                list(set(np.nonzero(fate_entropy_clone == 1)[0]))
            )
            clone_annot_unipotent = clone_annot[:, sel_unipotent_clone_id]
            T_map_temp = (
                clone_annot_unipotent[cell_id_t1_temp]
                * clone_annot_unipotent[cell_id_t2_temp].T
            )
            logg.info(
                f"Used uni-potent clone fraction {len(sel_unipotent_clone_id)/clone_annot.shape[1]}"
            )
        # Map the per-time-point block back into the full Tmap coordinate space.
        idx_t1 = np.nonzero(np.in1d(cell_id_t1_all, cell_id_t1_temp))[0]
        idx_t2 = np.nonzero(np.in1d(cell_id_t2_all, cell_id_t2_temp))[0]
        idx_t1_temp = np.nonzero(np.in1d(cell_id_t1_temp, cell_id_t1_all))[0]
        idx_t2_temp = np.nonzero(np.in1d(cell_id_t2_temp, cell_id_t2_all))[0]
        # .A converts the sparse product to a dense array for assignment.
        T_map[idx_t1[:, np.newaxis], idx_t2] = T_map_temp[idx_t1_temp][:, idx_t2_temp].A

    T_map = T_map.astype(int)
    adata_1.uns["clonal_transition_map"] = ssp.csr_matrix(T_map)
    return adata_1
9926e2a6faf50bed2d1668de031a600e0f65c1af
31,652
import math


def percentile(seq: t.Iterable[float], percent: float) -> float:
    """
    Return the *percent*-th percentile of ``seq`` using linear interpolation
    between closest ranks.

    prometheus-client 0.6.0 doesn't support percentiles, so we use this
    implementation

    Stolen from https://github.com/heaviss/percentiles that was stolen from
    http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
    """
    if not seq:
        raise ValueError('seq must be non-empty iterable')
    if not (0 < percent < 100):
        raise ValueError('percent parameter must be between 0 and 100')

    ordered = sorted(seq)
    # Fractional rank of the requested percentile within the sorted values.
    rank = (len(ordered) - 1) * percent / 100
    lo = math.floor(rank)
    hi = math.ceil(rank)
    if lo == hi:
        # Exact hit on an element: no interpolation needed.
        return ordered[int(rank)]
    # Linear interpolation between the two surrounding values.
    lower_part = ordered[lo] * (hi - rank)
    upper_part = ordered[hi] * (rank - lo)
    return lower_part + upper_part
640f132366bad8bf0c58aa318b5be60136925ab9
31,653
from typing import Union


def select_view_by_cursors(**kwargs):
    """
    Selects the Text View ( visible selection ) for the given cursors

    Keyword Args:
        sel (Tuple[XTextRange, XTextRange], XTextRange): selection as tuple of left and right range or as text range.
        o_doc (GenericTextDocument, optional): current document (xModel). Defaults to current document.
        o_text (XText, optional): xText object used only when sel is a xTextRangeObject.
        require_selection (bool, optional): If ``True`` then a check is preformed to see if anything is selected;
            Otherwise, No check is done. Default ``True``

    Raises:
        ValueError: if ``sel`` is ``None`` (missing).
        ValueError: if ``sel`` is passed in as ``tuple`` and has fewer than two elements.
        Exception: If Error selecting view.
    """
    o_doc: 'GenericTextDocument' = kwargs.get('o_doc', None)
    if o_doc is None:
        o_doc = get_xModel()

    # Optionally bail out early when nothing is selected in the document.
    # (fix: was `_sel_check == True`/`== False` non-idiomatic comparisons)
    if kwargs.get('require_selection', True) and not is_anything_selected(o_doc=o_doc):
        return None

    _sel: 'Union[tuple, XTextRange]' = kwargs.get('sel', None)
    if _sel is None:
        # docstring fix: this raises ValueError, not TypeError as previously documented
        raise ValueError("select_view_by_cursors() 'sel' argument is required")

    l_cursor: 'XTextCursor' = None
    r_cursor: 'XTextCursor' = None
    if isinstance(_sel, tuple):
        if len(_sel) < 2:
            raise ValueError(
                "select_view_by_cursors() sel argument when passed as a tuple is expected to have two elements")
        l_cursor = _sel[0]
        r_cursor = _sel[1]
    else:
        # A single XTextRange: derive left/right cursors from the text object.
        x_text: 'Union[XText, None]' = kwargs.get("o_text", None)
        if x_text is None:
            x_text = get_selected_text(o_doc=o_doc)
        if x_text is None:
            # there is an issue. Something should be selected.
            # msg = "select_view_by_cursors() Something was expected to be selected but xText object does not exist"
            return None
        l_cursor = _get_left_cursor(o_sel=_sel, o_text=x_text)
        r_cursor = _get_right_cursor(o_sel=_sel, o_text=x_text)

    vc = get_view_cursor(o_doc=o_doc)
    try:
        vc.setVisible(False)
        vc.gotoStart(False)
        vc.collapseToStart()
        # Extend the visible selection from the left cursor to the right cursor.
        vc.gotoRange(l_cursor, False)
        vc.gotoRange(r_cursor, True)
    finally:
        # fix: removed pointless `except Exception as e: raise e`; the bare
        # try/finally preserves the original traceback while still restoring
        # view-cursor visibility on both success and failure.
        if not vc.isVisible():
            vc.setVisible(True)
42c42c4b60d802a66e942ac8fa8efe97a8253ea3
31,654
from typing import List
from typing import Dict


def load_types(
    directories: List[str],
    loads: LoadedFiles = DEFAULT_LOADS,
) -> Dict[str, dict]:
    """Load schema types and optionally register them."""

    collected: Dict[str, dict] = {}

    # Merge raw schema data from every directory into a single mapping.
    for schema_dir in directories:
        load_dir(schema_dir, collected, None, loads)

    return collected
8dc1f3625c03451eb9ac28804715ccf260400536
31,655
def fit_circle(img, show_rect_or_cut='show', half_width=1024, min_axis=900):
    """
    fit an ellipse to the contour in the image and find the overlaying square. Either cut the center square or just
    plot the resulting square
    Code partly taken from here:
    https://stackoverflow.com/questions/55621959/opencv-fitting-a-single-circle-to-an-image-in-python
    :param img: numpy array with width, height,3
    :param show_rect_or_cut: string 'show' or 'cut'
    :param half_width: half the side length of the centered square; default 1024
        keeps the original 2048x2048 behavior (generalized from the hard-coded constant)
    :param min_axis: minimum ellipse axis length below which the image is flagged
        as difficult; default 900 keeps the original behavior
    :return: image, either cut center piece or with drawn square
             flag, whether algorithm thinks this image is difficult (if the circle is too small or narrow)
    """
    # convert image to grayscale and use otsu threshold to binarize
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    thresh = cv2.bitwise_not(thresh)

    # fill holes
    element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(15, 15))
    morph_img = thresh.copy()
    cv2.morphologyEx(src=thresh, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img)

    # find contours in image and use the biggest found contour
    contours, _ = cv2.findContours(morph_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours]
    sorted_areas = np.sort(areas)
    cnt = contours[areas.index(sorted_areas[-1])]  # the biggest contour
    if len(cnt) < 10:
        # too few contour points for a meaningful ellipse fit
        return img, 'Diff'

    # fit ellipse and use found center as center for square
    ellipse = cv2.fitEllipse(cnt)
    if np.min((ellipse[1][0], ellipse[1][1])) < min_axis:
        flag = 'Diff'
    else:
        flag = False
    r_center_x = int(ellipse[0][0])
    r_center_y = int(ellipse[0][1])
    # clamp the center so the square stays fully inside the image
    # (fix: the literal 1024 is now `half_width`, used consistently below)
    # NOTE(review): clamping uses img.shape[0] for x and img.shape[1] for y while
    # the slice below treats x as the column index — this assumes square images;
    # confirm for non-square input.
    r_center_x = np.max((r_center_x, half_width))
    r_center_x = np.min((r_center_x, img.shape[0] - half_width))
    r_center_y = np.max((r_center_y, half_width))
    r_center_y = np.min((r_center_y, img.shape[1] - half_width))

    if show_rect_or_cut == 'show':
        cv2.rectangle(img, (r_center_x - half_width, r_center_y - half_width),
                      (r_center_x + half_width, r_center_y + half_width), (0, 150, 0), 40)
    elif show_rect_or_cut == 'cut':
        img = img[r_center_y - half_width:r_center_y + half_width,
                  r_center_x - half_width:r_center_x + half_width, :]
    return img, flag
fdeb8f9a24159236609eac271016624f95f62504
31,656
from typing import Mapping
from typing import Any
from typing import MutableMapping


def unflatten_dict(
    d: Mapping[str, Any], separator: str = '.', unflatten_list: bool = False, sort: bool = False
) -> MutableMapping[str, Any]:
    """
    Rebuild a nested mapping from a flat mapping with compound keys.

    Example:

    In []: unflatten_dict({'count.chans.HU_SN': 10})
    Out[]: {'count': {'chans': {'HU_SN': 10}}}

    In []: unflatten_dict({'a.0.b.f.0': 1, 'a.0.b.f.1': 2, 'a.0.b.f.2': 3,
                           'a.1.c': 2, 'd.e': 1}, unflatten_list=True)
    Out[]: {'a': [{'b': {'f': [1, 2, 3]}}, {'c': 2}], 'd': {'e': 1}}
    """
    result: dict[str, Any] = {}
    keys = sorted(d.keys()) if sort else d
    for flat_key in keys:
        *branch, leaf = flat_key.split(separator)
        node: dict[str, Any] = result
        # Walk/create intermediate dicts for every segment but the last.
        for segment in branch:
            node = node.setdefault(segment, {})
        node[leaf] = d[flat_key]
    if unflatten_list:
        # Convert dicts with contiguous integer keys into lists.
        return _unflatten_lists(result)
    return result
40662a4884171c444ed40c654497f6a0e17a132d
31,657
def _xinf_1D(xdot,x0,args=(),xddot=None,xtol=1.49012e-8): """Private function for wrapping the solving for x_infinity for a variable x in 1 dimension""" try: if xddot is None: xinf_val = float(fsolve(xdot,x0,args,xtol=xtol)) else: xinf_val = float(newton_meth(xdot,x0,fprime=xddot,args=args)) except RuntimeError: xinf_val = NaN return xinf_val
e69c08b914395d93a94544d9ba085a440951a03c
31,658
import types
from typing import Dict
import operator


def to_bag_of_words(
    doclike: types.DocLike,
    *,
    by: TokenGroupByType = "lemma_",
    weighting: WeightingType = "count",
    **kwargs,
) -> Dict[int, int | float] | Dict[str, int | float]:
    """
    Transform a ``Doc`` or ``Span`` into a bag-of-words: the set of unique words therein
    mapped to their absolute, relative, or binary frequencies of occurrence.

    Args:
        doclike
        by: Attribute by which spaCy ``Token`` s are grouped before counting,
            as given by ``getattr(token, by)``: "lemma" (base form), "lower"
            (lowercased text), "norm" (normalized text), or "orth" (exact text).
            Append an underscore (e.g. "lemma_") to get string keys instead of ids.
        weighting: Type of weighting to assign to unique words given by ``by``:
            "count" (absolute number of occurrences), "freq" (counts normalized
            by the total token count), or "binary" (all weights set to 1).
        **kwargs: Passed directly on to :func:`textacy.extract.words()`
            - filter_stops: If True, stop words are removed before counting.
            - filter_punct: If True, punctuation tokens are removed before counting.
            - filter_nums: If True, number-like tokens are removed before counting.

    Returns:
        Mapping of a unique word id or string (depending on the value of ``by``)
        to its absolute, relative, or binary frequency of occurrence
        (depending on the value of ``weighting``).

    Note:
        For "freq" weighting, the resulting set of frequencies won't (necessarily) sum
        to 1.0, since all tokens are used when normalizing counts but some (punctuation,
        stop words, etc.) may be filtered out of the bag afterwards.

    See Also:
        :func:`textacy.extract.words()`
    """
    # Extract candidate word tokens, honoring any filtering kwargs.
    tokens = basics.words(doclike, **kwargs)
    # Group tokens by the requested attribute and count occurrences.
    counts = cytoolz.recipes.countby(operator.attrgetter(by), tokens)
    # Convert raw counts into the requested weighting scheme.
    return _reweight_bag(weighting, counts, doclike)
0065eba8ff7f74b420efc8c65688ab293dee1dda
31,659
def get_trip_info(origin, destination, date):
    """
    Provides basic template for response, you can change as many things as you like.

    :param origin: from which airport your trip beings
    :param destination: where are you flying to
    :param date: when
    :return: a QPX-Express-style ``tripsSearch`` response dict with two canned
        trip options, with ``origin``, ``destination`` and ``date`` substituted
        into the flight legs
    """
    # NOTE(review): this is a canned QPX Express API response used as a
    # template/fixture; only the leg origin/destination and times are
    # parameterized. The first option's latestTicketingTime is a hard-coded
    # date while the second uses ``date`` — presumably intentional, confirm.
    template = {
        "kind": "qpxExpress#tripsSearch",
        "trips": {
            "kind": "qpxexpress#tripOptions",
            "requestId": "SYzLMFMFPCrebUp5H0NaGL",
            "data": {
                "kind": "qpxexpress#data",
                "airport": [
                    {"kind": "qpxexpress#airportData", "code": "AMS", "city": "AMS", "name": "Amsterdam Schiphol Airport"},
                    {"kind": "qpxexpress#airportData", "code": "LGW", "city": "LON", "name": "London Gatwick"}
                ],
                "city": [
                    {"kind": "qpxexpress#cityData", "code": "AMS", "name": "Amsterdam"},
                    {"kind": "qpxexpress#cityData", "code": "LON", "name": "London"}
                ],
                "aircraft": [
                    {"kind": "qpxexpress#aircraftData", "code": "319", "name": "Airbus A319"},
                    {"kind": "qpxexpress#aircraftData", "code": "320", "name": "Airbus A320"}
                ],
                "tax": [
                    {"kind": "qpxexpress#taxData", "id": "GB_001", "name": "United Kingdom Air Passengers Duty"},
                    {"kind": "qpxexpress#taxData", "id": "UB", "name": "United Kingdom Passenger Service Charge"}
                ],
                "carrier": [
                    {"kind": "qpxexpress#carrierData", "code": "BA", "name": "British Airways p.l.c."}
                ]
            },
            "tripOption": [
                # First (cheaper) trip option: BA 2762, afternoon departure.
                {
                    "kind": "qpxexpress#tripOption",
                    "saleTotal": "GBP47.27",
                    "id": "OAcAQw8rr9MNhwQoBntUKJ001",
                    "slice": [
                        {
                            "kind": "qpxexpress#sliceInfo",
                            "duration": 75,
                            "segment": [
                                {
                                    "kind": "qpxexpress#segmentInfo",
                                    "duration": 75,
                                    "flight": {"carrier": "BA", "number": "2762"},
                                    "id": "GStLakphRYJX3LbK",
                                    "cabin": "COACH",
                                    "bookingCode": "O",
                                    "bookingCodeCount": 1,
                                    "marriedSegmentGroup": "0",
                                    "leg": [
                                        {
                                            "kind": "qpxexpress#legInfo",
                                            "id": "LgJHYCVgG0AiE1PH",
                                            "aircraft": "320",
                                            "arrivalTime": "%sT18:05+01:00" % date,
                                            "departureTime": "%sT15:50+00:00" % date,
                                            "origin": origin,
                                            "destination": destination,
                                            "originTerminal": "N",
                                            "duration": 75,
                                            "mileage": 226,
                                            "meal": "Snack or Brunch"
                                        }
                                    ]
                                }
                            ]
                        }
                    ],
                    "pricing": [
                        {
                            "kind": "qpxexpress#pricingInfo",
                            "fare": [
                                {
                                    "kind": "qpxexpress#fareInfo",
                                    "id": "A855zsItBCELBykaeqeBDQb5hPZQIOtkOZ8uDq0lD5VU",
                                    "carrier": "BA",
                                    "origin": origin,
                                    "destination": destination,
                                    "basisCode": "OV1KO"
                                }
                            ],
                            "segmentPricing": [
                                {
                                    "kind": "qpxexpress#segmentPricing",
                                    "fareId": "A855zsItBCELBykaeqeBDQb5hPZQIOtkOZ8uDq0lD5VU",
                                    "segmentId": "GStLakphRYJX3LbK"
                                }
                            ],
                            "baseFareTotal": "GBP22.00",
                            "saleFareTotal": "GBP22.00",
                            "saleTaxTotal": "GBP25.27",
                            "saleTotal": "GBP47.27",
                            "passengers": {"kind": "qpxexpress#passengerCounts", "adultCount": 1},
                            "tax": [
                                {
                                    "kind": "qpxexpress#taxInfo",
                                    "id": "UB",
                                    "chargeType": "GOVERNMENT",
                                    "code": "UB",
                                    "country": "GB",
                                    "salePrice": "GBP12.27"
                                },
                                {
                                    "kind": "qpxexpress#taxInfo",
                                    "id": "GB_001",
                                    "chargeType": "GOVERNMENT",
                                    "code": "GB",
                                    "country": "GB",
                                    "salePrice": "GBP13.00"
                                }
                            ],
                            "fareCalculation": "LON BA AMS 33.71OV1KO NUC 33.71 END ROE 0.652504 FARE GBP 22.00 XT 13.00GB 12.27UB",
                            "latestTicketingTime": "2016-01-11T23:59-05:00",
                            "ptc": "ADT"
                        }
                    ]
                },
                # Second (pricier) trip option: BA 2758, morning departure.
                {
                    "kind": "qpxexpress#tripOption",
                    "saleTotal": "GBP62.27",
                    "id": "OAcAQw8rr9MNhwQoBntUKJ002",
                    "slice": [
                        {
                            "kind": "qpxexpress#sliceInfo",
                            "duration": 80,
                            "segment": [
                                {
                                    "kind": "qpxexpress#segmentInfo",
                                    "duration": 80,
                                    "flight": {"carrier": "BA", "number": "2758"},
                                    "id": "GW8rUjsDA234DdHV",
                                    "cabin": "COACH",
                                    "bookingCode": "Q",
                                    "bookingCodeCount": 9,
                                    "marriedSegmentGroup": "0",
                                    "leg": [
                                        {
                                            "kind": "qpxexpress#legInfo",
                                            "id": "Lp08eKxnXnyWfJo4",
                                            "aircraft": "319",
                                            "arrivalTime": "%sT10:05+01:00" % date,
                                            "departureTime": "%sT07:45+00:00" % date,
                                            "origin": origin,
                                            "destination": destination,
                                            "originTerminal": "N",
                                            "duration": 80,
                                            "mileage": 226,
                                            "meal": "Snack or Brunch"
                                        }
                                    ]
                                }
                            ]
                        }
                    ],
                    "pricing": [
                        {
                            "kind": "qpxexpress#pricingInfo",
                            "fare": [
                                {
                                    "kind": "qpxexpress#fareInfo",
                                    "id": "AslXz8S1h3mMcnYUQ/v0Zt0p9Es2hj8U0We0xFAU1qDE",
                                    "carrier": "BA",
                                    "origin": origin,
                                    "destination": destination,
                                    "basisCode": "QV1KO"
                                }
                            ],
                            "segmentPricing": [
                                {
                                    "kind": "qpxexpress#segmentPricing",
                                    "fareId": "AslXz8S1h3mMcnYUQ/v0Zt0p9Es2hj8U0We0xFAU1qDE",
                                    "segmentId": "GW8rUjsDA234DdHV"
                                }
                            ],
                            "baseFareTotal": "GBP37.00",
                            "saleFareTotal": "GBP37.00",
                            "saleTaxTotal": "GBP25.27",
                            "saleTotal": "GBP62.27",
                            "passengers": {"kind": "qpxexpress#passengerCounts", "adultCount": 1},
                            "tax": [
                                {
                                    "kind": "qpxexpress#taxInfo",
                                    "id": "UB",
                                    "chargeType": "GOVERNMENT",
                                    "code": "UB",
                                    "country": "GB",
                                    "salePrice": "GBP12.27"
                                },
                                {
                                    "kind": "qpxexpress#taxInfo",
                                    "id": "GB_001",
                                    "chargeType": "GOVERNMENT",
                                    "code": "GB",
                                    "country": "GB",
                                    "salePrice": "GBP13.00"
                                }
                            ],
                            "fareCalculation": "LON BA AMS 56.70QV1KO NUC 56.70 END ROE 0.652504 FARE GBP 37.00 XT 13.00GB 12.27UB",
                            "latestTicketingTime": "%sT23:59-05:00" % date,
                            "ptc": "ADT"
                        }
                    ]
                }
            ]
        }
    }
    return template
d1dfd35f41538e800b5c6f5986faac7fcd30ebf3
31,660
def serialise(data, data_type=None):
    """
    Serialises the specified data.

    The result is a ``bytes`` object. The ``deserialise`` operation turns it back into a
    copy of the original object.
    :param data: The data that must be serialised.
    :param data_type: The type of data that will be provided. If no data type is
    provided, the data type is found automatically.
    :return: A ``bytes`` object representing exactly the state of the data.
    """
    if data_type is None:
        # Auto-detect the data type from the instance itself.
        data_type = type_of(data)
        if data_type is None:
            raise SerialisationException("The data type of object {instance} could not automatically be determined.".format(instance=str(data)))
    try:
        data_plugin = luna.plugins.plugins_by_type["data"][data_type]
        return data_plugin["data"]["serialise"](data)
    except KeyError as key_error:  # Plug-in with specified data type is not available.
        raise KeyError("There is no activated data plug-in with data type {data_type} to serialise with.".format(data_type=data_type)) from key_error
6c4e7b144e3e938d30cceee5503290f8cf31ca27
31,661
from sys import prefix


def localenv(*args, **kwargs):
    """Execute cmd in local environment.

    Pops ``environment`` and ``config_filename`` out of ``kwargs``, builds the
    environment-variable prefix from them, and runs ``local`` with the
    remaining arguments under that prefix.
    """
    # Remove empty keys
    kwargs = {k: v for k, v in kwargs.items() if v}

    template_vars = {
        "root_path": ROOT_PATH,
        "environment": kwargs.pop("environment", DEFAULT_ENVIRONMENT),
    }

    # By default, the config filename includes the role name,
    # that's why we need to format it.
    # BUG FIX: the previous code popped "config_filename" twice, so the second
    # pop always returned the default and a caller-supplied value was silently
    # discarded. Pop once and format that value.
    config_filename = kwargs.pop("config_filename", DEFAULT_CONFIG_FILENAME)
    template_vars["config_filename"] = config_filename.format(**template_vars)

    env_prefix = ENV_VAR_PREFIX.format(**template_vars)

    with prefix(env_prefix):
        return local(*args, **kwargs)
20a9326e0f9852eb83757655540925a83a9b3cff
31,662
def topopebreptool_RegularizeShells(*args):
    """
    SWIG-generated wrapper delegating to the C++ ``TopOpeBRepTool`` static method.

    * Returns <False> if the shell is valid (the solid is a set of faces connexed by edges with connexity 2). Else, splits faces of the shell; <OldFacesnewFaces> describes (face, splits of face).

    :param aSolid:
    :type aSolid: TopoDS_Solid &
    :param OldSheNewShe:
    :type OldSheNewShe: TopTools_DataMapOfShapeListOfShape &
    :param FSplits:
    :type FSplits: TopTools_DataMapOfShapeListOfShape &
    :rtype: bool
    """
    # Thin pass-through to the native extension module.
    return _TopOpeBRepTool.topopebreptool_RegularizeShells(*args)
8aa44c5b79f98f06596a5e6d9db8a4cf18f7dad3
31,663
import threading from typing import Optional from typing import Callable def _cancel_task_if( logger: gluetool.log.ContextAdapter, cancel: threading.Event, undo: Optional[Callable[[], None]] = None ) -> bool: """ Check given cancellation event, and if it's set, call given (optional) undo callback. Returns ``True`` if task is supposed to be cancelled, ``False`` otherwise. """ if not cancel.is_set(): logger.debug('cancellation not requested') return False logger.warning('cancellation requested') if undo: logger.debug('running undo step') undo() return True
0c553e8c9191fb8dffab5c34f18fac1302fa53bc
31,664
def empty_filter(item, *args, **kwargs):
    """
    Placeholder function to pass along instead of filters.

    Accepts any call signature and always reports the item as passing.
    """
    # Unconditionally accept every item.
    return True
d72ac5a0f787557b78644bcedd75e71f92c38a0b
31,665
def Get_User_Tags(df, json_response, i, github_user):
    """ Calculate the tags for a user.

    Scans repo ``i`` of the user's GitHub API repo listing and scores the
    tag columns of ``df`` by matching the repo's language, topics, name and
    description. Returns ``(df with the user's row appended, all_repos_tags)``.

    NOTE(review): ``new_element`` is used throughout but its creation is
    commented out below — as written this function raises NameError unless
    ``new_element`` exists in an enclosing/global scope. Confirm against the
    original module.
    """
    all_repos_tags = pd.DataFrame(0, columns=df.columns, index=pyjq.all(".[] | .name", json_response))
    num_repos = len(pyjq.all(".[] | .name", json_response))
    # new_element = pd.DataFrame(0, np.zeros(1), columns =df.columns)
    tags = {}
    # for i in range(num_repos):
    # Pull the metadata of repo ``i`` only.
    repo_names = pyjq.all(".[%s] | .name" % i, json_response)
    repo_languages = pyjq.all(".[%s] | .language" % i, json_response)
    repo_description = pyjq.all(".[%s] | .description" % i, json_response)
    repo_topics = pyjq.all(".[%s] | .topics" % i, json_response)
    #
    # print (repo_names,repo_languages,repo_languages,repo_topics)
    #
    # We have two structure:
    #
    # all_repos_tags = a dataframe with a row per repo with values [0,1]
    # new_element = One row dataframa with the sum of frecuencies of all repos.
    reponame_lower = repo_names[0].lower()
    all_repos_tags.loc[reponame_lower] = 0
    # 'kk' is a sentinel placeholder for missing fields so the substring
    # checks below never see None.
    if repo_description[0] is None:
        repo_description = ['kk']
    if repo_languages[0] is None:
        repo_languages = ['kk']
    #
    if repo_topics[0] is None:
        repo_topics = ['kk']
    #
    # Lowercase every field; best-effort (topics may be a list, hence try/except).
    try:
        repo_names[0] = repo_names[0].lower()
    except Exception:
        pass
    try:
        repo_languages[0] = repo_languages[0].lower()
    except Exception:
        pass
    try:
        repo_description[0] = repo_description[0].lower()
    except Exception:
        pass
    try:
        repo_topics[0] = repo_topics[0].lower()
    except Exception:
        pass
    #
    # Avoid this names because of are substring of another tag ()
    COLUMNS_TO_SKIP=["java" , "c"]
    # Weight (i+1) makes later repos count more; tag 1 = language match.
    if repo_languages[0] in df.columns :
        new_element[repo_languages[0]] += (i+1)
        tags[repo_languages[0]] = 0
        all_repos_tags.loc[reponame_lower][repo_languages[0]] = 1
        #print("Added tag 1 : ", (i+1)," " ,repo_names[0] ," " , repo_languages[0])
    for column in df.columns:
        if column in COLUMNS_TO_SKIP :
            continue
        # tag 2 = topic match; tag 3 = name match; tag 4 = description match.
        if column in repo_topics[0] :
            new_element[column] += (i+1)
            all_repos_tags.loc[reponame_lower][column] = 1
            tags[column] = 0
            #print("Added tag 2 : ", (i+1)," " ,repo_names[0] ," " , column)
        else:
            # Only longer tag names are matched as substrings to limit noise.
            if len(column) > 4 :
                if column in repo_names[0] or column.replace("-"," ") in repo_names[0]:
                    #print("Added tag 3 : ", (i+1)," " ,repo_names[0] ," " , column)
                    new_element[column] += (i+1)
                    all_repos_tags.loc[reponame_lower][column] = 1
                    tags[column] = 0
                else :
                    if column in repo_description[0] or column.replace("-"," ") in repo_description[0]:
                        #print("Added tag 4 : ", (i+1)," " ,repo_names[0] ," " , column)
                        new_element[column] += (i+1)
                        all_repos_tags.loc[reponame_lower][column] = 1
                        tags[column] = 0
    # end range repos
    #print("new_element.shape: ", new_element.shape , " github_user:", github_user)
    #
    # Normalize the accumulated weights to frequencies.
    # NOTE(review): the loop variable ``i`` below shadows the ``i`` parameter.
    total=new_element.iloc[0].sum()
    #print(tags)
    if total != 0 :
        for i in tags :
            if new_element[i].iloc[0] != 0 :
                new_element[i] = ( new_element[i].iloc[0]/total)
                #print (i , new_element[i].iloc[0] )
    #
    # Clean up a CSV-roundtrip artifact column, if present.
    try:
        all_repos_tags['repos'] = all_repos_tags['Unnamed: 0']
        del all_repos_tags['Unnamed: 0']
        all_repos_tags = all_repos_tags.set_index('repos')
    except Exception:
        pass
    # Index the user's row by their GitHub user name and append it to df.
    new_element['names']=github_user
    new_element = new_element.set_index(new_element.names)
    del(new_element['names'])
    #
    df = pd.concat([df, new_element])
    print("Added : ", github_user ,df.shape)
    return df, all_repos_tags
80955e2794e9f9d4f65a3f048bc7dc0d450ebb3d
31,666
import ctypes


def ssize(newsize, cell):
    """
    Set the size (maximum cardinality) of a CSPICE cell of any data type.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ssize_c.html

    :param newsize: Size (maximum cardinality) of the cell.
    :type newsize: int
    :param cell: The cell.
    :type cell: spiceypy.utils.support_types.SpiceCell
    :return: The updated cell.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(cell, stypes.SpiceCell)
    # CSPICE expects a C int and modifies the cell in place.
    size_c = ctypes.c_int(newsize)
    libspice.ssize_c(size_c, ctypes.byref(cell))
    return cell
52eb884e7477ddb98dc905ab848c61b83ac16123
31,667
from typing import Optional
import os


def env_interactive() -> Optional[bool]:
    """
    Check the `GLOBUS_CLI_INTERACTIVE` environment variable for a boolean, and
    *let* `strtobool` raise a `ValueError` if it doesn't parse.
    """
    raw_value = os.getenv("GLOBUS_CLI_INTERACTIVE")
    if raw_value is None:
        # Variable unset: no explicit preference.
        return None
    return bool(strtobool(raw_value.lower()))
ecdddad354757066fc2dde170a044b6462d9c78b
31,668
def extend_data(data, length, offset):
    """Extend data using a length and an offset.

    Appends ``length`` units taken by repeating the last ``offset`` units of
    ``data`` (LZ77-style back-reference copy).
    """
    if length >= offset:
        # Repeat the tail enough times to cover ``length``, then trim.
        tail = data[-offset:]
        repeated = tail * (alignValue(length, offset) // offset)
        return data + repeated[:length]
    # Fewer units needed than the window: a partial slice of the tail suffices.
    return data + data[-offset:-offset + length]
923372c1fde14335331eb38b40e118b426cc9219
31,669
def RAND_egd(path): # real signature unknown; restored from __doc__
    """
    RAND_egd(path) -> bytes

    Queries the entropy gather daemon (EGD) on the socket named by 'path'.
    Returns number of bytes read.  Raises SSLError if connection to EGD
    fails or if it does not provide enough data to seed PRNG.
    """
    # NOTE(review): auto-generated stub (signature restored from the C
    # module's __doc__); the empty-string return is a placeholder and does
    # not reflect the real _ssl behavior.
    return ""
5ef4e3e065c44058996c1793541cd9f2a599b106
31,670
from typing import List
from typing import Dict
from typing import Any


def get_types_map(types_array: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    """Build a mapping from each type's ``name`` to its full definition.

    (docstring fix: the previous text described getting a single type name,
    not building the name -> type map this function actually returns.)

    Args:
        types_array: metadata/functionality type definitions, each expected
            to contain a ``"name"`` key.

    Returns:
        Dict keyed by ``type_["name"]``, with the original dicts as values;
        when names collide, the last occurrence wins.
    """
    return {type_["name"]: type_ for type_ in types_array}
9354eff434b589a19360ee13d8bf7d9ab9e1002d
31,671
def update_flavor(request, **kwargs):
    """Update a flavor.

    Reads the flavor spec from the request body and forwards the editable
    fields to the load-balancer SDK.
    """
    spec = request.DATA['flavor']
    conn = _get_sdk_connection(request)
    updated = conn.load_balancer.update_flavor(
        spec['id'],
        name=spec.get('name'),
        description=spec.get('description'),
        enabled=spec.get('enabled'),
    )
    return _get_sdk_object_dict(updated)
9f165df73f3c557956d466e3fec6d720a1ee76cb
31,672
from typing import List
import re


async def get_all_product_features_from_cluster() -> List[str]:
    """
    Returns a list of all product.feature in the cluster.
    """
    # LicenseName entries look like "<product>_<feature>" (separator may be
    # '_', '-' or '.').
    license_pattern = re.compile(
        r"LicenseName=(?P<product>[a-zA-Z0-9_]+)[_\-.](?P<feature>\w+)"
    )

    show_lic_output = await scontrol_show_lic()

    features: List[str] = []
    for line in show_lic_output.split("\n"):
        matched = license_pattern.match(line)
        if matched is None:
            continue
        groups = matched.groupdict()
        features.append("{product}.{feature}".format(**groups))
    return features
9822c952654b3e2516e0ec3b5cf397ced8b3eaaf
31,673
def call_status():
    """
    Get the current bathing status for the (hard-coded) test user.

    Status codes: before bath: 0, in bath: 1, after bath: 2.

    :return: JSON response with the status dict
    """
    fixed_user = "testuser"
    return jsonify(check_status(fixed_user))
9ef37eeb309c64cb7b4759323b4cb9569b910c65
31,674
def dice_loss(pred, target, smooth=1.):
    """Soft Dice loss, averaged over batch and channels.

    Expects 4D tensors (N, C, H, W); the two spatial dimensions are reduced
    per (batch, channel) pair. ``smooth`` avoids division by zero.
    """
    pred = pred.contiguous()
    target = target.contiguous()

    overlap = (pred * target).sum(dim=2).sum(dim=2)
    denom = pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2)
    dice = (2. * overlap + smooth) / (denom + smooth)
    return (1 - dice).mean()
5879769ac379395e35f9accda9d917094aa07301
31,675
def _get_scope_contacts_by_object_id(connection, object_id, object_type,
                                     scope_contact_type):
  """Gets scope contacts by object id.

  Args:
    connection: Database connection.
    object_id: An integer value of scope object id.
    object_type: A string value of scope object type.
    scope_contact_type: A string value of scope contact.
  Returns:
    Set of emails by scope contacts.
  """
  # Join people through the ACL tables, filtered to the requested role/object.
  rows = connection.execute(
      text("""
          SELECT `people`.`email`
          FROM `access_control_people`
          INNER JOIN `people` ON
            `access_control_people`.`person_id` = `people`.`id`
          INNER JOIN `access_control_list` ON
            `access_control_people`.`ac_list_id` = `access_control_list`.`id`
          INNER JOIN `access_control_roles` ON
            `access_control_list`.`ac_role_id` = `access_control_roles`.`id`
          WHERE `access_control_roles`.`name` = :scope_contact_type AND
          `access_control_list`.`object_id` = :object_id AND
          `access_control_list`.`object_type` = :object_type
      """),
      scope_contact_type=scope_contact_type,
      object_id=object_id,
      object_type=object_type).fetchall()
  return {row.email for row in rows}
c7c1fe05cb48fb5e28499f995a8c606c9e421d6e
31,676
import re


def remove_mentions(text):
    """Remove @-mentions from the text.

    :param text: input string possibly containing ``@username`` mentions
    :return: ``text`` with every ``@word`` occurrence deleted
    """
    # fix: use a raw string — '@\w+' in a plain literal is an invalid escape
    # sequence (DeprecationWarning, a SyntaxWarning on newer Pythons).
    return re.sub(r'@\w+', '', text)
5cbdd40a602f24f8274369e92f9159cbb2f6a230
31,677
def put_thread(req_thread: ReqThreadPut):
    """Put thread for video to DynamoDB.

    :param req_thread: validated request payload for the thread update
    :return: the raw DynamoDB ``update_item`` response
    :raises HTTPException: 404 with the underlying error message when the
        update fails
    """
    # fix: renamed local `input` (shadowed the builtin).
    update_input = create_update_item_input(req_thread)
    try:
        return client.update_item(**update_input)
    except ClientError as err:
        # Surface the AWS error message to the API client.
        err_message = err.response["Error"]["Message"]
        raise HTTPException(status_code=404, detail=err_message)
    except Exception as err:
        # fix: was `except BaseException`, which also converted
        # KeyboardInterrupt/SystemExit into HTTP 404 responses.
        raise HTTPException(status_code=404, detail=str(err))
733370296c022a985b49193a1b528e0df8271624
31,678
from admin.admin_blueprint import admin_blueprint
from questionnaire.questionnaire_blueprint import questionnaire_blueprint
from user.user_blueprint import user_blueprint


def create_app(config_name: str):
    """Application factory

    Args:
        config_name (str): the application config name to determine which env to run on

    Returns:
        The Flask application object
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])

    # Bind every extension to this application instance.
    for extension in (db, ma, flask_bcrypt, jwt, admin, redis_client):
        extension.init_app(app)

    for blueprint in (admin_blueprint, user_blueprint, questionnaire_blueprint):
        app.register_blueprint(blueprint)

    @app.errorhandler(ValidationError)
    def _on_validation_error(error):
        """Error handler called when a ValidationError Exception is raised"""
        response = jsonify(error.to_dict())
        response.status_code = error.status_code
        return response

    @app.errorhandler(500)
    def _on_internal_server_error(error):
        return {"message": "Internal Server Error", "status": "error"}, 500

    @jwt.invalid_token_loader
    def _on_invalid_token(expired_token):
        return {
            "status": "error",
            "message": "Your session is invalid. Kindly login again",
        }, 401

    @jwt.unauthorized_loader
    def _on_missing_auth_header(error):
        return {
            "status": "error",
            "message": "Authentication type should be a bearer type.",
        }, 401

    @jwt.expired_token_loader
    def _on_expired_token(error):
        return {
            "status": "error",
            "message": "Your session has expired. Kindly login again.",
        }, 401

    return app
13e171d780f87ffad0802703ab72483669bc3453
31,679
import numpy


def get_clean_num(num):
    """
    Get the closest clean number match to num with bases 1, 2, 5.

    Args:
        num: the number

    Returns:
        the closest number
    """
    if num == 0:
        return 0
    # fix: replaced the fragile `cond and 1 or -1` idiom with a conditional
    # expression (the and/or trick breaks when the truthy branch is falsy).
    sign = 1 if num > 0 else -1
    exp = get_exp(num)
    # Candidate "clean" magnitudes at this order of magnitude.
    candidates = numpy.array((1, 2, 5, 10)) * (10 ** exp)
    return sign * candidates[numpy.argmin(numpy.abs(candidates - abs(num)))]
09b4f5893e8d33a16217a2292ca75d7730e5d90e
31,680
def with_config_error(func):
    """Add config error context."""
    @wraps(func)
    def inner(*args, **kwargs):
        # Run the wrapped callable under the config-error context manager so
        # configuration key errors are reported with context.
        with config_key_error():
            return func(*args, **kwargs)
    return inner
667ab25648c17d087fbc54fd9d284c98c40b4b0b
31,681
def _flat(l): """Flattens a list. """ f = [] for x in l: f += x return f
9b2e432d79f08840d417601ff950ff9fa28073ef
31,682
def axesDict(T_axes):
    """Check connectivity based on Interval Vectors.

    Returns the first three interval values followed by their mod-12
    complements, as a list of six entries.
    """
    first_three = [T_axes[0], T_axes[1], T_axes[2]]
    complements = [12 - value for value in first_three]
    return first_three + complements
6b1e8c59d12a3c2c548b95f3bcd8d7a3de4ef931
31,683
def _settings_to_component(
    name: str,
    settings: configuration.ProjectSettings,
    options: amos.Catalog) -> bases.Projectbases.Component:
    """Builds a project Component named ``name`` from ``settings``.

    Looks up the component's design and kind in ``settings``, resolves the
    base class from ``options`` via those lookups, splits the configured
    values into constructor arguments and plain attributes, instantiates the
    base, and then attaches the remaining attributes to the instance.

    Args:
        name (str): key identifying the component in ``settings``.
        settings (configuration.ProjectSettings): project configuration
            holding designs, kinds, and per-component values.
        options (amos.Catalog): catalog of available component base classes.

    Returns:
        bases.Projectbases.Component: the constructed component instance.
    """
    # Either lookup may be absent; helpers below accept None.
    design = settings.designs.get(name, None)
    kind = settings.kinds.get(name, None)
    lookups = _get_lookups(name = name, design = design, kind = kind)
    base = _get_base(lookups = lookups, options = options)
    parameters = amos.get_annotations(item = base)
    # Split configured values: those matching constructor parameters become
    # initialization kwargs, the rest become post-construction attributes.
    attributes, initialization = _parse_initialization(
        name = name,
        settings = settings,
        parameters = parameters)
    # Runtime parameters are passed through under the 'parameters' kwarg.
    initialization['parameters'] = _get_runtime(
        lookups = lookups,
        settings = settings)
    component = base(name = name, **initialization)
    for key, value in attributes.items():
        setattr(component, key, value)
    return component
7e709a27b275587ce742c08d10efbd7b0aa171ce
31,684
from tqdm import tqdm


def iss(data, gamma21, gamma32, KDTree_radius, NMS_radius, max_num=100):
    """
    Description:
        Intrinsic Shape Signatures (ISS) keypoint detection based on a CUDA
        fixed-radius nearest-neighbour search and CUDA non-maximum
        suppression.
    Args:
        data: numpy array of point cloud, shape (num_points, 3)
        gamma21: threshold for lambda2 / lambda1
        gamma32: threshold for lambda3 / lambda2
        KDTree_radius: radiusNN range
        NMS_radius: NOTE(review): not used here — suppression reuses the
            KDTree_radius adjacency; confirm whether this is intentional.
        max_num: max number of keypoints (NOTE(review): not enforced here)
    Return:
        is_keypoint: numpy bool array, shape (num_points,), True where the
        point is a keypoint.
    """
    print(f'iss algo started...{data.shape[0]} of points prepared')

    # GPU kernels expect float32 input.
    pts = data.astype(np.float32)

    # Dense adjacency matrix: adj[i, j] > 0 iff points i and j are within
    # KDTree_radius of each other.
    adj = gpu_frnn(pts, KDTree_radius)
    adj = adj.reshape((data.shape[0], data.shape[0]))

    l3_list = []
    is_keypoint = np.full(data.shape[0], False)
    # Number of neighbour points for each point (used for inverse weighting).
    weights = np.sum(adj, axis=1)

    # Fix: `import tqdm` followed by `tqdm(range(...))` called the module
    # object and raised TypeError; import the callable instead (see top).
    for i in tqdm(range(data.shape[0])):
        indices = np.argwhere(adj[i, :] > 0)[:, 0]
        weight = 1 / weights[indices]
        neighbors = data[indices]

        # (pj - pi) stacked as rows.
        P = neighbors - data[i]
        # Weighted covariance matrix Cov(pi).
        Cov = weight * P.T @ P / np.sum(weight)

        # Eigenvalues sorted descending: lambda_1 >= lambda_2 >= lambda_3.
        e_values, _ = np.linalg.eig(Cov)
        l1, l2, l3 = e_values[np.argsort(-e_values)]
        l3_list.append(l3)

        # Keypoint proposal criterion: l2/l1 < gamma21 and l3/l2 < gamma32.
        if l2 / l1 < gamma21 and l3 / l2 < gamma32:
            is_keypoint[i] = True

    # Fix: removed a leftover ipdb.set_trace() debug breakpoint and a large
    # commented-out CPU NMS implementation superseded by gpu_nms below.
    print("performing nms based on cuda")
    is_keypoint = is_keypoint.astype(np.int32)
    is_keypoint_idx = np.argwhere(is_keypoint == 1)[:, 0].astype(np.int32)
    l3_array = np.asarray(l3_list).astype(np.float32)
    # In-place suppression: per neighbourhood, only the proposal with the
    # largest lambda_3 survives.
    gpu_nms(is_keypoint, is_keypoint_idx, adj, l3_array)
    is_keypoint = is_keypoint.astype(bool)

    return is_keypoint
bf2b84ed179314334a6e7a88f84f6f86468006dd
31,685
import os
import aiohttp
import traceback
import sys


async def main(request):
    """Handle incoming GitHub webhook requests.

    Reads the payload, validates it against the webhook secret, answers
    pings immediately, and otherwise dispatches the event through the
    router with an authenticated GitHub API client.
    """
    try:
        body = await request.read()

        # Credentials come from the environment.
        webhook_secret = os.environ.get("GH_SECRET")
        oauth_token = os.environ.get("GH_AUTH")
        bot_name = os.environ.get("GH_BOT")

        event = sansio.Event.from_http(request.headers, body, secret=webhook_secret)

        # GitHub's ping event only checks that the webhook is reachable.
        if event.event == "ping":
            return web.Response(status=200)

        # Dispatch the event with an authenticated client session.
        async with aiohttp.ClientSession() as session:
            gh = gh_aiohttp.GitHubAPI(session, bot_name, oauth_token=oauth_token, cache=cache)
            await router.dispatch(event, gh, request)
        return web.Response(status=200)
    except Exception:
        # Log the failure server-side; report a generic 500 to GitHub.
        traceback.print_exc(file=sys.stderr)
        return web.Response(status=500)
c2148bc11e12b241c0f6984a9e3f140a8f5799cf
31,686
from green.version import pretty_version
import copy
import configparser
import sys
import logging
import os
import tempfile


def mergeConfig(args, testing=False):  # pragma: no cover
    """
    I take in a namespace created by the ArgumentParser in cmdline.main() and
    merge in options from configuration files. The config items only replace
    argument items that are set to default value.

    Returns:
    I return a new argparse.Namespace, adding members:
        shouldExit = default False
        exitCode = default 0
        include patterns = include-patterns setting converted to list.
        omit_patterns = omit-patterns settings converted to list and extended,
        taking clear-omit into account.
        cov = coverage object default None
    """
    config = getConfig(getattr(args, "config", default_args.config))
    new_args = copy.deepcopy(default_args)  # Default by default!

    # Walk every known option; pick the right config-file parser for each.
    for name, default_value in dict(default_args._get_kwargs()).items():
        # Config options overwrite default options
        config_getter = None
        if name in [
            "termcolor",
            "notermcolor",
            "allow_stdout",
            "quiet_stdout",
            "help",
            "logging",
            "version",
            "disable_unidecode",
            "failfast",
            "run_coverage",
            "options",
            "completions",
            "completion_file",
            "clear_omit",
            "no_skip_report",
            "no_tracebacks",
            "disable_windows",
            "quiet_coverage",
        ]:
            config_getter = config.getboolean
        elif name in ["processes", "debug", "verbose", "minimum_coverage"]:
            config_getter = config.getint
        elif name in [
            "file_pattern",
            "finalizer",
            "initializer",
            "cov_config_file",
            "include_patterns",
            "omit_patterns",
            "warnings",
            "test_pattern",
            "junit_report",
        ]:
            config_getter = config.get
        elif name in ["targets", "help", "config"]:
            pass  # Some options only make sense coming on the command-line.
        elif name in ["store_opt", "parser"]:
            pass  # These are convenience objects, not actual settings
        else:
            raise NotImplementedError(name)

        if config_getter:
            try:
                config_value = config_getter("green", name.replace("_", "-"))
                setattr(new_args, name, config_value)
            except (configparser.NoSectionError, configparser.NoOptionError):
                # Option absent from the config file: keep the default.
                pass

        # Command-line values overwrite defaults and config values when
        # specified
        args_value = getattr(args, name, "unspecified")
        if args_value != "unspecified":
            setattr(new_args, name, args_value)

    # Members added on top of the parsed options (see docstring).
    new_args.shouldExit = False
    new_args.exitCode = 0
    new_args.cov = None

    # Help?
    if new_args.help:  # pragma: no cover
        new_args.parser.print_help()
        new_args.shouldExit = True
        return new_args

    # Did we just print the version?
    if new_args.version:
        sys.stdout.write(pretty_version() + "\n")
        new_args.shouldExit = True
        return new_args

    # Handle logging options
    if new_args.debug:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s %(levelname)9s %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )
    elif not new_args.logging:
        logging.basicConfig(filename=os.devnull)

    # Disable termcolor?
    if new_args.notermcolor:
        new_args.termcolor = False

    # Coverage. We must enable it here because we cannot cover module-level
    # code after it is imported, and this is the earliest place we can turn on
    # coverage.
    omit_patterns = [
        "*/argparse*",
        "*/colorama*",
        "*/django/*",
        "*/distutils*",  # Gets pulled in on Travis-CI CPython
        "*/extras*",  # pulled in by testtools
        "*/linecache2*",  # pulled in by testtools
        "*/mimeparse*",  # pulled in by testtools
        "*/mock*",
        "*/pbr*",  # pulled in by testtools
        "*/pkg_resources*",  # pulled in by django
        "*/pypy*",
        "*/pytz*",  # pulled in by django
        "*/six*",  # pulled in by testtools
        "*/termstyle*",
        "*/test*",
        "*/traceback2*",  # pulled in by testtools
        "*/unittest2*",  # pulled in by testtools
        "*Python.framework*",  # OS X system python
        "*site-packages*",  # System python for other OS's
        tempfile.gettempdir() + "*",
    ]
    if new_args.clear_omit:
        omit_patterns = []

    if new_args.omit_patterns:
        omit_patterns.extend(new_args.omit_patterns.split(","))
    new_args.omit_patterns = omit_patterns

    if new_args.include_patterns:
        new_args.include_patterns = new_args.include_patterns.split(",")
    else:
        new_args.include_patterns = []

    # Any coverage-related setting implies coverage should run.
    if new_args.quiet_coverage or (type(new_args.cov_config_file) == str):
        new_args.run_coverage = True
    if new_args.minimum_coverage != None:
        new_args.run_coverage = True
    if new_args.run_coverage:
        if not testing:
            # NOTE(review): `coverage` is imported elsewhere in this module.
            cov = coverage.coverage(
                data_file=".coverage",
                omit=omit_patterns,
                include=new_args.include_patterns,
                config_file=new_args.cov_config_file,
            )
            cov.start()
            new_args.cov = cov
    return new_args
73f057adead67d4cdaa7d06f19ee72ffe8f9bb91
31,687
import requests


def login_wechat():
    """
    This api logins a user through wechat app.

    Exchanges the client-supplied `code` for a WeChat openid/session_key via
    the jscode2session endpoint, creates the user on first login, and issues
    JWT access/refresh tokens.
    """
    code = request.json.get('code', None)
    wechat_code2session_url = 'https://api.weixin.qq.com/sns/jscode2session'
    payload = {
        'appid': current_app.config['WECHAT_APPID'],
        'secret': current_app.config['WECHAT_APP_SECRET'],
        'js_code': code,
        'grant_type': 'authorization_code'
    }
    r = requests.get(wechat_code2session_url, params=payload)

    # NOTE(review): substring search on the raw response body is fragile —
    # any payload containing "errcode" would be treated as an error; also a
    # 201 status for a failed exchange looks unintentional. Confirm both.
    if "errcode" in str(r.content):
        return jsonify(message="Something wrong with the code"), 201

    openid = r.json().get('openid', None)
    session_key = r.json().get('session_key', None)

    # check if the openid already exists in the DB.
    user = query_existing_openid_user(openid)
    if not user:
        # First login: create the user record.
        user = User()
        user.openid = openid
        user.session_key = session_key
        db.session.add(user)
        db.session.commit()
    # NOTE(review): for an existing user the fresh session_key is NOT
    # persisted — confirm whether it should be refreshed on every login.

    access_token = create_access_token(identity=user.to_dict())
    refresh_token = create_refresh_token(identity=user.to_dict())
    ret = {
        'access_token': access_token,
        'refresh_token': refresh_token
    }
    # Track issued tokens so they can be revoked later.
    add_token_to_db(access_token, current_app.config['JWT_IDENTITY_CLAIM'])
    add_token_to_db(refresh_token, current_app.config['JWT_IDENTITY_CLAIM'])
    return jsonify(ret), 201
985eddbcb39ade8aebad3d9d179a0b174df50280
31,688
def is_no_entitled(request):
    """Check condition for needing to entitled user.

    Returns True when the request path contains any fragment that is exempt
    from the entitlement check (currently only "source-status").
    """
    exempt_fragments = ["source-status"]
    return any(fragment in request.path for fragment in exempt_fragments)
feee0962568b20c685fd85096ce00dbb91b91fe5
31,689
def _make_selector_from_key_distribution_options(
    options) -> reverb_types.SelectorType:
    """Returns a Selector from its KeyDistributionOptions description."""
    one_of = options.WhichOneof('distribution')
    # Heap and prioritized need extra fields off `options`; the rest are
    # simple no-argument constructors dispatched through a table.
    if one_of == 'heap':
        if options.heap.min_heap:
            return item_selectors.MinHeap()
        return item_selectors.MaxHeap()
    if one_of == 'prioritized':
        return item_selectors.Prioritized(options.prioritized.priority_exponent)
    simple_selectors = {
        'fifo': item_selectors.Fifo,
        'uniform': item_selectors.Uniform,
        'lifo': item_selectors.Lifo,
    }
    if one_of in simple_selectors:
        return simple_selectors[one_of]()
    raise ValueError(f'Unknown distribution field: {one_of}')
3b932328f7b3e226e3dada54c8f1ca08e32167af
31,690
import base64


def json_numpy_obj_hook(dct):
    """
    Decodes a previously encoded numpy ndarray with proper shape and dtype
    from: http://stackoverflow.com/a/27948073/5768001

    :param dct: (dict) json encoded ndarray
    :return: (ndarray) if input was an encoded ndarray, otherwise the input
        unchanged
    """
    # Anything that is not an encoded-ndarray dict passes through untouched.
    if not (isinstance(dct, dict) and '__ndarray__' in dct):
        return dct
    raw_bytes = base64.b64decode(dct['__ndarray__'])
    return np.frombuffer(raw_bytes, dct['dtype']).reshape(dct['shape'])
50aab4855d63206534981bce95ec0219dec9724e
31,691
from typing import Optional
import copy


def redirect_edge(state: SDFGState,
                  edge: graph.MultiConnectorEdge[Memlet],
                  new_src: Optional[nodes.Node] = None,
                  new_dst: Optional[nodes.Node] = None,
                  new_src_conn: Optional[str] = None,
                  new_dst_conn: Optional[str] = None,
                  new_data: Optional[str] = None,
                  new_memlet: Optional[Memlet] = None) -> graph.MultiConnectorEdge[Memlet]:
    """ Redirects an edge in a state. Choose which elements to override by
        setting the keyword arguments.

        :param state: The SDFG state in which the edge resides.
        :param edge: The edge to redirect.
        :param new_src: If provided, redirects the source of the new edge.
        :param new_dst: If provided, redirects the destination of the new edge.
        :param new_src_conn: If provided, renames the source connector of the
                             edge.
        :param new_dst_conn: If provided, renames the destination connector of
                             the edge.
        :param new_data: If provided, changes the data on the memlet of the
                         edge, and the entire associated memlet tree.
        :param new_memlet: If provided, changes only the memlet of the new
                           edge.
        :return: The new, redirected edge.
        :note: ``new_data`` and ``new_memlet`` cannot be used at the same
               time.
    """
    if new_data is not None and new_memlet is not None:
        raise ValueError('new_data and new_memlet cannot both be given.')

    # Capture the memlet tree BEFORE removing the edge — afterwards the
    # tree can no longer be computed from the state.
    mtree = None
    if new_data is not None:
        mtree = state.memlet_tree(edge)

    state.remove_edge(edge)
    if new_data is not None:
        # Deep-copy so the renamed memlet does not alias the removed edge's.
        memlet = copy.deepcopy(edge.data)
        memlet.data = new_data

        # Rename on full memlet tree
        for e in mtree:
            e.data.data = new_data
    else:
        # new_memlet overrides; otherwise the original memlet is reused as-is.
        memlet = new_memlet or edge.data

    # Fall back to the original endpoints/connectors for anything not given.
    new_edge = state.add_edge(new_src or edge.src, new_src_conn or edge.src_conn,
                              new_dst or edge.dst, new_dst_conn or edge.dst_conn,
                              memlet)
    return new_edge
368ff8dace5b781d05f7e75fe9d57cae648aee9d
31,692
def svn_fs_lock(*args):
    """
    svn_fs_lock(svn_fs_t fs, char path, char token, char comment,
        svn_boolean_t is_dav_comment, apr_time_t expiration_date,
        svn_revnum_t current_rev, svn_boolean_t steal_lock,
        apr_pool_t pool) -> svn_error_t
    """
    # SWIG-generated wrapper: delegates directly to the native _fs module.
    return _fs.svn_fs_lock(*args)
f711b280a24f5d3c595a81013d1dc5275a997a60
31,693
def maybe_double_last(hand):
    """Double the value of a trailing Jack (11 -> 22), in place.

    :param hand: list - cards in hand.
    :return: list - the same hand object, with a trailing Jack's value
        doubled when present.
    """
    # Guard the empty hand: `hand[-1]` would raise IndexError.
    if hand and hand[-1] == 11:
        hand[-1] = 22
    return hand
378546e8dd650a67a5d9d9eed490969fd085bfb1
31,694
def length(draw, min_value=0, max_value=None):
    """Generates the length for Blast+6 file format.

    Arguments:
        - `draw`: the hypothesis draw callable (supplied by @composite).
        - `min_value`: Minimum value of length to generate.
        - `max_value`: Maximum value of length to generate; None means
          unbounded.
    """
    # Delegates to hypothesis' integers() strategy for the actual sampling.
    return draw(integers(min_value=min_value, max_value=max_value))
e3ac6b5d9bcc6380e475785047438b3be8a81288
31,695
def ppw(text):
    """PPW -- Percentage of Polysyllabic Words.

    Returns the fraction of words in `text` with three or more syllables,
    or None when the text contains no words.
    """
    words_num, words = word_counter(text, 'en')
    if words_num == 0:
        return None
    polysyllabic_count = sum(
        1 for word in words if syllable_counter(word) >= 3)
    return polysyllabic_count / words_num
b8c8c92a4947404a7166e63458e7d4eb2f9a00fc
31,696
def is_running_in_azure_ml(aml_run: Run = RUN_CONTEXT) -> bool:
    """
    Returns True if the given run is inside of an AzureML machine, or False if it is on a machine outside AzureML.
    When called without arguments, this functions returns True if the present code is running in AzureML.
    Note that in runs with "compute_target='local'" this function will also return True. Such runs execute outside
    of AzureML, but are able to log all their metrics, etc to an AzureML run.

    :param aml_run: The run to check. If omitted, use the default run in RUN_CONTEXT
    :return: True if the given run is inside of an AzureML machine, or False if it is a machine outside AzureML.
    """
    # Presence of the `experiment` attribute distinguishes a real AzureML Run
    # from an offline/mock run object — see docstring for the 'local' caveat.
    return hasattr(aml_run, "experiment")
3d2d6bcf95c34def5fff9c8bf1053c785b075895
31,697
def format_test_case(test_case):
    """Format test case from `-[TestClass TestMethod]` to `TestClass/TestMethod`.

    Args:
        test_case: (basestring) Test case id in format `-[TestClass TestMethod]`
                   or `[TestClass/TestMethod]`

    Returns:
        (str) Test case id in format TestClass/TestMethod.
    """
    test_case = _sanitize_str(test_case)
    # Strip the Objective-C wrapper characters ('[', ']', '-') and join the
    # class/method pair with '/'.
    test = test_case.replace('[', '').replace(']',
                                              '').replace('-',
                                                          '').replace(' ', '/')
    return test
f2d4530fbcc9d07409bfc7a88225653a7f550185
31,698
def _truncate_seed(seed):
    """
    Truncate the seed with MAXINT32.

    Args:
        seed (int): The seed to be truncated.

    Returns:
        Integer. The seed reduced modulo _MAXINT32.
    """
    # _MAXINT32 is defined elsewhere in this module — presumably 2**31 - 1,
    # keeping the seed within int32 range for downstream RNGs; confirm.
    return seed % _MAXINT32
2caf14236ec1697d6ab7144604e0f2be05d525d2
31,699