def test_reduce_rows():
    """Tests the `reduce_rows` function."""
    matrix = tf.constant(
        [[30.0, 25.0, 10.0], [15.0, 10.0, 20.0], [25.0, 20.0, 15.0]],
        tf.float32,
    )
    actual = reduce_rows(matrix)
    expected = tf.constant(
        [[20.0, 15.0, 0.0], [5.0, 0.0, 10.0], [10.0, 5.0, 0.0]], tf.float32
    )
    assert tf.reduce_all(tf.equal(actual, expected))
5,332,500
def _vmf_normalize(kappa, dim):
    """Compute normalization constant using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu.
    """
    num = np.power(kappa, dim / 2.0 - 1.0)

    if dim / 2.0 - 1.0 < 1e-15:
        denom = np.power(2.0 * np.pi, dim / 2.0) * i0(kappa)
    else:
        denom = np.power(2.0 * np.pi, dim / 2.0) * iv(dim / 2.0 - 1.0, kappa)

    if np.isinf(num):
        raise ValueError("VMF scaling numerator was inf.")

    if np.isinf(denom):
        raise ValueError("VMF scaling denominator was inf.")

    if np.abs(denom) < 1e-15:
        raise ValueError("VMF scaling denominator was 0.")

    return num / denom
5,332,501
def twitter_map():
    """
    Gets all the required information and returns the start page or a map
    with people's locations, depending on input.
    """
    # get arguments from url
    account = request.args.get('q')
    count = request.args.get('count')

    if account and count:
        # create map and add custom styles to html or display error
        try:
            new_map = create_map(account, count)
            new_map += render_template('styles.html')
            return new_map
        except urllib.error.HTTPError:
            return render_template('error.html', error='User was not found.')
    else:
        # render start page
        return render_template('index.html')
5,332,502
def path_states(node):
    """The sequence of states to get to this node."""
    if node in (cutoff, failure, None):
        return []
    return path_states(node.parent) + [node.state]
5,332,503
def find_named_variables(mapping):
    """Find the correspondence between each variable and its relation and attribute."""
    var_dictionary = dict()
    for relation_instance in mapping.lhs:
        for i, variable in enumerate(relation_instance.variables):
            name = relation_instance.relation.name
            field = relation_instance.relation.fields[i]
            if variable not in var_dictionary.keys():
                var_dictionary.update({variable: []})
                var_dictionary[variable].append((name, field))
            else:
                if (name, field) not in var_dictionary[variable]:
                    var_dictionary[variable].append((name, field))
    return var_dictionary
5,332,504
def attribute_formatter(attribute):
    """
    Translate non-alphabetic chars and spaces to a URL-applicable format.

    :param attribute: text string that may contain non-URL-compatible chars
        (e.g. ' 무작위의')
    :return: text string with Riot-API-compatible URL encoding
        (e.g. %20%EB%AC%B4%EC%9E%91%EC%9C%84%EC%9D%98)
    """
    tempdict = {'': attribute}
    formatted = urllib.parse.urlencode(tempdict)[1:].replace('+', '%20')
    return formatted
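A minimal sketch of the same encoding trick, using only the standard library (the Korean example string is the one from the docstring; the `[1:]` strips the leading `=` that urlencode produces for the empty key):

import urllib.parse

encoded = urllib.parse.urlencode({'': ' 무작위의'})[1:].replace('+', '%20')
print(encoded)  # %20%EB%AC%B4%EC%9E%91%EC%9C%84%EC%9D%98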
5,332,505
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    for feature in X.columns:
        correlation = X[feature].cov(y) / (np.std(X[feature]) * np.std(y))
        table = pd.DataFrame(dict(feature=X[feature], price=y))
        fig = px.scatter(table, x='feature', y='price',
                         title=f"Pearson Correlation "
                               f"between {feature} and price is {correlation}",
                         trendline="ols", trendline_color_override="red")
        pio.write_image(fig, os.path.join(
            output_path,
            f"Pearson correlation between {feature} and price.png"))
5,332,506
def applyRequests(ram, requests):
    """
    Each request has to be a tuple (WRITE, addr, data) or (READ, addr).

    data can only be 0 or 1 (because the width of the data port is 1)
    """
    for req in requests:
        m = req[0]
        if m == WRITE:
            data = req[2]
            assert data == 1 or data == 0
            ram.d._ag.data.append(data)
            ram.we._ag.data.append(1)
        elif m == READ:
            ram.we._ag.data.append(0)
        else:
            raise Exception(f"invalid mode {req[0]}")

        addr = req[1]
        # ram addr has 6 bits
        for i in range(6):
            addrbit = getattr(ram, f"a{i:d}")
            addrBitval = get_bit(addr, i)
            addrbit._ag.data.append(addrBitval)
5,332,507
def maestro_splits():
    """
    Get a list of indices for each split.

    Stolen from my work on Perceptual Evaluation of AMT Resynthesized.
    Left here for reference.
    """
    d = asmd.Dataset().filter(datasets=['Maestro'])
    maestro = json.load(open(MAESTRO_JSON))
    train, validation, test = [], [], []
    for i in range(len(d)):
        filename = d.paths[i][0][0][23:]
        split = search_audio_filename_in_original_maestro(filename, maestro)
        if split == "train":
            train.append(i)
        elif split == "validation":
            validation.append(i)
        elif split == "test":
            test.append(i)
        else:
            raise RuntimeError(filename + " not found in maestro original json")
    return train, validation, test
5,332,508
def get_choice_selectivity(trials, perf, r):
    """
    Compute d' for choice.
    """
    N = r.shape[-1]
    L = np.zeros(N)
    L2 = np.zeros(N)
    R = np.zeros(N)
    R2 = np.zeros(N)
    nL = 0
    nR = 0

    for n, trial in enumerate(trials):
        if not perf.decisions[n]:
            continue

        stimulus = trial['epochs']['stimulus']
        r_n = r[stimulus, n]
        left_right = trial['left_right']

        if left_right < 0:
            L += np.sum(r_n, axis=0)
            L2 += np.sum(r_n**2, axis=0)
            nL += len(stimulus)
        else:
            R += np.sum(r_n, axis=0)
            R2 += np.sum(r_n**2, axis=0)
            nR += len(stimulus)

    mean_L = L/nL
    var_L = L2/nL - mean_L**2

    mean_R = R/nR
    var_R = R2/nR - mean_R**2

    return -utils.div(mean_L - mean_R, np.sqrt((var_L + var_R)/2))
5,332,509
def author_productivity(pub2author_df, colgroupby='AuthorId',
                        colcountby='PublicationId', show_progress=False):
    """
    Calculate the total number of publications for each author.

    Parameters
    ----------
    pub2author_df : DataFrame, default None, Optional
        A DataFrame with the author2publication information.

    colgroupby : str, default 'AuthorId', Optional
        The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.

    colcountby : str, default 'PublicationId', Optional
        The DataFrame column with Publication Ids. If None then the database 'PublicationId' is used.

    Returns
    -------
    DataFrame
        Productivity DataFrame with 2 columns: 'AuthorId', 'Productivity'
    """
    # we can use show_progress to pass a label for the progress bar
    if show_progress:
        show_progress = 'Author Productivity'
    newname_dict = zip2dict([str(colcountby) + 'Count', '0'], ['Productivity'] * 2)
    return groupby_count(pub2author_df, colgroupby, colcountby,
                         count_unique=True,
                         show_progress=show_progress).rename(columns=newname_dict)
5,332,510
def smoothed_epmi(matrix, alpha=0.75):
    """
    Performs smoothed EPMI. See smoothed_ppmi for more info.

    Derived from this:

        #(w,c) / #(TOT)
        --------------------------------------
        (#(w) / #(TOT)) * (#(c)^a / #(TOT)^a)

        ==>

        #(w,c) / #(TOT)
        -------------------------------
        (#(w) * #(c)^a) / #(TOT)^(a+1)

        ==>

        #(w,c)
        ---------------------------
        (#(w) * #(c)^a) / #(TOT)^a

        ==>

        #(w,c) * #(TOT)^a
        ------------------
        #(w) * #(c)^a
    """
    row_sum = matrix.sum(axis=1)
    col_sum = matrix.sum(axis=0).power(alpha)
    total = row_sum.sum(axis=0).power(alpha)[0, 0]

    inv_col_sum = 1 / col_sum  # shape (1,n)
    inv_row_sum = 1 / row_sum  # shape (n,1)
    inv_col_sum = inv_col_sum * total

    mat = matrix * inv_row_sum
    mat = mat * inv_col_sum

    return mat
5,332,511
def encode(input, errors='strict'):
    """
    convert from unicode text (with possible UTF-16 surrogates) to wtf-8
    encoded bytes. If this is a python narrow build this will actually
    produce UTF-16 encoded unicode text (e.g. with surrogates).
    """
    # method to convert surrogate pairs to unicode code points permitting
    # lone surrogate pairs (aka potentially ill-formed UTF-16)
    def to_code_point(it):
        hi = None
        try:
            while True:
                c = ord(next(it))
                if c >= 0xD800 and c <= 0xDBFF:
                    # high surrogate
                    hi = c
                    c = ord(next(it))
                    if c >= 0xDC00 and c <= 0xDFFF:
                        # paired
                        c = 0x10000 + ((hi - 0xD800) << 10) + (c - 0xDC00)
                    else:
                        yield hi
                    hi = None
                yield c
        except StopIteration:
            if hi is not None:
                yield hi

    buf = six.binary_type()
    for code in to_code_point(iter(input)):
        if (0 == (code & 0xFFFFFF80)):
            buf += six.int2byte(code)
            continue
        elif (0 == (code & 0xFFFFF800)):
            buf += six.int2byte(((code >> 6) & 0x1F) | 0xC0)
        elif (0 == (code & 0xFFFF0000)):
            buf += six.int2byte(((code >> 12) & 0x0F) | 0xE0)
            buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        elif (0 == (code & 0xFF300000)):
            buf += six.int2byte(((code >> 18) & 0x07) | 0xF0)
            buf += six.int2byte(((code >> 12) & 0x3F) | 0x80)
            buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        # shared trailing continuation byte for all multi-byte sequences
        buf += six.int2byte((code & 0x3F) | 0x80)
    return buf, len(buf)
5,332,512
def forgot_password(request, mobile=False):
    """Password reset form.

    This view sends an email with a reset link.
    """
    if request.method == "POST":
        form = PasswordResetForm(request.POST)
        valid = form.is_valid()
        if valid:
            form.save(use_https=request.is_secure(),
                      token_generator=default_token_generator,
                      email_template_name='users/email/pw_reset.ltxt')
        if mobile:
            if valid:
                return HttpResponseRedirect(reverse('users.mobile_pw_reset_sent'))
        else:
            if not valid:
                return {'status': 'error',
                        'errors': dict(form.errors.iteritems())}
            else:
                return {'status': 'success'}
    else:
        form = PasswordResetForm()

    if mobile:
        return jingo.render(request, 'users/mobile/pw_reset_form.html',
                            {'form': form})
5,332,513
def rl_forward_char() -> None:
    """Move forward a character.

    This acts like readline's forward-char.
    """
    bridge.forward_char()
5,332,514
def test_tracker_dealer():
    """Test TrackerDealer."""
    # test TrackerDealer with TrackerUD
    trackers = [[TrackerUD(None, 1, 1, 0.06, 0.02, 20, np.inf, 1)
                 for _ in range(2)] for _ in range(3)]
    dealer_ud = TrackerDealer(callback, trackers)

    # can't respond to a trial twice
    dealer_ud.next()
    dealer_ud.respond(True)
    with pytest.raises(RuntimeError,
                       match="You must get a trial before you can respond."):
        dealer_ud.respond(True)

    dealer_ud = TrackerDealer(callback, np.array(trackers))

    # can't respond before you pick a tracker and get a trial
    with pytest.raises(RuntimeError,
                       match="You must get a trial before you can respond."):
        dealer_ud.respond(True)

    rand = np.random.RandomState(0)
    for sub, x_current in dealer_ud:
        dealer_ud.respond(rand.rand() < x_current)
    assert np.abs(dealer_ud.trackers[0, 0].n_reversals -
                  dealer_ud.trackers[1, 0].n_reversals) <= 1

    # test array-like indexing
    dealer_ud.trackers[0]
    dealer_ud.trackers[:]
    dealer_ud.trackers[[1, 0, 1]]
    [d for d in dealer_ud.trackers]
    dealer_ud.shape
    dealer_ud.history()
    dealer_ud.history(True)

    # bad rand type
    trackers = [TrackerUD(None, 1, 1, 0.06, 0.02, 20, 50, 1) for _ in range(2)]
    with pytest.raises(TypeError, match="argument"):
        TrackerDealer(trackers, rand=1)

    # test TrackerDealer with TrackerBinom
    trackers = [TrackerBinom(None, 0.05, 0.5, 50, stop_early=False)
                for _ in range(2)]

    # start_value scalar type checking
    with pytest.raises(TypeError, match="start_value must be a scalar"):
        TrackerUD(None, 3, 1, [1, 0.5], [1, 0.5], 10, np.inf, [9, 5],
                  change_indices=[2])

    dealer_binom = TrackerDealer(callback, trackers, pace_rule='trials')
    for sub, x_current in dealer_binom:
        dealer_binom.respond(True)

    # if you're dealing from TrackerBinom, you can't use stop_early feature
    trackers = [TrackerBinom(None, 0.05, 0.5, 50, stop_early=True, x_current=3)
                for _ in range(2)]
    with pytest.raises(ValueError,
                       match="be False to deal trials from a TrackerBinom"):
        TrackerDealer(callback, trackers, 1, 'trials')

    # if you're dealing from TrackerBinom, you can't use reversals to pace
    with pytest.raises(ValueError,
                       match="be False to deal trials from a TrackerBinom"):
        TrackerDealer(callback, trackers, 1)
5,332,515
def test_run_suite(irunner, sync):
    """Test running a single test suite."""
    ret = irunner.run_test_suite("test_1", "Suite", await_results=sync)
    if not sync:
        assert ret.result() is None
    # The test report should have been updated as a side effect.
    assert irunner.report["test_1"]["Suite"].passed
5,332,516
def rotation_components(x, y, eps=1e-12, costh=None):
    """Components for the operator Rotation(x,y)

    Together with `rotation_operator` achieves best memory complexity:
    O(N_batch * N_hidden)

    Args:
        x: a tensor from where we want to start
        y: a tensor at which we want to finish
        eps: the cutoff for the normalizations (avoiding division by zero)

    Returns:
        Five components: u, v, [u,v] and `2x2 rotation by theta`, cos(theta)
    """
    size_batch = tf.shape(x)[0]
    hidden_size = tf.shape(x)[1]

    # construct the 2x2 rotation
    u = tf.nn.l2_normalize(x, 1, epsilon=eps)
    if costh is None:
        costh = tf.reduce_sum(u * tf.nn.l2_normalize(y, 1, epsilon=eps), 1)
    sinth = tf.sqrt(1 - costh ** 2)
    step1 = tf.reshape(costh, [size_batch, 1])
    step2 = tf.reshape(sinth, [size_batch, 1])
    Rth = tf.reshape(
        tf.concat([step1, -step2, step2, step1], axis=1),
        [size_batch, 2, 2])

    # get v and concatenate u and v
    v = tf.nn.l2_normalize(
        y - tf.reshape(tf.reduce_sum(u * y, 1), [size_batch, 1]) * u,
        1, epsilon=eps)
    step3 = tf.concat([tf.reshape(u, [size_batch, 1, hidden_size]),
                       tf.reshape(v, [size_batch, 1, hidden_size])], axis=1)

    # do the batch matmul
    step4 = tf.reshape(u, [size_batch, hidden_size, 1])
    step5 = tf.reshape(v, [size_batch, hidden_size, 1])

    return step4, step5, step3, Rth, costh
5,332,517
def erode(np_image_bin, struct_elem='rect', size=3):
    """Execute an erode morphological operation on a binarized image.

    Keyword arguments:
    np_image_bin -- binarized image
    struct_elem:
        cross -- cross structural element
        rect -- rectangle structural element
        circ -- circle structural element (maybe implemented)
    size: size of struct element, should be 2N+1

    Return:
    Binarized image after the erode operation
    """
    np_image_bin = np_image_bin.astype(np.uint8)
    np_image_er = np.zeros(np_image_bin.shape, dtype=np.uint8)

    dir_size = int((size - 1) / 2)
    for index, x in np.ndenumerate(np_image_bin):
        np_window = bs.getWindow(np_image_bin, index, dir_size, struct_elem)
        if np_window.max() == 255:
            np_image_er[index[0], index[1]] = 255
    return np_image_er
5,332,518
def test_converter_conversion_item_initialization(
    conversion_item,
    expected_dest,
    expected_src,
    expected_transfomers,
    expected_raw_input,
):
    """Tests the ConversionItem initialization."""
    assert conversion_item.dest == expected_dest
    assert conversion_item.src == expected_src
    for i, expected_transfomer in enumerate(expected_transfomers):
        current_transformer_code = conversion_item.transformers[i].__code__.co_code
        assert current_transformer_code == expected_transfomer.__code__.co_code
    assert conversion_item.raw_input == expected_raw_input
5,332,519
def from_argparse_args(
    cls: Type[ParseArgparserDataType], args: Union[Namespace, ArgumentParser], **kwargs: Any
) -> ParseArgparserDataType:
    """Create an instance from CLI arguments. Eventually use variables from the OS
    environment which are defined as "PL_<CLASS-NAME>_<CLASS_ARGUMENT_NAME>".

    Args:
        cls: Lightning class
        args: The parser or namespace to take arguments from. Only known arguments will be
            parsed and passed to the :class:`Trainer`.
        **kwargs: Additional keyword arguments that may override ones in the parser or namespace.
            These must be valid Trainer arguments.

    Example:
        >>> from pytorch_lightning import Trainer
        >>> parser = ArgumentParser(add_help=False)
        >>> parser = Trainer.add_argparse_args(parser)
        >>> parser.add_argument('--my_custom_arg', default='something')  # doctest: +SKIP
        >>> args = Trainer.parse_argparser(parser.parse_args(""))
        >>> trainer = Trainer.from_argparse_args(args, logger=False)
    """
    if isinstance(args, ArgumentParser):
        args = cls.parse_argparser(args)

    params = vars(args)

    # we only want to pass in valid Trainer args, the rest may be user specific
    valid_kwargs = inspect.signature(cls.__init__).parameters
    trainer_kwargs = {name: params[name] for name in valid_kwargs if name in params}
    trainer_kwargs.update(**kwargs)

    return cls(**trainer_kwargs)
5,332,520
def setup_counter_and_timer(nodemap):
    """
    This function configures the camera to setup a Pulse Width Modulation
    signal using Counter and Timer functionality. By default, the PWM signal
    will be set to run at 50hz, with a duty cycle of 70%.

    :param nodemap: Device nodemap.
    :type nodemap: INodeMap
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    print('Configuring Pulse Width Modulation signal')

    try:
        result = True

        # Set Counter Selector to Counter 0
        node_counter_selector = PySpin.CEnumerationPtr(nodemap.GetNode('CounterSelector'))

        # Check to see if camera supports Counter and Timer functionality
        if not PySpin.IsAvailable(node_counter_selector):
            print('\nCamera does not support Counter and Timer Functionality. Aborting...\n')
            return False

        if not PySpin.IsWritable(node_counter_selector):
            print('\nUnable to set Counter Selector (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_0 = node_counter_selector.GetEntryByName('Counter0')
        if not PySpin.IsAvailable(entry_counter_0) or not PySpin.IsReadable(entry_counter_0):
            print('\nUnable to set Counter Selector (entry retrieval). Aborting...\n')
            return False

        counter_0 = entry_counter_0.GetValue()
        node_counter_selector.SetIntValue(counter_0)

        # Set Counter Event Source to MHzTick
        node_counter_event_source = PySpin.CEnumerationPtr(nodemap.GetNode('CounterEventSource'))
        if not PySpin.IsAvailable(node_counter_event_source) or not PySpin.IsWritable(node_counter_event_source):
            print('\nUnable to set Counter Event Source (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_event_source_mhz_tick = node_counter_event_source.GetEntryByName('MHzTick')
        if not PySpin.IsAvailable(entry_counter_event_source_mhz_tick) \
                or not PySpin.IsReadable(entry_counter_event_source_mhz_tick):
            print('\nUnable to set Counter Event Source (entry retrieval). Aborting...\n')
            return False

        counter_event_source_mhz_tick = entry_counter_event_source_mhz_tick.GetValue()
        node_counter_event_source.SetIntValue(counter_event_source_mhz_tick)

        # Set Counter Duration to 14000
        node_counter_duration = PySpin.CIntegerPtr(nodemap.GetNode('CounterDuration'))
        if not PySpin.IsAvailable(node_counter_duration) or not PySpin.IsWritable(node_counter_duration):
            print('\nUnable to set Counter Duration (integer retrieval). Aborting...\n')
            return False

        node_counter_duration.SetValue(14000)

        # Set Counter Delay to 6000
        node_counter_delay = PySpin.CIntegerPtr(nodemap.GetNode('CounterDelay'))
        if not PySpin.IsAvailable(node_counter_delay) or not PySpin.IsWritable(node_counter_delay):
            print('\nUnable to set Counter Delay (integer retrieval). Aborting...\n')
            return False

        node_counter_delay.SetValue(6000)

        # Determine Duty Cycle of PWM signal
        duty_cycle = float(node_counter_duration.GetValue()) / (float(node_counter_duration.GetValue()
                                                                      + node_counter_delay.GetValue())) * 100
        print('\nThe duty cycle has been set to {}%'.format(duty_cycle))

        # Determine pulse rate of PWM signal
        pulse_rate = 1000000 / float(node_counter_duration.GetValue() + node_counter_delay.GetValue())
        print('\nThe pulse rate has been set to {} Hz'.format(pulse_rate))

        # Set Counter Trigger Source to Frame Trigger Wait
        node_counter_trigger_source = PySpin.CEnumerationPtr(nodemap.GetNode('CounterTriggerSource'))
        if not PySpin.IsAvailable(node_counter_trigger_source) or not PySpin.IsWritable(node_counter_trigger_source):
            print('\nUnable to set Counter Trigger Source (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_trigger_source_ftw = node_counter_trigger_source.GetEntryByName('FrameTriggerWait')
        if not PySpin.IsAvailable(entry_counter_trigger_source_ftw) \
                or not PySpin.IsReadable(entry_counter_trigger_source_ftw):
            print('\nUnable to set Counter Trigger Source (entry retrieval). Aborting...\n')
            return False

        counter_trigger_source_ftw = entry_counter_trigger_source_ftw.GetValue()
        node_counter_trigger_source.SetIntValue(counter_trigger_source_ftw)

        # Set Counter Trigger Activation to Level High
        node_counter_trigger_activation = PySpin.CEnumerationPtr(nodemap.GetNode('CounterTriggerActivation'))
        if not PySpin.IsAvailable(node_counter_trigger_activation) or \
                not PySpin.IsWritable(node_counter_trigger_activation):
            print('\nUnable to set Counter Trigger Activation (enumeration retrieval). Aborting...\n')
            return False

        entry_counter_trigger_source_lh = node_counter_trigger_activation.GetEntryByName('LevelHigh')
        if not PySpin.IsAvailable(entry_counter_trigger_source_lh) \
                or not PySpin.IsReadable(entry_counter_trigger_source_lh):
            print('\nUnable to set Counter Trigger Activation (entry retrieval). Aborting...\n')
            return False

        counter_trigger_level_high = entry_counter_trigger_source_lh.GetValue()
        node_counter_trigger_activation.SetIntValue(counter_trigger_level_high)

    except PySpin.SpinnakerException as ex:
        print('Error: {}'.format(ex))
        return False

    return result
5,332,521
def concat(l1, l2):
    """ Join two possibly None lists """
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    return l1 + l2
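A quick demonstration of the None-tolerant join (assuming concat above is in scope):

print(concat(None, [3]))     # [3]
print(concat([1, 2], None))  # [1, 2]
print(concat([1, 2], [3]))   # [1, 2, 3]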
5,332,522
def main():
    """Main function"""
    parser = ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-s', '--speed', type=Decimal,
                       help="Speed expressed as a multiple of the speed of light. "
                            "Speed of light = 1")
    group.add_argument('-ms', '--meterspersec', type=Decimal,
                       help="Speed expressed in meters per second")
    parser.add_argument('-l', '--lightyears', type=Decimal, default=1,
                        help='Light-years to travel. Defaults to 1.')
    parser.add_argument('-sl', '--shiplength', type=Decimal, default=10,
                        help='Length of your ship in meters. Defaults to 10.')
    parser.add_argument('-p', '--precision', type=int, default=6,
                        help="Number of decimal places. Defaults to 6.")
    parser.add_argument('-m', '--mass', type=Decimal, default=10000,
                        help="Mass of your ship in kg. Defaults to 10000.")
    parser.add_argument('-t', '--time', type=Decimal, default=0.5,
                        help="Proportion of trip spent accelerating (0-1). Defaults to 0.5")
    args = parser.parse_args()

    if not args.speed and not args.meterspersec:
        print("Must include at least one of --speed or --meterspersec!")
        parser.print_help()
        exit()

    # Constants
    c = 299792458  # The speed of light in meters/sec
    c2 = c ** 2    # The speed of light, squared

    getcontext().prec = args.precision

    # Calculate time of trip (in years) for non-traveling observer
    if args.speed:
        v = args.speed * c
        obs_time = args.lightyears / args.speed
    elif args.meterspersec:
        v = args.meterspersec
        obs_time = args.lightyears / (args.meterspersec / c)

    v2 = int(v ** 2)
    B = v2 / c2

    if args.speed == 1:
        gamma = Decimal('Infinity')
        ship_time = 0
        ship_length = 0
        ship_mass = args.mass * gamma
        kinetic_energy_classic = Decimal('Infinity')
        kinetic_energy_relative = Decimal('Infinity')
    else:
        gamma = Decimal(1 / ((abs(1 - B)) ** 0.5))
        ship_time = Decimal(obs_time / gamma)
        ship_length = args.shiplength / gamma
        ship_mass = args.mass * gamma
        kinetic_energy_classic = Decimal(0.5) * args.mass * v2
        kinetic_energy_relative = (args.mass * gamma * c2) - (args.mass * c2)

    ship_energy = ship_mass * c2  # E=mc^2
    time_diff = abs(obs_time - ship_time)

    output_items = [
        f"-----------------------------------------",
        f"Lightyears to travel: {args.lightyears}",
        f"Speed in m/s: {v:,} m/s",
        f"Percent of c: {v / c * 100}%",
        f"Lorentz Factor: {Decimal(gamma)}",
        f"Observer Time: {time_breakdown(obs_time)}",
        f"Ship Time: {time_breakdown(ship_time)}",
        f"Difference in times: {time_breakdown(time_diff)}",
        f"Ship Length: {ship_length} meters",
        f"Ship Mass: {ship_mass} kg",
        f"Ship Mass Energy Equivalent: {ship_energy} joules",
        f"Kinetic Energy Required (Classic): {kinetic_energy_classic} joules",
        f"Kinetic Energy Required (Relativity): {kinetic_energy_relative} joules",
    ]
    print("\n".join(output_items))
5,332,523
def raise_ParseList_error(call_location_string):
    """
    Handle problems found at some point during the coding/evaluation of
    ParseList; called when the problem seems sufficiently important that the
    record should not be coded. Logs the error and raises HasParseError.
    """
    # global SentenceID, ValidError
    warningstr = call_location_string + \
        '; record skipped: {}'.format(SentenceID)
    logger = logging.getLogger('petr_log')
    logger.warning(warningstr)
    raise HasParseError
5,332,524
def get_model_data(n_samples=None, ratio=None):
    """
    Provides train and validation data to train the model. If n_samples and
    ratio are not None, it returns data according to the ratio between v1 and
    v2. V1 is data coming from the original distribution of SIRD parameters,
    and V2 is data coming from distributions based on errors of trained ML
    models.

    Parameters
    ----------
    n_samples : int, optional
        Subset of samples from the original set. The default is None.
    ratio : float, optional
        Ratio of the data from the distribution based on errors. The default is None.

    Returns
    -------
    df_train_val : pandas.DataFrame
    """
    df_train_val = pd.read_pickle(
        f"{root_project}/data/processed/train_val_set.pickle")
    df_train_val_rev = pd.read_pickle(
        f"{root_project}/data/processed/train_val_set_rev.pickle")
    df_v1_train_val = pd.read_pickle(
        f"{root_project}/data/processed/train_val_set_v1.pickle")
    df_v2_train_val = pd.read_pickle(
        f"{root_project}/data/processed/train_val_set_v2.pickle")

    if n_samples is not None and ratio is not None:
        df_train_val = take_samples(df_v1_train_val, df_v2_train_val,
                                    n_samples, ratio)
        return df_train_val
    elif n_samples is not None:
        df_train_val = df_train_val.sample(n_samples, random_state=42)
        return df_train_val
    else:
        return df_train_val_rev
5,332,525
def skip():
    """ Decorator for marking test function that should not be executed."""
    def wrapper(fn):
        fn.__status__ = "skip"
        return fn
    return wrapper
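A short usage sketch (the test function is hypothetical; a runner that honors this decorator would skip anything whose __status__ is "skip"):

@skip()
def test_not_ready():
    assert False

print(test_not_ready.__status__)  # prints: skip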
5,332,526
def upload_original_to(instance, filename):
    """
    Return the path this file should be stored at.
    """
    filename_base, filename_ext = os.path.splitext(filename)
    filename_ext = filename_ext.lower()
    origin_path = instance.release_agency_slug
    if '--' in instance.release_agency_slug:
        agency_slug, office_slug = instance.release_agency_slug.split('--')
        origin_path = '%s/%s' % (agency_slug, office_slug)
    right_now = now().strftime("%Y%m%d%H%M")
    upload_path = '%s/%s/%s%s' % (
        origin_path, right_now, filename_base, filename_ext)
    return upload_path
5,332,527
def _GetThumbnailType(destination_id):
    """Returns the thumbnail type for the destination with the id."""
    destination_type = _GetDestinationType(destination_id)
    if destination_type == _DestinationType.HOTSPOT:
        return _ThumbnailType.PRETTY_EARTH
    else:
        return _ThumbnailType.GEOMETRY_OUTLINE
5,332,528
def rmproj():
    """ Remove project """
    run('cd {root_dir} && vagrant destroy -f'.format(**VARS))
    run('rm {root_dir} -rf'.format(**VARS))
5,332,529
def template_4_bc(template) -> None:
    """Apply the displacement boundary conditions for case 3

    Parameters
    ----------
    template : template
        The unit template parameters
    """
    # Add the boundary conditions
    modify.add_bc_fd_edge("yy1", "y", "y", 0, 0)
    modify.add_bc_fd_edge("yy2", "y", "y", template.y_s, 0)
    modify.add_bc_fd_node("xf1", "x", 1, 0)
    modify.add_bc_fd_node("xf2", "x", template.x_n, 0)
    modify.add_bc_fd_node("xn", "x", template.n_n - template.x_n + 1,
                          -template.d_mag, tab_nam=template.tab_nam)
    modify.add_bc_fd_node("xp", "x", template.n_n, template.d_mag,
                          tab_nam=template.tab_nam)
    return
5,332,530
def barchart(bins, values, xlabel, ylabel, title, name=None, ticks=None):
    """Wrapper around the matplotlib bar function. Useful when we need bars
    from multiple trends.

    Parameters
    ----------
    bins : np.array
        x values of where the bars should reside, 1-D array
    values : np.array
        heights of the bars, 2-D array [# trends][len(bins)]
    xlabel : str
        label to display on the x axis
    ylabel : str
        label to display on the y axis
    title : str
        title of the figure
    name : list str
        trend names of length len(values) # trends
    ticks : list str
        x axis ticks to display of length len(bins)

    See Also
    ----------
    histo()
    """
    global numFigs
    plot.figure(numFigs)
    width = 1. / len(values)
    if name is None:
        name = ["" for i in range(len(values))]
    for set in range(0, len(values)):
        plot.bar(bins + width * set, values[set][:], width, label=name[set],
                 color=colors[set])
    plot.xlabel(xlabel)
    plot.ylabel(ylabel)
    plot.legend(fancybox=True, shadow=True)
    if ticks is not None:
        plot.xticks(bins + .5, ticks, rotation=60)
    plot.title(title)
    plot.tight_layout()
    numFigs += 1
5,332,531
def cli(p_articles, count, path):
    """ Generate fake data to populate the database. """
    click.echo('Working...')
    if p_articles:
        random_title = []
        for _ in range(count):
            random_title.append(fake.company())
        random_title = list(set(random_title))
        while True:
            if len(random_title) == 0:
                break
            fake_title = random_title.pop()
            # Pick a random username from the registered users
            random_author = random.choice(Users.query.all()).username
            fake_body = fake.text(max_nb_chars=999, ext_word_list=None)
            article = Articles(title=fake_title, author=random_author,
                               body=fake_body)
            with app.app_context():
                db.session.add(article)
                db.session.commit()
    else:
        random_name = []
        for _ in range(count):
            random_name.append(fake.name())
        random_name = list(set(random_name))
        while True:
            if len(random_name) == 0:
                break
            fake_name = random_name.pop()
            fake_password = sha256_crypt.hash(str("daniele"))
            fake_email = fake.email()
            fake_username = fake.first_name() + secrets.token_hex(3)
            user = Users(name=fake_name, password=fake_password,
                         email=fake_email, username=fake_username)
            with app.app_context():
                db.session.add(user)
                db.session.commit()
    click.echo("I've finished. Rows generated: %d" % count)
5,332,532
def auto_differential_demo():
    """
    Autograd demo
        - Tensor is the core class of the package
        - Set requires_grad to True to track all operations on a tensor; once the
          computation is done, call .backward() to compute all gradients automatically
        - To stop tracking a tensor's history, use .detach() to separate it from the
          computation history and prevent future computation from being tracked
        - grad_fn holds a reference to the function that created the tensor; it is
          None for tensors created directly by the user
    :return:
    """
    x = torch.ones(2, 2, requires_grad=True)
    print(f'x: {x}')
    y = x + 2
    print(f'y: {y}')
    print(f'y.grad_fn: {y.grad_fn}')
    z = y * y * 3
    out = z.mean()
    print(f'z: {z}\nout: {out}')
    # backpropagate
    out.backward()
    # print the gradient d(out)/d(x)
    print(f'x.grad: {x.grad}')
    # toggle the tensor's requires_grad flag; it defaults to False
    print(f'x.requires_grad: {x.requires_grad}')
    x.requires_grad_(False)
    print(f'x.requires_grad: {x.requires_grad}')
5,332,533
async def postAsync(text: str, *, url: str = "auto",
                    config: ConfigOptions = ConfigOptions(),
                    timeout: float = 30.0, retries: int = 3):
    """Alias function for AsyncHaste().post(...)"""
    return await AsyncHaste().post(text, url=url, config=config,
                                   timeout=timeout, retries=retries)
5,332,534
def parse_bibtex(file, build_dir):
    """
    Parse merged bibtex file again with customization to clean citations.

    @type file: .bib file
    @param file: file to be parsed
    @type build_dir: file path
    @param build_dir: where to save
    """
    parser = BibTexParser()
    parser.customization = customizations
    years = []
    with open(file, 'r') as f:
        bibtex = bibtexparser.load(f, parser=parser)
    for i in range(len(bibtex.entries)):
        for key, value in bibtex.entries[i].items():
            if key == 'year':
                years.append(int(value))
    years.sort()
    years.reverse()
    years_no_repeat = []
    for i in range(len(years)):
        if years_no_repeat.count(years[i]) == 0:
            years_no_repeat.append(years[i])
    for i in range(len(years_no_repeat)):
        bibtext = copy.deepcopy(bibtex)
        array = []
        for j in range(len(bibtex.entries)):
            for key, value in bibtex.entries[j].items():
                if key == 'year':
                    if int(value) == years_no_repeat[i]:
                        array.append(bibtex.entries[j])
        bibtext.entries = array
        parse_file = os.path.join(build_dir, str(years_no_repeat[i]) + 'parsed.bib')
        writer = BibTexWriter()
        writer.order_entries_by = ('ENTRYTYPE', )
        with open(parse_file, 'w') as f:
            f.write(writer.write(bibtext))
5,332,535
def SetupSSHKeys(config_path, private_key_path, public_key_path):
    """Setup the pair of the ssh key for acloud.config.

    User can use the default path: "~/.ssh/acloud_rsa".

    Args:
        config_path: String, acloud config path.
        private_key_path: Path to the private key file.
                          e.g. ~/.ssh/acloud_rsa
        public_key_path: Path to the public key file.
                         e.g. ~/.ssh/acloud_rsa.pub
    """
    private_key_path = os.path.expanduser(private_key_path)
    if (private_key_path == "" or public_key_path == ""
            or private_key_path == _DEFAULT_SSH_PRIVATE_KEY):
        utils.CreateSshKeyPairIfNotExist(_DEFAULT_SSH_PRIVATE_KEY,
                                         _DEFAULT_SSH_PUBLIC_KEY)
        UpdateConfigFile(config_path, "ssh_private_key_path",
                         _DEFAULT_SSH_PRIVATE_KEY)
        UpdateConfigFile(config_path, "ssh_public_key_path",
                         _DEFAULT_SSH_PUBLIC_KEY)
5,332,536
def gpu_load_acquisition_csv(acquisition_path, **kwargs):
    """ Loads acquisition data

    Returns
    -------
    GPU DataFrame
    """
    chronometer = Chronometer.makeStarted()
    cols = [
        'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate',
        'orig_upb', 'orig_loan_term', 'orig_date', 'first_pay_date',
        'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti',
        'borrower_credit_score', 'first_home_buyer', 'loan_purpose',
        'property_type', 'num_units', 'occupancy_status', 'property_state',
        'zip', 'mortgage_insurance_percent', 'product_type',
        'coborrow_credit_score', 'mortgage_insurance_type',
        'relocation_mortgage_indicator'
    ]
    dtypes = OrderedDict([
        ("loan_id", "int64"),
        ("orig_channel", "category"),
        ("seller_name", "category"),
        ("orig_interest_rate", "float64"),
        ("orig_upb", "int64"),
        ("orig_loan_term", "int64"),
        ("orig_date", "date"),
        ("first_pay_date", "date"),
        ("orig_ltv", "float64"),
        ("orig_cltv", "float64"),
        ("num_borrowers", "float64"),
        ("dti", "float64"),
        ("borrower_credit_score", "float64"),
        ("first_home_buyer", "category"),
        ("loan_purpose", "category"),
        ("property_type", "category"),
        ("num_units", "int64"),
        ("occupancy_status", "category"),
        ("property_state", "category"),
        ("zip", "int64"),
        ("mortgage_insurance_percent", "float64"),
        ("product_type", "category"),
        ("coborrow_credit_score", "float64"),
        ("mortgage_insurance_type", "float64"),
        ("relocation_mortgage_indicator", "category")
    ])
    print(acquisition_path)
    acquisition_table = pyblazing.create_table(
        table_name='acq',
        type=get_type_schema(acquisition_path),
        path=acquisition_path,
        delimiter='|',
        names=cols,
        dtypes=get_dtype_values(dtypes),
        skip_rows=1)
    Chronometer.show(chronometer, 'Read Acquisition CSV')
    return acquisition_table
5,332,537
def test_check_maxfail_1(testdir, example):
    """ Should stop after first failed check """
    result = testdir.runpytest("--maxfail=1")
    result.assert_outcomes(failed=1, passed=0)
    result.stdout.fnmatch_lines(["*AssertionError: one*"])
5,332,538
def get_flows_src_dst_address_pairs(device, flow_monitor):
    """ Gets flows under flow_monitor and returns source and destination address pairs

        Args:
            device ('obj'): Device to use
            flow_monitor ('str'): Flow monitor name

        Raises:
            N/A

        Returns:
            [('source_address', 'destination_address'), ...]
    """
    log.info('Getting all source and destination address pairs under flow '
             'monitor {name}'.format(name=flow_monitor))

    try:
        output = device.parse('show flow monitor {name} cache format table'
                              .format(name=flow_monitor))
    except SchemaEmptyParserError:
        return []

    pairs = []
    # All hardcoded keys are mandatory in the parser
    for src in output.get('ipv4_src_addr', {}):
        for dst in output['ipv4_src_addr'][src]['ipv4_dst_addr']:
            pairs.append((src, dst))

    return pairs
5,332,539
def imthresh(im, thresh):
    """
    Sets pixels in image below threshold value to 0

    Args:
        im (ndarray): image
        thresh (float): threshold

    Returns:
        ndarray: thresholded image
    """
    thresh_im = im.copy()
    thresh_im[thresh_im < thresh] = 0
    return thresh_im
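A minimal sketch of the thresholding on a toy array (assuming imthresh above is in scope):

import numpy as np

im = np.array([[0.2, 0.7],
               [0.9, 0.1]])
print(imthresh(im, 0.5))
# [[0.  0.7]
#  [0.9 0. ]]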
5,332,540
def test_geometry_collection_iteration():
    """test if feature collection is iterable"""
    gc = FeatureCollection(features=[test_feature, test_feature])
    iter(gc)
5,332,541
def set_serv_parms(service, args):
    """ Set the service command line parameters in Registry """
    import _winreg

    uargs = []
    for arg in args:
        uargs.append(unicoder(arg))

    try:
        key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE,
                                _SERVICE_KEY + service)
        _winreg.SetValueEx(key, _SERVICE_PARM, None,
                           _winreg.REG_MULTI_SZ, uargs)
        _winreg.CloseKey(key)
    except WindowsError:
        return False
    return True
5,332,542
def isRenderNode():
    # type: () -> bool
    """
    Returns
    -------
    bool
    """
    return flavor() == 'Render'
5,332,543
def test_clean_data_contains_instance_value():
    """ Test values from instances remain when not in data. """
    data = {'first_name': 'John'}
    fields = ['job_title', 'first_name']

    class Job(object):
        job_title = 'swamper'
        first_name = ''

    class Swamper(BaseSwamper):

        def build_instances(self):
            obj = Job()
            self.instances = {}
            self.instances[Job] = obj

        def clean_instances(self):
            """
            `clean` depends on having both job_title and first_name, so
            provide the data which isn't available from self.data by copying
            it from an instance to self.cleaned_data.
            """
            if self.instances:
                for model, instance in self.instances.items():
                    # Update cleaned_data with fields from instance that
                    # aren't part of the new data.
                    initial_fields = set(fields) - set(self.data.keys())
                    obj_data = {field: getattr(instance, field)
                                for field in initial_fields}
                    self.cleaned_data.update(obj_data)

        def clean(self):
            if self.cleaned_data['job_title'] == 'swamper':
                if not self.cleaned_data['first_name'].startswith('J'):
                    raise ValueError('Only people with a first name that begin '
                                     'with the letter J can become swampers.')
            return self.cleaned_data

    swamper = Swamper(fields, data)
    assert swamper.errors == {}
    assert swamper.cleaned_data['job_title'] == 'swamper'
    assert swamper.cleaned_data['first_name'] == 'John'

    obj = Job()
    obj = swamper.build_or_update(obj, fields)
    assert obj.job_title == 'swamper'
    assert obj.first_name == 'John'
5,332,544
def download_datasets(force=False):
    """
    Download all datasets required for the project.

    - force: force dataset download even if dataset has been previously downloaded.
    """
    download_enron_email_dataset(force)
    download_ud120_project(force)
5,332,545
def write_image(image_base64: str, filepath: pathlib.Path) -> None:
    """
    Write an image to a file.

    Args:
        image_base64: Image encoded in Base64
        filepath: Output image file path
    """
    with open(filepath, "wb") as f:
        f.write(base64.b64decode(image_base64))
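A minimal usage sketch; any byte payload round-trips through Base64, so a real caller would pass an encoded PNG/JPEG instead of the placeholder bytes below (the output path is hypothetical):

import base64
import pathlib

payload = base64.b64encode(b"not really an image").decode()
write_image(payload, pathlib.Path("out.bin"))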
5,332,546
def wrap_application(app: App, wsgi: WSGICallable) -> WSGICallable:
    """Wrap a given WSGI callable in all active middleware."""
    for middleware_instance in reversed(ACTIVE_MIDDLEWARES):
        wsgi = middleware_instance(app, wsgi)
    return wsgi
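A minimal sketch of the middleware convention assumed here: each entry in ACTIVE_MIDDLEWARES is a callable (app, wsgi) -> wsgi, and the names below are hypothetical. Because the list is iterated with reversed(), the first list entry ends up as the outermost wrapper:

def make_tag_middleware(tag):
    def middleware(app, wsgi):
        def wrapped(environ, start_response):
            print('enter', tag)  # runs before the inner layers
            return wsgi(environ, start_response)
        return wrapped
    return middleware

ACTIVE_MIDDLEWARES = [make_tag_middleware('outer'), make_tag_middleware('inner')]
# wrap_application(app, wsgi) now prints "enter outer" then "enter inner"
# for each request.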
5,332,547
def cal_rpn(imgsize, featuresize, scale, gtboxes):
    """
    Args:
        imgsize: [h, w]
        featuresize: the size of each output feature map, e.g. [19, 19]
        scale: the scale factor of the base anchor to the feature map, e.g. [32, 32]
        gtboxes: ground truth boxes in the image, shape of [N, 4].
        stride: the stride of the output feature map.
    Returns:
        labels: label for each anchor, shape of [N, ], -1 for ignore,
            0 for background, 1 for object
        bbox_targets: bbox regression target for each anchor, shape of [N, 4]
    """
    imgh, imgw = imgsize

    # gen base anchor
    base_anchor = gen_anchor(featuresize, scale)

    # calculate iou
    overlaps = cal_overlaps(base_anchor, gtboxes)

    # init labels: -1 don't care, 0 is negative, 1 is positive
    labels = np.empty(base_anchor.shape[0])
    labels.fill(-1)

    # for each GT box, the anchor with which it has the highest IOU
    gt_argmax_overlaps = overlaps.argmax(axis=0)

    # for each anchor, the GT box with which it has the highest IOU overlap
    anchor_argmax_overlaps = overlaps.argmax(axis=1)
    anchor_max_overlaps = overlaps[range(overlaps.shape[0]), anchor_argmax_overlaps]

    # IOU > IOU_POSITIVE
    labels[anchor_max_overlaps > config.IOU_POSITIVE] = 1
    # IOU < IOU_NEGATIVE
    labels[anchor_max_overlaps < config.IOU_NEGATIVE] = 0
    # ensure that every GT box has at least one positive RPN region
    labels[gt_argmax_overlaps] = 1

    # only keep anchors inside the image
    outside_anchor = np.where(
        (base_anchor[:, 0] < 0) |
        (base_anchor[:, 1] < 0) |
        (base_anchor[:, 2] >= imgw) |
        (base_anchor[:, 3] >= imgh)
    )[0]
    labels[outside_anchor] = -1

    # subsample positive labels, if greater than RPN_POSITIVE_NUM (default 128)
    fg_index = np.where(labels == 1)[0]
    if len(fg_index) > config.RPN_POSITIVE_NUM:
        labels[np.random.choice(fg_index,
                                len(fg_index) - config.RPN_POSITIVE_NUM,
                                replace=False)] = -1

    # subsample negative labels
    if not config.OHEM:
        bg_index = np.where(labels == 0)[0]
        num_bg = config.RPN_TOTAL_NUM - np.sum(labels == 1)
        if len(bg_index) > num_bg:
            labels[np.random.choice(bg_index, len(bg_index) - num_bg,
                                    replace=False)] = -1

    bbox_targets = bbox_transfrom(base_anchor, gtboxes[anchor_argmax_overlaps, :])

    return [labels, bbox_targets], base_anchor
5,332,548
def main(data, context):
    """Triggered from a message on a Cloud Pub/Sub topic.

    Args:
        data (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    try:
        current_time = datetime.utcnow()
        log_message = Template('Cloud Function was triggered on $time')
        logging.info(log_message.safe_substitute(time=current_time))

        try:
            run_btyd(TRAINING_DATA_QUERY,
                     ACTUAL_CUSTOMER_VALUE_QUERY,
                     PREDICTION_LENGTH_IN_MONTHS,
                     GCS_BUCKET_MODELS,
                     GCS_BUCKET_PREDICTIONS,
                     PREFIX,
                     LOCAL_STORAGE_FOLDER,
                     FREQUENZY,
                     PENALIZER_COEF,
                     DISCOUNT_RATE)
        except Exception as error:
            log_message = Template('Predictions failed due to $message.')
            logging.error(log_message.safe_substitute(message=error))

    except Exception as error:
        log_message = Template('$error').substitute(error=error)
        logging.error(log_message)
5,332,549
def run(args):
    """This function is called by a user to recover or reset their
    primary one-time-password secret. This is used, e.g. if a user
    has changed their phone, or if they think the secret has been
    compromised, or if they have lost the secret completely (in
    which case they will need to log in using a backup method and
    then call this function from that login)

    The user will need to pass in a validated Authorisation, meaning
    they must have a login by at least one method (e.g. a pre-approved
    device or a one-time-login requested via backup codes or via an
    admin-authorised login)
    """
    auth = Authorisation.from_data(args["authorisation"])

    try:
        reset_otp = bool(args["reset_otp"])
    except:
        reset_otp = False

    auth.verify(resource="reset_otp")

    identity_uid = auth.identity_uid()

    service = get_this_service(need_private_access=True)

    if service.uid() != identity_uid:
        raise PermissionError(
            "You can only reset the OTP on the identity service on "
            "which the user is registered! %s != %s" %
            (service.uid(), identity_uid))

    user_uid = auth.user_uid()

    return (user_uid, reset_otp)
5,332,550
def adjust_price(iteration, current_price, global_start, last_tx_time):
    """
    Function that decides to lower or increase the price, according to the
    time of the previous transaction and the progress in reaching TARGET in
    TARGET_TIME.

    Args:
        iteration (int) - Number of previous successful transactions.
            Iterator which changes with the changing of nonce;
        current_price (int) - Current gas price in Wei;
        global_start (float/Unix format) - The start of the whole process;
        last_tx_time (float/Unix format) - Time spent in previous iteration.

    Return:
        current_price (int) - New gas price after adjustments.
    """
    if iteration > 0:
        target_ratio = TARGET_TIME / TARGET
        actual_ratio = (time.time() - global_start) / iteration

        # If we check only the duration of the latest tx, it will increase
        # the price very rapidly, ignoring the global progress.
        # So it is necessary to control the price according to plan.
        if actual_ratio < target_ratio:
            current_price -= int(current_price / 10)
        elif last_tx_time >= target_ratio:
            current_price += int(current_price / 10)

    return current_price
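A worked example of the adjustment rule with hypothetical numbers (TARGET_TIME, TARGET and the gas price below are made up for illustration):

TARGET_TIME, TARGET = 3600, 100      # hypothetical plan: 100 txs in one hour
target_ratio = TARGET_TIME / TARGET  # 36 seconds allowed per transaction

current_price = 20_000_000_000       # 20 Gwei, in Wei
actual_ratio = 300 / 10              # 10 txs done in 300 s -> ahead of plan

if actual_ratio < target_ratio:      # ahead of schedule, so cut the price 10%
    current_price -= int(current_price / 10)
print(current_price)                 # 18000000000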
5,332,551
def read_csv_batch(file: str, offset, cnt, **read_csv_params):
    """
    Read a batch of rows from a CSV file, starting at a byte offset.

    Args:
        file: path to the CSV file.
        offset: byte offset at which to start reading.
        cnt: number of rows to read.
        read_csv_params: additional parameters passed to pandas.read_csv.

    Returns:
        DataFrame with the requested batch of rows.
    """
    read_csv_params = copy(read_csv_params)
    if read_csv_params is None:
        read_csv_params = {}

    try:
        usecols = read_csv_params.pop('usecols')
    except KeyError:
        usecols = None

    header = pd.read_csv(file, nrows=0, **read_csv_params).columns

    with open(file, 'rb') as f:
        f.seek(offset)
        data = pd.read_csv(f, header=None, names=header, chunksize=None,
                           nrows=cnt, usecols=usecols, **read_csv_params)

    return data
5,332,552
def fault_ack_faults_by_dn(cookie, in_dns):
    """ Auto-generated UCSC XML API Method. """
    method = ExternalMethod("FaultAckFaultsByDn")

    method.cookie = cookie
    method.in_dns = in_dns

    xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
    return xml_request
5,332,553
def provides(name=None, needs: List[str] = None):
    """A shortcut for defining a factory function that also needs dependencies itself."""
    if not needs:
        needs = []

    def decorator(f):
        decorated = _needs(*needs)(f)
        set(name or f.__name__, decorated)
        return f

    return decorator
5,332,554
def clip_xyxy_to_image(x1, y1, x2, y2, height, width):
    """Clip coordinates to an image with the given height and width."""
    x1 = np.minimum(width - 1.0, np.maximum(0.0, x1))
    y1 = np.minimum(height - 1.0, np.maximum(0.0, y1))
    x2 = np.minimum(width - 1.0, np.maximum(0.0, x2))
    y2 = np.minimum(height - 1.0, np.maximum(0.0, y2))
    return x1, y1, x2, y2
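A short sketch of the clipping on out-of-bounds box coordinates (the box values are arbitrary):

import numpy as np

x1, y1, x2, y2 = clip_xyxy_to_image(
    np.array([-5.0]), np.array([10.0]), np.array([700.0]), np.array([300.0]),
    height=480, width=640)
print(x1, y1, x2, y2)  # [0.] [10.] [639.] [300.]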
5,332,555
def build_url(urlo, base, end, url_whitespace, url_case):
    """ Build and return a valid url.

    Parameters
    ----------
    urlo
        A ParseResult object returned by urlparse
    base
        base_url from config
    end
        end_url from config
    url_whitespace
        url_whitespace from config
    url_case
        url_case from config

    Returns
    -------
    URL string
    """
    if not urlo.netloc:
        if not end:
            clean_target = re.sub(r'\s+', url_whitespace, urlo.path)
        else:
            clean_target = re.sub(r'\s+', url_whitespace, urlo.path.rstrip('/'))
            if clean_target.endswith(end):
                end = ''
        if base.endswith('/'):
            path = "%s%s%s" % (base, clean_target.lstrip('/'), end)
        elif base and not clean_target.startswith('/'):
            path = "%s/%s%s" % (base, clean_target, end)
        else:
            path = "%s%s%s" % (base, clean_target, end)
        if url_case == 'lowercase':
            urlo = urlo._replace(path=path.lower())
        elif url_case == 'uppercase':
            urlo = urlo._replace(path=path.upper())
        else:
            urlo = urlo._replace(path=path)
    return urlunparse(urlo)
5,332,556
def AnomalyDicts(anomalies, v2=False):
    """Makes a list of dicts with properties of Anomaly entities."""
    bisect_statuses = _GetBisectStatusDict(anomalies)
    return [GetAnomalyDict(a, bisect_statuses.get(a.bug_id), v2)
            for a in anomalies]
5,332,557
def remove_links(txt: str):
    """ Remove weblinks from the text """
    pattern = r'[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
    txt = re.sub(pattern, " ", txt)
    txt = re.sub('http|https', " ", txt)
    return txt
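A quick sanity check on a bare domain link (assuming remove_links above is in scope; the matched host and path collapse to a single space):

print(remove_links("visit example.com/page now"))  # 'visit   now'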
5,332,558
def timefn(fn):
    """Times a function and stores the result in LOG variables"""
    @wraps(fn)
    def inside(*args, **kwargs):
        start = timer()
        result = fn(*args, **kwargs)
        end = timer()
        gv.TIME_LOG += f'Fn : {fn.__name__} - {end - start}\n'
        return result
    return inside
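A minimal usage sketch; `gv` and `timer` here are assumptions standing in for the module's own globals (the original presumably imports a timer from timeit and holds TIME_LOG on a module-level `gv`):

from functools import wraps
from timeit import default_timer as timer

class gv:  # hypothetical stand-in for the module's global log holder
    TIME_LOG = ''

@timefn
def slow_square(n):
    return n * n

slow_square(12)
print(gv.TIME_LOG)  # e.g. "Fn : slow_square - 2.1e-06\n"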
5,332,559
def get_tweet_stream(output_file, twitter_credentials):
    """
    This function is given and returns a "stream" to listen to tweets and
    store them in output_file.

    To understand how this function works, check it against the code of
    twitter_streaming in part00_preclass.

    :param output_file: the file where the returned stream will store tweets
    :param twitter_credentials: a dictionary containing the credentials to
        access twitter (you should have created your own!)
    :return: a "stream" variable to track live tweets
    """
    access_token = twitter_credentials['access_token']
    access_token_secret = twitter_credentials['access_token_secret']
    consumer_key = twitter_credentials['consumer_key']
    consumer_secret = twitter_credentials['consumer_secret']

    l = TweetToFileListener()
    l.set_output_file(output_file)

    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    return stream
5,332,560
def delete(review_id):
    """Delete a review.

    Args:
        review_id: ID of the review to be deleted.
    """
    review = get_by_id(review_id)
    with db.engine.connect() as connection:
        connection.execute(sqlalchemy.text("""
            DELETE FROM review
             WHERE id = :review_id
        """), {
            "review_id": review_id,
        })
    if review["rating"] is not None:
        db_avg_rating.update(review["entity_id"], review["entity_type"])
5,332,561
def conv3d_3x3(filters, stride=1, padding=1, kernel_initializer=None,
               bias_initializer=None, name=None):
    """3D convolution with padding."""
    return keras.Sequential([
        layers.ZeroPadding3D(padding),
        layers.Conv3D(filters,
                      kernel_size=3,
                      strides=stride,
                      kernel_initializer=kernel_initializer,
                      bias_initializer=bias_initializer,
                      padding='valid')
    ], name=name)
5,332,562
def get_available_quests(user, num_quests):
    """Get the quests the user could participate in."""
    quests = []
    for quest in Quest.objects.exclude(questmember__user=user).order_by('priority'):
        if quest.can_add_quest(user) and not quest.completed_quest(user):
            quests.append(quest)
            if len(quests) == num_quests:
                return quests
    return quests
5,332,563
def height():
    """ Default window height """
    return get_default_height()
5,332,564
def no_adjust_tp_func_nb(c: AdjustTPContext, *args) -> float:
    """Placeholder function that returns the initial take-profit value."""
    return c.curr_stop
5,332,565
def inverse_theoretical_laser_position(y, a, b, c):
    """Theoretical angular position of the wire with respect to the laser position."""
    return np.pi - a - np.arccos((b - y) / c)
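A quick numeric sanity check with hypothetical geometry: when b equals y, arccos(0) is pi/2, so the result reduces to pi - a - pi/2:

import numpy as np

print(inverse_theoretical_laser_position(y=1.0, a=0.1, b=1.0, c=2.0))  # ~1.4708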
5,332,566
def ad_roc(y_true, y_score):
    """
    Compute ROC-curve.
    """
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true, y_score,
                                                     pos_label=1,
                                                     drop_intermediate=False)
    return fpr, tpr, thresholds
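A short usage sketch on toy labels and scores (assuming ad_roc above is in scope):

import numpy as np

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
fpr, tpr, thresholds = ad_roc(y_true, y_score)
print(fpr, tpr, thresholds)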
5,332,567
def check_reachability(gateway):
    # from https://stackoverflow.com/questions/2953462/pinging-servers-in-python
    """
    Returns True if host (str) responds to a ping request.
    Remember that a host may not respond to a ping (ICMP) request
    even if the host name is valid.
    """
    time.sleep(15)

    # Option for the number of packets as a function of the platform
    param = '-n' if platform.system().lower() == 'windows' else '-c'

    # Building the command. Ex: "ping -c 1 google.com"
    command = ['ping', param, '1', '-w', '1', gateway]

    return subprocess.call(command, stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) == 0
5,332,568
def greedy_decoding(baseline_transformer, src_representations_batch, src_mask,
                    trg_field_processor, max_target_tokens=100):
    """
    Supports batch (decode multiple source sentences) greedy decoding.

    Decoding could be further optimized to cache old token activations because
    they can't look ahead and so adding a newly predicted token won't change
    old token's activations.

    Example: we input <s> and do a forward pass. We get intermediate
    activations for <s> and at the output at position 0, after doing the
    linear layer we get e.g. token <I>. Now we input <s>,<I> but <s>'s
    activations will remain the same. Similarly say we now got <am> at output
    position 1, in the next step we input <s>,<I>,<am> and so <I>'s
    activations will remain the same as it only looks at/attends to itself
    and to <s>, and so forth.
    """
    device = next(baseline_transformer.parameters()).device
    pad_token_id = trg_field_processor.vocab.stoi[PAD_TOKEN]

    # Initial prompt is the beginning/start of the sentence token. Make it
    # compatible shape with source batch => (B,1)
    target_sentences_tokens = [[BOS_TOKEN] for _ in range(src_representations_batch.shape[0])]
    trg_token_ids_batch = torch.tensor(
        [[trg_field_processor.vocab.stoi[tokens[0]]] for tokens in target_sentences_tokens],
        device=device)

    # Set to true for a particular target sentence once it reaches the EOS
    # (end-of-sentence) token
    is_decoded = [False] * src_representations_batch.shape[0]

    while True:
        trg_mask, _ = get_masks_and_count_tokens_trg(trg_token_ids_batch, pad_token_id)
        # Shape = (B*T, V) where T is the current token-sequence length and V
        # target vocab size
        predicted_log_distributions = baseline_transformer.decode(
            trg_token_ids_batch, src_representations_batch, trg_mask, src_mask)

        # Extract only the indices of last token for every target sentence
        # (we take every T-th token)
        num_of_trg_tokens = len(target_sentences_tokens[0])
        predicted_log_distributions = predicted_log_distributions[num_of_trg_tokens - 1::num_of_trg_tokens]

        # This is the "greedy" part of the greedy decoding:
        # We find indices of the highest probability target tokens and discard
        # every other possibility
        most_probable_last_token_indices = torch.argmax(
            predicted_log_distributions, dim=-1).cpu().numpy()

        # Find target tokens associated with these indices
        predicted_words = [trg_field_processor.vocab.itos[index]
                           for index in most_probable_last_token_indices]

        for idx, predicted_word in enumerate(predicted_words):
            target_sentences_tokens[idx].append(predicted_word)

            if predicted_word == EOS_TOKEN:
                # once we find EOS token for a particular sentence we flag it
                is_decoded[idx] = True

        if all(is_decoded) or num_of_trg_tokens == max_target_tokens:
            break

        # Prepare the input for the next iteration (merge old token ids with
        # the new column of most probable token ids)
        trg_token_ids_batch = torch.cat(
            (trg_token_ids_batch,
             torch.unsqueeze(torch.tensor(most_probable_last_token_indices, device=device), 1)),
            1)

    # Post process the sentences - remove everything after the EOS token
    target_sentences_tokens_post = []
    for target_sentence_tokens in target_sentences_tokens:
        try:
            target_index = target_sentence_tokens.index(EOS_TOKEN) + 1
        except ValueError:
            target_index = None

        target_sentence_tokens = target_sentence_tokens[:target_index]
        target_sentences_tokens_post.append(target_sentence_tokens)

    return target_sentences_tokens_post
5,332,569
def _parse_cell_type(cell_type_arg):
    """ Convert the cell type representation to the expected JVM CellType object."""
    def to_jvm(ct):
        return _context_call('_parse_cell_type', ct)

    if isinstance(cell_type_arg, str):
        return to_jvm(cell_type_arg)
    elif isinstance(cell_type_arg, CellType):
        return to_jvm(cell_type_arg.cell_type_name)
5,332,570
def _extract_line(args):
    """Implements the BigQuery extract magic used to extract table data to GCS.

    The supported syntax is:

        %bigquery extract -S|--source <table> -D|--destination <url> <other_args>

    Args:
        args: the arguments following '%bigquery extract'.

    Returns:
        A message about whether the extract succeeded or failed.
    """
    name = args['source']
    source = datalab.utils.commands.get_notebook_item(name)
    if not source:
        source = _get_table(name)

    if not source:
        raise Exception('No source named %s found' % name)
    elif isinstance(source, datalab.bigquery.Table) and not source.exists():
        raise Exception('Table %s does not exist' % name)
    else:
        job = source.extract(
            args['destination'],
            format='CSV' if args['format'] == 'csv' else 'NEWLINE_DELIMITED_JSON',
            compress=args['compress'],
            csv_delimiter=args['delimiter'],
            csv_header=args['header'])
        if job.failed:
            raise Exception('Extract failed: %s' % str(job.fatal_error))
        elif job.errors:
            raise Exception('Extract completed with errors: %s' % str(job.errors))
5,332,571
def Ht(mu, q=None, t=None, pi=None):
    """
    Returns the symmetric Macdonald polynomial using the Haiman,
    Haglund, and Loehr formula.

    Note that if both `q` and `t` are specified, then they must have the
    same parent.

    REFERENCE:

    - J. Haglund, M. Haiman, N. Loehr.
      *A combinatorial formula for non-symmetric Macdonald polynomials*.
      :arXiv:`math/0601693v3`.

    EXAMPLES::

        sage: from sage.combinat.sf.ns_macdonald import Ht
        sage: HHt = SymmetricFunctions(QQ['q','t'].fraction_field()).macdonald().Ht()
        sage: Ht([0,0,1])
        x0 + x1 + x2
        sage: HHt([1]).expand(3)
        x0 + x1 + x2
        sage: Ht([0,0,2])
        x0^2 + (q + 1)*x0*x1 + x1^2 + (q + 1)*x0*x2 + (q + 1)*x1*x2 + x2^2
        sage: HHt([2]).expand(3)
        x0^2 + (q + 1)*x0*x1 + x1^2 + (q + 1)*x0*x2 + (q + 1)*x1*x2 + x2^2
    """
    P, q, t, n, R, x = _check_muqt(mu, q, t, pi)
    res = 0
    for a in n:
        weight = a.weight()
        res += q**a.maj() * t**a.inv() * prod(x[i]**weight[i]
                                              for i in range(len(weight)))
    return res
5,332,572
def is_generic_alias_of(to_check, type_def):
    """
    :param to_check: the type that is supposed to be a generic alias of ``type_def``
        if this function returns ``True``.
    :param type_def: the type that is supposed to be a generic version of ``to_check``
        if this function returns ``True``.
    :return: ``True`` if ``to_check`` is a generic alias of ``type_def``,
        ``False`` otherwise.
    """
    if isinstance(to_check, type) and issubclass(to_check, type_def):
        return True
    origin = getattr(to_check, "__origin__", None)
    if origin is not None:
        return issubclass(origin, type_def)
    return False
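A short usage sketch with standard typing aliases (assuming is_generic_alias_of above is in scope):

from typing import List

print(is_generic_alias_of(List[int], list))  # True: __origin__ is list
print(is_generic_alias_of(list, list))       # True via the plain-subclass branch
print(is_generic_alias_of(dict, list))       # False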
5,332,573
def train_PCA(X, n_dims, model='pca'):
    """
    name: train_PCA

    Linear dimensionality reduction using Singular Value Decomposition of the
    data to project it to a lower dimensional space. It uses the LAPACK
    implementation of the full SVD or a randomized truncated SVD by the method
    of Halko et al. 2009, depending on the shape of the input data and the
    number of components to extract.

    returns: the transformer model
    """
    estimator = transformer[model].set_params(pca__n_components=n_dims)
    estimator.fit(X)
    return estimator
5,332,574
def find_song(hash_dictionary, sample_dictionary, id_to_song):
    """
    Run our song matching algorithm to find the song.

    :param hash_dictionary:
    :param sample_dictionary:
    :param id_to_song:
    :return max_frequencies, max_frequencies_keys:
    """
    offset_dictionary = dict()
    for song_id in id_to_song.keys():
        offset_dictionary[song_id] = {}

    song_size = {}
    for song_id in id_to_song.keys():
        rate, data = wavfile.read(os.path.join(
            os.path.join(AppDirs('Presto-Chango').user_data_dir, 'Songs'),
            id_to_song[song_id]))
        song_size[song_id] = len(data) / rate

    for sample_hash_value, sample_offsets in sample_dictionary.items():
        for sample_offset in sample_offsets:
            try:
                for song_id, offset in hash_dictionary[sample_hash_value]:
                    try:
                        offset_dictionary[song_id][(offset - sample_offset) // 1] += 1
                    except KeyError:
                        offset_dictionary[song_id][(offset - sample_offset) // 1] = 1
            except KeyError:
                pass

    max_frequencies = {}
    for song_id, offset_dict in offset_dictionary.items():
        for relative_set, frequency in offset_dict.items():
            try:
                max_frequencies[song_id] = max(max_frequencies[song_id], frequency)
            except KeyError:
                max_frequencies[song_id] = frequency

    max_frequencies_keys = sorted(max_frequencies, key=max_frequencies.get,
                                  reverse=True)
    return max_frequencies, max_frequencies_keys
5,332,575
def plot_percentage( contacts, parameters="Description", t_detail=1, n_t_cells=100, save=False, palette="deep", context="notebook", ): """Plot final percentage of T cells in contact with DC""" t_cells_in_contact = contacts.drop_duplicates(["Track_ID", "Run", parameters]) contacts_at_t_detail = t_cells_in_contact[ t_cells_in_contact["Time"] <= t_detail * 60 ] sns.set(style="ticks", palette=palette, context=context) total_contacts = contacts_at_t_detail[["Run", parameters]].pivot_table( columns=parameters, index="Run", aggfunc=len, fill_value=0 ) normalized_contacts = total_contacts / n_t_cells * 100 sorted_contacts = normalized_contacts.reindex( sorted(total_contacts.columns, key=lambda col: total_contacts[col].median()), axis=1, ) ax = sns.violinplot(data=sorted_contacts, cut=0, inner=None, bw=0.75) ax.set_xlabel("") ax.set_ylabel("% T cells in contact") plt.xticks(rotation=45, horizontalalignment="right") sns.despine() plt.tight_layout() plt.show() if save == True: save = "raw_violins.csv" if save: sorted_contacts.to_csv(save)
5,332,576
def phedex_url(api=''): """Return Phedex URL for given API name""" return 'https://cmsweb.cern.ch/phedex/datasvc/json/prod/%s' % api
5,332,577
def upgrade(): """upgrade to this revision""" op.execute("CREATE SCHEMA data") op.execute("CREATE EXTENSION IF NOT EXISTS postgis") # Create collections table op.create_table( "collections", sa.Column("id", sa.VARCHAR(1024), nullable=False, primary_key=True), sa.Column("stac_version", sa.VARCHAR(300)), sa.Column("title", sa.VARCHAR(1024)), sa.Column("stac_extensions", sa.ARRAY(sa.VARCHAR(300)), nullable=True), sa.Column("description", sa.VARCHAR(1024), nullable=False), sa.Column("keywords", sa.ARRAY(sa.VARCHAR(300))), sa.Column("version", sa.VARCHAR(300)), sa.Column("license", sa.VARCHAR(300), nullable=False), sa.Column("providers", JSONB), sa.Column("summaries", JSONB, nullable=True), sa.Column("extent", JSONB), sa.Column("links", JSONB, nullable=True), schema="data", ) # Create items table op.create_table( "items", sa.Column("id", sa.VARCHAR(1024), nullable=False, primary_key=True), sa.Column("stac_version", sa.VARCHAR(300)), sa.Column("stac_extensions", sa.ARRAY(sa.VARCHAR(300)), nullable=True), sa.Column("geometry", Geometry("POLYGON", srid=4326, spatial_index=True)), sa.Column("bbox", sa.ARRAY(sa.NUMERIC), nullable=False), sa.Column("gsd", sa.REAL, nullable=True, index=True), sa.Column("properties", JSONB), sa.Column("assets", JSONB), sa.Column("collection_id", sa.VARCHAR(1024), nullable=False, index=True), # These are usually in properties but defined as their own fields for indexing sa.Column("datetime", sa.TIMESTAMP, nullable=False, index=True), sa.Column("links", JSONB, nullable=True), sa.ForeignKeyConstraint(["collection_id"], ["data.collections.id"]), schema="data", ) # Create pagination token table op.create_table( "tokens", sa.Column("id", sa.VARCHAR(100), nullable=False, primary_key=True), sa.Column("keyset", sa.VARCHAR(1000), nullable=False), schema="data", )
5,332,578
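The revision above only defines `upgrade`. A matching `downgrade` would typically drop the objects in reverse dependency order; a hedged sketch, not part of the original migration:

def downgrade():
    """Hypothetical reverse of this revision (not in the original source)."""
    op.drop_table("tokens", schema="data")
    op.drop_table("items", schema="data")        # drop before collections (FK dependency)
    op.drop_table("collections", schema="data")
    op.execute("DROP SCHEMA data")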
def extract_emails(fname, email='Email Address', outfile="emails_from_mailchimp.txt",
                   nofile=False, nolog=False, sort=True):
    """
    Extract e-mail addresses from a CSV-exported MailChimp list.

    :param fname: the input .csv file
    :param email: the header of the column containing all e-mail addresses
    :param outfile: the .txt file the addresses will get written to
    :param nofile: suppresses the creation of a text file if set to True
    :param nolog: suppresses logging the addresses to stdout if set to True
    :param sort: sorts e-mail addresses alphabetically if set to True
    :return a list containing all e-mail addresses
    """
    addresses = []
    try:
        with open(fname, newline='') as csvfile:
            reader = csv.DictReader(csvfile, delimiter=',')
            # DictReader already consumes the header row as its fieldnames;
            # calling next(reader) here would silently drop the first address.
            if reader.fieldnames is None:
                log.error("The input file cannot be read. "
                          "Please provide a valid CSV file.")
            for item in reader:
                try:
                    addresses.append(item[email])
                except KeyError:
                    log.error("The provided CSV file does not contain "
                              "the header \"{}\".\n"
                              "Please provide the correct header name "
                              "for the column containing e-mail "
                              "addresses.".format(email))
                    return
    except FileNotFoundError:
        log.error("The input file is not available. "
                  "Please provide a valid path.")
    except IsADirectoryError:
        log.error("The input file is not a CSV file but a directory.")
    if sort:
        addresses.sort()
    if not nolog:
        for address in addresses:
            log.info(address)
    if not nofile:
        try:
            with open(outfile, 'w') as txtfile:
                for address in addresses:
                    txtfile.write(address + '\n')
        except FileNotFoundError:
            log.error("The file you are trying to write to "
                      "does not exist.")
        except PermissionError:
            log.error("You do not have permission to write to the file "
                      "whose path you provided.")
    return addresses
5,332,579
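A self-contained usage sketch that builds a tiny MailChimp-style export and extracts from it; the filenames and addresses are made up for illustration:

# Illustrative usage (hypothetical data, not part of the original source):
import csv
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False, newline='') as tmp:
    writer = csv.writer(tmp)
    writer.writerow(['Email Address', 'First Name'])
    writer.writerow(['bob@example.com', 'Bob'])
    writer.writerow(['alice@example.com', 'Alice'])
    path = tmp.name

addresses = extract_emails(path, nofile=True, nolog=True)
assert addresses == ['alice@example.com', 'bob@example.com']   # sorted by default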
def continuations(tree, *, syntax, expander, **kw): """[syntax, block] call/cc for Python. This allows saving the control state and then jumping back later (in principle, any time later). Some possible use cases: - Tree traversal (possibly a cartesian product of multiple trees, with the current position in each tracked automatically). - McCarthy's amb operator. - Generators. (Python already has those, so only for teaching.) This is a very loose pythonification of Paul Graham's continuation-passing macros, which implement continuations by chaining closures and passing the continuation semi-implicitly. For details, see chapter 20 in On Lisp: http://paulgraham.com/onlisp.html Continuations are most readily implemented when the program is written in continuation-passing style (CPS), but that is unreadable for humans. The purpose of this macro is to partly automate the CPS transformation, so that at the use site, we can write CPS code in a much more readable fashion. A ``with continuations`` block implies TCO; the same rules apply as in a ``with tco`` block. Furthermore, ``with continuations`` introduces the following additional rules: - Functions which make use of continuations, or call other functions that do, must be defined within a ``with continuations`` block, using the usual ``def`` or ``lambda`` forms. - All function definitions in a ``with continuations`` block, including any nested definitions, have an implicit formal parameter ``cc``, **even if not explicitly declared** in the formal parameter list. If declared explicitly, ``cc`` must be in a position that can accept a default value. This means ``cc`` must be declared either as by-name-only:: with continuations: def myfunc(a, b, *, cc): ... f = lambda *, cc: ... or as the last parameter that has no default:: with continuations: def myfunc(a, b, cc): ... f = lambda cc: ... Then the continuation machinery will automatically set the default value of ``cc`` to the default continuation (``identity``), which just returns its arguments. The most common use case for explicitly declaring ``cc`` is that the function is the target of a ``call_cc[]``; then it helps readability to make the ``cc`` parameter explicit. - A ``with continuations`` block will automatically transform all function definitions and ``return`` statements lexically contained within the block to use the continuation machinery. - ``return somevalue`` actually means a tail-call to ``cc`` with the given ``somevalue``. Multiple values can be returned as a ``Values``. Multiple-valueness is tested at run time. Any ``Values`` return value is automatically unpacked to the args and kwargs of ``cc``. - An explicit ``return somefunc(arg0, ..., k0=v0, ...)`` actually means a tail-call to ``somefunc``, with its ``cc`` automatically set to our ``cc``. Hence this inserts a call to ``somefunc`` before proceeding with our current continuation. (This is most often what we want when making a tail-call from a continuation-enabled function.) Here ``somefunc`` **must** be a continuation-enabled function; otherwise the TCO chain will break and the result is immediately returned to the top-level caller. (If the call succeeds at all; the ``cc`` argument is implicitly filled in and passed by name. Regular functions usually do not accept a named parameter ``cc``, let alone know what to do with it.) - Just like in ``with tco``, a lambda body is analyzed as one big return-value expression. 
This uses the exact same analyzer; for example, ``do[]`` (including any implicit ``do[]``) and the ``let[]`` expression family are supported. - Calls from functions defined in one ``with continuations`` block to those defined in another are ok; there is no state or context associated with the block. - Much of the language works as usual. Any non-tail calls can be made normally. Regular functions can be called normally in any non-tail position. Continuation-enabled functions behave as regular functions when called normally; only tail calls implicitly set ``cc``. A normal call uses ``identity`` as the default ``cc``. - For technical reasons, the ``return`` statement is not allowed at the top level of the ``with continuations:`` block. (Because a continuation is essentially a function, ``return`` would behave differently based on whether it is placed lexically before or after a ``call_cc[]``.) If you absolutely need to terminate the function surrounding the ``with continuations:`` block from inside the block, use an exception to escape; see ``call_ec``, ``catch``, ``throw``. **Capturing the continuation**: Inside a ``with continuations:`` block, the ``call_cc[]`` statement captures a continuation. (It is actually a macro, for technical reasons.) For various possible program topologies that continuations may introduce, see the clarifying pictures under ``doc/`` in the source distribution. Syntax:: x = call_cc[func(...)] *xs = call_cc[func(...)] x0, ... = call_cc[func(...)] x0, ..., *xs = call_cc[func(...)] call_cc[func(...)] Conditional variant:: x = call_cc[f(...) if p else g(...)] *xs = call_cc[f(...) if p else g(...)] x0, ... = call_cc[f(...) if p else g(...)] x0, ..., *xs = call_cc[f(...) if p else g(...)] call_cc[f(...) if p else g(...)] Assignment targets: - To destructure positional multiple-values (from a `Values` return value), use a tuple assignment target (comma-separated names, as usual). Destructuring *named* return values from a `call_cc` is currently not supported. - The last assignment target may be starred. It is transformed into the vararg (a.k.a. ``*args``) of the continuation function. (It will capture a whole tuple, or any excess items, as usual.) - To ignore the return value (useful if ``func`` was called only to perform its side-effects), just omit the assignment part. Conditional variant: - ``p`` is any expression. If truthy, ``f(...)`` is called, and if falsey, ``g(...)`` is called. - Each of ``f(...)``, ``g(...)`` may be ``None``. A ``None`` skips the function call, proceeding directly to the continuation. Upon skipping, all assignment targets (if any are present) are set to ``None``. The starred assignment target (if present) gets the empty tuple. - The main use case of the conditional variant is for things like:: with continuations: k = None def setk(cc): global k k = cc def dostuff(x): call_cc[setk() if x > 10 else None] # capture only if x > 10 ... To keep things relatively straightforward, a ``call_cc[]`` is only allowed to appear **at the top level** of: - the ``with continuations:`` block itself - a ``def`` or ``async def`` Nested defs are ok; here *top level* only means the top level of the *currently innermost* ``def``. If you need to place ``call_cc[]`` inside a loop, use ``@looped`` et al. from ``unpythonic.fploop``; this has the loop body represented as the top level of a ``def``. Multiple ``call_cc[]`` statements in the same function body are allowed. These essentially create nested closures. 
**Main differences to Scheme and Racket**:

    Compared to Scheme/Racket, where ``call/cc`` will capture also expressions
    occurring further up in the call stack, our ``call_cc`` may need to be
    placed differently (further out, depending on what needs to be captured)
    due to the delimited nature of the continuations implemented here.

    Scheme and Racket implicitly capture the continuation at every position,
    whereas we do it explicitly, only at the use sites of the ``call_cc`` macro.

    Also, since there are limitations to where a ``call_cc[]`` may appear, some
    code may need to be structured differently to do some particular thing, if
    porting code examples originally written in Scheme or Racket.

    Unlike ``call/cc`` in Scheme/Racket, ``call_cc`` takes **a function call**
    as its argument, not just a function reference. Also, there's no need for
    it to be a one-argument function; any other args can be passed in the call.
    The ``cc`` argument is filled implicitly and passed by name; any others are
    passed exactly as written in the client code.

    **Technical notes**:

    The ``call_cc[]`` statement essentially splits its use site into *before*
    and *after* parts, where the *after* part (the continuation) can be run
    a second and further times, by later calling the callable that represents
    the continuation. This makes a computation resumable from a desired point.

    The return value of the continuation is whatever the original function
    returns, for any ``return`` statement that appears lexically after the
    ``call_cc[]``.

    The effect of ``call_cc[]`` is that the function call ``func(...)`` in the
    brackets is performed, with its ``cc`` argument set to the lexically
    remaining statements of the current ``def`` (at the top level, the rest of
    the ``with continuations`` block), represented as a callable.

    The continuation itself ends there (it is *delimited* in this particular
    sense), but it will chain to the ``cc`` of the function it appears in.
    This is termed the *parent continuation* (**pcc**), stored in the internal
    variable ``_pcc`` (which defaults to ``None``).

    Via the use of the pcc, here ``f`` will maintain the illusion of being
    just one function, even though a ``call_cc`` appears there::

        def f(*, cc):
            ...
            call_cc[g(1, 2, 3)]
            ...

    The continuation is a closure. For its pcc, it will use the value the
    original function's ``cc`` had when the definition of the continuation
    was executed (for that particular instance of the closure). Hence, calling
    the original function again with its ``cc`` set to something else will
    produce a new continuation instance that chains into that new ``cc``.

    The continuation's own ``cc`` will be ``identity``, to allow its use just
    like any other function (also as argument of a ``call_cc`` or target of
    a tail call).

    When the pcc is set (not ``None``), the effect is to run the pcc first,
    and ``cc`` only after that. This preserves the whole captured tail of
    a computation also in the presence of nested ``call_cc`` invocations
    (in the above example, this would occur if also ``g`` used ``call_cc``).

    Continuations are not accessible by name (their definitions are named by
    gensym). To get a reference to a continuation instance, stash the value
    of the ``cc`` argument somewhere while inside the ``call_cc``.

    The function ``func`` called by a ``call_cc[func(...)]`` is (almost) the
    only place where the ``cc`` argument is actually set. There it is the
    captured continuation. Roughly everywhere else, ``cc`` is just
    ``identity``. 
Tail calls are an exception to this rule; a tail call passes along the current value of ``cc``, unless overridden manually (by setting the ``cc=...`` kwarg in the tail call). When the pcc is set (not ``None``) at the site of the tail call, the machinery will create a composed continuation that runs the pcc first, and ``cc`` (whether current or manually overridden) after that. This composed continuation is then passed to the tail call as its ``cc``. **Tips**: - Once you have a captured continuation, one way to use it is to set ``cc=...`` manually in a tail call, as was mentioned. Example:: def main(): call_cc[myfunc()] # call myfunc, capturing the current cont... ... # ...which is the rest of "main" def myfunc(cc): ourcc = cc # save the captured continuation (sent by call_cc[]) def somefunc(): return dostuff(..., cc=ourcc) # and use it here somestack.append(somefunc) In this example, when ``somefunc`` is eventually called, it will tail-call ``dostuff`` and then proceed with the continuation ``myfunc`` had at the time when that instance of the ``somefunc`` closure was created. (This pattern is essentially how to build the ``amb`` operator.) - Instead of setting ``cc``, you can also overwrite ``cc`` with a captured continuation inside a function body. That overrides the continuation for the rest of the dynamic extent of the function, not only for a particular tail call:: def myfunc(cc): ourcc = cc def somefunc(): cc = ourcc return dostuff(...) somestack.append(somefunc) - A captured continuation can also be called manually; it's just a callable. The assignment targets, at the ``call_cc[]`` use site that spawned this particular continuation, specify its call signature. All args are positional, except the implicit ``cc``, which is by-name-only. - Just like in Scheme/Racket's ``call/cc``, the values that get bound to the ``call_cc[]`` assignment targets on second and further calls (when the continuation runs) are the arguments given to the continuation when it is called (whether implicitly or manually). - Setting ``cc`` to ``unpythonic.fun.identity``, while inside a ``call_cc``, will short-circuit the rest of the computation. In such a case, the continuation will not be invoked automatically. A useful pattern for suspend/resume. - However, it is currently not possible to prevent the rest of the tail of a captured continuation (the pcc) from running, apart from manually setting ``_pcc`` to ``None`` before executing a ``return``. Note that doing that is not strictly speaking supported (and may be subject to change in a future version). - When ``call_cc[]`` appears inside a function definition: - It tail-calls ``func``, with its ``cc`` set to the captured continuation. - The return value of the function containing one or more ``call_cc[]`` statements is the return value of the continuation. - When ``call_cc[]`` appears at the top level of ``with continuations``: - A normal call to ``func`` is made, with its ``cc`` set to the captured continuation. - In this case, if the continuation is called later, it always returns ``None``, because the use site of ``call_cc[]`` is not inside a function definition. - If you need to insert just a tail call (no further statements) before proceeding with the current continuation, no need for ``call_cc[]``; use ``return func(...)`` instead. The purpose of ``call_cc[func(...)]`` is to capture the current continuation (the remaining statements), and hand it to ``func`` as a first-class value. 
- To combo with ``multilambda``, use this ordering:: with multilambda, continuations: ... - Some very limited comboability with ``call_ec``. May be better to plan ahead, using ``call_cc[]`` at the appropriate outer level, and then short-circuit (when needed) by setting ``cc`` to ``identity``. This avoids the need to have both ``call_cc`` and ``call_ec`` at the same time. - ``unpythonic.ec.call_ec`` can be used normally **lexically before any** ``call_cc[]``, but (in a given function) after at least one ``call_cc[]`` has run, the ``ec`` ceases to be valid. This is because our ``call_cc[]`` actually splits the function into *before* and *after* parts, and **tail-calls** the *after* part. (Wrapping the ``def`` in another ``def``, and placing the ``call_ec`` on the outer ``def``, does not help either, because even the outer function has exited by the time *the continuation* is later called the second and further times.) Usage of ``call_ec`` while inside a ``with continuations`` block is:: with continuations: @call_ec def result(ec): print("hi") ec(42) print("not reached") assert result == 42 result = call_ec(lambda ec: do[print("hi"), ec(42), print("not reached")]) Note the signature of ``result``. Essentially, ``ec`` is a function that raises an exception (to escape to a dynamically outer context), whereas the implicit ``cc`` is the closure-based continuation handled by the continuation machinery. See the ``tco`` macro for details on the ``call_ec`` combo. """ if syntax != "block": raise SyntaxError("continuations is a block macro only") # pragma: no cover if syntax == "block" and kw['optional_vars'] is not None: raise SyntaxError("continuations does not take an as-part") # pragma: no cover # Two-pass macro. with dyn.let(_macro_expander=expander): return _continuations(block_body=tree)
5,332,580
def get_autotune_level() -> int: """Get the autotune level. Returns: The autotune level. """ return int(os.environ.get("BAGUA_AUTOTUNE", 0))
5,332,581
def DNA_dynamic_pressure(y, r, h, yunits='kT', dunits='m', opunits='kg/cm^2'):
    """Estimate peak dynamic overpressure at range r from a burst of yield y
    using the Defense Nuclear Agency 1kT standard free airburst overpressure,
    assuming an ideal surface.

    Many real-world surfaces are not ideal (most, in the opinion of Soviet
    analysts), meaning that this function has only limited predictive
    capability."""
    yld = convert_units(y, yunits, 'kT')
    gr = convert_units(r, dunits, 'm')
    height = convert_units(h, dunits, 'm')
    dyn = _DNAairburstpeakdyn(gr, yld, height)
    return convert_units(dyn, 'Pa', opunits)
5,332,582
def revcumsum(U):
    """
    Reverse (suffix) cumulative sum along dim 0: element i holds the sum of
    U[i:]. Implemented with two flips around torch.cumsum, which is faster
    than summing in a Python loop.
    """
    return U.flip(dims=[0]).cumsum(dim=0).flip(dims=[0])
5,332,583
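A small check of what `revcumsum` computes (suffix sums along dim 0); the tensor values are illustrative:

# Illustration (assumes PyTorch; not part of the original source):
import torch

U = torch.tensor([1.0, 2.0, 3.0])
# flip -> [3, 2, 1], cumsum -> [3, 5, 6], flip -> [6, 5, 3], i.e. sums of U[i:]
assert torch.equal(revcumsum(U), torch.tensor([6.0, 5.0, 3.0]))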
def http_trace_parser_hook(request): """ Retrieves the propagation context out of the request. Uses the honeycomb header, with W3C header as fallback. """ honeycomb_header_value = honeycomb.http_trace_parser_hook(request) w3c_header_value = w3c.http_trace_parser_hook(request) if honeycomb_header_value: return honeycomb_header_value else: return w3c_header_value
5,332,584
def format_attn(attention_tuples: tuple):
    """
    Input: N tuples (N = layer num)
        Each tuple item is Tensor of shape Batch x num heads x from x to
    Output: Tensor of shape layer x from x to (averaged over heads)

    Note: concatenating along dim 0 merges the batch dimension into the
    layer dimension, so the documented output shape assumes batch size 1.
    """
    # Combine tuple into one large tensor, then average over the heads dim
    return torch.cat(attention_tuples, dim=0).mean(dim=1)
5,332,585
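A shape check with dummy tensors mimicking HuggingFace-style `output_attentions` tuples, using batch size 1 per the caveat above; all sizes are made up:

# Shape check with made-up data (not part of the original source):
import torch

num_layers, num_heads, seq_len = 2, 4, 5
attentions = tuple(torch.rand(1, num_heads, seq_len, seq_len) for _ in range(num_layers))
out = format_attn(attentions)
assert out.shape == (num_layers, seq_len, seq_len)   # layer x from x to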
def fit_gaussian2d(img, coords, boxsize, plot=False, fwhm_min=1.7, fwhm_max=30, pos_delta_max=1.7):
    """
    Calculate the FWHM of an object located at the pixel coordinates
    in the image. The FWHM will be estimated from a cutout with the
    specified boxsize.

    Parameters
    ----------
    img : ndarray, 2D
        The image where a star is located for calculating a FWHM.
    coords : len=2 ndarray
        The [x, y] pixel position of the star in the image.
    boxsize : int
        The size of the box (on the side), in pixels.
    plot : bool, optional
        If True, plot the original cutout, the model, and their difference.
    fwhm_min : float, optional
        The minimum allowed FWHM for constraining the fit (pixels).
    fwhm_max : float, optional
        The maximum allowed FWHM for constraining the fit (pixels).
    pos_delta_max : float, optional
        The maximum allowed positional offset for constraining the fit (pixels).
        This ensures that the fitter doesn't wander off to a bad pixel.
    """
    cutout_obj = Cutout2D(img, coords, boxsize, mode='strict')
    cutout = cutout_obj.data
    x1d = np.arange(0, cutout.shape[0])
    y1d = np.arange(0, cutout.shape[1])
    x2d, y2d = np.meshgrid(x1d, y1d)

    # Setup our model with some initial guess
    x_init = boxsize/2.0
    y_init = boxsize/2.0
    stddev_init = fwhm_to_stddev(fwhm_min)
    g2d_init = models.Gaussian2D(x_mean = x_init,
                                 y_mean = y_init,
                                 x_stddev = stddev_init,
                                 y_stddev = stddev_init,
                                 amplitude=cutout.max())
    g2d_init += models.Const2D(amplitude=0.0)
    g2d_init.x_stddev_0.min = fwhm_to_stddev(fwhm_min)
    g2d_init.y_stddev_0.min = fwhm_to_stddev(fwhm_min)
    g2d_init.x_stddev_0.max = fwhm_to_stddev(fwhm_max)
    g2d_init.y_stddev_0.max = fwhm_to_stddev(fwhm_max)
    g2d_init.x_mean_0.min = x_init - pos_delta_max
    g2d_init.x_mean_0.max = x_init + pos_delta_max
    g2d_init.y_mean_0.min = y_init - pos_delta_max
    g2d_init.y_mean_0.max = y_init + pos_delta_max

    fit_g = fitting.LevMarLSQFitter()
    g2d = fit_g(g2d_init, x2d, y2d, cutout)

    if plot:
        mod_img = g2d(x2d, y2d)
        plt.figure(1, figsize=(15,5))
        plt.clf()
        plt.subplots_adjust(left=0.05, wspace=0.3)
        plt.subplot(1, 3, 1)
        plt.imshow(cutout, vmin=mod_img.min(), vmax=mod_img.max())
        plt.colorbar()
        plt.title("Original")
        plt.subplot(1, 3, 2)
        plt.imshow(mod_img, vmin=mod_img.min(), vmax=mod_img.max())
        plt.colorbar()
        plt.title("Model")
        plt.subplot(1, 3, 3)
        plt.imshow(cutout - mod_img)
        plt.colorbar()
        plt.title("Orig - Mod")

    # Adjust Gaussian parameters to the original coordinates.
    cutout_pos = np.array([g2d.x_mean_0.value, g2d.y_mean_0.value])
    origin_pos = cutout_obj.to_original_position(cutout_pos)
    g2d.x_mean_0 = origin_pos[0]
    g2d.y_mean_0 = origin_pos[1]

    return g2d
5,332,586
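`fit_gaussian2d` calls a helper `fwhm_to_stddev` that is not shown in this snippet. A minimal sketch, assuming the standard Gaussian relation FWHM = 2*sqrt(2*ln 2)*sigma; the helper's actual definition in the original codebase may differ:

# Hypothetical helper assumed by fit_gaussian2d (not part of the original source):
import numpy as np

def fwhm_to_stddev(fwhm):
    """Convert a full width at half maximum to a Gaussian standard deviation."""
    return fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))   # FWHM ~= 2.3548 * sigma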
def _ensure_aware(series, tz_local):
    """Convert naive datetimes to timezone-aware, or return them as-is.

    Args:
        series (pd.Series): Datetime series to check and, if naive, localize.
        tz_local (str, pytz.timezone, dateutil.tz.tzfile): Time zone to which
            naive timestamps will be localized. If the series already has
            timezone info, it is returned as-is.
    """
    if pd.api.types.is_datetime64tz_dtype(series):
        return series
    return series.dt.tz_localize(tz=tz_local)
5,332,587
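A short illustration of both branches, localizing naive timestamps and passing aware ones through unchanged; the dates are made up:

# Illustration (not part of the original source):
import pandas as pd

naive = pd.Series(pd.to_datetime(['2021-01-01 12:00', '2021-01-02 12:00']))
aware = _ensure_aware(naive, 'US/Eastern')
assert str(aware.dt.tz) == 'US/Eastern'
# Already-aware input is returned as-is, even with a different tz argument:
assert _ensure_aware(aware, 'UTC').equals(aware)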
def one_mask(df, mask_type, sample_type, data, logger=None): """ return a vector of booleans from the lower triangle of a matching-matrix based on 'mask_type' :param df: pandas.DataFrame with samples as columns :param str mask_type: A list of strings to specify matching masks, or a minimum distance to mask out :param str sample_type: Samples can be 'wellid' or 'parcelid' :param data: PyGEST.Data object with access to AHBA data :param logger: A logger object to receive debug information :return: Boolean 1-D vector to remove items (False values in mask) from any sample x sample triangle vector """ def handle_log(maybe_logger, severity, message): if maybe_logger is None: print(message) else: if severity == "info": maybe_logger.info(message) if severity == "warn": maybe_logger.warn(message) # If mask is a number, use it as a distance filter try: # Too-short values to mask out are False, keepers are True. min_dist = float(mask_type) distance_vector = data.distance_vector(df.columns, sample_type=sample_type) if len(distance_vector) != (len(df.columns) * (len(df.columns) - 1)) / 2: handle_log(logger, "warn", " MISMATCH in expr and dist!!! Some sample IDs probably not found.") mask_vector = np.array(distance_vector > min_dist, dtype=bool) handle_log(logger, "info", " masking out {:,} of {:,} edges closer than {}mm apart.".format( np.count_nonzero(np.invert(mask_vector)), len(mask_vector), min_dist )) handle_log(logger, "info", " mean dist of masked edges : {:0.2f} [{:0.2f} to {:0.2f}].".format( np.mean(distance_vector[~mask_vector]), np.min(distance_vector[~mask_vector]), np.max(distance_vector[~mask_vector]), )) handle_log(logger, "info", " mean dist of unmasked edges: {:0.2f} [{:0.2f} to {:0.2f}].".format( np.mean(distance_vector[mask_vector]), np.min(distance_vector[mask_vector]), np.max(distance_vector[mask_vector]), )) return mask_vector except TypeError: pass except ValueError: pass # Mask is not a number, see if it's a pickled dataframe if os.path.isfile(mask_type): with open(mask_type, 'rb') as f: mask_df = pickle.load(f) if isinstance(mask_df, pd.DataFrame): # Note what we started with so we can report after we tweak the dataframe. # Too-variant values to mask out are False, keepers are True. orig_vector = mask_df.values[np.tril_indices(n=mask_df.shape[0], k=-1)] orig_falses = np.count_nonzero(~orig_vector) orig_length = len(orig_vector) handle_log(logger, "info", "Found {} containing {:,} x {:,} mask".format( mask_type, mask_df.shape[0], mask_df.shape[1] )) handle_log(logger, "info", " generating {:,}-len vector with {:,} False values to mask.".format( orig_length, orig_falses )) # We can only use well_ids found in BOTH df and our new mask, make shapes match. 
unmasked_ids = [well_id for well_id in df.columns if well_id not in mask_df.columns] usable_ids = [well_id for well_id in df.columns if well_id in mask_df.columns] usable_df = mask_df.reindex(index=usable_ids, columns=usable_ids) usable_vector = usable_df.values[np.tril_indices(n=len(usable_ids), k=-1)] usable_falses = np.count_nonzero(~usable_vector) usable_length = len(usable_vector) handle_log(logger, "info", " {:,} well_ids not found in the mask; padding with Falses.".format( len(unmasked_ids) )) pad_rows = pd.DataFrame(np.zeros((len(unmasked_ids), len(mask_df.columns)), dtype=bool), columns=mask_df.columns, index=unmasked_ids) mask_df = pd.concat([mask_df, pad_rows], axis=0) pad_cols = pd.DataFrame(np.zeros((len(mask_df.index), len(unmasked_ids)), dtype=bool), columns=unmasked_ids, index=mask_df.index) mask_df = pd.concat([mask_df, pad_cols], axis=1) mask_vector = mask_df.values[np.tril_indices(n=mask_df.shape[0], k=-1)] mask_falses = np.count_nonzero(~mask_vector) mask_trues = np.count_nonzero(mask_vector) handle_log(logger, "info", " padded mask matrix out to {:,} x {:,}".format( mask_df.shape[0], mask_df.shape[1] )) handle_log(logger, "info", " with {:,} True, {:,} False, {:,} NaNs in triangle.".format( mask_trues, mask_falses, np.count_nonzero(np.isnan(mask_vector)) )) shaped_mask_df = mask_df.reindex(index=df.columns, columns=df.columns) shaped_vector = shaped_mask_df.values[np.tril_indices(n=len(df.columns), k=-1)] handle_log(logger, "info", " masking out {:,} (orig {:,}, {:,} usable) hi-var".format( np.count_nonzero(~shaped_vector), orig_falses, usable_falses, )) handle_log(logger, "info", " of {:,} (orig {:,}, {:,} usable) edges.".format( len(shaped_vector), orig_length, usable_length )) return shaped_vector else: handle_log(logger, "warn", "{} is a file, but not a pickled dataframe. Skipping mask.".format(mask_type)) do_nothing_mask = np.ones((len(df.columns), len(df.columns)), dtype=bool) return do_nothing_mask[np.tril_indices(n=len(df.columns), k=-1)] # Mask is not a number, so treat it as a matching filter if mask_type[:4] == 'none': items = list(df.columns) elif mask_type[:4] == 'fine': items = data.samples(samples=df.columns)['fine_name'] elif mask_type[:6] == 'coarse': items = data.samples(samples=df.columns)['coarse_name'] else: items = data.samples(samples=df.columns)['structure_name'] mask_array = np.ndarray((len(items), len(items)), dtype=bool) # There is, potentially, a nice vectorized way to mark matching values as True, but I can't find it. # So, looping works and is easy to read, although it might cost us a few extra ms. for i, y in enumerate(items): for j, x in enumerate(items): # Generate one edge of the match matrix mask_array[i][j] = True if mask_type == 'none' else (x != y) mask_vector = mask_array[np.tril_indices(n=mask_array.shape[0], k=-1)] handle_log(logger, "info", " masking out {:,} of {:,} '{}' edges.".format( sum(np.invert(mask_vector)), len(mask_vector), mask_type )) # if len(mask_vector) == 0: # mask_vector = np.ones(int(len(df.columns) * (len(df.columns) - 1) / 2), dtype=bool) return mask_vector
5,332,588
def compute_epsilon(steps): """Computes epsilon value for given hyperparameters.""" if FLAGS.noise_multiplier == 0.0: return float('inf') orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64)) sampling_probability = FLAGS.batch_size / NB_TRAIN rdp = compute_rdp(q=sampling_probability, noise_multiplier=FLAGS.noise_multiplier, steps=steps, orders=orders) # Delta is set to 1e-5 because Penn TreeBank has 60000 training points. return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
5,332,589
def test_website_migrate(): """TODO:""" pass
5,332,590
def extract_tumblr_posts(client, nb_requests, search_query, before, delta_limit): """Extract Tumblr posts with a given emotion. Parameters: client: Authenticated Tumblr client with the pytumblr package. nb_requests: Number of API request. search_query: Emotion to search for. before: A timestamp to search for posts before that value. delta_limit: Maximum difference of timestamp between two queries. Returns: posts: List of Tumblr posts. """ posts = [] for i in range(nb_requests): tagged = client.tagged(search_query, filter='text', before=before) for elt in tagged: timestamp = elt['timestamp'] if (abs(timestamp - before) < delta_limit): before = timestamp current_post = [] current_post.append(elt['id']) current_post.append(elt['post_url']) elt_type = elt['type'] current_post.append(elt_type) current_post.append(timestamp) current_post.append(elt['date']) current_post.append(elt['tags']) current_post.append(elt['liked']) current_post.append(elt['note_count']) if (elt_type == 'photo'): # Only take the first image current_post.append(elt['photos'][0]['original_size']['url']) current_post.append(elt['caption'].replace('\n',' ').replace('\r',' ')) current_post.append(search_query) posts.append(current_post) elif (elt_type == 'text'): current_post.append(np.nan) current_post.append(elt['body'].replace('\n',' ').replace('\r',' ')) current_post.append(search_query) posts.append(current_post) return posts
5,332,591
def total_curtailment_expression_rule(mod, g, tmp): """ **Expression Name**: GenVar_Total_Curtailment_MW **Defined Over**: GEN_VAR_OPR_TMPS Available energy that was not delivered There's an adjustment for subhourly reserve provision: 1) if downward reserves are provided, they will be called upon occasionally, so power provision will have to decrease and additional curtailment will be incurred; 2) if upward reserves are provided (energy is being curtailed), they will be called upon occasionally, so power provision will have to increase and less curtailment will be incurred The subhourly adjustment here is a simple linear function of reserve Assume cap factors don't incorporate availability derates, so don't multiply capacity by Availability_Derate here (will count as curtailment). """ return ( mod.Capacity_MW[g, mod.period[tmp]] * mod.gen_var_cap_factor[g, tmp] - mod.GenVar_Provide_Power_MW[g, tmp] + mod.GenVar_Subhourly_Curtailment_MW[g, tmp] - mod.GenVar_Subhourly_Energy_Delivered_MW[g, tmp] )
5,332,592
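The docstring names the expression and its index set; a hedged sketch of how such a rule is typically attached to a Pyomo model. The surrounding model-construction code is not shown here, so this registration is an assumption based on the names in the docstring:

# Hypothetical registration following the names in the docstring
# (the real codebase attaches this wherever model components are added):
from pyomo.environ import Expression

mod.GenVar_Total_Curtailment_MW = Expression(
    mod.GEN_VAR_OPR_TMPS, rule=total_curtailment_expression_rule
)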
def write_pred_kaggle_file(y, outfname, le):
    """Writes the predictions in Kaggle format.

    Given the predicted labels `y`, an output filename, and the label
    encoder `le` used during training, this function decodes the predictions
    back to their label names and writes them to `outfname` as a two-column
    CSV (FileIndex, Category). The label encoder is required to ensure
    consistent label names.
    """
    labels = le.inverse_transform(y)
    print(f"Writing to {outfname}")
    with open(outfname, 'w') as f:
        f.write("FileIndex,Category\n")
        for i, label in enumerate(labels):
            f.write(f"{i + 1},{label}\n")
5,332,593
def transpose(m):
    """Compute the transpose of `m`

    Args:
        m (Matrix3):

    Returns:
        Matrix3: the transpose
    """
    return Matrix3(m[0], m[3], m[6],
                   m[1], m[4], m[7],
                   m[2], m[5], m[8])
5,332,594
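The `Matrix3` type is not shown in this snippet. A quick check using a minimal stand-in (a flat row-major 9-tuple, which is an assumption about the layout implied by the indexing above):

# Minimal stand-in for the assumed Matrix3 type (not part of the original source):
class Matrix3(tuple):
    def __new__(cls, *vals):
        return super().__new__(cls, vals)

m = Matrix3(1, 2, 3,
            4, 5, 6,
            7, 8, 9)
assert transpose(m)[1] == m[3]        # element (0, 1) swaps with (1, 0)
assert transpose(transpose(m)) == m   # transposing twice is the identity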
def reverse_string(string):
    """Solution to exercise C-4.16.

    Write a short recursive Python function that takes a character string s
    and outputs its reverse. For example, the reverse of "pots&pans" would
    be "snap&stop".
    """
    if not string:
        return ""  # Guard: recursing into an empty string would raise IndexError
    n = len(string)

    def recurse(idx):
        if idx == 0:
            return string[0]  # Base case, decremented to beginning of string
        return string[idx] + recurse(idx-1)
    return recurse(n-1)
5,332,595
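Quick checks using the docstring's own example, plus the edge cases the guard covers:

# Quick checks (not part of the original source):
assert reverse_string("pots&pans") == "snap&stop"
assert reverse_string("a") == "a"
assert reverse_string("") == ""   # relies on the empty-string guard above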
def result_to_df(model, data, path: str = None,
                 prediction: str = 'prediction',
                 residual: str = 'residual') -> pd.DataFrame:
    """Create result data frame.

    Args:
        model (Union[NodeModel, StagewiseModel]): Model instance.
        data (MRData): Data object to predict on.
        path (Union[str, None], optional): Address where the result is saved,
            including the file name. If ``None``, do not save the result and
            only return the result data frame. Defaults to None.
        prediction (str, optional): Column name of the prediction.
            Defaults to 'prediction'.
        residual (str, optional): Column name of the residual.
            Defaults to 'residual'.

    Returns:
        pd.DataFrame: Result data frame.
    """
    data._sort_by_data_id()
    pred = model.predict(data)
    resi = data.obs - pred

    df = data.to_df()
    df[prediction] = pred
    df[residual] = resi

    if path is not None:
        df.to_csv(path)

    return df
5,332,596
def auto(frmt, minV=None, maxV=None):
    """
    Generate regular expressions for integer, real, date and time values.

    :param frmt: format similar to the C printf function (description below)
    :param minV: optional minimum value
    :param maxV: optional maximum value
    :return: regular expression for a given format

    Supported formats: see :py:class:`regexpgen.integer`, :py:class:`regexpgen.real`,
    :py:class:`regexpgen.date`, :py:class:`regexpgen.time`

    Additional information:
    Because a single %d occurs both in the integer format and in the date format,
    the integer interpretation is preferred. To generate a regex for a bare %d as
    a date, please use regexpgen.date

    Examples of use:

    >>> import regexpgen
    >>> regexpgen.auto("%Y-%m-%d", "2013-03-15", "2013-04-24")
    '^(2013\\-03\\-(1[5-9]|2[0-9]|3[0-1])|2013\\-03\\-(0[1-9]|1[0-9]|2[0-9]|3[0-1])|2013\\-04\\-(0[1-9]|1[0-9]|2[0-9]|30)|2013\\-04\\-(0[1-9]|1[0-9]|2[0-4]))$'

    >>> regexpgen.auto("%0d", -10, 10)
    '^(-?([0-9]|10))$'
    """
    if frmt is None or not isinstance(frmt, str):
        raise ValueError("Bad input")
    b = builder.RegexpBuilder()
    integerFormats = frmt in ["%d", "%0d"] or re.match("^%0[0-9]+d$", frmt)
    integerFormatsNotd = frmt in ["%0d"] or re.match("^%0[0-9]+d$", frmt)
    realFormats = frmt in ["%lf", "%0lf"] or re.match("^%\.[0-9]+lf$", frmt) or re.match("^%0\.[0-9]+lf$", frmt) or re.match("^%0[1-9][0-9]*\.[0-9]+lf$", frmt) or re.match("^%[1-9][0-9]*\.[0-9]+lf$", frmt)
    timeFormats = str(frmt).find("%H") >= 0 or str(frmt).find("%I") >= 0 or str(frmt).find("%M") >= 0 or str(frmt).find("%p") >= 0 or str(frmt).find("%P") >= 0 or str(frmt).find("%S") >= 0
    dateFormats = str(frmt).find("%d") >= 0 or str(frmt).find("%m") >= 0 or str(frmt).find("%Y") >= 0 or str(frmt).find("%y") >= 0

    if integerFormats and realFormats:
        raise ValueError("Bad input")
    elif integerFormatsNotd and dateFormats:
        raise ValueError("Bad input")
    elif integerFormats and timeFormats:
        raise ValueError("Bad input")
    elif realFormats and dateFormats:
        raise ValueError("Bad input")
    elif realFormats and timeFormats:
        raise ValueError("Bad input")
    elif dateFormats and timeFormats:
        raise ValueError("Bad input")
    elif integerFormats:
        return b.createIntegerRegex(frmt, minV, maxV)
    elif realFormats:
        return b.createRealRegex(frmt, minV, maxV)
    elif dateFormats:
        return b.createDateRegex(frmt, minV, maxV)
    elif timeFormats:
        return b.createTimeRegex(frmt, minV, maxV)
    else:
        raise ValueError("Bad input")
5,332,597
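The generated patterns can be used directly with Python's re module; the expected matches below follow from the doctest output shown above:

# Illustration using the doctest above (not part of the original source):
import re

pattern = auto("%0d", -10, 10)        # '^(-?([0-9]|10))$'
assert re.match(pattern, "7")
assert re.match(pattern, "-10")
assert not re.match(pattern, "11")    # outside the [-10, 10] range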
def tf_box_3d_diagonal_length(boxes_3d):
    """Returns the diagonal length of box_3d

    Args:
        boxes_3d: A tensor of shape (N x 7) of boxes in box_3d format.
    Returns:
        Diagonal of all boxes, a tensor of (N,) shape.
    """
    lengths_sqr = tf.square(boxes_3d[:, 3])
    width_sqr = tf.square(boxes_3d[:, 4])
    height_sqr = tf.square(boxes_3d[:, 5])

    lwh_sqr_sums = lengths_sqr + width_sqr + height_sqr
    diagonals = tf.sqrt(lwh_sqr_sums)

    return diagonals
5,332,598
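A numeric check using a made-up 3-4-12 box, whose diagonal is exactly 13; the box_3d layout (dimensions at positions 3, 4, 5) is taken from the indexing above:

# Numeric check with a made-up box (not part of the original source):
import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 0.0, 3.0, 4.0, 12.0, 0.0]])
diag = tf_box_3d_diagonal_length(boxes)
assert abs(float(diag[0]) - 13.0) < 1e-5   # sqrt(9 + 16 + 144) = 13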
async def employment_plot(current_city: City):
    """
    Visualize employment information for city
    - see industry breakdown and employment type

    ### Query Parameters
    - city

    ### Response
    JSON string to render with react-plotly.js
    """
    city = validate_city(current_city)
    city_data = CityData(city)

    # Industry
    industry_type = city_data.subset[city_data.industry()]
    industry_melt = pd.melt(industry_type)
    industry_melt.columns = ['industry', 'percentage']

    # Employment Type
    employment_type = city_data.subset[city_data.employment()]
    type_melt = pd.melt(employment_type)
    type_melt.columns = ['employment type', 'percentage']

    # Create subplots
    fig = make_subplots(rows=1, cols=2,
                        subplot_titles=(f'Industry in {city}', f'Employment Types in {city}'))
    fig.add_trace(go.Bar(x=industry_melt['industry'], y=industry_melt['percentage'],
                         marker=dict(color=industry_melt['percentage'], coloraxis="coloraxis")),
                  row=1, col=1)
    fig.add_trace(go.Bar(x=type_melt['employment type'], y=type_melt['percentage'],
                         marker=dict(color=type_melt['percentage'], coloraxis="coloraxis")),
                  row=1, col=2)
    fig.update_layout(coloraxis=dict(colorscale='Bluered_r'),
                      coloraxis_showscale=False,
                      showlegend=False)

    # No fig.show() here: rendering happens client-side via react-plotly.js.
    return fig.to_json()
5,332,599