Dataset columns:
    code        string, length 4 to 4.48k
    docstring   string, length 1 to 6.45k
    _id         string, length 24
def main():
    window = Test(SCREEN_WIDTH, SCREEN_HEIGHT, UserInterface, (100, 100, 100))
    window.setup()
    arcade.run()
Main method
625941bcd164cc6175782c20
@aggregate_results
def query_by_inmate_id(id_, jurisdictions=None, timeout=None):
    if jurisdictions is None:
        jurisdictions = PROVIDERS.keys()
    providers = [PROVIDERS[j] for j in jurisdictions]

    async def async_helper():
        loop = asyncio.get_event_loop()

        def generate_futures():
            for _, module in providers:
                yield loop.run_in_executor(
                    None, module.query_by_inmate_id, id_, timeout
                )

        futures = list(generate_futures())
        results = await asyncio.gather(*futures, return_exceptions=True)
        return results

    results = asyncio.run(async_helper())
    return providers, results
Query jurisdictions with an inmate ID.

:param id_: Numeric identifier of the inmate.
:type id_: int or str
:param jurisdictions: List of jurisdictions to search. If `None`, then all available jurisdictions are searched.
:type jurisdictions: None or iterable
:param timeout: Time in seconds to wait for HTTP requests to complete.
:type timeout: float
:returns: tuple `(inmates, errors)` where
    - :py:data:`inmates` -- inmates matching search parameters.
    - :py:data:`errors` -- errors encountered while searching.
625941bcb57a9660fec33753
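A usage sketch for the entry above (the PROVIDERS registry of (name, module) pairs is an assumption taken from the snippet's own loop):

    providers, results = query_by_inmate_id("A123456", jurisdictions=None, timeout=30.0)
    for (name, _module), result in zip(providers, results):
        if isinstance(result, Exception):
            print(name, "failed:", result)   # gather(return_exceptions=True) passes errors through
        else:
            print(name, "returned", len(result), "inmates")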
def run(in_args):
    parser = _define_parser()
    args = parser.parse_args(in_args)
    gen = ZGNRgen(args.n_zig, args.n_uc, args.spacing)
    lat = gen.make_lattice()
    if args.name:
        lat.name = args.name
    else:
        lat.name = "{}zgnr{}".format(args.n_zig, args.n_uc)
    if args.comment:
        lat.comment = args.comment
    return lat
Run ZGNRgen from command line arguments.
625941bc38b623060ff0acc1
def summary(self):
    dash = "-" * 75
    print(dash)
    print('{:<30s}|{:22s}|{:20s}'.format("Name", "Trainable Parameters", "Total Parameters"))
    print(dash)
    total_params = 0
    total_trainable_params = 0
    for layer in self.layers:
        param_size = 0
        trainable_param_size = 0
        for p in layer.params:
            param_size += np.prod(p.shape)
            if layer.trainable:
                trainable_param_size += np.prod(p.shape)
        print('{:<30s}|{:>22d}|{:>20d}'.format(layer.l_name, trainable_param_size, param_size))
        total_params += param_size
        total_trainable_params += trainable_param_size
    print(dash)
    print('{:<30s}|{:>22d}|{:>20d}'.format("Total", total_trainable_params, total_params))
    print(dash)
Prints a table describing the model layers and parameters used
625941bc63f4b57ef0000ff3
def reverseString(self, s):
    l, r = 0, len(s) - 1
    while l < r:
        s[l], s[r] = s[r], s[l]
        l += 1
        r -= 1
:type s: List[str]
:rtype: None

Do not return anything; modify s in-place instead.
625941bc94891a1f4081b97b
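A standalone sketch of the same two-pointer swap, runnable without the surrounding class:

    def reverse_in_place(s):
        l, r = 0, len(s) - 1
        while l < r:
            s[l], s[r] = s[r], s[l]  # swap the outermost pair, then move inwards
            l += 1
            r -= 1

    chars = list("hello")
    reverse_in_place(chars)
    assert chars == list("olleh")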
def write(self, event: Dict, param: parametrization.Parametrization,
          monitor_list: List[str] = None) -> None:
    self._log_counter += 1
    monitor_data = {}
    if monitor_list:
        mon_vals = graph_executor.eval_fun(
            [self._work.get_object(mon) for mon in monitor_list], param)
        for mon, mon_val in zip(monitor_list, mon_vals):
            monitor_data[mon.name] = mon_val
    parameter_data = {}
    parameter_list = self._work.get_objects_by_type(optplan.Parameter)
    for param_name, param_obj in parameter_list.items():
        parameter_data[param_name] = param_obj.calculate_objective_function(param)
    data = {
        "transformation": self._transform_name,
        "event": event,
        "time": str(datetime.now()),
        "parametrization": param.serialize(),
        "parameters": parameter_data,
        "monitor_data": monitor_data,
        "log_counter": self._log_counter,
    }
    self._logger.info(
        "Saving monitors for transformation %s with event info %s [%d].",
        self._transform_name, event, self._log_counter)
    file_path = os.path.join(
        self._path, os.path.join("step{}.pkl".format(self._log_counter)))
    with open(file_path, "wb") as handle:
        pickle.dump(data, handle)
Write monitor data to log file.

Args:
    event: Transformation-specific information about the event.
    param: Parametrization that has to be evaluated.
    monitor_list: List of monitor names to be evaluated.
625941bc91af0d3eaac9b8e8
def compute_all_delta_a(old_file: H5File, new_file: H5File, include_cd: bool = False) -> None:
    reorg.copy_rounds_structure_for_delta_a(old_file, new_file)
    old_rounds_root = old_file['rounds']
    new_rounds_root = new_file['rounds']
    for rnd in subgroups(old_rounds_root):
        for wav in subgroups(old_rounds_root[rnd]):
            wav_group_path = f'{rnd}/{wav}'
            old_group = old_rounds_root[wav_group_path]
            if not should_compute_delta_a(old_group):
                continue
            new_group = new_rounds_root[wav_group_path]
            compute_single_delta_a(old_group, new_group, include_cd)
    return
Computes the change in absorption for all cases where valid data exists and stores the results in a new file.

Args:
    old_file (H5File): The file in which the raw data is stored
    new_file (H5File): The file in which the computed values will be stored
    include_cd (bool): Whether to compute the change in absorption of circularly polarized light as well
625941bcadb09d7d5db6c665
def enqueue_user(user):
    if user not in user_queue:
        user_queue.append(user)
        return True
    return False
Places the user at the end of the queue if they are not already in it.
625941bc7b25080760e3932d
def info_label(*content):
    return default_label(classes(LABEL_INFO_CLASS), *content)
Returns the HTML code for a light blue info label.
625941bce8904600ed9f1dfc
def splitMerge(self):
    path_merge = self.aug_merge_path
    path_train = self.aug_train_path
    path_label = self.aug_label_path
    for i in range(self.slices):
        path = path_merge + "/" + str(i)
        train_imgs = glob.glob(path + "/*." + self.label_img_type)
        savedir = path_train + "/" + str(i)
        if not os.path.lexists(savedir):
            os.mkdir(savedir)
        savedir = path_label + "/" + str(i)
        if not os.path.lexists(savedir):
            os.mkdir(savedir)
        for imgname in train_imgs:
            midname = imgname[imgname.rindex("/") + 1:imgname.rindex("." + self.label_img_type)]
            img = cv2.imread(imgname)
            img_train = img[:, :, 2]
            img_label = img[:, :, 0]
            cv2.imwrite(path_train + "/" + str(i) + "/" + midname + "_train" + "." + self.label_img_type, img_train)
            cv2.imwrite(path_label + "/" + str(i) + "/" + midname + "_label" + "." + self.label_img_type, img_label)
split merged image apart
625941bc67a9b606de4a7d8f
def get_merge_command(self):
    raise NotImplementedError
Returns the command(s) to run for the merging.

@returns: command to run
@rtype: string[]
625941bcf548e778e58cd44f
def Tlim(self, p=None):
    return pm.units.temperature_scale(
        np.asarray(self.data['Tlim']), from_units='K')
Return the temperature limits for the data set

    Tmin, Tmax = Tlim(p=None)

Tlim accepts pressure as an argument for extensibility, but the MP1 class has homogeneous temperature limits. Returns the temperature limits in [unit_temperature].
625941bc5fcc89381b1e1590
def preprocess_input(x, data_format=None, mode='caffe'):
    if data_format is None:
        data_format = K.image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    if isinstance(x, np.ndarray):
        return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
    else:
        return _preprocess_symbolic_input(x, data_format=data_format, mode=mode)
Preprocesses a tensor or Numpy array encoding a batch of images.

# Arguments
    x: Input Numpy or symbolic tensor, 3D or 4D.
        The preprocessed data is written over the input data
        if the data types are compatible. To avoid this
        behaviour, `numpy.copy(x)` can be used.
    data_format: Data format of the image tensor/array.
    mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
          then will zero-center each color channel with
          respect to the ImageNet dataset, without scaling.
        - tf: will scale pixels between -1 and 1, sample-wise.
        - torch: will scale pixels between 0 and 1 and then
          will normalize each channel with respect to the
          ImageNet dataset.

# Returns
    Preprocessed tensor or Numpy array.

# Raises
    ValueError: In case of unknown `data_format` argument.
625941bc3617ad0b5ed67dcb
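A usage sketch against the public Keras helper this snippet mirrors (keras.applications.imagenet_utils.preprocess_input; the exact import path varies by Keras version):

    import numpy as np
    from keras.applications.imagenet_utils import preprocess_input

    batch = np.random.uniform(0, 255, (2, 224, 224, 3)).astype("float32")
    out = preprocess_input(np.copy(batch), mode="tf")  # copy to avoid in-place modification
    print(out.min(), out.max())                        # roughly within [-1, 1] for mode="tf"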
def volume_up(self):
    self._firetv.action('volume_up')
Send volume up command.
625941bcff9c53063f47c0c8
def validateArchiveList(archiveList):
    if not archiveList:
        raise InvalidConfiguration("You must specify at least one archive configuration!")
    archiveList.sort(key=lambda a: a[0])
    for i, archive in enumerate(archiveList):
        if i == len(archiveList) - 1:
            break
        nextArchive = archiveList[i + 1]
        if not archive[0] < nextArchive[0]:
            raise InvalidConfiguration("A Whisper database may not be configured having "
                                       "two archives with the same precision (archive%d: %s, archive%d: %s)" %
                                       (i, archive, i + 1, nextArchive))
        if nextArchive[0] % archive[0] != 0:
            raise InvalidConfiguration("Higher precision archives' precision "
                                       "must evenly divide all lower precision archives' precision "
                                       "(archive%d: %s, archive%d: %s)" %
                                       (i, archive[0], i + 1, nextArchive[0]))
        retention = archive[0] * archive[1]
        nextRetention = nextArchive[0] * nextArchive[1]
        if not nextRetention > retention:
            raise InvalidConfiguration("Lower precision archives must cover "
                                       "larger time intervals than higher precision archives "
                                       "(archive%d: %s seconds, archive%d: %s seconds)" %
                                       (i, retention, i + 1, nextRetention))
        archivePoints = archive[1]
        pointsPerConsolidation = nextArchive[0] // archive[0]
        if not archivePoints >= pointsPerConsolidation:
            raise InvalidConfiguration("Each archive must have at least enough points "
                                       "to consolidate to the next archive (archive%d consolidates %d of "
                                       "archive%d's points but it has only %d total points)" %
                                       (i + 1, pointsPerConsolidation, i, archivePoints))
Validates an archiveList.

An ArchiveList must:
1. Have at least one archive config. Example: (60, 86400)
2. No archive may be a duplicate of another.
3. Higher precision archives' precision must evenly divide all lower precision archives' precision.
4. Lower precision archives must cover larger time intervals than higher precision archives.
5. Each archive must have at least enough points to consolidate to the next archive.

Raises InvalidConfiguration if any rule is violated; returns quietly otherwise.
625941bc167d2b6e31218a6a
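A sketch of data that passes and fails the rules above, assuming whisper-style (seconds_per_point, points) archive tuples:

    valid = [(60, 1440), (300, 2016), (3600, 8760)]  # 1 day at 1 min, 1 week at 5 min, 1 year at 1 h
    validateArchiveList(valid)                        # returns without raising

    broken = [(60, 1440), (90, 2016)]                 # 90 is not a multiple of 60 -> violates rule 3
    # validateArchiveList(broken) raises InvalidConfiguration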
def save_json_file(self, indent=1):
    with open(self.data_file, 'w') as outfile:
        json.dump(self._words, outfile, indent=indent)
    self.data_id = getsize(self.data_file)
Save vocabulary data to the JSON file.
625941bc50812a4eaa59c1f8
def time_stats(df):
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()
    popular_month = df['month'].mode()[0]
    print('The most common month: ', popular_month)
    popular_day_of_week = df['day_of_week'].mode()[0]
    print('The most common day of week: ', popular_day_of_week)
    popular_start_hour = df['Start Time'].mode()[0]
    print('The most common start hour: ', popular_start_hour)
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-' * 40)
Displays statistics on the most frequent times of travel.
625941bc99cbb53fe6792aba
def generate_received_signal(self, transmitted_signal, sampling_feq, azimuth, elevation, use_phase=True):
    v, t_delay = self.phase_delay(azimuth, elevation)
    n_samples = transmitted_signal.size
    s = transmitted_signal.reshape((1, n_samples))
    if use_phase:
        s = s.repeat(self.num_mics, 0)
        rec_noiseless_signal = np.dot(v, s)
    else:
        rec_noiseless_signal = np.zeros((self.num_mics, n_samples), np.complex128)
        num_delays = t_delay * sampling_feq
        for h in range(self.num_mics):
            delay = int(np.round((num_delays[h])))
            if delay >= 0:
                rec_noiseless_signal[h, delay:] = s[0, :-delay]
            else:
                rec_noiseless_signal[h, :delay] = s[0, -delay:]
    path_delays = np.random.uniform(0, self.max_delay, self.n_paths)
    n_delays = (path_delays * sampling_feq).astype("int32")
    gains = np.random.rayleigh(self.max_multipath_gain, self.n_paths)
    phases = np.random.uniform(0, 2 * pi, self.n_paths)
    complex_gains = gains * np.exp(1j * phases)
    s = s.repeat(self.num_mics, 0)
    n_samples = transmitted_signal.shape[0]
    for p in range(self.n_paths):
        azp = np.random.uniform(-180, 180)
        elvp = np.random.uniform(0, 90)
        v, t_delays = self.phase_delay(azp, elvp)
        reflected_sig = np.zeros_like(s)
        reflected_sig[:, n_delays[p]:] = s[:, :(n_samples - n_delays[p])]
        reflected_sig = np.dot(v, complex_gains[p] * reflected_sig)
        rec_noiseless_signal += reflected_sig
    noise_p = np.random.normal(0, self.noise_mag, (self.num_mics, n_samples))
    rec_noiseless_signal += noise_p
    return rec_noiseless_signal
Generate received signals at all hydrophones with multipaths and noise.

:param transmitted_signal: transmitted signal pulse
:param sampling_feq: sampling frequency
:param azimuth: arrival angle of the main path in the azimuth direction, in degrees
:param elevation: arrival angle of the main path in the elevation direction, in degrees
:return: received signal vector
625941bc3eb6a72ae02ec3a8
def init(self, configurator, **kwargs):
    self._load_adapters(configurator=configurator, **kwargs)
    if not self._detect_topology():
        self.log.warning("%s domain not confirmed during init!" % self.domain_name)
    else:
        self.raiseEventNoErrors(DomainChangedEvent,
                                domain=self.domain_name,
                                data=self.internal_topo,
                                cause=DomainChangedEvent.TYPE.DOMAIN_UP)
    if self._poll:
        if self._keepalive:
            self.log.warning("Keepalive feature is disabled "
                             "when polling is enabled!")
        self.log.info("Start polling %s domain..." % self.domain_name)
        self.start_polling(self.POLL_INTERVAL)
    elif self._keepalive:
        self.log.debug("Keepalive is enabled! Start sending ping messages...")
        self.start_keepalive()
Abstract function for component initialization.

:param configurator: component configurator for configuring adapters
:type configurator: :any:`ComponentConfigurator`
:param kwargs: optional parameters
:type kwargs: dict
:return: None
625941bcf9cc0f698b1404d1
def safe_mkdir(directory):
    if not os.path.exists(directory):
        os.makedirs(directory)
Make directory if it doesn't exist, a la `mkdir -p`.
625941bc8c0ade5d55d3e893
def set_data(self, data):
    self._data = data
set_data sets the data field
625941bcad47b63b2c509e54
def headers(self):
    headers = IterDict()
    if self.is_valid():
        sorts = self.cleaned_data.get('sort', '')
    else:
        sorts = []
    params = copy.copy(self.data)
    for index, header in enumerate(self.HEADERS, 1):
        header = copy.copy(header)
        header['classes'] = []
        if header['sortable']:
            if sorts and abs(sorts[0]) == index:
                header_sorts = [sorts[0] * -1] + sorts[1:]
            else:
                header_sorts = [index] + filter(lambda x: not abs(x) == index, sorts)
            sort_param = ((self.prefix or '') + '-sort').strip('-')
            params[sort_param] = '.'.join(map(str, header_sorts))
            header['querystring'] = urllib.urlencode(params)
            params[sort_param] = str(index)
            header['singular'] = urllib.urlencode(params)
            params[sort_param] = '.'.join(map(str, header_sorts[1:]))
            header['remove'] = urllib.urlencode(params)
            try:
                header['priority'] = map(abs, sorts).index(index) + 1
                header['classes'].append('active')
                if index in sorts:
                    header['classes'].append('ascending')
                else:
                    header['classes'].append('descending')
            except ValueError:
                header['priority'] = None
        headers[header.get('name', header['column'])] = header
    return headers
Returns an object with the following template variables:

``{{ form.headers }}`` - access to the header
``{{ header.title }}`` - title declared for this header
``{{ header.sortable }}`` - boolean for whether this header is sortable
``{{ header.active }}`` - boolean for whether the queryset is currently being sorted by this header
``{{ header.classes }}`` - list of css classes for this header. (active, ascending|descending)
``{{ header.priority }}`` - numeric index for which place this header is being used for ordering.
``{{ header.querystring }}`` - querystring for use with progressive sorting (sorting by multiple fields)
``{{ header.remove }}`` - querystring which can be used to remove this header from sorting
``{{ header.singular }}`` - querystring which can be used to sort only by this header

Example::

    {% for header in form.headers %}
    {% if header.priority %}
        <th scope="col" class="active {{ form.prefix }}-{{ header.column }}">
            <div class="sortoptions {{ header.classes|join:' ' }}">
                <a class="sortremove" href="?{{ header.remove }}" title="Remove from sorting">X</a>
                <span class="sortpriority" title="Sorting priority: {{ header.priority }}">{{ header.priority }}</span>
                <a href="?{{ header.querystring }}" class="toggle" title="Toggle sorting"></a>
            </div>
    {% else %}
        <th scope="col" class="{{ form.prefix }}-{{ header.column }}">
    {% endif %}
    {% if header.sortable %}
        <div class="text"><a href="?{{ header.querystring }}">{{ header.title }}</a></div>
    {% else %}
        <div class="text">{{ header.title|safe }}</div>
    {% endif %}
    </th>
    {% endfor %}
625941bc2c8b7c6e89b35696
def get_accessible_dag_ids(self, user) -> Set[str]:
    accessible_dags = self.get_accessible_dags(
        [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ], user
    )
    return {dag.dag_id for dag in accessible_dags}
Gets the DAG IDs editable or readable by authenticated user.
625941bc377c676e9127207e
def energy(powers, default_timestep=8):
    energy = {'night_rate': 0, 'day_rate': 0, 'value': 0}
    if len(powers) == 1:
        if powers[0].night_rate == 1:
            energy["night_rate"] = (powers[0].value / 1000 * default_timestep / 3600)
        else:
            energy["day_rate"] = (powers[0].value / 1000 * default_timestep / 3600)
        energy['value'] = energy['day_rate'] + energy['night_rate']
    else:
        x = []
        day_rate = []
        night_rate = []
        for i in powers:
            x.append(i.timestamp)
            if i.night_rate == 1:
                night_rate.append(i.value)
                day_rate.append(0)
            else:
                day_rate.append(i.value)
                night_rate.append(0)
        energy["night_rate"] = numpy.trapz(night_rate, x) / 1000 / 3600
        energy["day_rate"] = numpy.trapz(day_rate, x) / 1000 / 3600
        energy['value'] = energy['day_rate'] + energy['night_rate']
    return energy
Compute the energy associated to a list of measures (in W) and associated timestamps (in s).
625941bce64d504609d74713
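A standalone sketch of the trapezoidal integration the multi-sample branch relies on (the Power records here are a hypothetical stand-in, with value in W and timestamp in s):

    import numpy
    from collections import namedtuple

    Power = namedtuple("Power", ["timestamp", "value", "night_rate"])
    powers = [Power(0, 1000, 0), Power(3600, 1000, 0)]  # 1 kW held for one hour

    x = [p.timestamp for p in powers]
    day = [p.value if p.night_rate == 0 else 0 for p in powers]
    print(numpy.trapz(day, x) / 1000 / 3600)  # W*s -> kWh, prints 1.0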
def _search_down(self, i, j, word):
    k = (i + 1) % self._num_rows
    c = 1
    while k != i and c < len(word):
        if word[c] != self._puzzle[k][j]:
            return False
        k = (k + 1) % self._num_rows
        c += 1
    return c == len(word)
Search downwards from this position. Each successful character match results in a unit increment in i.

:param i: The row index where the search started
:param j: The column index where the search started
:param word: The word being searched for
:return: True if the search is a success. Return False otherwise
625941bc4a966d76dd550ee0
def update(self):
    transaction.abort()
    builders_and_bqs = IStore(Builder).using(
        Builder, LeftJoin(BuildQueue, BuildQueue.builderID == Builder.id)
    ).find((Builder, BuildQueue))
    self.vitals_map = dict(
        (b.name, extract_vitals_from_db(b, bq))
        for b, bq in builders_and_bqs)
    transaction.abort()
    self.date_updated = datetime.datetime.utcnow()
See `BuilderFactory`.
625941bc046cf37aa974cc1e
def deque_slice(deque, m, n):
    import copy
    deque_copy = copy.deepcopy(deque)
    deque_copy.rotate(n)
    for _ in range(m + n):
        deque_copy.popleft()
    return deque_copy
Returns a copy of the deque sliced from index m up to n elements before the end (both m and n are non-negative); the original deque is left untouched.
625941bc91af0d3eaac9b8e9
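A quick check of the rotate-and-pop trick; note that in practice n counts from the right-hand end of the deque:

    from collections import deque

    d = deque([0, 1, 2, 3, 4])
    print(list(deque_slice(d, 1, 2)))  # [1, 2]: drops 1 element on the left, 2 on the right
    print(list(d))                     # [0, 1, 2, 3, 4]: the deepcopy leaves the original intact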
def pcns_enabled(request):
    global _pcns_enabled
    if _pcns_enabled is None:
        _pcns_enabled = PCN.objects.active().exists()
    return {"pcns_enabled": _pcns_enabled}
Add a flag indicating whether or not we should show PCN-related features in the UI based on whether we've imported any PCNs and associated them with practices.

We cache this in a global variable to avoid repeatedly doing this query on every page load. After importing PCN data we will need to restart the application anyway for the MatrixStore, so there's no additional invalidation to be done.

Once we've imported PCN data for the first time this context processor and all references to the `pcns_enabled` flag can be removed.
625941bcc432627299f04b18
def test_custom_action_response_descriptor_octopus_server_web_api_actions_authentication_login_initiated_responder(self):
    pass
Test case for custom_action_response_descriptor_octopus_server_web_api_actions_authentication_login_initiated_responder
625941bc32920d7e50b280a0
def _getAnnotation(self, user, id, params):
    setResponseTimeLimit(86400)
    annotation = Annotation().load(
        id, region=params, user=user, level=AccessType.READ, getElements=False)
    if annotation is None:
        raise RestException('Annotation not found', 404)
    annotation = Annotation().filter(annotation, self.getCurrentUser())
    annotation['annotation']['elements'] = []
    breakStr = b'"elements": ['
    base = json.dumps(annotation, sort_keys=True, allow_nan=False,
                      cls=JsonEncoder).encode('utf8').split(breakStr)
    centroids = str(params.get('centroids')).lower() == 'true'

    def generateResult():
        info = {}
        idx = 0
        yield base[0]
        yield breakStr
        collect = []
        if centroids:
            yield b'\x00'
        for element in Annotationelement().yieldElements(annotation, params, info):
            if isinstance(element, dict):
                element['id'] = str(element['id'])
            else:
                element = struct.pack(
                    '>QL', int(element[0][:16], 16), int(element[0][16:24], 16)
                ) + struct.pack('<fffl', *element[1:])
            collect.append(element)
            if len(collect) >= 100:
                if isinstance(collect[0], dict):
                    yield (b',' if idx else b'') + orjson.dumps(collect)[1:-1]
                else:
                    yield b''.join(collect)
                idx += 1
                collect = []
        if len(collect):
            if isinstance(collect[0], dict):
                yield (b',' if idx else b'') + orjson.dumps(collect)[1:-1]
            else:
                yield b''.join(collect)
        if centroids:
            yield b'\x00'
        yield base[1].rstrip().rstrip(b'}')
        yield b', "_elementQuery": '
        yield json.dumps(info, sort_keys=True, allow_nan=False,
                         cls=JsonEncoder).encode('utf8')
        yield b'}'

    if centroids:
        setResponseHeader('Content-Type', 'application/octet-stream')
    else:
        setResponseHeader('Content-Type', 'application/json')
    return generateResult
Get a generator function that will yield the json of an annotation.

:param user: the user that needs read access on the annotation and its parent item.
:param id: the annotation id.
:param params: paging and region parameters for the annotation.
:returns: a function that will return a generator.
625941bc0383005118ecf4b8
def execScript(self, script):
    raise NotImplementedError()
Execute script code in the page.

@type script: str
@param script: the script code to execute
@rtype: bool
@return: True = success; False = failure
625941bc97e22403b379ce6c
@print_func
def generate_keys():
    key = rsa.generate_private_key(
        backend=crypto_default_backend(),
        public_exponent=65537,
        key_size=4096
    )
    private_key = key.private_bytes(
        crypto_serialization.Encoding.PEM,
        crypto_serialization.PrivateFormat.PKCS8,
        crypto_serialization.NoEncryption())
    public_key = key.public_key().public_bytes(
        crypto_serialization.Encoding.OpenSSH,
        crypto_serialization.PublicFormat.OpenSSH
    )
    return private_key, public_key
Generates public/private RSA keys.
625941bc30bbd722463cbc96
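The same calls spelled out against the cryptography package (a sketch without the snippet's print_func decorator; a smaller key size keeps it fast):

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    private_key = key.private_bytes(serialization.Encoding.PEM,
                                    serialization.PrivateFormat.PKCS8,
                                    serialization.NoEncryption())
    public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH,
                                               serialization.PublicFormat.OpenSSH)
    print(public_key[:20])  # b'ssh-rsa AAAA...'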
@verbose
def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5,
                      tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None,
                      baseline=None, verbose=None):
    events = find_eog_events(raw, ch_name=ch_name, event_id=event_id,
                             l_freq=l_freq, h_freq=h_freq)
    eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin,
                        tmax=tmax, proj=False, reject=reject, flat=flat,
                        picks=picks, baseline=baseline, preload=True)
    return eog_epochs
Conveniently generate epochs around EOG artifact events

Parameters
----------
raw : instance of Raw
    The raw data
ch_name : str
    The name of the channel to use for EOG peak detection.
    The argument is mandatory if the dataset contains no EOG channels.
event_id : int
    The index to assign to found events
picks : array-like of int | None (default)
    Indices of channels to include (if None, all channels are used).
tmin : float
    Start time before event.
tmax : float
    End time after event.
l_freq : float
    Low pass frequency.
h_freq : float
    High pass frequency.
reject : dict | None
    Rejection parameters based on peak to peak amplitude.
    Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
    If reject is None then no rejection is done. You should use such
    parameters to reject big measurement artifacts and not EOG for example
flat : dict | None
    Rejection parameters based on flatness of signal
    Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'
    If flat is None then no rejection is done.
verbose : bool, str, int, or None
    If not None, override default verbose level (see mne.verbose).
baseline : tuple or list of length 2, or None
    The time interval to apply rescaling / baseline correction.
    If None do not apply it. If baseline is (a, b) the interval is
    between "a (s)" and "b (s)". If a is None the beginning of the data
    is used and if b is None then b is set to the end of the interval.
    If baseline is equal to (None, None) all the time interval is used.
    If None, no correction is applied.

Returns
-------
eog_epochs : instance of Epochs
    Data epoched around EOG artifact events.
625941bc5fdd1c0f98dc0105
def format_output(self, rendered_widgets):
    return super(DateWidget, self).format_output(rendered_widgets).replace(
        'glyphicon glyphicon-th', 'glyphicon glyphicon-calendar')
Given a list of rendered widgets (as strings), it inserts an HTML linebreak between them. Returns a Unicode string representing the HTML for the whole lot.
625941bc92d797404e30405d
def setvalues(self, values):
    self.set('topsconfig', 'model', values['model'])
    self.set('topsconfig', 'port', values['port'])
    self.write()
Set specific config file values.
625941bc07d97122c4178759
def test_readParams_noFile(self):
    myEnv = paramReader('fileThatDoesNotExist.xml')
    self.assertEqual(len(myEnv), 0)
Test the paramReader with a file that does not exist.
625941bcf8510a7c17cf95ce
def render_json_object_response(self, objects, **kwargs):
    json_data = serializers.serialize("json", objects, **kwargs)
    return HttpResponse(json_data, content_type="application/json")
Serializes objects using Django's builtin JSON serializer. Additional kwargs can be used the same way as for django.core.serializers.serialize.
625941bc30bbd722463cbc97
def select_package(self, package: CardSetPackage):
    if package is None:
        self.canceled.emit()
        self.close()
        return
    self.__package = package
    path_buttons = []
    parent = package
    while parent:
        button = self.__create_path_button(parent)
        path_buttons.insert(0, button)
        parent = parent.get_parent()
    self.__path_bar_layout.clear()
    for button in path_buttons:
        self.__path_bar_layout.add(button)
    self.__list_layout.clear()
    back_button = None
    if self.__close_on_select or package.get_parent():
        back_button = widgets.Button("[Back]")
        back_button.clicked.connect(lambda: self.select_package(package.get_parent()))
        self.__list_layout.add(back_button)
    for sub_package in package.get_packages():
        button = self.__create_package_button(sub_package)
        self.__list_layout.add(button)
    if not self.__card_set_mode:
        select_button = widgets.Button("[Select]")
        select_button.clicked.connect(lambda: self.__on_select_package(package))
        self.__list_layout.add(select_button)
    if self.__card_set_mode:
        for card_set in package.get_card_sets():
            button = self.__create_card_set_button(card_set)
            self.__list_layout.add(button)
    if back_button:
        back_button.focus()
    self.package_changed.emit(package)
Start browsing a new card set package.
625941bc4d74a7450ccd4096
def _make_submission_file_text(self, command: str, uid: str) -> str:
    return command + "2"
Creates the text of a file which will be created and run for the submission (for Slurm, this is the sbatch file).
625941bc60cbc95b062c641d
def print_fixtures(self):
    for home in self._teams:
        for away in self._teams:
            if home == away or ((home, away) not in self._fixtures):
                print("{:<7} ".format(" "))
                continue
            print("{:<7} ".format(self._fixtures[(home, away)].score_str()))
        print()
Print the fixture table to the console.
625941bca8370b7717052774
def add_level(self, num_topics=None, topic_names=None, parent_level_weight=1):
    if len(self._levels) and num_topics <= self._levels[-1].num_topics:
        warnings.warn("Adding level with num_topics = %s less or equal than parent level's num_topics = %s" %
                      (num_topics, self._levels[-1].num_topics))
    level_idx = len(self._levels)
    if not len(self._levels):
        self._levels.append(artm.ARTM(num_topics=num_topics,
                                      topic_names=topic_names,
                                      seed=self._get_seed(level_idx),
                                      **self._common_models_args))
    else:
        self._levels.append(ARTM_Level(parent_model=self._levels[-1],
                                       phi_batch_weight=parent_level_weight,
                                       phi_batch_path=self._tmp_files_path,
                                       model_name=self._model_name,
                                       num_topics=num_topics,
                                       topic_names=topic_names,
                                       seed=self._get_seed(level_idx),
                                       **self._common_models_args))
    level = self._levels[-1]
    config = level.master._config
    config.opt_for_avx = False
    level.master._lib.ArtmReconfigureMasterModel(level.master.master_id, config)
    return level
:Description: adds new level to the hierarchy

:param int num_topics: the number of topics in level model, will be overwritten if parameter topic_names is set
:param topic_names: names of topics in model
:type topic_names: list of str
:param float parent_level_weight: the coefficient of smoothing n_wt by n_wa, a enumerates parent topics
:return: ARTM or derived ARTM_Level instance

:Notes:
  * hierarchy structure assumes the number of topics on each following level is greater than on the previous one
  * work with returned value as with usual ARTM model
  * to access any level, use [] or get_level method
  * Important! You cannot add the next level before the previous one is initialized and fit.
625941bc4527f215b584c32e
@contextmanager
def exception_is_errorcode(api, exception, error_code, message=None):
    try:
        yield
    except exception as e:
        message = str(e) if message is None else message
        api.abort(error_code, message)
Context manager to make sure all exceptions of a given type call api.abort with the given error code and a corresponding message. If no message is passed, str() of the exception is used.
625941bc85dfad0860c3ad2d
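A self-contained usage sketch with a stub api object (real code would pass a flask-restx style Api or Namespace, whose abort raises an HTTP error):

    class StubApi:                        # minimal stand-in for a flask-restx Namespace
        def abort(self, code, message):
            raise RuntimeError("HTTP %s: %s" % (code, message))

    api = StubApi()
    with exception_is_errorcode(api, KeyError, 404, message="User not found"):
        {}["missing-id"]                  # the KeyError becomes api.abort(404, "User not found")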
def load_module(filename):
    basename = os.path.basename(filename)
    path = os.path.dirname(filename)
    sys.path.append(path)
    return __import__(os.path.splitext(basename)[0])
Loads a module by filename
625941bc442bda511e8be2f1
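A sketch of what the sys.path trick amounts to; this writes a throwaway plugin file first so the call actually has something to import:

    import os
    import tempfile

    plugin_dir = tempfile.mkdtemp()
    with open(os.path.join(plugin_dir, "hello.py"), "w") as f:
        f.write("def greet():\n    return 'hi'\n")

    mod = load_module(os.path.join(plugin_dir, "hello.py"))
    print(mod.greet())  # 'hi'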
def generateFailoverFile(self):
    reportRequest = None
    result = self.jobReport.generateForwardDISET()
    if not result['OK']:
        self.log.warn("Could not generate Operation for job report with result:\n%s" % (result))
    else:
        reportRequest = result['Value']
    if reportRequest:
        self.log.info("Populating request with job report information")
        self.request.addOperation(reportRequest)
    accountingReport = None
    if self.workflow_commons.has_key('AccountingReport'):
        accountingReport = self.workflow_commons['AccountingReport']
    if accountingReport:
        result = accountingReport.commit()
        if not result['OK']:
            self.log.error("!!! Both accounting and RequestDB are down? !!!")
            return result
    if len(self.request):
        isValid = RequestValidator().validate(self.request)
        if not isValid['OK']:
            raise RuntimeError("Failover request is not valid: %s" % isValid['Message'])
        else:
            requestJSON = self.request.toJSON()
            if requestJSON['OK']:
                self.log.info("Creating failover request for deferred operations for job %d" % self.jobID)
                request_string = str(requestJSON['Value'])
                self.log.debug(request_string)
                fname = '%d_%d_request.json' % (self.production_id, self.prod_job_id)
                jsonFile = open(fname, 'w')
                jsonFile.write(request_string)
                jsonFile.close()
                self.log.info("Created file containing failover request %s" % fname)
                result = self.request.getDigest()
                if result['OK']:
                    self.log.info("Digest of the request: %s" % result['Value'])
                else:
                    self.log.error("No digest? That's not sooo important, anyway:", result['Message'])
            else:
                raise RuntimeError(requestJSON['Message'])
Retrieve the accumulated reporting request, and produce a JSON file that is consumed by the JobWrapper
625941bc5166f23b2e1a502d
def openBrowser(self, path):
    path = os.path.dirname(path[0])
    if sys.platform.startswith('linux'):
        os.system("xdg-open " + '"' + path + '"')
    elif sys.platform.startswith('darwin'):
        os.system("open " + '"' + path + '"')
    elif sys.platform.startswith('win'):
        os.startfile(path)
Opens a directory using the file browser. When a file is given, the containing directory is used.

Parameters
----------
path : list of str
    The first string from the list is used as the path to open in a file browser.
625941bca17c0f6771cbdf27
def book(self, start, stop):
    for x in self.intervals:
        if start < x[1] and x[0] < stop:
            return False
    self.intervals.append([start, stop])
    self.intervals = sorted(self.intervals, key=lambda x: x[0])
    return True
:type start: int
:type stop: int
:rtype: bool
625941bc94891a1f4081b97c
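A quick check of the half-open overlap test, attaching the method to a minimal calendar class (an assumption; the snippet's own class is not shown):

    class MyCalendar:
        def __init__(self):
            self.intervals = []

    MyCalendar.book = book  # reuse the method above

    cal = MyCalendar()
    print(cal.book(10, 20))  # True
    print(cal.book(15, 25))  # False: overlaps [10, 20)
    print(cal.book(20, 30))  # True: half-open intervals may share an endpoint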
def get_summary_writer(logdir):
    return summary_io.SummaryWriterCache.get(logdir)
Returns single SummaryWriter per logdir in current run.

Args:
    logdir: str, folder to write summaries.

Returns:
    Existing `SummaryWriter` object or new one if never wrote to given directory.
625941bc627d3e7fe0d68d22
def __init__(self, data):
    self.next = None
    self.data = data
Setup the node.
625941bc23849d37ff7b2f65
def check_instances(self):
    Printer.print_c("Check Consistency and Completeness of input data")
    n_instances = len(self.instances)
    n_no_feats = 0
    n_unsolvable = 0
    n_unsolvable2 = 0
    n_valid = 0
    n_presolved = 0
    feature_costs = 0
    for inst_ in self.instances.values():
        valid = True
        unsolvable = "ok" not in list(inst_._status.values())
        if unsolvable:
            n_unsolvable += 1
            valid = False
        if not inst_._cost:
            Printer.print_e("Missing algorithm cost for instance \"%s\"" % (inst_._name))
            valid = False
        inst_.finished_input(self.metainfo.algorithms)
        if not inst_._features:
            Printer.print_verbose("Missing features values for instance \"%s\"" % (inst_._name))
            n_no_feats += 1
            valid = False
        if inst_._pre_solved:
            n_presolved += 1
        if valid:
            n_valid += 1
        times = filter(lambda x: x < self.metainfo.algorithm_cutoff_time, inst_._cost_vec)
        feature_costs += inst_._feature_cost_total
    Printer.print_c("Instances: \t\t %d" % (n_instances))
    Printer.print_c("Incomplete Feature Vector: \t %d" % (n_no_feats))
    Printer.print_c("Unsolvable Instances (status): \t %d" % (n_unsolvable))
    Printer.print_c("Valid Instances: \t %d" % (n_valid))
    Printer.print_c("Presolved: \t\t %d" % (n_presolved))
    Printer.print_c("Average Feature Costs on all features: \t %.4f" % (feature_costs / n_instances))
    if not n_valid:
        Printer.print_e("Have not found valid instances", -10)
Check each instance for completeness and soundness.
625941bc4527f215b584c32f
def p_statement_err(p):
    print("Statement error - {}".format(p.lineno(0)))
statement : error
625941bc7cff6e4e8111785a
def get_descriptor_metadata_file(self):
    path = os.path.join(self._path.current_os, "cache", "descriptor_info.yml")
    filesystem.ensure_folder_exists(os.path.dirname(path))
    return path
Returns the path to the metadata file holding descriptor information.

:return: path
625941bcbe7bc26dc91cd4d9
def evaluate_on(self, time_points, initial_state_map=None):
    time_points = np.array(time_points)
    zeroed_times = False
    if time_points[0] != 0:
        time_points = np.insert(time_points, 0, 0)
        zeroed_times = True
    initial_state = self.initial_state.copy()
    if initial_state_map is not None:
        for var in initial_state_map:
            idx = self.index_map[var]
            initial_state[idx] = initial_state_map[var]
    sys_function = self.__get_system_function()
    y = self.__integrate_with_odeint(sys_function, initial_state, time_points)
    values_map = {}
    for var in self.index_map:
        idx = self.index_map[var]
        if zeroed_times:
            values_map[var] = list(y[1:, idx])
        else:
            values_map[var] = list(y[:, idx])
    return values_map
Returns the state of the system's variables at the specified time points.

Parameters
    time_points: the list of time points for which the system should be evaluated.
    initial_state_map: a dictionary that contains variables as keys and initial values as values.

Returns
    values_map, a dictionary with variables as keys and a list as value. The list contains the values of a variable over the determined time points.
625941bca4f1c619b28aff14
def tryToCompleteIteration(self, state_change, **kw):
    portal = self
    story = state_change.object
    iteration = story.aq_parent
    wf_tool = getToolByName(portal, 'portal_workflow')
    if wf_tool.getInfoFor(iteration, 'review_state') == 'in-progress' and iteration.completable():
        wf_tool.doActionFor(iteration, 'complete')
If all Stories in an Iteration have been set to complete, then the Iteration itself can be set to complete. Try that.
625941bce1aae11d1e749b89
def merge_module_rows(self, row_module, row_risk):
    for idx in range(1):
        first_cell = row_module.cells[idx]
        last_cell = row_risk.cells[idx]
        self.set_cell_border(last_cell)
        first_cell.merge(last_cell)
    for idx, cell in enumerate(row_risk.cells[1:]):
        self.set_cell_border(cell, settings=LEFT_RIGHT_BORDERS)
This merges the first cell of the given rows, the one containing the module title. Also removes the horizontal borders between the cells that are not merged.
625941bcaad79263cf390911
def setGaussLocalizationOnCells(self, *args):
    return _MEDCouplingCorba.MEDCouplingFieldDiscretizationGauss_setGaussLocalizationOnCells(self, *args)
setGaussLocalizationOnCells(self, MEDCouplingMesh m, int begin, int end, dvec refCoo, dvec gsCoo, dvec wg)
625941bc3317a56b86939b3d
def process(self, dpbuffer):
    data = np.array(dpbuffer.get_data(), copy=False)
    weights = np.array(dpbuffer.get_weights(), copy=False)
    data *= self.datafactor
    weights *= self.weightsfactor
    self.process_next_step(dpbuffer)
Process one time slot of data. This function MUST call process_next_step.

Args:
    dpbuffer: DPBuffer object which can contain data, flags and weights for one time slot.
625941bc85dfad0860c3ad2e
def _init_dump(self, **kwargs):
    components = kwargs.get('components', self.components)
    components = np.asarray(components).reshape(-1)
    _empty = [component for component in components if not self._if_component_filled(component)]
    if len(_empty) > 0:
        logger.warning('Components %r are empty. Nothing is dumped!', _empty)
        return []
    else:
        return self.indices
Init function for dump. Checks that all components that should be dumped are non-None. If some are None, prints a warning and makes sure that nothing is dumped.

Parameters
----------
**kwargs:
    components : tuple, list, ndarray of strings or str
        components that we need to dump
625941bcfbf16365ca6f6092
def _profpay_from_json_summ(self, prof, dest_prof, dest_pays):
    for symgrp in prof["symmetry_groups"]:
        _, role, strat, count, pay = _unpack_symgrp(**symgrp)
        index = self.role_strat_index(role, strat)
        dest_prof[index] = count
        dest_pays[index] = pay
Get profile and payoff from summary format
625941bc379a373c97cfaa1f
def _remove_duplicates(input_string):
    if len(input_string) == 0 or len(input_string) == 1:
        return input_string
    else:
        NO_CHARS = 256
        input_list = list(input_string)
        hash_table = [0] * NO_CHARS
        start = 0
        for i in range(len(input_list)):
            if hash_table[ord(input_list[i])] == 0:
                hash_table[ord(input_list[i])] = 1
                input_list[start] = input_list[i]
                start += 1
        return input_list[:start]
O(n) time algorithm.

:param input_string:
:return:
625941bc24f1403a92600a3e
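A quick check of the 256-slot lookup table; note the function returns a list of characters once the input is longer than one character:

    print(_remove_duplicates("geeksforgeeks"))  # ['g', 'e', 'k', 's', 'f', 'o', 'r']
    print(_remove_duplicates("a"))              # 'a' (short inputs come back unchanged)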
def calc_filter_image_sets(self):
    sets = pyst.dsp.calc_num_subframes(self.training_segment_ms,
                                       self.window_size,
                                       self.window_shift)
    return sets
Calculates how many feature sets create a full image, given window size, window shift, and desired image length in milliseconds.
625941bc5e10d32532c5edfc
def add_issue(repo, issue, list):
    hash = '[%s#%s]' % (repo.name, issue.number)
    title = '%s %s /GitHub' % (issue.title, hash)
    for task in list.tasks():
        if hash in task.title:
            print('found', title)
            return
    print('adding', title)
    newtask = TheHitList.Task()
    newtask.title = title.encode('utf8')
    list.add_task(newtask)
Add an issue from GitHub into THL

@param repo: GitHub repo
@param issue: GitHub issue
@param list: THL List
625941bc1f037a2d8b9460d4
def _do_smart_punctuation(self, text):
    if "'" in text:
        text = self._do_smart_contractions(text)
        text = self._opening_single_quote_re.sub("&#8216;", text)
        text = self._closing_single_quote_re.sub("&#8217;", text)
    if '"' in text:
        text = self._opening_double_quote_re.sub("&#8220;", text)
        text = self._closing_double_quote_re.sub("&#8221;", text)
    text = text.replace("---", "&#8212;")
    text = text.replace("--", "&#8211;")
    text = text.replace("...", "&#8230;")
    text = text.replace(" . . . ", "&#8230;")
    text = text.replace(". . .", "&#8230;")
    return text
Fancifies 'single quotes', "double quotes", and apostrophes. Converts --, ---, and ... into en dashes, em dashes, and ellipses.

Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the support here and <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a discussion of some diversion from the original SmartyPants.
625941bcd164cc6175782c22
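This is the smarty-pants extra from python-markdown2; a sketch through the library's public API:

    import markdown2

    html = markdown2.markdown('She said -- "wait..."', extras=["smarty-pants"])
    print(html)  # -- becomes &#8211;, the quotes &#8220;/&#8221;, and ... becomes &#8230;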
def test_search_val_in_tree_retruns_node_with_data(bst_is_full):
    bst_is_full.insert(5)
    assert bst_is_full.search(5).data == 5
Test search for val in tree returns node with data of val.
625941bc1f5feb6acb0c4a29
def fit(self, predictors, targets, initialize=True, **kwargs):
    if initialize:
        self.init_fit(predictors, targets)
    if self.impute:
        predictors, targets = self.imputer_transform(predictors, y=targets)
    predictors_scaled, targets_scaled = self.scaler_transform(predictors, targets)
    if 'validation_data' in kwargs:
        if self.impute:
            predictors_test_scaled, targets_test_scaled = self.imputer_transform(*kwargs['validation_data'])
        else:
            predictors_test_scaled, targets_test_scaled = kwargs['validation_data']
        predictors_test_scaled, targets_test_scaled = self.scaler_transform(predictors_test_scaled,
                                                                            targets_test_scaled)
        kwargs['validation_data'] = (predictors_test_scaled, targets_test_scaled)
    self.model.fit(predictors_scaled, targets_scaled, **kwargs)
Fit the EnsembleSelector model. Also performs input feature scaling.

:param predictors: ndarray: predictor data
:param targets: ndarray: corresponding truth data
:param initialize: bool: if True, initializes the Imputer and Scaler to the given predictors. 'fit' must be called with initialize=True the first time, or the Imputer and Scaler must be fit with 'init_fit'.
:param kwargs: passed to the Keras 'fit' method
:return:
625941bc94891a1f4081b97d
def action_servicemc_ready(self, cr, uid, ids, context=None):
    for servicemc in self.browse(cr, uid, ids, context=context):
        self.pool.get('mrp.servicemc.line').write(
            cr, uid, [l.id for l in servicemc.operations],
            {'state': 'confirmed'}, context=context)
        self.write(cr, uid, [servicemc.id], {'state': 'ready'})
    return True
Writes servicemc order state to 'Ready'.

@return: True
625941bc23849d37ff7b2f66
def idxstats(step, inBam, outStats):
    cmd = 'samtools idxstats {inBam} > {outStats}'.format(inBam=inBam, outStats=outStats)
    toolName = __name__.split('.')[-1] + ' idxstats'
    step.toolBegins(toolName)
    step.getToolVersion('samtools', logOut=True)
    step.err = step.ana.runCmd(cmd, logOut=False, log=step.log)
    step.toolEnds(toolName, step.err)
Calculates stats for each chromosome in a sorted and indexed bam
625941bc3617ad0b5ed67dcd
def main(config, model, stid, forecast_date):
    mean_forecast, dailys = get_gefs_mos_forecast(stid, forecast_date)
    if stid.upper() == config['current_stid'].upper():
        if config['debug'] > 50:
            print('gefs_mos: writing ensemble file for the current station, %s' % stid)
        try:
            ensemble_file = config['Models'][model]['ensemble_file']
        except KeyError:
            if config['debug'] > 9:
                print("gefs_mos warning: 'ensemble_file' not found in config; not writing ensemble values")
            ensemble_file = None
        try:
            write_ensemble_daily(config, dailys, ensemble_file)
        except BaseException as e:
            if config['debug'] > 0:
                print("gefs_mos warning: unable to write ensemble file ('%s')" % e)
    return mean_forecast
Produce a Forecast object from GEFS MOS data.
625941bc2eb69b55b151c781
def get(self, request, *args, **kwargs):
    start = time.time()
    search_term = request.GET.get('q', '')
    context = self.banned_search(search_term)
    if context:
        return self.render_to_response(context)
    kwargs['q'] = search_term
    kwargs['page'] = request.GET.get('page', 0)
    self.log_stats(**kwargs)
    self.get_queryset(**kwargs)
    if 'gp' in request.GET or 'lp' in request.GET:
        local_pp_scores = local_page_pop(self.object_list.hits)
        self.sort_hits(local_pp_scores, request.GET)
    self.filter_hits()
    kwargs['time'] = round(time.time() - start, 2)
    context = self.get_context_data(**kwargs)
    return self.render_to_response(context)
This method is overridden to add parameters to the get_context_data call.
625941bc30c21e258bdfa370
def element_should_not_have_attribute(self, source, name, xpath='.', message=None):
    attr = self.get_element_attribute(source, name, xpath)
    if attr is not None:
        raise AssertionError(message or "Attribute '%s' exists and "
                                        "has value '%s'." % (name, attr))
Verifies that the specified element does not have attribute ``name``.

The element whose attribute is verified is specified using ``source`` and ``xpath``. They have exactly the same semantics as with `Get Element` keyword.

The keyword fails if the specified element has attribute ``name``. The default error message can be overridden with the ``message`` argument.

Examples using ``${XML}`` structure from `Example`:

| Element Should Not Have Attribute | ${XML} | id |
| Element Should Not Have Attribute | ${XML} | xxx | xpath=first |

See also `Get Element Attribute`, `Get Element Attributes`, `Element Text Should Be` and `Element Text Should Match`.
625941bcbe8e80087fb20b1d
def connect(location):
    proxy = Pyro4.core.Proxy("PYRO:%s@%s" % (Pyro4.constants.FLAME_NAME, location))
    proxy._pyroBind()
    return proxy
Connect to a Flame server on the given location, for instance localhost:9999 or ./u:unixsock

This is just a convenience function that creates an appropriate Pyro proxy.
625941bc8e05c05ec3eea247
def sample_ingredient(user, name="Tomato"):
    return Ingredient.objects.create(user=user, name=name)
Create and return sample ingredient
625941bc30dc7b766590183f
def path():
    _path = salt.utils.stringutils.to_unicode(os.environ.get("PATH", "").strip())
    return {
        "path": _path,
        "systempath": _path.split(os.path.pathsep),
    }
Return the path
625941bc8e7ae83300e4aea1
def _create_action_item(self, package, extra_data):
    action_item = package.get_action_item_for_type(self.ACTION_ITEM_TYPE_NAME)
    if action_item is None:
        action_item = ActionItem(
            package=package, item_type=self.action_item_type)
    action_item.short_description = self.ITEM_DESCRIPTION
    if package.main_entry:
        section = package.main_entry.section
        if section not in ('contrib', 'non-free'):
            query_string = urlencode({'package': package.name})
            extra_data['check_why_url'] = (
                'http://release.debian.org/migration/testing.pl'
                '?{query_string}'.format(query_string=query_string))
    action_item.extra_data = extra_data
    action_item.save()
Creates a :class:`pts.core.models.ActionItem` for the given package including the given extra data. The item indicates that there is a problem with the package migrating to testing.
625941bc046cf37aa974cc1f
def get_model(self): <NEW_LINE> <INDENT> return self.switch_model
Returns the currently used model.
625941bcfb3f5b602dac3565
def __init__(self, rank_matrix, year_list): <NEW_LINE> <INDENT> self.names = range(1, len(rank_matrix)+1) <NEW_LINE> self.year_list = year_list <NEW_LINE> self.rank_matrix = [] <NEW_LINE> self.num_students = len(rank_matrix) <NEW_LINE> self.num_seminars = len(year_list) <NEW_LINE> self.popular_seminars = [] <NEW_LINE> seminar_ctr = Counter() <NEW_LINE> for i in range(self.num_students): <NEW_LINE> <INDENT> for j in range(self.num_seminars): <NEW_LINE> <INDENT> if (rank_matrix[i][j] < 100): <NEW_LINE> <INDENT> seminar_ctr[j] += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> self.popular_seminars = seminar_ctr.most_common() <NEW_LINE> logging.debug("Popular seminars: " + str(self.popular_seminars)) <NEW_LINE> num_extra = self.num_students % self.num_seminars <NEW_LINE> seminar_size = self.num_students // self.num_seminars <NEW_LINE> for row in rank_matrix: <NEW_LINE> <INDENT> row *= seminar_size <NEW_LINE> for popular_seminar in self.popular_seminars[0:num_extra]: <NEW_LINE> <INDENT> row.append(row[popular_seminar[0]]) <NEW_LINE> <DEDENT> self.rank_matrix.append(row)
Create a new instance
625941bc498bea3a759b9985
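The row expansion above is a standard trick for turning a capacity-constrained assignment into a square assignment problem: each rank row is replicated once per seat via list repetition, and the leftover seats go to the most popular seminars. A standalone sketch of that expansion, with invented numbers:

num_students, num_seminars = 5, 2
seminar_size = num_students // num_seminars   # 2 base seats per seminar
num_extra = num_students % num_seminars       # 1 leftover seat
row = [1, 2]                                  # one student's seminar ranks
row = row * seminar_size                      # [1, 2, 1, 2] -- seat j maps to seminar j % 2
row.append(row[0])                            # leftover seat for the most popular seminar (index 0 here)
print(row)                                    # [1, 2, 1, 2, 1]: 5 columns for 5 students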
def page_not_found(request, exception, template_name='error/404.html'): <NEW_LINE> <INDENT> return render(request, template_name, status=404)
Override of the default 404 page
625941bcfbf16365ca6f6093
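To make Django route 404s through this view, the usual wiring is a handler404 assignment in the root URLconf; the module path below is a placeholder, not taken from the source:

# urls.py (root URLconf); 'myproject.views' is an assumed path
handler404 = 'myproject.views.page_not_found'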
def __init__(self, request_id=None, took=0.0, data=None): <NEW_LINE> <INDENT> self._request_id = None <NEW_LINE> self._took = None <NEW_LINE> self._data = None <NEW_LINE> self.discriminator = None <NEW_LINE> self.request_id = request_id <NEW_LINE> self.took = took <NEW_LINE> if data is not None: <NEW_LINE> <INDENT> self.data = data
ListIntegrationActionsResponse - a model defined in Swagger
625941bc91af0d3eaac9b8eb
def file_tracer(self, filename): <NEW_LINE> <INDENT> if "xyz.py" in filename: <NEW_LINE> <INDENT> return FileTracer(filename)
Trace only files named xyz.py
625941bc30bbd722463cbc98
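For context, coverage.py discovers a file_tracer hook like the one above on a CoveragePlugin subclass registered via coverage_init. A minimal sketch, where XyzFileTracer is a hypothetical stand-in for the FileTracer class the snippet references:

from coverage.plugin import CoveragePlugin, FileTracer

class XyzFileTracer(FileTracer):          # stand-in for the FileTracer above
    def __init__(self, filename):
        self._filename = filename

    def source_filename(self):            # required by the plugin protocol
        return self._filename

class XyzPlugin(CoveragePlugin):
    def file_tracer(self, filename):      # same hook as the snippet above
        if "xyz.py" in filename:
            return XyzFileTracer(filename)

def coverage_init(reg, options):          # entry point named in the coverage config's plugins list
    reg.add_file_tracer(XyzPlugin())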
def plot_embedding(self): <NEW_LINE> <INDENT> if len(self.unique_taxa_list) > len(COLOR_MAP) or self.unique_y.shape[0] > len(MARKER_MAP): <NEW_LINE> <INDENT> logging.info('Visualization Failed: too many taxa or phenotype to be plotted, please customize your plot.') <NEW_LINE> return <NEW_LINE> <DEDENT> if self.emb_2d is None: <NEW_LINE> <INDENT> pca_model = PCA(n_components=2) <NEW_LINE> pca_model = pca_model.fit(self.emb) <NEW_LINE> self.emb_2d = pca_model.transform(self.emb) <NEW_LINE> <DEDENT> plt.figure(figsize=(15,10)) <NEW_LINE> for label_idx in range(self.unique_y.shape[0]): <NEW_LINE> <INDENT> plt.scatter(self.emb_2d[self.y == self.unique_y[label_idx],0], self.emb_2d[self.y == self.unique_y[label_idx],1], s=50, alpha=0.5, marker=MARKER_MAP[label_idx], color='k', facecolors='none', label='in {}'.format(self.idx_to_label[label_idx])) <NEW_LINE> <DEDENT> for idx, taxa in enumerate(self.unique_taxa_list): <NEW_LINE> <INDENT> plt.scatter(self.emb_2d[self.y_taxa == self.taxa_to_idx[taxa],0], self.emb_2d[self.y_taxa == self.taxa_to_idx[taxa],1], s=15, alpha=0.5, marker='o', color=COLOR_MAP[idx], label=taxa) <NEW_LINE> <DEDENT> lgnd = plt.legend(loc="lower center", ncol=3) <NEW_LINE> for idx, item in enumerate(lgnd.legendHandles): <NEW_LINE> <INDENT> item._sizes = [100] <NEW_LINE> item.set_alpha(1) <NEW_LINE> <DEDENT> plt.xlabel('Axis 1') <NEW_LINE> plt.ylabel('Axis 2') <NEW_LINE> plt.savefig('{}/embedding_visualization.pdf'.format(self.output_dir), format='pdf', dpi=300, bbox_inches='tight') <NEW_LINE> plt.show()
plot embedding in 2D.
625941bc82261d6c526ab378
def safe_touch(path): <NEW_LINE> <INDENT> safe_makedirs(os.path.dirname(path)) <NEW_LINE> if not os.path.exists(path): <NEW_LINE> <INDENT> with open(path, "w") as fileobj: <NEW_LINE> <INDENT> fileobj.write("")
Create a file without throwing if it exists.
625941bcd4950a0f3b08c227
def iter_source_code(paths: Iterable[str], config: Config, skipped: List[str]) -> Iterator[str]: <NEW_LINE> <INDENT> visited_dirs: Set[Path] = set() <NEW_LINE> for path in paths: <NEW_LINE> <INDENT> if os.path.isdir(path): <NEW_LINE> <INDENT> for dirpath, dirnames, filenames in os.walk(path, topdown=True, followlinks=True): <NEW_LINE> <INDENT> base_path = Path(dirpath) <NEW_LINE> for dirname in list(dirnames): <NEW_LINE> <INDENT> full_path = base_path / dirname <NEW_LINE> resolved_path = full_path.resolve() <NEW_LINE> if config.is_skipped(full_path): <NEW_LINE> <INDENT> skipped.append(dirname) <NEW_LINE> dirnames.remove(dirname) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if resolved_path in visited_dirs: <NEW_LINE> <INDENT> if not config.quiet: <NEW_LINE> <INDENT> warn(f"Likely recursive symlink detected to {resolved_path}") <NEW_LINE> <DEDENT> dirnames.remove(dirname) <NEW_LINE> <DEDENT> <DEDENT> visited_dirs.add(resolved_path) <NEW_LINE> <DEDENT> for filename in filenames: <NEW_LINE> <INDENT> filepath = os.path.join(dirpath, filename) <NEW_LINE> if config.is_supported_filetype(filepath): <NEW_LINE> <INDENT> if config.is_skipped(Path(filepath)): <NEW_LINE> <INDENT> skipped.append(filename) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yield filepath <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> yield path
Iterate over all Python source files defined in paths.
625941bc16aa5153ce36234e
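A usage sketch, assuming isort's public Config object is available (the skip pattern and "src" path are illustrative):

from isort import Config

skipped = []
config = Config(skip=frozenset({"migrations"}))
for filepath in iter_source_code(["src"], config, skipped):
    print("would process:", filepath)
print("skipped:", skipped)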
def test_get_root(self): <NEW_LINE> <INDENT> self.assertEqual(200, self.response.status_code)
GET / must return status code 200
625941bc187af65679ca4ff3
def send_headers(self): <NEW_LINE> <INDENT> assert not self.headers_sent, 'headers have been sent already' <NEW_LINE> self.headers_sent = True <NEW_LINE> if (self.chunked is True) or ( self.length is None and self.version >= HttpVersion11 and self.status not in (304, 204)): <NEW_LINE> <INDENT> self.chunked = True <NEW_LINE> self.writer = self._write_chunked_payload() <NEW_LINE> <DEDENT> elif self.length is not None: <NEW_LINE> <INDENT> self.writer = self._write_length_payload(self.length) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.writer = self._write_eof_payload() <NEW_LINE> <DEDENT> next(self.writer) <NEW_LINE> self._add_default_headers() <NEW_LINE> hdrs = ''.join(itertools.chain( (self.status_line,), *((k, ': ', v, '\r\n') for k, v in ((k, value) for k, value in self.headers.items())))) <NEW_LINE> hdrs = hdrs.encode('utf-8') + b'\r\n' <NEW_LINE> self.output_length += len(hdrs) <NEW_LINE> self.transport.write(hdrs)
Writes headers to a stream. Constructs payload writer.
625941bcf7d966606f6a9ed7
def build_cimfoosub_sub_instance(id_): <NEW_LINE> <INDENT> inst_id = 'CIM_Foo_sub_sub%s' % id_ <NEW_LINE> inst = CIMInstance('CIM_Foo_sub_sub', properties={ 'InstanceID': inst_id, 'cimfoo_sub': 'cimfoo_sub prop: %s' % inst_id, 'cimfoo_sub_sub': 'cimfoo_sub_sub: %s' % inst_id}, path=CIMInstanceName('CIM_Foo_sub_sub', {'InstanceID': inst_id})) <NEW_LINE> return inst
Build a single instance of CIM_Foo_sub_sub where id_ is used to create the unique identity.
625941bcc4546d3d9de72907
def set_gateway_dcenter_flows(self, dpid_gw, topology, vmac_manager): <NEW_LINE> <INDENT> dcenter_to_port = topology.gateway_to_dcenters[dpid_gw].items() <NEW_LINE> for (dcenter, port_no) in dcenter_to_port: <NEW_LINE> <INDENT> peer_dc_vmac = vmac_manager.create_dc_vmac(dcenter) <NEW_LINE> self.set_topology_flow(dpid_gw, peer_dc_vmac, VmacManager.DCENTER_MASK, port_no)
Set up flows on gateway switches to other datacenters
625941bca219f33f34628846
def test_multilayerhob_pr(): <NEW_LINE> <INDENT> ml = flopy.modflow.Modflow() <NEW_LINE> dis = flopy.modflow.ModflowDis( ml, nlay=3, nrow=1, ncol=1, nper=1, perlen=[1] ) <NEW_LINE> flopy.modflow.HeadObservation( ml, layer=-3, row=0, column=0, time_series_data=[[1.0, 0]], mlay={0: 0.19, 1: 0.69, 2: 0.12}, ) <NEW_LINE> return
test041: test multilayer obs where the PR (layer proportion) values sum to 1, using problematic PRs
625941bc4f6381625f114913
def test_pass_round_integer_to_nearest_integer(self): <NEW_LINE> <INDENT> self.assertFilterPasses( self._filter(42, to_nearest='5'), Decimal('40.0'), )
Rounds an integer to the nearest multiple of 5.
625941bc8a349b6b435e8049
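The arithmetic under test -- rounding a value to the nearest multiple of a step -- can be sketched standalone with Decimal. This is only the underlying math, not the filter library's implementation:

from decimal import Decimal, ROUND_HALF_UP

def round_to_nearest(value, to_nearest):
    step = Decimal(to_nearest)
    quotient = (Decimal(value) / step).quantize(Decimal("1"), rounding=ROUND_HALF_UP)
    return quotient * step

print(round_to_nearest(42, "5"))   # 40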
def move(cups: dict, current: int, max_n: int) -> int: <NEW_LINE> <INDENT> tmp = (cups[current], cups[cups[current]], cups[cups[cups[current]]]) <NEW_LINE> target = current - 1 <NEW_LINE> while target in tmp or target == 0: <NEW_LINE> <INDENT> target = (target - 1) % (max_n + 1) <NEW_LINE> <DEDENT> cups[current] = cups[tmp[-1]] <NEW_LINE> cups[tmp[-1]] = cups[target] <NEW_LINE> cups[target] = tmp[0] <NEW_LINE> return cups[current]
Return the new current cup after one move; mutates cups in place.
625941bcf9cc0f698b1404d4
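A usage sketch: cups is a successor map (cup label -> next cup label), so one move of the cup game looks like this (the labels below are illustrative):

labels = [3, 8, 9, 1, 2, 5, 4, 6, 7]
# successor map: each cup points at the cup clockwise of it
cups = dict(zip(labels, labels[1:] + labels[:1]))
current = labels[0]
current = move(cups, current, max_n=max(labels))
print(current)   # 2 -- the picked-up cups 8, 9, 1 were placed after cup 2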
def view_project(request, id): <NEW_LINE> <INDENT> title = "View Project" <NEW_LINE> project = Project.get_pro_by_id(id=id) <NEW_LINE> return render(request, 'view_project.html', locals())
Function that enables one to view a specific project
625941bc1f5feb6acb0c4a2a
def setup_push(subparsers): <NEW_LINE> <INDENT> parser = subparsers.add_parser( 'push', help="push challenges configuration to the scoreboard." ) <NEW_LINE> parser.add_argument( '--host', default=getenv('MKCTF_SB_HOST', 'scoreboard.ctf.insecurity-insa.fr'), help="scoreboard host, overrides MKCTF_SB_HOST (env)", ) <NEW_LINE> parser.add_argument( '--port', default=int(getenv('MKCTF_SB_PORT', '443')), type=int, help="scoreboard port, overrides MKCTF_SB_PORT (env)", ) <NEW_LINE> parser.add_argument( '-t', '--tag', action='append', default=[], dest='tags', metavar='TAG', help="tag of challenges to include. Can appear multiple times.", ) <NEW_LINE> parser.add_argument( '-c', '--category', action='append', default=[], dest='categories', metavar='CATEGORY', help="category of challenge to include. Can appear multiple times.", ) <NEW_LINE> parser.add_argument( '-u', '--username', default=getenv('MKCTF_SB_USER'), help="scoreboard username, overrides MKCTF_SB_USER (env)", ) <NEW_LINE> parser.add_argument( '-p', '--password', default=getenv('MKCTF_SB_PSWD'), help="scoreboard password, overrides MKCTF_SB_PSWD (env). Using this option is strongly discouraged.", ) <NEW_LINE> parser.add_argument( '--no-verify-ssl', action='store_true', help="Disable SSL checks. Using this option is strongly discouraged.", ) <NEW_LINE> parser.set_defaults(func=push)
Set up the push subparser
625941bc5166f23b2e1a502f
def parentheses(formula): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> result = re.split(r"\(([0-9*/+\-.]+)\)", formula, 1) <NEW_LINE> if len(result) == 1: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> result[1] = str(compute(result[1])) <NEW_LINE> formula = ''.join(result) <NEW_LINE> <DEDENT> result = compute(formula) <NEW_LINE> return result
Handle the parentheses in the formula. :return: the final result
625941bcd10714528d5ffbb6
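A runnable sketch, assuming a compute helper that evaluates a flat (parenthesis-free) expression; the helper below is a hypothetical stand-in, since the module's real evaluator is defined elsewhere:

import re   # needed by parentheses above

def compute(expr):
    # hypothetical stand-in for the module's flat-expression evaluator;
    # by the time it is called, expr contains no parentheses
    return eval(expr)

print(parentheses("1+(2*3)-(4/(1+1))"))   # 5.0 -- innermost pairs are reduced first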
def login(self, username, password): <NEW_LINE> <INDENT> self._smtp.login(username, password) <NEW_LINE> self._logged_in = True
Attempts to log in to the smtp host with the provided credentials. The credentials are not stored in this object. :param username: smtp username :param password: smtp password
625941bc4527f215b584c331
def to_dict(self): <NEW_LINE> <INDENT> result = {} <NEW_LINE> for attr, _ in six.iteritems(self.swagger_types): <NEW_LINE> <INDENT> value = getattr(self, attr) <NEW_LINE> if isinstance(value, list): <NEW_LINE> <INDENT> result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) <NEW_LINE> <DEDENT> elif hasattr(value, "to_dict"): <NEW_LINE> <INDENT> result[attr] = value.to_dict() <NEW_LINE> <DEDENT> elif isinstance(value, dict): <NEW_LINE> <INDENT> result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[attr] = value <NEW_LINE> <DEDENT> <DEDENT> if issubclass(RequestViewModel, dict): <NEW_LINE> <INDENT> for key, value in self.items(): <NEW_LINE> <INDENT> result[key] = value <NEW_LINE> <DEDENT> <DEDENT> return result
Returns the model properties as a dict
625941bc3cc13d1c6d3c7254
def GetNumberOfFixedImageSamples(self): <NEW_LINE> <INDENT> return _itkImageToImageMetricPython.itkImageToImageMetricID3ID3_GetNumberOfFixedImageSamples(self)
GetNumberOfFixedImageSamples(self) -> unsigned long
625941bc7cff6e4e8111785c
def _set_positions_known_time(self, ind): <NEW_LINE> <INDENT> reference_time = self._midprice_df.index[ind + 1] - self._latency[1] + self._latency[0] <NEW_LINE> self._positions_as_of = self._index_series.values[ind] <NEW_LINE> self._positions_as_of += 1 <NEW_LINE> while self._orderbook.get_current_time(self._positions_as_of) <= reference_time: <NEW_LINE> <INDENT> self._positions_as_of += 1 <NEW_LINE> <DEDENT> self._positions_as_of -= 1
Time positions are known when we reach the next step: this is the next midprice time, plus our quote delay, minus our exchange_to_trader delay. positions_as_of represents the index in the orderbook data that we can access at the next step regarding our trade status.
625941bca4f1c619b28aff16
def delete_by_value(self, value): <NEW_LINE> <INDENT> if self.__head is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.__head.data == value: <NEW_LINE> <INDENT> self.__head = self.__head.next_node <NEW_LINE> return <NEW_LINE> <DEDENT> pro = self.__head <NEW_LINE> node = self.__head.next_node <NEW_LINE> while node is not None and node.data != value: <NEW_LINE> <INDENT> pro = node <NEW_LINE> node = node.next_node <NEW_LINE> <DEDENT> if node is not None: <NEW_LINE> <INDENT> pro.next_node = node.next_node
Delete the node storing the specified data from the linked list. Parameters: value: the specified data
625941bcbde94217f3682cd2
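A self-contained restatement for illustration, with a hypothetical Node and host class (the record above only shows the method body, so everything else here is assumed):

class Node:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next_node = next_node

class MiniList:                      # hypothetical host for delete_by_value
    def __init__(self, *values):
        self.head = None
        for v in reversed(values):
            self.head = Node(v, self.head)

    def delete_by_value(self, value):
        if self.head is None:
            return
        if self.head.data == value:  # head match: unlink and stop
            self.head = self.head.next_node
            return
        prev, node = self.head, self.head.next_node
        while node is not None and node.data != value:
            prev, node = node, node.next_node
        if node is not None:         # found mid-list: bypass the node
            prev.next_node = node.next_node

lst = MiniList(1, 2, 3)
lst.delete_by_value(2)
node = lst.head
while node:                          # prints 1 then 3
    print(node.data)
    node = node.next_node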
def get_user_names(): <NEW_LINE> <INDENT> SQL = """SELECT user_name FROM users;""" <NEW_LINE> data = None <NEW_LINE> fetch = "col" <NEW_LINE> user_names = db.run_statements(((SQL, data, fetch),))[0] <NEW_LINE> return user_names
Return user names.
625941bc4e4d5625662d42b2
def find(self, key): <NEW_LINE> <INDENT> i = self._linear_search(key) <NEW_LINE> if i == -1: <NEW_LINE> <INDENT> value = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> value = self._values[i] <NEW_LINE> <DEDENT> return deepcopy(value)
------------------------------------------------------- Finds and returns a copy of the first value in list that matches key. Use: value = source.find(key) ------------------------------------------------------- Parameters: key - a partial data element (?) Returns: value - a copy of the full value matching key, otherwise None (?) -------------------------------------------------------
625941bcbd1bec0571d9050e
def record( fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None ) -> Callable[..., T]: <NEW_LINE> <INDENT> if not error_handler: <NEW_LINE> <INDENT> error_handler = get_error_handler() <NEW_LINE> <DEDENT> def wrap(f): <NEW_LINE> <INDENT> @wraps(f) <NEW_LINE> def wrapper(*args, **kwargs): <NEW_LINE> <INDENT> assert error_handler is not None <NEW_LINE> error_handler.initialize() <NEW_LINE> try: <NEW_LINE> <INDENT> return f(*args, **kwargs) <NEW_LINE> <DEDENT> except ChildFailedError as e: <NEW_LINE> <INDENT> rank, failure = e.get_first_failure() <NEW_LINE> if failure.error_file != _NOT_AVAILABLE: <NEW_LINE> <INDENT> error_handler.dump_error_file(failure.error_file, failure.exitcode) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> warnings.warn(_no_error_file_warning_msg(rank, failure)) <NEW_LINE> <DEDENT> raise <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> error_handler.record_exception(e) <NEW_LINE> raise <NEW_LINE> <DEDENT> <DEDENT> return wrapper <NEW_LINE> <DEDENT> return wrap(fn)
Syntactic sugar to record errors/exceptions that happened in the decorated function using the provided ``error_handler``. Using this decorator is equivalent to: :: error_handler = get_error_handler() error_handler.initialize() try: foobar() except ChildFailedError as e: _, failure = e.get_first_failure() error_handler.dump_error_file(failure.error_file, failure.exitcode) raise except Exception as e: error_handler.record_exception(e) raise .. important:: use this decorator once per process at the top level method, typically this is the main method. Example :: @record def main(): pass if __name__=="__main__": main()
625941bc8a43f66fc4b53f3f
def adjust_target(self, spec): <NEW_LINE> <INDENT> if not (spec.architecture and spec.architecture.concrete): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> def _make_only_one_call(spec): <NEW_LINE> <INDENT> yield self._adjust_target(spec) <NEW_LINE> while True: <NEW_LINE> <INDENT> yield False <NEW_LINE> <DEDENT> <DEDENT> if self._adjust_target_answer_generator is None: <NEW_LINE> <INDENT> self._adjust_target_answer_generator = _make_only_one_call(spec) <NEW_LINE> <DEDENT> return next(self._adjust_target_answer_generator)
Adjusts the target microarchitecture if the compiler is too old to support the default one. Args: spec: spec to be concretized Returns: True if spec was modified, False otherwise
625941bc91f36d47f21ac3c6