| code (string, lengths 4–4.48k) | docstring (string, lengths 1–6.45k) | _id (string, length 24) |
|---|---|---|
def output(*args, **kwargs): <NEW_LINE> <INDENT> print(*args, **kwargs) <NEW_LINE> try: <NEW_LINE> <INDENT> sys.stdout.flush() <NEW_LINE> <DEDENT> except IOError as e: <NEW_LINE> <INDENT> if e.errno in (errno.EINVAL, errno.EPIPE): <NEW_LINE> <INDENT> sys.exit() <NEW_LINE> <DEDENT> raise
|
Print wrapper, avoids "Broken pipe" errors if piping is interrupted.
|
625941bc0c0af96317bb80ca
|
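The `output` wrapper above assumes `sys` and `errno` are already imported. A minimal self-contained sketch of how it behaves when its stdout pipe is closed early (e.g. `python script.py | head`):

```python
import errno
import sys

def output(*args, **kwargs):
    print(*args, **kwargs)
    try:
        sys.stdout.flush()
    except IOError as e:  # BrokenPipeError is an OSError/IOError with errno EPIPE
        if e.errno in (errno.EINVAL, errno.EPIPE):
            sys.exit()  # exit quietly instead of printing a traceback
        raise

for i in range(100000):
    output(i)  # survives `python script.py | head` without "Broken pipe" noise
```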
def save(obj: object, filename: str): <NEW_LINE> <INDENT> f = open(get_and_create_data_folder() + filename, "w") <NEW_LINE> f.write(json.dumps(obj)) <NEW_LINE> f.close()
|
Make a json from obj and save to filename in data folder.
|
625941bc5fdd1c0f98dc0114
|
def put(self, key, value): <NEW_LINE> <INDENT> bucket = self.get_bucket(key) <NEW_LINE> is_key_found = False <NEW_LINE> index = 0 <NEW_LINE> for idx, record in enumerate(bucket): <NEW_LINE> <INDENT> record_key, record_val = record <NEW_LINE> if record_key == key: <NEW_LINE> <INDENT> is_key_found = True <NEW_LINE> index = idx <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if is_key_found: <NEW_LINE> <INDENT> bucket[index] = (key, value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bucket.append((key, value))
|
Method to add or update entries based on a key.
:param key: object
Key can be int, str or any object.
:param value: object
Value can be int, str or any object.
:return: None
|
625941bca8ecb033257d2fb8
|
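The `put` method above depends on a `get_bucket` helper that is not shown; a minimal separate-chaining sketch (the bucket count and hashing scheme are assumptions) makes the add-or-update behaviour concrete:

```python
class HashMap:
    def __init__(self, n_buckets=8):
        # list of buckets; each bucket holds (key, value) tuples
        self._buckets = [[] for _ in range(n_buckets)]

    def get_bucket(self, key):  # assumed helper, not shown in the record above
        return self._buckets[hash(key) % len(self._buckets)]

    def put(self, key, value):
        bucket = self.get_bucket(key)
        for idx, (record_key, _) in enumerate(bucket):
            if record_key == key:
                bucket[idx] = (key, value)  # key exists: update in place
                return
        bucket.append((key, value))         # key absent: append new record

m = HashMap()
m.put("a", 1)
m.put("a", 2)  # updates rather than duplicating the key
assert m.get_bucket("a") == [("a", 2)]
```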
def get_help_element(self): <NEW_LINE> <INDENT> return self.q(css='.help-link').first
|
Returns the general Help button in the header.
|
625941bc56ac1b37e62640b7
|
def __exit__(self, exc_type, exc_val, exc_tb): <NEW_LINE> <INDENT> self.stop() <NEW_LINE> self.remove()
|
Stop the container and clean up.
|
625941bc91f36d47f21ac3d1
|
def __str__(self): <NEW_LINE> <INDENT> return json.dumps(self.__dict__)
|
Returns a string serialized JSON of the RepoFile.
|
625941bc293b9510aa2c317b
|
def get_crnn_output(input_images, parameters: Params=None) -> tf.Tensor: <NEW_LINE> <INDENT> cnn_features_list = parameters.cnn_features_list <NEW_LINE> cnn_kernel_size = parameters.cnn_kernel_size <NEW_LINE> cnn_pool_size = parameters.cnn_pool_size <NEW_LINE> cnn_stride_size = parameters.cnn_stride_size <NEW_LINE> cnn_batch_norm = parameters.cnn_batch_norm <NEW_LINE> rnn_units = parameters.rnn_units <NEW_LINE> cnn_params = zip(cnn_features_list, cnn_kernel_size, cnn_stride_size, cnn_pool_size, cnn_batch_norm) <NEW_LINE> conv_layers = [ConvBlock(ft, ks, ss, 'same', psz, bn) for ft, ks, ss, psz, bn in cnn_params] <NEW_LINE> x = conv_layers[0](input_images) <NEW_LINE> for conv in conv_layers[1:]: <NEW_LINE> <INDENT> x = conv(x) <NEW_LINE> <DEDENT> x = Permute((2, 1, 3))(x) <NEW_LINE> shape = x.get_shape().as_list() <NEW_LINE> x = Reshape((shape[1], shape[2] * shape[3]))(x) <NEW_LINE> rnn_layers = [Bidirectional(LSTM(ru, dropout=0.5, return_sequences=True, time_major=False)) for ru in rnn_units] <NEW_LINE> for rnn in rnn_layers: <NEW_LINE> <INDENT> x = rnn(x) <NEW_LINE> <DEDENT> x = Dense(parameters.alphabet.n_classes)(x) <NEW_LINE> net_output = Softmax()(x) <NEW_LINE> return net_output
|
Creates the CRNN network, passes `input_images` through it, and returns its output.
:param input_images: images to process (B, H, W, C)
:param parameters: parameters of the model (``Params``)
:return: the output of the CRNN model
|
625941bc97e22403b379ce7b
|
def on_scrub_geo(self, notice): <NEW_LINE> <INDENT> log.debug("Received location deletion notice: %s", notice)
|
This is called when a location deletion notice is received.
|
625941bc3cc13d1c6d3c725e
|
def fdin(self, arguments='optional', retrieval='optional'): <NEW_LINE> <INDENT> command = "fdin " <NEW_LINE> if arguments != "optional": <NEW_LINE> <INDENT> command += " " + arguments + " " <NEW_LINE> <DEDENT> if retrieval != "optional": <NEW_LINE> <INDENT> command += "{" + retrieval + "}" <NEW_LINE> <DEDENT> self.run_command(command)
|
FDIN
----
This is auto-generated documentation. For more command information visit the Datamine help file.
Input Files:
------------
Output Files:
-------------
Fields:
-------
Parameters:
-----------
|
625941bceab8aa0e5d26da40
|
def __init__(self, temboo_session): <NEW_LINE> <INDENT> Choreography.__init__(self, temboo_session, '/Library/Parse/PushNotifications/UpdateInstallation')
|
Create a new instance of the UpdateInstallation Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
|
625941bc4f88993c3716bf4e
|
def _check_del_unmatched(self, local_entry): <NEW_LINE> <INDENT> if not self._match(local_entry): <NEW_LINE> <INDENT> if self.options.get("delete_unmatched"): <NEW_LINE> <INDENT> self._log_action("delete", "unmatched", "<", local_entry) <NEW_LINE> if local_entry.is_dir(): <NEW_LINE> <INDENT> self._remove_dir(local_entry) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._remove_file(local_entry) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self._log_action("skip", "unmatched", "-", local_entry, min_level=4) <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> return False
|
Return True if entry is NOT matched (i.e. excluded by filter).
If --delete-unmatched is on, remove the local resource.
|
625941bc91f36d47f21ac3d2
|
@cli.command() <NEW_LINE> @click.option('--tournament', default=8, help='The ID of the tournament, defaults to 8') <NEW_LINE> @click.option( '--model_id', type=str, default=None, help="An account model UUID (required for accounts with multiple models)") <NEW_LINE> @click.option( '--new_data', is_flag=True, default=False, help="set this flag if you are using the new data") <NEW_LINE> @click.argument('path', type=click.Path(exists=True)) <NEW_LINE> def submit(path, tournament, model_id, new_data): <NEW_LINE> <INDENT> data_version = 2 if new_data else 1 <NEW_LINE> click.echo(napi.upload_predictions( path, tournament, model_id, version=data_version))
|
Upload predictions from file.
|
625941bc4c3428357757c20c
|
def AddMenuElements(self): <NEW_LINE> <INDENT> idaapi.add_menu_item("Edit/Plugins/", "ADB Super Connector", "", 0, self.AdbCall, ())
|
Menus are better than no GUI at all *sigh*
|
625941bc3eb6a72ae02ec3b7
|
def delete(self, filename=None, delete_v1=True, delete_v2=True): <NEW_LINE> <INDENT> if filename is None: <NEW_LINE> <INDENT> filename = self.filename <NEW_LINE> <DEDENT> delete(filename, delete_v1, delete_v2) <NEW_LINE> self.clear()
|
Remove tags from a file.
If no filename is given, the one most recently loaded is used.
Keyword arguments:
delete_v1 -- delete any ID3v1 tag
delete_v2 -- delete any ID3v2 tag
|
625941bc85dfad0860c3ad3c
|
def load_data(): <NEW_LINE> <INDENT> diabetes = datasets.load_diabetes() <NEW_LINE> return cross_validation.train_test_split(diabetes.data,diabetes.target, test_size=0.25,random_state=0)
|
Load the diabetes dataset.
return:
4 arrays for the regression problem:
train_data, test_data, train_value, test_value
|
625941bcbe7bc26dc91cd4e8
|
def show(self): <NEW_LINE> <INDENT> _focus = windowing.FocusManager() <NEW_LINE> if not self._shown: <NEW_LINE> <INDENT> def destroy(*args): <NEW_LINE> <INDENT> self.window = None <NEW_LINE> Gcf.destroy(self._num) <NEW_LINE> <DEDENT> self.canvas._tkcanvas.bind("<Destroy>", destroy) <NEW_LINE> self.window.deiconify() <NEW_LINE> self.window.update() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.canvas.draw_idle() <NEW_LINE> <DEDENT> self._shown = True
|
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
|
625941bc71ff763f4b54956a
|
def decode(string, alphabet=ALPHABET): <NEW_LINE> <INDENT> base = len(alphabet) <NEW_LINE> strlen = len(string) <NEW_LINE> num = 0 <NEW_LINE> idx = 0 <NEW_LINE> for char in string: <NEW_LINE> <INDENT> power = (strlen - (idx + 1)) <NEW_LINE> num += alphabet.index(char) * (base ** power) <NEW_LINE> idx += 1 <NEW_LINE> <DEDENT> return num
|
Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
|
625941bc507cdc57c6306bb7
|
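With the missing `return num` restored above, decode is the usual positional-notation sum of index(char) times base to the place power. A round-trip check with a hypothetical base-36 `ALPHABET` (the record does not show the real one):

```python
import string

ALPHABET = string.digits + string.ascii_lowercase  # hypothetical base-36 alphabet

def encode(num, alphabet=ALPHABET):
    """Inverse of decode: peel off digits with repeated divmod."""
    base, digits = len(alphabet), []
    while True:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
        if num == 0:
            return "".join(reversed(digits))

def decode(s, alphabet=ALPHABET):
    base, strlen, num = len(alphabet), len(s), 0
    for idx, char in enumerate(s):
        power = strlen - (idx + 1)
        num += alphabet.index(char) * base ** power
    return num

assert decode("10") == 36            # '1' * 36**1 + '0' * 36**0
assert decode(encode(123456)) == 123456
```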
def make_matcher(init_scan, laser_pose, search_size, search_resolution): <NEW_LINE> <INDENT> scan = scan_from_ros(init_scan) <NEW_LINE> pose = pose_from_tuple(laser_pose) <NEW_LINE> return KartoScanMatcherCpp(scan, pose, search_size, search_resolution)
|
@param init_scan: Initial scan used only to read parameters of laser scanner
@type init_scan: sensor_msgs.msg.LaserScan
@param laser_pose: Pose of laser wrt base frame
@type laser_pose: Tuple of form (x, y, theta)
@param search_size: Radius of square to do matching in (meters)
@param search_resolution: Resolution of grid search (meters)
@return Opaque object of type KartoScanMatcher, which can be used as an argument to repeated calls to match_scans.
|
625941bc2eb69b55b151c78e
|
def by_device(device=None): <NEW_LINE> <INDENT> return (sorted([e for e in entries()[0] if e.get('device') == device], key=lambda d: d.get('date'), reverse=True), sorted([e for e in entries()[1] if e.get('device') == device], key=lambda d: d.get('date'), reverse=True), sorted([e for e in entries()[2] if e.get('device') == device], key=lambda d: d.get('date'), reverse=True), sorted(list(entries()[3]), key=lambda d: d.get('date'), reverse=True))
|
Return all manifest_entries (tuple) for the selected {device}, reverse-sorted by date.
|
625941bca934411ee375157d
|
def test_deletable(self): <NEW_LINE> <INDENT> deletable_object_id = 123 <NEW_LINE> MyDeletable.delete(deletable_object_id, otherparam="val") <NEW_LINE> self.request_called_with('DELETE', "host/api/v1/deletables/" + str(deletable_object_id), params={'otherparam': "val"})
|
Deletable resource logic.
|
625941bc66656f66f7cbc08d
|
def list_of_matching(tt, matched): <NEW_LINE> <INDENT> r = [] <NEW_LINE> for child in kids(tt): <NEW_LINE> <INDENT> if name(child) not in matched: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> r.append(parse_any(child)) <NEW_LINE> <DEDENT> return r
|
Parse only the children of particular types under tt.
Other children are ignored rather than giving an error.
|
625941bc1f037a2d8b9460e1
|
def run(self, application, graph=None): <NEW_LINE> <INDENT> if graph is None: <NEW_LINE> <INDENT> graph = ApplicationDriver.create_graph( application=application, num_gpus=self.num_gpus, num_threads=self.num_threads, is_training_action=self.is_training_action) <NEW_LINE> <DEDENT> start_time = time.time() <NEW_LINE> loop_status = {'current_iter': self.initial_iter, 'normal_exit': False} <NEW_LINE> with tf.Session(config=tf_config(), graph=graph): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> SESS_STARTED.send(application, iter_msg=None) <NEW_LINE> iteration_messages = self._generator(**vars(self))() <NEW_LINE> ApplicationDriver.loop( application=application, iteration_messages=iteration_messages, loop_status=loop_status) <NEW_LINE> <DEDENT> except KeyboardInterrupt: <NEW_LINE> <INDENT> tf.logging.warning('User cancelled application') <NEW_LINE> <DEDENT> except (tf.errors.OutOfRangeError, EOFError): <NEW_LINE> <INDENT> if not loop_status.get('normal_exit', False): <NEW_LINE> <INDENT> loop_status['normal_exit'] = True <NEW_LINE> <DEDENT> <DEDENT> except RuntimeError: <NEW_LINE> <INDENT> import sys <NEW_LINE> import traceback <NEW_LINE> exc_type, exc_value, exc_traceback = sys.exc_info() <NEW_LINE> traceback.print_exception( exc_type, exc_value, exc_traceback, file=sys.stdout) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> tf.logging.info('cleaning up...') <NEW_LINE> iter_msg = IterationMessage() <NEW_LINE> iter_msg.current_iter = loop_status.get('current_iter', -1) <NEW_LINE> SESS_FINISHED.send(application, iter_msg=iter_msg) <NEW_LINE> <DEDENT> <DEDENT> application.stop() <NEW_LINE> if not loop_status.get('normal_exit', False): <NEW_LINE> <INDENT> tf.logging.warning('stopped early, incomplete iterations.') <NEW_LINE> <DEDENT> tf.logging.info( "%s stopped (time in second %.2f).", type(application).__name__, (time.time() - start_time))
|
Initialise a TF graph, connect data sampler and network within
the graph context, run training loops or inference loops.
:param application: a niftynet application
:param graph: default base graph to run the application
:return:
|
625941bc379a373c97cfaa2d
|
def pwd_crack_task3(salt, pwd_hash): <NEW_LINE> <INDENT> start_time = time.time() <NEW_LINE> concat_dict_search("web2",salt,pwd_hash) <NEW_LINE> time_taken = time.time()-start_time <NEW_LINE> print("Seconds required: ", time_taken)
|
Please complete this function:
1) find the concatenation of two words (the password) in the given dictionary such that its hash
matches the given hash
2) print out the word (password) in plaintext
|
625941bcd58c6744b4257b43
|
def find_latest_simulation(product): <NEW_LINE> <INDENT> sims = {} <NEW_LINE> dates = reversed(list_dates(product)) <NEW_LINE> for date in dates: <NEW_LINE> <INDENT> date_sims = list_sims(product, date) <NEW_LINE> if product == 'long_range' and len(date_sims) == 16: <NEW_LINE> <INDENT> is_complete = True <NEW_LINE> for key, sim in date_sims.iteritems(): <NEW_LINE> <INDENT> if not sim['is_complete']: <NEW_LINE> <INDENT> is_complete = False <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> if is_complete: <NEW_LINE> <INDENT> sims = date_sims <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> elif product != 'long_range': <NEW_LINE> <INDENT> key, sim = _find_complete_sim(date_sims) <NEW_LINE> if key: <NEW_LINE> <INDENT> sims[key] = sim <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return sims
|
Identifies files for the most recent complete simulation.
Each simulation is represented as a dictionary describing product
type, simulation date, and whether all expected files are present,
and it also includes a list of filenames, e.g.
{'product': 'long_range_mem1',
'date': '20170401t06-00',
'is_complete': True,
'files': ['nwm...f006.conus.nc', 'nwm...f012.conus.nc', ...],
'links': ['http...', ...]}
Args:
product: String product name, e.g., 'short_range'.
Returns:
An ordered dictionary of simulation dictionaries, indexed by
product and date, e.g., 'long_range_mem1_20170401t06-00', or
empty dictionary if no complete simulations found.
|
625941bc24f1403a92600a4c
|
def remove_subset(self, label): <NEW_LINE> <INDENT> super(LexiconQuery, self).remove_subset(label) <NEW_LINE> self.corpus.hierarchy.remove_type_subsets(self.corpus, self.to_find.node_type, [label])
|
Delete a subset label from the corpus.
N.B. This function only removes the type subset label, not any annotations or data in the corpus.
Parameters
----------
label : str
Name of the subset to remove
|
625941bc94891a1f4081b98b
|
def replace_currency_symbols(text, replace_with=None): <NEW_LINE> <INDENT> if replace_with is None: <NEW_LINE> <INDENT> for k, v in CURRENCIES.items(): <NEW_LINE> <INDENT> text = text.replace(k, " "+v+" ") <NEW_LINE> <DEDENT> return text <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return CURRENCY_REGEX.sub(replace_with, text)
|
Replace all currency symbols in ``text`` str with string specified by ``replace_with`` str.
Args:
text (str): raw text
replace_with (str): if None (default), replace symbols with
their standard 3-letter abbreviations (e.g. '$' with 'USD', '£' with 'GBP');
otherwise, pass in a string with which to replace all symbols
(e.g. "*CURRENCY*")
Returns:
str
|
625941bc6e29344779a624f7
|
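The module-level `CURRENCIES` and `CURRENCY_REGEX` are not part of the record; a hypothetical pair is enough to see both branches:

```python
import re

CURRENCIES = {"$": "USD", "£": "GBP", "€": "EUR"}  # hypothetical mapping
CURRENCY_REGEX = re.compile("[" + re.escape("".join(CURRENCIES)) + "]")

def replace_currency_symbols(text, replace_with=None):
    if replace_with is None:
        for k, v in CURRENCIES.items():  # symbol -> ' ABBR '
            text = text.replace(k, " " + v + " ")
        return text
    return CURRENCY_REGEX.sub(replace_with, text)

print(replace_currency_symbols("tea costs £3"))                # tea costs  GBP 3
print(replace_currency_symbols("tea costs £3", "*CURRENCY*"))  # tea costs *CURRENCY*3
```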
def get_candidate(self, candidate_id): <NEW_LINE> <INDENT> if candidate_id not in self._candidate_id: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return self._candidate_id[candidate_id]
|
Returns the corresponding ElectionCandidate based upon their string
representation of the candidate ID.
|
625941bc21bff66bcd684838
|
def _assert_num_attempts(self, students, num_attempts): <NEW_LINE> <INDENT> for student in students: <NEW_LINE> <INDENT> module = StudentModule.objects.get(course_id=self.course.id, student=student, module_state_key=self.problem_url) <NEW_LINE> state = json.loads(module.state) <NEW_LINE> self.assertEquals(state['attempts'], num_attempts)
|
Check that the number of attempts is the same for all students
|
625941bc1b99ca400220a994
|
@given(DEVICE_STEPS[0]) <NEW_LINE> @given(DEVICE_STEPS[1]) <NEW_LINE> @given(DEVICE_STEPS[2]) <NEW_LINE> @given(DEVICE_STEPS[3]) <NEW_LINE> @given(DEVICE_STEPS[4]) <NEW_LINE> def _step(context, os_type, os_ver, device_type=None): <NEW_LINE> <INDENT> user_agent = None <NEW_LINE> if hasattr(context, u'user_agent'): <NEW_LINE> <INDENT> user_agent = context.user_agent <NEW_LINE> <DEDENT> window_size_x = None <NEW_LINE> if hasattr(context, u'window_size_x'): <NEW_LINE> <INDENT> window_size_x = context.window_size_x <NEW_LINE> <DEDENT> window_size_y = None <NEW_LINE> if hasattr(context, u'window_size_y'): <NEW_LINE> <INDENT> window_size_y = context.window_size_y <NEW_LINE> <DEDENT> if device_type not in [u'phone', u'tablet']: <NEW_LINE> <INDENT> device_type = None <NEW_LINE> <DEDENT> if context.os_type != os_type or context.os_ver != os_ver or context.device_type != device_type: <NEW_LINE> <INDENT> context.os_type = os_type <NEW_LINE> context.os_ver = os_ver <NEW_LINE> context.device_type = device_type <NEW_LINE> context.refresh_driver = True <NEW_LINE> <DEDENT> if context.driver and not context.refresh_driver: <NEW_LINE> <INDENT> log.debug(u'context already has a driver instance and refresh_driver=False, not instantiating one') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> log.debug(u'creating a new step-level webdriver instance') <NEW_LINE> if context.driver: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> context.driver.quit() <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> log.warn(u'error when calling driver.quit(): {}'.format(unicode(e))) <NEW_LINE> <DEDENT> <DEDENT> assert context.app_uri, u'a path to the app wasnt given!' <NEW_LINE> driver_class = webdriver_me(context.os_ver, context.os_type, context.app_uri, context.device_type) <NEW_LINE> context.driver = driver_class( device_type=context.device_type, os_ver=context.os_ver, os_type=context.os_type, app_path=context.app_uri, app_package=context.app_package, app_activity=context.app_activity, user_agent=user_agent, window_size_x=window_size_x, window_size_y=window_size_y, webdriver_url=context.webdriver_url, webdriver_processor=context.webdriver_processor ) <NEW_LINE> context.refresh_driver = False <NEW_LINE> context.feature_driver = False
|
:type context: HackedContext
|
625941bc2c8b7c6e89b356a5
|
def test_user_update(self): <NEW_LINE> <INDENT> user = sample_user(first_name='Someone') <NEW_LINE> self.assertEqual(user.first_name, 'Someone') <NEW_LINE> self.assertIsNone(user.last_name) <NEW_LINE> user.first_name = 'Sample' <NEW_LINE> user.last_name = 'User' <NEW_LINE> user.full_clean() <NEW_LINE> user.save() <NEW_LINE> self.assertNotEqual(user.first_name, 'Someone') <NEW_LINE> self.assertIsNotNone(user.last_name) <NEW_LINE> self.assertEqual(user.first_name, 'Sample') <NEW_LINE> self.assertEqual(user.last_name, 'User')
|
Tests user object updating and saving
|
625941bcd4950a0f3b08c234
|
def say(text): <NEW_LINE> <INDENT> subprocess.call('say ' + text, shell=True)
|
OS X has built-in text-to-speech
|
625941bc44b2445a33931f82
|
def mol_from_smiles(smi): <NEW_LINE> <INDENT> if smi == "foo": <NEW_LINE> <INDENT> smi = "*" <NEW_LINE> <DEDENT> mol = Chem.MolFromSmiles(smi) <NEW_LINE> if not mol: <NEW_LINE> <INDENT> mol = Chem.MolFromSmiles("*") <NEW_LINE> <DEDENT> return mol
|
Generate a mol from SMILES.
For invalid SMILES it generates a No-structure.
|
625941bc15fb5d323cde09ee
|
def save_indexed(self, index_name, key, value, *subj): <NEW_LINE> <INDENT> if self.readonly: <NEW_LINE> <INDENT> raise RuntimeError('Attempt to save an object to a read-only store') <NEW_LINE> <DEDENT> if not isinstance(subj, tuple) and not isinstance(subj, list): <NEW_LINE> <INDENT> subj = (subj,) <NEW_LINE> <DEDENT> for obj in subj: <NEW_LINE> <INDENT> self._register(obj, neo4j.Node.abstract(**Store.safe_attrs(obj)) , index=index_name, key=key, value=value, unique=False)
|
Save the given (new) object as an indexed node
|
625941bc8c0ade5d55d3e8a2
|
def get_one(self, group_id): <NEW_LINE> <INDENT> members = self.get_all(group_id) <NEW_LINE> if members is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> length = len(members) <NEW_LINE> if length == 0: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return random.choice(members)
|
The default behavior of get_one is to randomly pick one from
the result of get_all(). This is likely to be overridden in the
actual driver implementation.
|
625941bc796e427e537b04a6
|
def _update(self, func, *args, **kwargs): <NEW_LINE> <INDENT> func(*args, **kwargs) <NEW_LINE> self._setDownloadDate()
|
Initiate data update
The download date is updated automatically.
Parameters
----------
func : callable
Function to call to carry out the update.
The function will be called with *args and
**kwargs as parameters.
|
625941bc3eb6a72ae02ec3b8
|
def speaker_model_corr_mat(model_comp_list, code_scenarios_speaker, adj_scenarios_speaker): <NEW_LINE> <INDENT> model_comp_mat = np.zeros((len(model_comp_list), len(model_comp_list), len(adj_scenarios_speaker))) <NEW_LINE> model_comp_abs = np.zeros((len(model_comp_list), len(model_comp_list))) <NEW_LINE> for i, scen in enumerate(adj_scenarios_speaker): <NEW_LINE> <INDENT> adj_set = adj_scenarios_speaker[i] <NEW_LINE> noun_set = code_scenarios_speaker[i] <NEW_LINE> model_choices = abs_max_across_models(model_comp_list, adj_set, noun_set, comp='speaker') <NEW_LINE> model_dist = dist_across_models(model_comp_list, adj_set, noun_set, comp='speaker') <NEW_LINE> for j in range(len(model_comp_list)): <NEW_LINE> <INDENT> for k in range(j, len(model_comp_list)): <NEW_LINE> <INDENT> model_comp_mat[j, k, i] = scipy.stats.spearmanr(model_dist[j], model_dist[k])[0] <NEW_LINE> model_comp_mat[k, j, i] = scipy.stats.spearmanr(model_dist[j], model_dist[k])[0] <NEW_LINE> if model_choices[j] == model_choices[k] and k > j: <NEW_LINE> <INDENT> model_comp_abs[j, k] += 1 <NEW_LINE> model_comp_abs[k, j] += 1 <NEW_LINE> <DEDENT> <DEDENT> model_comp_abs[j, j] = len(adj_scenarios_speaker) <NEW_LINE> <DEDENT> <DEDENT> model_comp_mat = np.mean(model_comp_mat, axis=2) <NEW_LINE> model_comp_abs /= len(adj_scenarios_speaker) <NEW_LINE> return model_comp_abs, model_comp_mat
|
Compute the correlation of model distributions across all scenarios.
:param model_comp_list: list of models to compare in (adj_mat, prag)
:param code_scenarios_speaker: codename scenarios
:param adj_scenarios_speaker: adj scenarios
:return: top match percentage and average rank correlation matrix
|
625941bc6e29344779a624f8
|
def cfg_total_src_dims(self, nsrc): <NEW_LINE> <INDENT> self.nsrc = nsrc <NEW_LINE> for nr_var in mbu.source_nr_vars(): <NEW_LINE> <INDENT> setattr(self, nr_var, nsrc)
|
Configure the total number of sources that will
be handled by this solver. Used by v5 to allocate
solvers handling subsets of the total problem.
Passing nsrc=100 means that the solver will handle
100 sources in total.
Additionally, sets the number for each individual
source type to 100. So npsrc=100, ngsrc=100,
nssrc=100 for instance. This is because if we're
handling 100 sources total, we'll need space for
at least 100 sources of each type.
The number of sources actually handled by the
solver on each iteration is set in the
rime_const_data_cpu structure.
|
625941bc30dc7b766590184c
|
def get_votecast_recent_votes(self): <NEW_LINE> <INDENT> return self.sessconfig['votecast_recent_votes']
|
Returns the maximum limit on recent votes by the user that will be
forwarded to connected peers.
@return int
|
625941bc9f2886367277a773
|
def performJoin(self, oInst): <NEW_LINE> <INDENT> if not oInst in self._dJoinCache: <NEW_LINE> <INDENT> self._dJoinCache[oInst] = joins.SORelatedJoin.performJoin(self, oInst) <NEW_LINE> <DEDENT> return self._dJoinCache[oInst]
|
Return the join result, from the cache if possible.
|
625941bc66673b3332b91f74
|
def print_tokens(self, file=None): <NEW_LINE> <INDENT> for tok in self.tokens: <NEW_LINE> <INDENT> print(tok.pretty_print(), file=file)
|
Print the tokens for this sentence.
|
625941bcd53ae8145f87a158
|
def _FillBucketInputQueue(self): <NEW_LINE> <INDENT> print("in _FillBucketInputQueue") <NEW_LINE> inputs = [] <NEW_LINE> for i in range(len(self._input_queue)): <NEW_LINE> <INDENT> getcontent = self._input_queue[i] <NEW_LINE> inputs.append(getcontent) <NEW_LINE> <DEDENT> batches = [] <NEW_LINE> for i in range(0, len(inputs), self._hps.batch_size): <NEW_LINE> <INDENT> batches.append(inputs[i:i+self._hps.batch_size]) <NEW_LINE> <DEDENT> if self._hps.mode != 'decode': <NEW_LINE> <INDENT> shuffle(batches) <NEW_LINE> <DEDENT> for b in batches: <NEW_LINE> <INDENT> self._bucket_input_queue.append(b) <NEW_LINE> <DEDENT> print("end _FillBucketInputQueue")
|
Fill bucketed batches into the bucket_input_queue.
|
625941bc31939e2706e4cd51
|
def fol_fc_ask(kb, alpha): <NEW_LINE> <INDENT> kb_consts = list({c for clause in kb.clauses for c in constant_symbols(clause)}) <NEW_LINE> def enum_subst(p): <NEW_LINE> <INDENT> query_vars = list({v for clause in p for v in variables(clause)}) <NEW_LINE> for assignment_list in itertools.product(kb_consts, repeat=len(query_vars)): <NEW_LINE> <INDENT> theta = {x: y for x, y in zip(query_vars, assignment_list)} <NEW_LINE> yield theta <NEW_LINE> <DEDENT> <DEDENT> for q in kb.clauses: <NEW_LINE> <INDENT> phi = unify_mm(q, alpha) <NEW_LINE> if phi is not None: <NEW_LINE> <INDENT> yield phi <NEW_LINE> <DEDENT> <DEDENT> while True: <NEW_LINE> <INDENT> new = [] <NEW_LINE> for rule in kb.clauses: <NEW_LINE> <INDENT> p, q = parse_definite_clause(rule) <NEW_LINE> for theta in enum_subst(p): <NEW_LINE> <INDENT> if set(subst(theta, p)).issubset(set(kb.clauses)): <NEW_LINE> <INDENT> q_ = subst(theta, q) <NEW_LINE> if all([unify_mm(x, q_) is None for x in kb.clauses + new]): <NEW_LINE> <INDENT> new.append(q_) <NEW_LINE> phi = unify_mm(q_, alpha) <NEW_LINE> if phi is not None: <NEW_LINE> <INDENT> yield phi <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if not new: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> for clause in new: <NEW_LINE> <INDENT> kb.tell(clause) <NEW_LINE> <DEDENT> <DEDENT> return None
|
[Figure 9.3]
A simple forward-chaining algorithm.
|
625941bc15baa723493c3e56
|
def fold_path(path, width=30): <NEW_LINE> <INDENT> assert isinstance(path, six.string_types) <NEW_LINE> if len(path) > width: <NEW_LINE> <INDENT> path = path.replace(".", ".\n ") <NEW_LINE> <DEDENT> return path
|
Fold a string form of a path so that each element is on a separate line
|
625941bc498bea3a759b9993
|
def birth_prior(self, meas_source_index, meas_score, birth_count): <NEW_LINE> <INDENT> score_index = self.get_score_index(meas_score, meas_source_index) <NEW_LINE> return self.clutter_probabilities[meas_source_index][score_index][birth_count]
|
The prior probability of birth_count number of births with score given by
meas_score from the measurement source with index meas_source_index
|
625941bcf9cc0f698b1404e1
|
def formatError(self, test, err): <NEW_LINE> <INDENT> open_files = _gather_open_files() <NEW_LINE> if not open_files: <NEW_LINE> <INDENT> return err <NEW_LINE> <DEDENT> ec, ev, tb = err <NEW_LINE> handle_counter = collections.Counter(open_files) <NEW_LINE> new_ev = u'\n'.join( [ev, ln(u'>>> There are {} open file handlers for {} files: <<<' .format(len(open_files), len(handle_counter)))] + [ln(u'* ({}) {}'.format(count, path)) for path, count in handle_counter.items()] + [ln(u'>>> End of the open file list. <<<')] ) <NEW_LINE> return (ec, new_ev, tb)
|
List the open files when a test errors.
|
625941bc5510c4643540f2cf
|
def test_render_internalservice(): <NEW_LINE> <INDENT> deployment_rendered = SIMPLE_K8S_DEPLOYMENT.render(k8s.RenderingOptions()) <NEW_LINE> rendered = k8s.InternalService( deployment=SIMPLE_K8S_DEPLOYMENT).render(k8s.RenderingOptions()) <NEW_LINE> assert len(rendered) == 4 <NEW_LINE> assert rendered["apiVersion"] == "v1" <NEW_LINE> assert rendered["kind"] == "Service" <NEW_LINE> assert rendered["metadata"] == deployment_rendered["metadata"] <NEW_LINE> expected_spec = { "type": "NodePort", "ports": [{ "port": 1234, "targetPort": 1234, "protocol": "TCP" }] } <NEW_LINE> expected_spec["selector"] = deployment_rendered["metadata"]["labels"] <NEW_LINE> assert rendered["spec"] == expected_spec
|
An InternalService renders to a k8s Service.
|
625941bc01c39578d7e74d1f
|
def saMsgMessageSendAsync(msgHandle, invocation, destination, message, ackFlags): <NEW_LINE> <INDENT> return msgdll.saMsgMessageSendAsync(msgHandle, invocation, BYREF(destination), BYREF(message), ackFlags)
|
Send message to message queue or message queue group asynchronously.
type arguments:
SaMsgHandleT msgHandle
SaInvocationT invocation
SaNameT destination
SaMsgMessageT message
SaMsgAckFlagsT ackFlags
returns:
SaAisErrorT
|
625941bc7cff6e4e81117869
|
def getproxypeername(self): <NEW_LINE> <INDENT> return socket.socket.getpeername(self)
|
getproxypeername() -> address info
Returns the IP and port number of the proxy.
|
625941bc711fe17d82542254
|
def close(self): <NEW_LINE> <INDENT> for hdlr in self.__data_handlers: <NEW_LINE> <INDENT> hdlr.close() <NEW_LINE> <DEDENT> self.__data_handlers = None <NEW_LINE> self.__geod_handler.close() <NEW_LINE> self.__time_handler.close() <NEW_LINE> self.__geod_handler = None <NEW_LINE> self.__time_handler = None
|
Close the handlers on the storage
|
625941bc3346ee7daa2b2c4d
|
def test_repo_detail_fail_different_user( self ): <NEW_LINE> <INDENT> response = self.open( '/repos/', {'format': 'json', 'user': 'admin'} ) <NEW_LINE> repo = response[ 'objects' ][0][ 'id' ] <NEW_LINE> error_thrown = False <NEW_LINE> response = None <NEW_LINE> try: <NEW_LINE> <INDENT> data = {'format': 'json', 'user': 'test_user'} <NEW_LINE> response = self.open( '/repos/%s' % ( repo ), data ) <NEW_LINE> <DEDENT> except HTTPError as e: <NEW_LINE> <INDENT> error_thrown = True <NEW_LINE> assert e.code == 401 <NEW_LINE> <DEDENT> assert error_thrown <NEW_LINE> assert response is None
|
Test failure state for the repo detail API when passed a
user who does not own the current repo
|
625941bc63f4b57ef0001004
|
def count(expr, where=None): <NEW_LINE> <INDENT> op = expr.op() <NEW_LINE> if isinstance(op, ops.DistinctColumn): <NEW_LINE> <INDENT> result = ops.CountDistinct(op.args[0], where).to_expr() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result = ops.Count(expr, where).to_expr() <NEW_LINE> <DEDENT> return result.name('count')
|
Compute cardinality / sequence size of expression. For array expressions,
the count excludes nulls. For tables, it's the size of the entire
table.
Returns
-------
counts : int64 type
|
625941bc0383005118ecf4c8
|
def get_global_rank(): <NEW_LINE> <INDENT> if IS_STANDALONE: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return GLOBAL_RANK[rpc.get_rank()]
|
Get the global rank
The rank can globally identify the client process. For the client processes
of the same role, their ranks are in a contiguous range.
|
625941bc097d151d1a222d3f
|
def GetHash(transaction): <NEW_LINE> <INDENT> pass
|
:param transaction:
|
625941bc07f4c71912b1136b
|
def connect(self, root): <NEW_LINE> <INDENT> if not root: return <NEW_LINE> stack = [root] <NEW_LINE> while stack: <NEW_LINE> <INDENT> children = [] <NEW_LINE> length = len(stack) <NEW_LINE> for index, node in enumerate(stack): <NEW_LINE> <INDENT> if index+1<length: <NEW_LINE> <INDENT> node.next = stack[index+1] <NEW_LINE> <DEDENT> if node.left: children.append(node.left) <NEW_LINE> if node.right: children.append(node.right) <NEW_LINE> <DEDENT> stack = children <NEW_LINE> <DEDENT> return
|
:type root: TreeLinkNode
:rtype: nothing
|
625941bc92d797404e30406d
|
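The level-order linking above needs only a `TreeLinkNode` with `left`/`right`/`next` slots; a minimal sketch (the node class is an assumption) wiring up a three-node tree:

```python
class TreeLinkNode:  # assumed node shape: left, right, next
    def __init__(self, val):
        self.val = val
        self.left = self.right = self.next = None

def connect(root):
    level = [root] if root else []
    while level:
        for a, b in zip(level, level[1:]):
            a.next = b  # link each node to its right-hand sibling
        level = [c for n in level for c in (n.left, n.right) if c]

root = TreeLinkNode(1)
root.left, root.right = TreeLinkNode(2), TreeLinkNode(3)
connect(root)
assert root.next is None            # root has no sibling
assert root.left.next is root.right  # 2 -> 3
assert root.right.next is None       # end of the level
```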
def __init__(self,configDocName ,featuresSerializationFileName, trainFeatures, trainTargets, testFeatures, testTargets): <NEW_LINE> <INDENT> self.ParseConfigFile(configDocName) <NEW_LINE> self.trainFeatures = trainFeatures <NEW_LINE> self.trainTargets = trainTargets <NEW_LINE> self.testFeatures = testFeatures <NEW_LINE> self.testTargets = testTargets <NEW_LINE> self.featuresSerializationFileName = featuresSerializationFileName
|
Constructor
|
625941bc99cbb53fe6792acb
|
def setUp(self): <NEW_LINE> <INDENT> Test.setUp(self) <NEW_LINE> self.__cache = bundy.bundy.socket_cache.Cache(self) <NEW_LINE> self.__address = IPAddr("192.0.2.1") <NEW_LINE> self.__socket = bundy.bundy.socket_cache.Socket('UDP', self.__address, 1024, 42) <NEW_LINE> self.__get_socket_called = False
|
Creates the cache for tests with us being the socket creator.
Also creates some more variables for testing.
|
625941bc7b180e01f3dc46e7
|
@pytest.fixture <NEW_LINE> def initialized_patient_logged_in(client, initialized_patient): <NEW_LINE> <INDENT> initialized_patient = db.session.merge(initialized_patient) <NEW_LINE> oauth_info = {'user_id': initialized_patient.id} <NEW_LINE> client.get( 'test/oauth', query_string=oauth_info, follow_redirects=True ) <NEW_LINE> return initialized_patient
|
Fixture to extend initialized patient to one logged in
|
625941bce64d504609d74724
|
def testNodeDrivesNode(self): <NEW_LINE> <INDENT> pass
|
Test NodeDrivesNode
|
625941bc16aa5153ce36235c
|
def _load_array(self, vtype, struct_class, element_struct): <NEW_LINE> <INDENT> if element_struct == Uint8 or element_struct == Int8: <NEW_LINE> <INDENT> raise ValueError('use Bytes type to replace vector<byte>') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if vtype == DataHeadType.LIST: <NEW_LINE> <INDENT> _vtype, _vtag = self._buf.read_head() <NEW_LINE> assert _vtag == 0 <NEW_LINE> size = self._load_int32(_vtype, struct_class, True) <NEW_LINE> listdata = [] <NEW_LINE> for _ in range(size): <NEW_LINE> <INDENT> evtype, tag = self._buf.read_head() <NEW_LINE> assert tag == 0 <NEW_LINE> listdata.append(self._loads(element_struct, evtype)) <NEW_LINE> <DEDENT> return listdata <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise JceDecodeError('inconsistent type, structure is "%s", data head is "%d"' % (struct_class.__name__, vtype))
|
Deserialize an array.
|
625941bc099cdd3c635f0b40
|
def cycle(self): <NEW_LINE> <INDENT> prev = self.active_cubes.copy() <NEW_LINE> search_x = range(self.min_x - 3, self.max_x + 3) <NEW_LINE> search_y = range(self.min_y - 3, self.max_y + 3) <NEW_LINE> search_z = range(self.min_z - 3, self.max_z + 3) <NEW_LINE> search_w = range(self.min_w - 3, self.max_w + 3) <NEW_LINE> self.active_cubes = set() <NEW_LINE> self.min_x, self.max_x = 0, 0 <NEW_LINE> self.min_y, self.max_y = 0, 0 <NEW_LINE> self.min_z, self.max_z = 0, 0 <NEW_LINE> self.min_w, self.max_w = 0, 0 <NEW_LINE> for x in search_x: <NEW_LINE> <INDENT> for y in search_y: <NEW_LINE> <INDENT> for z in search_z: <NEW_LINE> <INDENT> for w in search_w: <NEW_LINE> <INDENT> neighbors = 0 <NEW_LINE> for dx in range(-1, 2): <NEW_LINE> <INDENT> for dy in range(-1, 2): <NEW_LINE> <INDENT> for dz in range(-1, 2): <NEW_LINE> <INDENT> for dw in range(-1, 2): <NEW_LINE> <INDENT> if dx != 0 or dy != 0 or dz != 0 or dw != 0: <NEW_LINE> <INDENT> if (x + dx, y + dy, z + dz, w + dw) in prev: <NEW_LINE> <INDENT> neighbors += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if (x, y, z, w) in prev and 2 <= neighbors <= 3: <NEW_LINE> <INDENT> self.activate(x, y, z, w) <NEW_LINE> <DEDENT> if (x, y, z, w) not in prev and neighbors == 3: <NEW_LINE> <INDENT> self.activate(x, y, z, w) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> self.cycles += 1
|
Iterate the pocket dimension through a cycle.
|
625941bc66656f66f7cbc08e
|
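The eight nested loops in `cycle` count active neighbours cell by cell; `itertools.product` expresses the same 80-neighbour count (3 to the 4th, minus the cell itself) much more compactly. A sketch of just that inner step:

```python
from itertools import product

def count_active_neighbors(cell, active):
    """Active cells among the 3**4 - 1 = 80 neighbours of a 4-D cell."""
    x, y, z, w = cell
    return sum(
        (x + dx, y + dy, z + dz, w + dw) in active
        for dx, dy, dz, dw in product((-1, 0, 1), repeat=4)
        if (dx, dy, dz, dw) != (0, 0, 0, 0)  # a cell is not its own neighbour
    )

active = {(0, 0, 0, 0), (1, 0, 0, 0)}
assert count_active_neighbors((0, 0, 0, 0), active) == 1
```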
def __ne__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, ConnectFailureFilter): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return self.to_dict() != other.to_dict()
|
Returns true if both objects are not equal
|
625941bc851cf427c661a3f6
|
def test_is_user_authorized_none_user(self): <NEW_LINE> <INDENT> self.assertFalse(is_user_authorized(None))
|
Check that a standard user will not have the right to see protected variables
|
625941bc3d592f4c4ed1cf5a
|
def spawn_modify_review_ui(self, parent_xid, iconname, datadir, review_id, callback): <NEW_LINE> <INDENT> cmd = [os.path.join(datadir, RNRApps.MODIFY_REVIEW), "--parent-xid", "%s" % parent_xid, "--iconname", iconname, "--datadir", "%s" % datadir, "--review-id", "%s" % review_id, ] <NEW_LINE> spawn_helper = SpawnHelper(format="json") <NEW_LINE> spawn_helper.connect("data-available", self._on_modify_review_finished, review_id, callback) <NEW_LINE> spawn_helper.connect("error", self._on_modify_review_error, review_id, callback) <NEW_LINE> spawn_helper.run(cmd)
|
this spawns the UI for modifying an existing review and
updates it automatically in the reviews DB
|
625941bcde87d2750b85fc73
|
def main(): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> print() <NEW_LINE> print("\t\tMenu of Options\n\n" "\t\t1) Send a Thank You\n\n" "\t\t2) Create a Report\n\n" "\t\t3) Quit\n") <NEW_LINE> intOption = menu.menuoptions() <NEW_LINE> if intOption == 1: <NEW_LINE> <INDENT> print() <NEW_LINE> sub_menu.send_thankyou() <NEW_LINE> if menu.menureturn(strName) == "Yes": <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> elif intOption == 2: <NEW_LINE> <INDENT> print() <NEW_LINE> try: <NEW_LINE> <INDENT> sub_menu.create_report() <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print("There are no donors on the list to create a report.") <NEW_LINE> <DEDENT> if menu.menureturn(strName) == "Yes": <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> elif intOption == 3: <NEW_LINE> <INDENT> print() <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return
|
This is the main function. This will run the full program.
:return: This method does not return anything. Return is just used to clearly show where this method ends.
|
625941bca8370b7717052784
|
def put(self, name, start, end): <NEW_LINE> <INDENT> entry = self._uploadController.getEntry(name) <NEW_LINE> if entry is not None: <NEW_LINE> <INDENT> return None, 200 if self._targetController.storeFromWav(entry, start, end) else 500 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None, 404
|
Stores a new target.
:param name: the name.
:param start: start time.
:param end: end time.
:return:
|
625941bc097d151d1a222d40
|
def test_example(data_regression): <NEW_LINE> <INDENT> contents = {"contents": "Foo", "value": 11} <NEW_LINE> data_regression.check(contents)
|
Basic example
|
625941bc462c4b4f79d1d5b4
|
def gen_sinf_array(a, b, n=1000): <NEW_LINE> <INDENT> x = np.linspace(a,b,n) <NEW_LINE> def sinf(x): <NEW_LINE> <INDENT> return np.sin(1/x) <NEW_LINE> <DEDENT> sf = np.array(sinf(x)) <NEW_LINE> return (x,sf)
|
gen_sinf_array(a, b, n=1000)
Generate a discrete approximation of a sinf function, including its
domain and range, stored as numpy arrays.
Args:
a (float) : Lower bound of domain
b (float) : Upper bound of domain
n (int, optional) : Number of points in domain, defaults to 1000.
Returns:
(x, s) : Pair of numpy arrays of float64
x : [a, ..., b] Array of n equally spaced float64 between a and b
s : [s(a), ..., s(b)] Array of sinf values matched to x
|
625941bc0a366e3fb873e6fb
|
@pytest.fixture(scope="function") <NEW_LINE> def rpc_registry(): <NEW_LINE> <INDENT> _registry_dump = {**modernrpc.core.registry._registry} <NEW_LINE> yield modernrpc.core.registry <NEW_LINE> modernrpc.core.registry._registry = _registry_dump
|
An instance of internal rpc method registry, reset to its initial state after each use
|
625941bc442bda511e8be301
|
def parse_file_from_path(path: str) -> str: <NEW_LINE> <INDENT> formattedpath = format_directory_path(path) <NEW_LINE> return formattedpath.split('/')[-2]
|
Given the absolute path to a file, returns the file name only
|
625941bc0c0af96317bb80cd
|
def parse_config(file): <NEW_LINE> <INDENT> yaml_file = yaml.load(open(file)) <NEW_LINE> main.run(yaml_file)
|
Checks the config values and brings them in
:param file: The config.yml file
:return:
|
625941bc4527f215b584c33f
|
def least_squares(X, Y): <NEW_LINE> <INDENT> XT = np.transpose(X) <NEW_LINE> beta = np.dot(np.dot(inv(np.dot(XT,X)), XT), Y) <NEW_LINE> return beta
|
Calculates and returns beta for the least squares problem.
|
625941bc5f7d997b8717497e
|
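`least_squares` solves the normal equations beta = (X^T X)^-1 X^T Y and assumes `inv` was imported from `numpy.linalg`. A worked check on noiseless data from y = 1 + 2x:

```python
import numpy as np
from numpy.linalg import inv  # the snippet assumes this import

def least_squares(X, Y):
    XT = np.transpose(X)
    return np.dot(np.dot(inv(np.dot(XT, X)), XT), Y)  # beta = (X^T X)^-1 X^T Y

X = np.array([[1.0, 0.0],   # first column of ones models the intercept
              [1.0, 1.0],
              [1.0, 2.0]])
Y = np.array([1.0, 3.0, 5.0])
print(least_squares(X, Y))  # -> [1. 2.], i.e. y = 1 + 2x recovered exactly
```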
def get_total_savings_for_org(date, org_type, org_id): <NEW_LINE> <INDENT> group_by_org = get_row_grouper(org_type) <NEW_LINE> substitution_sets = get_substitution_sets() <NEW_LINE> if not substitution_sets: <NEW_LINE> <INDENT> return 0.0 <NEW_LINE> <DEDENT> totals = get_total_savings_for_org_type( db=get_db(), substitution_sets=substitution_sets, date=date, group_by_org=group_by_org, min_saving=CONFIG_MIN_SAVINGS_FOR_ORG_TYPE[org_type], practice_group_by_org=get_row_grouper(CONFIG_TARGET_PEER_GROUP), target_centile=CONFIG_TARGET_CENTILE, ) <NEW_LINE> offset = group_by_org.offsets[org_id] <NEW_LINE> return totals[offset, 0] / 100
|
Get total available savings through presentation switches for the given org
|
625941bcbaa26c4b54cb1007
|
def plot_fit(self, **kwargs): <NEW_LINE> <INDENT> import matplotlib.pyplot as plt <NEW_LINE> import seaborn as sns <NEW_LINE> figsize = kwargs.get('figsize',(10,7)) <NEW_LINE> plt.figure(figsize=figsize) <NEW_LINE> date_index = self.index[max(self.ar, self.ma):self.data.shape[0]] <NEW_LINE> mu, Y = self._model(self.latent_variables.get_z_values()) <NEW_LINE> if self.model_name2 == "Exponential": <NEW_LINE> <INDENT> values_to_plot = 1.0/self.link(mu) <NEW_LINE> <DEDENT> elif self.model_name2 == "Skewt": <NEW_LINE> <INDENT> t_params = self.transform_z() <NEW_LINE> model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_params) <NEW_LINE> m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0)) <NEW_LINE> additional_loc = (model_skewness - (1.0/model_skewness))*model_scale*m1 <NEW_LINE> values_to_plot = mu + additional_loc <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> values_to_plot = self.link(mu) <NEW_LINE> <DEDENT> plt.plot(date_index, Y, label='Data') <NEW_LINE> plt.plot(date_index, values_to_plot, label='ARIMA model', c='black') <NEW_LINE> plt.title(self.data_name) <NEW_LINE> plt.legend(loc=2) <NEW_LINE> plt.show()
|
Plots the fit of the model against the data
|
625941bc925a0f43d2549d58
|
def create(self, token, payload): <NEW_LINE> <INDENT> return self._makeRequest( "POST", self._path+self._create, token, payload )
|
Creates a content.
Args:
token (string): a previously acquired authentication token
payload (dict): the payload
Returns:
The payload as returned from the server
|
625941bc63b5f9789fde6fca
|
def compute_height(n: int, parents: list) -> int: <NEW_LINE> <INDENT> root = build_tree(n, parents) <NEW_LINE> return cal_tree_height(root)
|
Compute the maximum height of the tree by first reversing the pointers. In the original format,
each pointer points from a child to its parent (similar to the UnionFind data structure).
We first make them point from parent to children, then use a recursive call to compute
the height.
|
625941bc8e05c05ec3eea256
|
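`build_tree` and `cal_tree_height` are not shown in the record; a sketch under the usual convention (parents[i] is the parent of node i, with -1 marking the root; the helper signatures here are assumptions):

```python
def build_tree(n, parents):
    """Reverse the child->parent pointers into a parent->children list."""
    children = [[] for _ in range(n)]
    root = 0
    for child, parent in enumerate(parents):
        if parent == -1:
            root = child
        else:
            children[parent].append(child)
    return root, children

def cal_tree_height(tree):
    root, children = tree
    def height(node):
        return 1 + max((height(c) for c in children[node]), default=0)
    return height(root)

def compute_height(n, parents):
    return cal_tree_height(build_tree(n, parents))

# node 1 is the root; 1 -> {3, 4}, 4 -> {0, 2}, so the height is 3
assert compute_height(5, [4, -1, 4, 1, 1]) == 3
```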
def get_counters(content): <NEW_LINE> <INDENT> counters = [] <NEW_LINE> for counter_name, regexp in COUNTER_TYPES: <NEW_LINE> <INDENT> if re.match(regexp, content): <NEW_LINE> <INDENT> counters.append(counter_name) <NEW_LINE> <DEDENT> <DEDENT> return counters
|
Searches the HTML page for counters and returns an array of the types found.
|
625941bcc4546d3d9de72916
|
def GetPointer(self): <NEW_LINE> <INDENT> return _itkSumProjectionImageFilterPython.itkSumProjectionImageFilterID3ID2_GetPointer(self)
|
GetPointer(self) -> itkSumProjectionImageFilterID3ID2
|
625941bc26238365f5f0ed4f
|
def _instruction_8(self, arg): <NEW_LINE> <INDENT> functions = { 0x0: self._instruction_8xy0, 0x1: self._instruction_8xy1, 0x2: self._instruction_8xy2, 0x3: self._instruction_8xy3, 0x4: self._instruction_8xy4, 0x5: self._instruction_8xy5, 0x6: self._instruction_8xy6, 0x7: self._instruction_8xy7, 0xE: self._instruction_8xyE } <NEW_LINE> (arg_x, arg_y, arg_n) = nnn_format_to_xyn(arg) <NEW_LINE> functions[arg_n](arg_x, arg_y)
|
Redirect to 8xy[0-7] and 8xyE.
|
625941bc5fcc89381b1e15a1
|
def get_page(url): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return urlopen(url).read() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return None
|
simple page getter for initial pull
|
625941bc7d43ff24873a2b82
|
def test_get_library_name(self): <NEW_LINE> <INDENT> library = Library(name='TestLibrary', description='Test library', public=True, bibcode=self.stub_library.bibcode) <NEW_LINE> with self.app.session_scope() as session: <NEW_LINE> <INDENT> session.add(library) <NEW_LINE> session.commit() <NEW_LINE> session.refresh(library) <NEW_LINE> session.expunge(library) <NEW_LINE> <DEDENT> name = self.library_view.helper_library_name(library_id=library.id) <NEW_LINE> self.assertEqual(name, library.name)
|
Tests retrieval of a library name
:return: no return
|
625941bc21bff66bcd684839
|
def show_program_season(self, program, season): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> program_obj = self._vtm_go.get_program(program) <NEW_LINE> <DEDENT> except UnavailableException: <NEW_LINE> <INDENT> self._kodi.show_ok_dialog(message=self._kodi.localize(30717)) <NEW_LINE> self._kodi.end_of_directory() <NEW_LINE> return <NEW_LINE> <DEDENT> if season == -1: <NEW_LINE> <INDENT> seasons = list(program_obj.seasons.values()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> seasons = [program_obj.seasons[season]] <NEW_LINE> <DEDENT> listing = [self._menu.generate_titleitem(e) for s in seasons for e in list(s.episodes.values())] <NEW_LINE> self._kodi.show_listing(listing, 30003, content='episodes', sort=['episode', 'duration'])
|
Show the episodes of a program from the catalog
:type program: str
:type season: int
|
625941bcbe7bc26dc91cd4ea
|
def test_single_litteral(): <NEW_LINE> <INDENT> parsed = Parser().parse_clasp_output(OUTCLASP_SINGLE_LITERAL.splitlines()) <NEW_LINE> type, answer_number = next(parsed) <NEW_LINE> assert type == 'answer_number' <NEW_LINE> type, model = next(parsed) <NEW_LINE> assert next(parsed, None) is None, "there is only one model" <NEW_LINE> assert type == 'answer', "the model is an answer" <NEW_LINE> assert len(model) == 2, "only 2 atom in it" <NEW_LINE> assert model == {3, '"hello !"'}
|
Show that a string with a comma in it is handled correctly
|
625941bc8a43f66fc4b53f4d
|
@app.route('/movies') <NEW_LINE> def get_movies(): <NEW_LINE> <INDENT> movie_list = [] <NEW_LINE> genre = flask.request.args.get('genre') <NEW_LINE> start_year = flask.request.args.get('start_year', default=0, type=int) <NEW_LINE> end_year = flask.request.args.get('end_year', default=10000, type=int) <NEW_LINE> for movie in movies: <NEW_LINE> <INDENT> if genre is not None and genre != movie['genre']: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if movie['year'] < start_year: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if movie['year'] > end_year: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> movie_list.append(movie) <NEW_LINE> <DEDENT> return json.dumps(movie_list)
|
Returns the list of movies that match GET parameters:
start_year, int: reject any movie released earlier than this year
end_year, int: reject any movie released later than this year
genre: reject any movie whose genre does not match this genre exactly
If a GET parameter is absent, then any movie is treated as though
it meets the corresponding constraint. (That is, accept a movie unless
it is explicitly rejected by a GET parameter.)
|
625941bc7d847024c06be19d
|
def granularity(cases, detections): <NEW_LINE> <INDENT> if len(detections) == 0: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> detections_per_case = list() <NEW_LINE> case_index = index_annotations(cases) <NEW_LINE> det_index = index_annotations(detections) <NEW_LINE> for tref in case_index: <NEW_LINE> <INDENT> cases, detections = case_index[tref], det_index.get(tref, False) <NEW_LINE> if not detections: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> for case in cases: <NEW_LINE> <INDENT> num_dets = sum((is_overlapping(case, det) for det in detections)) <NEW_LINE> detections_per_case.append(num_dets) <NEW_LINE> <DEDENT> <DEDENT> detected_cases = sum((num_dets > 0 for num_dets in detections_per_case)) <NEW_LINE> if detected_cases == 0: <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> return sum(detections_per_case) / detected_cases
|
Granularity of the detections in detecting the plagiarism cases.
|
625941bc99fddb7c1c9de277
|
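`index_annotations` and `is_overlapping` are not in the record; under the usual PAN-style reading (cases and detections are character spans, and granularity is detections per detected case, ideally 1.0), a tiny worked example:

```python
def is_overlapping(a, b):
    """Two (start, end) spans overlap iff neither ends before the other starts."""
    return a[0] < b[1] and b[0] < a[1]

# one plagiarism case, covered by three separate detections
case = (0, 100)
detections = [(0, 30), (30, 60), (60, 100)]

detections_per_case = sum(is_overlapping(case, d) for d in detections)  # 3
detected_cases = 1
print(detections_per_case / detected_cases)  # 3.0 -- fragmented; ideal is 1.0
```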
def __init__(self, dataClass, timestep=0): <NEW_LINE> <INDENT> self.dC = dataClass <NEW_LINE> self.x = timestep
|
dataClass and timestep at which to run the dalecv2 model.
|
625941bcadb09d7d5db6c676
|
def do_compare_plots(cat7, cat7s, subdir,label, z_dla_max = 5): <NEW_LINE> <INDENT> cat7.plot_line_density(zmax=z_dla_max) <NEW_LINE> cat7s.plot_line_density(zmax=z_dla_max, label=label) <NEW_LINE> plt.legend(loc=0) <NEW_LINE> save_figure(path.join(subdir,"dndx_"+label)) <NEW_LINE> plt.clf() <NEW_LINE> cat7.plot_cddf(zmax=z_dla_max,color="blue") <NEW_LINE> cat7s.plot_cddf(zmax=z_dla_max,color="red",label=label) <NEW_LINE> plt.xlim(1e20, 1e23) <NEW_LINE> plt.ylim(1e-28, 5e-21) <NEW_LINE> plt.legend(loc=0) <NEW_LINE> save_figure(path.join(subdir, "cddf_"+label)) <NEW_LINE> plt.clf() <NEW_LINE> cat7.plot_omega_dla(zmax=z_dla_max) <NEW_LINE> cat7s.plot_omega_dla(zmax=z_dla_max, label=label) <NEW_LINE> plt.legend(loc=0) <NEW_LINE> save_figure(path.join(subdir,"omega_"+label)) <NEW_LINE> plt.clf()
|
Plots to compare two cddfs
|
625941bc460517430c394071
|
def __init__(self, items=None): <NEW_LINE> <INDENT> self._items = None <NEW_LINE> self.discriminator = None <NEW_LINE> self.items = items
|
ArrayOfEvents - a model defined in Swagger
|
625941bcd268445f265b4d53
|
def test_Cr_magnetic_moments(convcell_cr: PhonopyAtoms): <NEW_LINE> <INDENT> convcell_cr.magnetic_moments = [1, -1] <NEW_LINE> _test_phonopy_atoms(convcell_cr) <NEW_LINE> convcell_cr.magnetic_moments = None
|
Test by Cr with [1, -1] magnetic moments.
|
625941bca934411ee375157f
|
def pass_encrypt(self, data, password): <NEW_LINE> <INDENT> out = create_string_buffer(len(data) + TOX_PASS_ENCRYPTION_EXTRA_LENGTH) <NEW_LINE> tox_err_encryption = c_int() <NEW_LINE> self.libtoxencryptsave.tox_pass_encrypt(c_char_p(data), c_size_t(len(data)), c_char_p(bytes(password, 'utf-8')), c_size_t(len(password)), out, byref(tox_err_encryption)) <NEW_LINE> tox_err_encryption = tox_err_encryption.value <NEW_LINE> if tox_err_encryption == TOX_ERR_ENCRYPTION['OK']: <NEW_LINE> <INDENT> return out[:] <NEW_LINE> <DEDENT> elif tox_err_encryption == TOX_ERR_ENCRYPTION['NULL']: <NEW_LINE> <INDENT> raise ArgumentError('Some input data, or maybe the output pointer, was null.') <NEW_LINE> <DEDENT> elif tox_err_encryption == TOX_ERR_ENCRYPTION['KEY_DERIVATION_FAILED']: <NEW_LINE> <INDENT> raise RuntimeError('The crypto lib was unable to derive a key from the given passphrase, which is usually a' ' lack of memory issue. The functions accepting keys do not produce this error.') <NEW_LINE> <DEDENT> elif tox_err_encryption == TOX_ERR_ENCRYPTION['FAILED']: <NEW_LINE> <INDENT> raise RuntimeError('The encryption itself failed.')
|
Encrypts the given data with the given password.
:return: output array
|
625941bc66656f66f7cbc08f
|
def revert(self, member, *args, **kwargs): <NEW_LINE> <INDENT> LOG.warning("Reverting update member in DB " "for member id %s", member.id) <NEW_LINE> try: <NEW_LINE> <INDENT> self.member_repo.update(db_apis.get_session(), member.id, provisioning_status=constants.ERROR) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> LOG.error("Failed to update member %(member)s provisioning_status " "to ERROR due to: %(except)s", {'member': member.id, 'except': str(e)})
|
Mark the member ERROR since the update couldn't happen
:param member: The member that couldn't be updated
:returns: None
|
625941bc4c3428357757c20f
|
def get_user_perms(user, obj): <NEW_LINE> <INDENT> check = ObjectPermissionChecker(user) <NEW_LINE> return check.get_user_perms(obj)
|
Returns permissions for given user and object pair, as list of
strings, only those assigned directly for the user.
|
625941bc5e10d32532c5ee0c
|
def created_at_pretty(self, date_format=None): <NEW_LINE> <INDENT> if date_format: <NEW_LINE> <INDENT> return arlotime_strftime(self.created_at, date_format=date_format) <NEW_LINE> <DEDENT> return arlotime_strftime(self.created_at)
|
Returns the date the video was taken, formatted with `last_date_format`
|
625941bcd164cc6175782c32
|
def request(self, **request): <NEW_LINE> <INDENT> return WSGIRequest(self._base_environ(**request))
|
Construct a generic request object.
|
625941bc7d847024c06be19e
|
@admin.route('/admin/batch-mod-property',methods=['GET', 'POST']) <NEW_LINE> @login_required <NEW_LINE> def batch_mod_property(): <NEW_LINE> <INDENT> return render_template("admin/batch_mod_property.html", title="批量修改房源信息", nav_menu=get_nav_menu(current_user), tab_menu=get_tab_menu("houses", current_user, "房源"))
|
Batch-modify property listing information, one listing at a time.
|
625941bc4e4d5625662d42c1
|
def utf_char_width(string): <NEW_LINE> <INDENT> if east_asian_width(string) in WIDE_SYMBOLS: <NEW_LINE> <INDENT> return WIDE <NEW_LINE> <DEDENT> return NARROW
|
Return the width of a single character
|
625941bccc0a2c11143dcd75
|
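The constants in `utf_char_width` come from elsewhere in its module; with `east_asian_width` from the standard `unicodedata` and hypothetical stand-ins for `WIDE_SYMBOLS`/`WIDE`/`NARROW`, the behaviour is:

```python
from unicodedata import east_asian_width

WIDE_SYMBOLS = ("W", "F")  # 'Wide' and 'Fullwidth' classes (assumed constant)
WIDE, NARROW = 2, 1        # terminal cells occupied (assumed constant)

def utf_char_width(char):
    if east_asian_width(char) in WIDE_SYMBOLS:
        return WIDE
    return NARROW

assert utf_char_width("a") == 1
assert utf_char_width("漢") == 2  # CJK ideographs take two terminal cells
```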
def test_unenrollment_email_off(self): <NEW_LINE> <INDENT> course = self.course <NEW_LINE> url = reverse('instructor_dashboard', kwargs={'course_id': course.id}) <NEW_LINE> response = self.client.post(url, {'action': 'Unenroll multiple students', 'multiple_students': 'student0@test.com student1@test.com'}) <NEW_LINE> self.assertContains(response, '<td>student0@test.com</td>') <NEW_LINE> self.assertContains(response, '<td>student1@test.com</td>') <NEW_LINE> self.assertContains(response, '<td>un-enrolled</td>') <NEW_LINE> user = User.objects.get(email='student0@test.com') <NEW_LINE> ce = CourseEnrollment.objects.filter(course_id=course.id, user=user) <NEW_LINE> self.assertEqual(0, len(ce)) <NEW_LINE> user = User.objects.get(email='student1@test.com') <NEW_LINE> ce = CourseEnrollment.objects.filter(course_id=course.id, user=user) <NEW_LINE> self.assertEqual(0, len(ce)) <NEW_LINE> self.assertEqual(len(mail.outbox), 0)
|
Test un-enrollment with email notifications off
|
625941bc3317a56b86939b4d
|
def parse_sib_byte(sib_byte): <NEW_LINE> <INDENT> sib_byte = hexlify(sib_byte) <NEW_LINE> scale = (int(sib_byte, 16) >> 6) & 7 <NEW_LINE> index = (int(sib_byte, 16) >> 3) & 7 <NEW_LINE> base = int(sib_byte, 16) & 7 <NEW_LINE> return [scale, index, base]
|
Takes a SIB byte and parses it into the SCALE, INDEX, and BASE fields.
Args:
sib_byte (byte): The SIB byte to be parsed
Returns:
fieldList (list): The SCALE, INDEX, and BASE fields as a list in that
order.
|
625941bc63d6d428bbe443d4
|
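A SIB byte packs SCALE into bits 7-6, INDEX into bits 5-3, and BASE into bits 2-0, which is exactly what the shifts and masks above extract. One worked byte:

```python
from binascii import hexlify

def parse_sib_byte(sib_byte):
    value = int(hexlify(sib_byte), 16)
    scale = (value >> 6) & 7  # only 2 bits survive the shift, so & 7 is safe
    index = (value >> 3) & 7
    base = value & 7
    return [scale, index, base]

# 0x5C = 0b01_011_100: scale=0b01 (x2), index=0b011, base=0b100
assert parse_sib_byte(b"\x5c") == [1, 3, 4]
```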
def create_groups(group_ids, corpus, word, ignore_corpus, ignore_word): <NEW_LINE> <INDENT> def attach_corpus_fn(group, corpus, ignore): <NEW_LINE> <INDENT> selected = None <NEW_LINE> len_corpus = len(corpus) <NEW_LINE> while not selected: <NEW_LINE> <INDENT> c = corpus[randrange(0, len_corpus - 1)].values()[0] <NEW_LINE> if c != ignore: <NEW_LINE> <INDENT> selected = c <NEW_LINE> <DEDENT> <DEDENT> yield (group, selected) <NEW_LINE> <DEDENT> def attach_word_fn(group, words, ignore): <NEW_LINE> <INDENT> selected = None <NEW_LINE> len_words = len(words) <NEW_LINE> while not selected: <NEW_LINE> <INDENT> c = words[randrange(0, len_words - 1)].values()[0] <NEW_LINE> if c != ignore: <NEW_LINE> <INDENT> selected = c <NEW_LINE> <DEDENT> <DEDENT> yield group + (selected,) <NEW_LINE> <DEDENT> return (group_ids | df.FlatMap( 'attach corpus', attach_corpus_fn, AsList(corpus), AsSingleton(ignore_corpus)) | df.FlatMap( 'attach word', attach_word_fn, AsIter(word), AsSingleton(ignore_word)))
|
Generate groups given the input PCollections.
|
625941bc76d4e153a657ea15
|
def verify_date(prompt, date_format): <NEW_LINE> <INDENT> success = False <NEW_LINE> while not success: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> result = datetime.datetime.strptime(input(prompt), date_format) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print("Please enter the date in a {} format.".format(user_friendly_date(date_format))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> success = True <NEW_LINE> <DEDENT> <DEDENT> return result
|
Verifies input as a date with a specified format
|
625941bca79ad161976cc02a
|
def build_server(): <NEW_LINE> <INDENT> log("Instalando build-essential e outros pacotes", yellow) <NEW_LINE> sudo("apt -y install build-essential automake") <NEW_LINE> sudo("apt -y install libxml2-dev libxslt-dev") <NEW_LINE> sudo( "apt -y install libjpeg-dev libjpeg8-dev zlib1g-dev libfreetype6 libfreetype6-dev" ) <NEW_LINE> try: <NEW_LINE> <INDENT> sudo("ln -s /usr/lib/x86_64-linux-gnu/libfreetype.so /usr/lib/") <NEW_LINE> sudo("ln -s /usr/lib/x86_64-linux-gnu/libz.so /usr/lib/") <NEW_LINE> sudo("ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib/") <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass
|
Install build-essential and other important packages on the server
|
625941bcd58c6744b4257b46
|