| code (string, 4–4.48k chars) | docstring (string, 1–6.45k chars) | _id (string, 24 chars) |
|---|---|---|
def getFlyoutActionList(self): <NEW_LINE> <INDENT> allActionsList = [] <NEW_LINE> subControlAreaActionList =[] <NEW_LINE> subControlAreaActionList.append(self.exitModeAction) <NEW_LINE> subControlAreaActionList.extend(self._subControlAreaActionList) <NEW_LINE> allActionsList.extend(subControlAreaActionList) <NEW_LINE> commandActionLists = [] <NEW_LINE> for i in range(len(subControlAreaActionList)): <NEW_LINE> <INDENT> lst = [] <NEW_LINE> commandActionLists.append(lst) <NEW_LINE> <DEDENT> params = (subControlAreaActionList, commandActionLists, allActionsList) <NEW_LINE> return params
|
Returns a tuple that contains the mode-specific action lists
added in the flyout toolbar of the mode.
The CommandToolbar._createFlyoutToolBar method calls this.
@return: params: A tuple that contains 3 lists:
(subControlAreaActionList, commandActionLists, allActionsList)
|
625941bb004d5f362079a1fd
|
def change_priority(self, item, priority): <NEW_LINE> <INDENT> for eid, elem in enumerate(self.heap): <NEW_LINE> <INDENT> if elem[2] == item: <NEW_LINE> <INDENT> self.heap[eid] = (priority, self.count, item) <NEW_LINE> self.count += 1 <NEW_LINE> heapq.heapify(self.heap) <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> raise ValueError("Error: " + str(item) + " is not in the PriorityQueue.")
|
Change the priority of the given item to the specified value. If
the item is not in the queue, a ValueError is raised.
(PriorityQueue, object, int) -> None
|
625941bb63f4b57ef0000fe8
|
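The queue class this method belongs to is not shown; a minimal harness (the scaffolding and `push` method below are assumptions, not the original code) illustrates the `(priority, count, item)` entry shape it relies on:

```python
# Minimal sketch of the assumed PriorityQueue: heap entries are
# (priority, insertion_count, item) tuples; `count` breaks ties.
import heapq

class PriorityQueue:
    def __init__(self):
        self.heap = []
        self.count = 0

    def push(self, item, priority):
        heapq.heappush(self.heap, (priority, self.count, item))
        self.count += 1

    def change_priority(self, item, priority):
        # as in the cell above: rewrite the matching entry, then re-heapify
        for eid, elem in enumerate(self.heap):
            if elem[2] == item:
                self.heap[eid] = (priority, self.count, item)
                self.count += 1
                heapq.heapify(self.heap)
                return
        raise ValueError("Error: " + str(item) + " is not in the PriorityQueue.")

q = PriorityQueue()
q.push("task-a", 5)
q.push("task-b", 1)
q.change_priority("task-a", 0)
print(heapq.heappop(q.heap)[2])  # -> task-a
```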
def nearest_image_contains(self, ds_type, ra, dec, radius, f_name): <NEW_LINE> <INDENT> if f_name is None: <NEW_LINE> <INDENT> f_name = self._config["IMG_DEFAULT_FILTER"] <NEW_LINE> <DEDENT> return self.pg_nearest_image_contains(ds_type, ra, dec, radius, f_name)
|
Find the nearest image containing the given [ra, dec], within the given radius, for the given filter name.
Parameters
----------
ds_type : str
the dataset type.
ra : degree
dec : degree
radius : arcsec
f_name : str
the filter name.
Returns
-------
r: [(...),(...),...]
the result of the SQL query.
|
625941bbac7a0e7691ed3fa0
|
def refresh(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self._network = self._provider.azure_client.get_network(self.id) <NEW_LINE> self._state = self._network.provisioning_state <NEW_LINE> <DEDENT> except (CloudError, ValueError) as cloudError: <NEW_LINE> <INDENT> log.exception(cloudError.message) <NEW_LINE> self._state = 'unknown'
|
Refreshes the state of this network by re-querying the cloud provider
for its latest state.
|
625941bbb545ff76a8913ce5
|
@with_setup(pretest, posttest) <NEW_LINE> def test_pandas_leave(): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> from numpy.random import randint <NEW_LINE> import pandas as pd <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> raise SkipTest <NEW_LINE> <DEDENT> with closing(StringIO()) as our_file: <NEW_LINE> <INDENT> df = pd.DataFrame(randint(0, 100, (1000, 6))) <NEW_LINE> tqdm.pandas(file=our_file, leave=True, ascii=True) <NEW_LINE> df.groupby(0).progress_apply(lambda x: None) <NEW_LINE> our_file.seek(0) <NEW_LINE> exres = '100%|##########| 101/101' <NEW_LINE> if exres not in our_file.read(): <NEW_LINE> <INDENT> our_file.seek(0) <NEW_LINE> raise AssertionError("\nExpected:\n{0}\nIn:{1}\n".format( exres, our_file.read()))
|
Test pandas with `leave=True`
|
625941bb92d797404e304050
|
def do_show(self, args): <NEW_LINE> <INDENT> args = args.split() <NEW_LINE> if len(args) == 0: <NEW_LINE> <INDENT> print("** class name missing **") <NEW_LINE> return <NEW_LINE> <DEDENT> if len(args) == 1: <NEW_LINE> <INDENT> print("** instance id missing **") <NEW_LINE> return <NEW_LINE> <DEDENT> if args[0] not in HBNBCommand.valid_classes: <NEW_LINE> <INDENT> print("** class doesn't exist **") <NEW_LINE> return <NEW_LINE> <DEDENT> all_objs = storage.all() <NEW_LINE> for objs_id in all_objs.keys(): <NEW_LINE> <INDENT> if objs_id == args[1] and args[0] in str(type(all_objs[objs_id])): <NEW_LINE> <INDENT> print(all_objs[objs_id]) <NEW_LINE> return <NEW_LINE> <DEDENT> <DEDENT> print("** no instance found **")
|
Shows the __dict__ of an instance.
Usage: show <ClassName> <id>
Arguments must be given in order.
**Arguments**
ClassName: name of the class
id: unique id of the instance
|
625941bbbe383301e01b5353
|
def find_set(self, value): <NEW_LINE> <INDENT> for s in self.sets: <NEW_LINE> <INDENT> if value in s: <NEW_LINE> <INDENT> return s
|
Return the set that value belongs to
|
625941bb5510c4643540f2b4
|
def prekini_igralce(self): <NEW_LINE> <INDENT> logging.debug("prekinjam igralce") <NEW_LINE> if self.igralec_beli: self.igralec_beli.prekini() <NEW_LINE> if self.igralec_crni: self.igralec_crni.prekini()
|
Tell the players that they must stop thinking.
|
625941bb15baa723493c3e3a
|
def date_to_days(date, peak_date): <NEW_LINE> <INDENT> months = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31, } <NEW_LINE> year1, month1, day1 = date_helper(date) <NEW_LINE> year2, month2, day2 = date_helper(peak_date) <NEW_LINE> month2 = int(month2) <NEW_LINE> month1 = int(month1) <NEW_LINE> day2 = int(day2) <NEW_LINE> day1 = int(day1) <NEW_LINE> if month1 == month2: <NEW_LINE> <INDENT> days_apart = day2 - day1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> months_apart_list = [i for i in range(month1 + 1, month2)] <NEW_LINE> days_apart = months[month1] - day1 <NEW_LINE> for month in months_apart_list: <NEW_LINE> <INDENT> days_apart += months[month] <NEW_LINE> <DEDENT> days_apart += day2 <NEW_LINE> <DEDENT> return days_apart
|
Determines how many days apart two dates are.
|
625941bb15fb5d323cde09d3
|
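`date_to_days` walks a fixed month-length table and ignores leap years; a standard-library comparison shows the exact value it approximates (`date_helper` is assumed to split a date into year/month/day):

```python
# Sanity check with the standard library: exact day difference for
# same-year dates, which the month table above only approximates.
from datetime import date

print((date(2020, 3, 10) - date(2020, 1, 5)).days)  # -> 65 (2020 is a leap year)
```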
def StationGroup_ConstCast(*args): <NEW_LINE> <INDENT> return _DataModel.StationGroup_ConstCast(*args)
|
ConstCast(BaseObject o) -> StationGroup
StationGroup_ConstCast(Seiscomp::Core::BaseObjectCPtr o) -> StationGroup
|
625941bb82261d6c526ab36a
|
def mag_from_meanmueff(meanmueff, R_e, q=1): <NEW_LINE> <INDENT> return meanmueff - 2.5*numpy.log10(2*numpy.pi*(R_e**2)*q)
|
meanmueff: mean surface brightness within R_e
R_e: R_e in arcsec
q: axis ratio, q=1 for circularized R_e
|
625941bb63d6d428bbe443b7
|
def lifo_memoize(maxsize=128, ttl=0): <NEW_LINE> <INDENT> return LIFOCache(maxsize=maxsize, ttl=ttl).memoize()
|
Like :func:`memoize` except it uses :class:`.LIFOCache`.
|
625941bb5fdd1c0f98dc00f9
|
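A usage sketch, assuming `lifo_memoize` from the cell above is importable; the decorated function caches one result per distinct argument and evicts in last-in-first-out order once `maxsize` is exceeded:

```python
@lifo_memoize(maxsize=2, ttl=0)
def expensive(x):
    print("computing", x)
    return x * x

expensive(3)  # prints "computing 3", returns 9
expensive(3)  # returns 9 from the LIFO cache, prints nothing
```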
def _on_tree_item_activated(self, event): <NEW_LINE> <INDENT> expanded, node, object = self._get_node_data(event.GetItem()) <NEW_LINE> if node.activated(object) is True: <NEW_LINE> <INDENT> if self.factory.on_activated is not None: <NEW_LINE> <INDENT> self.ui.evaluate(self.factory.on_activated, object) <NEW_LINE> self._veto = True <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self._veto = True <NEW_LINE> <DEDENT> self.activated = object <NEW_LINE> self.dclick = object
|
Handles a tree item being activated.
|
625941bbbe383301e01b5354
|
def SerializeExclusiveData(self, writer): <NEW_LINE> <INDENT> self.PublicKey.Serialize(writer, True)
|
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
|
625941bb32920d7e50b28095
|
def btn_drop_foreign_key_constraints(self) -> None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> for foreign_key in self.get_foreign_keys: <NEW_LINE> <INDENT> self._drop_constraint( self.selected_schema, table=foreign_key.table, constraint=foreign_key.name, ) <NEW_LINE> self._drop_constraint( self.selected_schema, table=foreign_key.referenced_table, constraint=( "{table}_{column}_unique".format( table=foreign_key.referenced_table, column=foreign_key.referenced_column, ) ), ) <NEW_LINE> <DEDENT> <DEDENT> except Exception as err: <NEW_LINE> <INDENT> self.log(message=err, log_level=2, push=True) <NEW_LINE> raise
|
Delete constraints in the selected schema.
|
625941bbdd821e528d63b072
|
def set_name(self, host, old_name): <NEW_LINE> <INDENT> self.logger.debug('MySalt\'s set_name was called with host=%s and old_name=%s' % (host, old_name)) <NEW_LINE> return 0
|
Just say 'did it!'
|
625941bb66656f66f7cbc072
|
def test(self, svm, cluster_model, k, des_option = constants.ORB_FEAT_OPTION, is_interactive=True): <NEW_LINE> <INDENT> isTrain = False <NEW_LINE> des_name = constants.ORB_FEAT_NAME if des_option == constants.ORB_FEAT_OPTION else constants.SIFT_FEAT_NAME <NEW_LINE> print("Getting global descriptors for the testing set...") <NEW_LINE> start = time.time() <NEW_LINE> x, y, cluster_model= self.get_data_and_labels(self.dataset.get_test_set(), cluster_model, k, des_name,isTrain,des_option) <NEW_LINE> end = time.time() <NEW_LINE> start = time.time() <NEW_LINE> result = svm.predict(x) <NEW_LINE> end = time.time() <NEW_LINE> self.log.predict_time(end - start) <NEW_LINE> mask = result == y <NEW_LINE> correct = np.count_nonzero(mask) <NEW_LINE> accuracy = (correct * 100.0 / result.size) <NEW_LINE> self.log.accuracy(accuracy) <NEW_LINE> return result, y
|
Gets the descriptors for the testing set and uses the svm given as a parameter to predict all the elements.
Args:
codebook (NumPy matrix): Each row is a center of a codebook of Bag of Words approach.
svm (cv2.SVM): The Support Vector Machine obtained in the training phase.
des_option (integer): The option of the feature that is going to be used as local descriptor.
is_interactive (boolean): If it is, the user can choose to load files or generate them.
Returns:
NumPy float array: The result of the predictions made.
NumPy float array: The real labels for the testing set.
|
625941bb4d74a7450ccd408a
|
def parse_level_2_expression(self): <NEW_LINE> <INDENT> root = self.parse_primary_expression() <NEW_LINE> while self.get_token_type() in (TokenType.TRA, TokenType.POW): <NEW_LINE> <INDENT> if self.get_token_type() == TokenType.TRA: <NEW_LINE> <INDENT> token = self.pop_token() <NEW_LINE> root = ASTNode(n_type=ASTNodeType.UOP_EXP, n_text=token.get_text(), n_line=token.get_line(), children=[root]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> token = self.pop_token() <NEW_LINE> child = self.parse_level_4_expression(next_level=self.parse_primary_expression) <NEW_LINE> root = ASTNode(n_type=ASTNodeType.BOP_EXP, n_text=token.get_text(), n_line=token.get_line(), children=[root, child]) <NEW_LINE> <DEDENT> <DEDENT> return root
|
unary postfix operators: .' '
binary operators (left-associative): .^ ^
|
625941bb004d5f362079a1fe
|
def __init__(self, n=3, r=1., m=1., hw=10, granularity=5, res=32, t=1, init_v_factor=0.18, friction_coefficient=0, seed=None, sprites=False, use_colors=False, drift=False): <NEW_LINE> <INDENT> super().__init__( n, r, m, hw, granularity, res, t, init_v_factor, friction_coefficient, seed, sprites, use_colors) <NEW_LINE> self.G = 0.5 <NEW_LINE> self.K1 = self.G <NEW_LINE> self.K2 = 1
|
Initialise arguments of parent class.
|
625941bb0a50d4780f666d57
|
def get_gap(self, tol: float = 0.001, abs_tol: bool = False, spin: Spin = None): <NEW_LINE> <INDENT> (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin) <NEW_LINE> return max(cbm - vbm, 0.0)
|
Expects a DOS object and finds the gap.
Args:
tol: tolerance in occupations for determining the gap
abs_tol: An absolute tolerance (True) or a relative one (False)
spin: Possible values are None - finds the gap in the summed
densities, Up - finds the gap in the up spin channel,
Down - finds the gap in the down spin channel.
Returns:
gap in eV
|
625941bb293b9510aa2c3161
|
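A hedged usage sketch in the pymatgen style the docstring suggests; `dos` is assumed to be an already-computed DOS object from a calculation:

```python
from pymatgen.electronic_structure.core import Spin

gap_total = dos.get_gap(tol=0.001)   # gap in the summed densities
gap_up = dos.get_gap(spin=Spin.up)   # gap in the spin-up channel only
print("band gap: %.3f eV" % gap_total)
```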
def update(self): <NEW_LINE> <INDENT> self.x += (self.al_settings.alien_speed_factor * self.al_settings.fleet_direction) <NEW_LINE> self.rect.x = self.x
|
Move the alien to the left or to the right.
|
625941bb7c178a314d6ef322
|
def predict(self, feature_batch): <NEW_LINE> <INDENT> self._current_batch = feature_batch <NEW_LINE> results = [next(self._predictions) for _ in range(len(feature_batch))] <NEW_LINE> return results
|
Predict labels for the given features.
feature_batch must be a sequence of feature dictionaries.
|
625941bbcc40096d6159581a
|
def translate(self, vector, memo=None): <NEW_LINE> <INDENT> if memo is None: <NEW_LINE> <INDENT> memo = {} <NEW_LINE> <DEDENT> return type(self)(n.translate(vector, memo) for n in self)
|
Translate region in given direction
Parameters
----------
vector : iterable of float
Direction in which region should be translated
memo : dict or None
Dictionary used for memoization. This parameter is used internally
and should not be specified by the user.
Returns
-------
openmc.Region
Translated region
|
625941bb4527f215b584c323
|
def test_nouns_more_common_in_plural(self): <NEW_LINE> <INDENT> self.assertTrue(3000 < len(self.nouns_more_common_in_plural) < 3800)
|
There are about 3400 of them.
|
625941bb956e5f7376d70d40
|
def max_pooling_step(self, x, dim): <NEW_LINE> <INDENT> return tf.nn.max_pool(x, ksize=dim, strides=[1, 1, 1, 1], padding="VALID")
|
Max pooling step with specified dimensions.
|
625941bb2ae34c7f2600cffa
|
def calculate_twiss(M): <NEW_LINE> <INDENT> if M[0,1]>0: <NEW_LINE> <INDENT> phi = np.arccos(0.5*np.matrix.trace(M)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> phi = np.arccos(0.5*np.matrix.trace(M))+np.pi <NEW_LINE> <DEDENT> alpha = (M[0,0]-M[1,1])/(2*np.sin(phi)) <NEW_LINE> beta = M[0,1]/np.sin(phi) <NEW_LINE> gamma = -M[1,0]/np.sin(phi) <NEW_LINE> print(phi) <NEW_LINE> return alpha, beta, gamma
|
Input the 2D transform matrix and output Twiss parameters in X and Y.
|
625941bb3cc13d1c6d3c724b
|
def recv_timeout(the_socket, timeout=4): <NEW_LINE> <INDENT> the_socket.setblocking(0) <NEW_LINE> total_data = [] <NEW_LINE> data = '' <NEW_LINE> begin = time.time() <NEW_LINE> while True: <NEW_LINE> <INDENT> if total_data and time.time()-begin > timeout: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> elif time.time()-begin > timeout: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> data = the_socket.recv(8192) <NEW_LINE> if data: <NEW_LINE> <INDENT> total_data.append(data.decode()) <NEW_LINE> begin = time.time() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> time.sleep(0.1) <NEW_LINE> <DEDENT> <DEDENT> except BaseException as exc: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> return ''.join(total_data)
|
Attempt to listen for data until the specified timeout is reached.
Returns:
str - Data received over socket
|
625941bbbe7bc26dc91cd4cd
|
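A usage sketch with the standard library; the function keeps reading until `timeout` seconds pass with no new data:

```python
import socket

sock = socket.create_connection(("example.com", 80))
sock.sendall(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
reply = recv_timeout(sock, timeout=2)  # drains the response, then times out
print(reply[:80])
sock.close()
```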
def convertLoanFields(self, fields): <NEW_LINE> <INDENT> return self.convert(fields, self.loanFieldName2IndexOld, self.loanFieldName2IndexNew)
|
Converts a loan agreement table field list: filters out non-reserved fields and arranges the reserved fields in the order of the reserved-field index.
Args:
fields (list): the field list
Returns:
list: the converted field list
|
625941bb91f36d47f21ac3b7
|
def peek_token(self, tokens, token, index, direction): <NEW_LINE> <INDENT> j = index + direction <NEW_LINE> while 0 <= j < len(tokens): <NEW_LINE> <INDENT> tok1 = tokens[j] <NEW_LINE> if tok1.type == Token.T_SPECIAL and tok1.value == ";": <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> elif tok1.type == Token.T_SPECIAL and tok1.value == ",": <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> elif tok1.type == Token.T_NEWLINE: <NEW_LINE> <INDENT> j += direction <NEW_LINE> <DEDENT> elif tok1.type == Token.T_KEYWORD and tok1.value not in ("true", "false", "null", "undefined", "this", "new", "function", "function*", "class"): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return j <NEW_LINE> <DEDENT> <DEDENT> return None
|
return the index of the token that would be returned by consume
|
625941bbd99f1b3c44c6745e
|
def testBasicVector(self): <NEW_LINE> <INDENT> size = 20 <NEW_LINE> init_var_np = np.zeros(size) <NEW_LINE> grad_np = np.random.rand(size) <NEW_LINE> grad_np_2 = np.random.rand(size) <NEW_LINE> with self.test_session() as sess: <NEW_LINE> <INDENT> global_step = variables.Variable(0, dtype=dtypes.int64) <NEW_LINE> var = variables.Variable(init_var_np, dtype=dtypes.float32) <NEW_LINE> grad = constant_op.constant(grad_np, dtype=dtypes.float32) <NEW_LINE> grad_2 = constant_op.constant(grad_np_2, dtype=dtypes.float32) <NEW_LINE> opt = shampoo.ShampooOptimizer(global_step) <NEW_LINE> update = opt.apply_gradients(zip([grad], [var]), global_step=global_step) <NEW_LINE> update_2 = opt.apply_gradients(zip([grad_2], [var]), global_step=global_step) <NEW_LINE> variables.global_variables_initializer().run() <NEW_LINE> init_val = sess.run(var) <NEW_LINE> self.assertAllCloseAccordingToType(init_var_np, init_val) <NEW_LINE> update.run() <NEW_LINE> new_val = sess.run(var) <NEW_LINE> mat_g = np.outer(grad_np, grad_np) <NEW_LINE> mat_h = np_power(mat_g + 0.1 * np.eye(size), -0.5) <NEW_LINE> new_val_np = init_var_np - np.dot(mat_h, grad_np) <NEW_LINE> self.assertAllCloseAccordingToType(new_val_np, new_val, atol=TOLERANCE, rtol=TOLERANCE) <NEW_LINE> update_2.run() <NEW_LINE> new_val = sess.run(var) <NEW_LINE> mat_g += np.outer(grad_np_2, grad_np_2) <NEW_LINE> mat_h = np_power(mat_g + 0.1 * np.eye(size), -0.5) <NEW_LINE> new_val_np -= np.dot(mat_h, grad_np_2) <NEW_LINE> self.assertAllCloseAccordingToType(new_val_np, new_val, atol=TOLERANCE, rtol=TOLERANCE)
|
Similar to the full Adagrad update.
|
625941bb4f88993c3716bf34
|
def cleanup_server(self, client): <NEW_LINE> <INDENT> server = self._servers[client] <NEW_LINE> server.stop() <NEW_LINE> del self._servers[client]
|
Called by a proxied client to notify the LocalForwarder that it is shutting
down, so that its corresponding server can also be shut down.
|
625941bbec188e330fd5a66d
|
def update_one(self, filter, update, upsert=False, bypass_document_validation=False, collation=None, array_filters=None, hint=None, session=None): <NEW_LINE> <INDENT> common.validate_is_mapping("filter", filter) <NEW_LINE> common.validate_ok_for_update(update) <NEW_LINE> common.validate_list_or_none('array_filters', array_filters) <NEW_LINE> write_concern = self._write_concern_for(session) <NEW_LINE> return UpdateResult( self._update_retryable( filter, update, upsert, check_keys=False, write_concern=write_concern, bypass_doc_val=bypass_document_validation, collation=collation, array_filters=array_filters, hint=hint, session=session), write_concern.acknowledged)
|
Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{'x': 1, '_id': 0}
{'x': 1, '_id': 1}
{'x': 1, '_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{'x': 4, '_id': 0}
{'x': 1, '_id': 1}
{'x': 1, '_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``. This option is only supported on MongoDB 3.2 and above.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. This option is only
supported on MongoDB 3.6 and above.
- `hint` (optional): An index to use to support the query
predicate specified either by its string name, or in the same
format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). This option is only supported on
MongoDB 4.2 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. versionchanged:: 3.11
Added ``hint`` parameter.
.. versionchanged:: 3.9
Added the ability to accept a pipeline as the ``update``.
.. versionchanged:: 3.6
Added the ``array_filters`` and ``session`` parameters.
.. versionchanged:: 3.4
Added the ``collation`` option.
.. versionchanged:: 3.2
Added ``bypass_document_validation`` support.
.. versionadded:: 3.0
|
625941bb460517430c394055
|
def _missing_sum(self, document): <NEW_LINE> <INDENT> return sum(0.25 * square(self.idf[word]) for word in document)
|
The sum of the squares of the tfidf of every word in the document,
computed with tf == 0.5, i.e. with a term frequency of 0.
|
625941bbf8510a7c17cf95c3
|
def extract_table(bsobject): <NEW_LINE> <INDENT> table = str(bsobject) <NEW_LINE> data = [] <NEW_LINE> for i in range(len(table)): <NEW_LINE> <INDENT> if table[i:i+8] == '<td><': <NEW_LINE> <INDENT> for k in range(100): <NEW_LINE> <INDENT> if table[i+8+k:i+8+k+1] == '*': <NEW_LINE> <INDENT> data.append('0') <NEW_LINE> break <NEW_LINE> <DEDENT> elif table[i+8+k:i+8+k+3] == '<i>': <NEW_LINE> <INDENT> data.append(table[i+8:i+8+k].strip()) <NEW_LINE> for j in range(10): <NEW_LINE> <INDENT> if table[i+8+k+3+j:i+8+k+3+j+1] == '*': <NEW_LINE> <INDENT> data[-1] = '0' <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> break <NEW_LINE> <DEDENT> elif table[i+8+k:i+8+k+5] == '</td>': <NEW_LINE> <INDENT> data.append(table[i+8:i+8+k].strip()) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif table[i:i+5] == '<td>~': <NEW_LINE> <INDENT> for k in range(100): <NEW_LINE> <INDENT> if table[i+5+k:i+5+k+1] == '*': <NEW_LINE> <INDENT> data.append('0') <NEW_LINE> break <NEW_LINE> <DEDENT> elif table[i+5+k:i+5+k+3] == '<i>': <NEW_LINE> <INDENT> data.append(table[i+5:i+5+k].strip()) <NEW_LINE> for j in range(10): <NEW_LINE> <INDENT> if table[i+5+k+3+j:i+5+k+3+j+1] == '*': <NEW_LINE> <INDENT> data[-1] = '0' <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> break <NEW_LINE> <DEDENT> elif table[i+5+k:i+5+k+5] == '</td>': <NEW_LINE> <INDENT> data.append(table[i+5:i+5+k].strip()) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif table[i:i+4] == '<td>': <NEW_LINE> <INDENT> for k in range(100): <NEW_LINE> <INDENT> if table[i+4+k:i+4+k+1] == '*': <NEW_LINE> <INDENT> data.append('0') <NEW_LINE> break <NEW_LINE> <DEDENT> elif table[i+4+k:i+4+k+3] == '<i>': <NEW_LINE> <INDENT> data.append(table[i+4:i+4+k].strip()) <NEW_LINE> for j in range(10): <NEW_LINE> <INDENT> if table[i+4+k+3+j:i+4+k+3+j+1] == '*': <NEW_LINE> <INDENT> data[-1] = '0' <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> break <NEW_LINE> <DEDENT> elif table[i+4+k:i+4+k+5] == '</td>': <NEW_LINE> <INDENT> data.append(table[i+4:i+4+k].strip()) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return(data,table)
|
extract_table takes the bsobject and extracts all data in the table.
|
625941bb925a0f43d2549d3c
|
def GetToolSticky(self, tool_id): <NEW_LINE> <INDENT> item = self.FindTool(tool_id) <NEW_LINE> if not item: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> return item.sticky
|
Returns whether the toolbar item identified by `tool_id` has a sticky behaviour or not.
:param integer `tool_id`: the :class:`AuiToolBarItem` identifier.
|
625941bb925a0f43d2549d3d
|
def test_filllike_v01(function_name='test_filllike_v01'): <NEW_LINE> <INDENT> size = 5 <NEW_LINE> arr1 = N.arange(0, size) <NEW_LINE> ns=env.globalns(function_name) <NEW_LINE> p1 = ns.defparameter( 'w1', central=1.0, sigma=0.1 ) <NEW_LINE> points1 = C.Points( arr1 ) <NEW_LINE> with ns: <NEW_LINE> <INDENT> ws = C.WeightedSum(['w1'], [points1.points.points]) <NEW_LINE> <DEDENT> ws.print() <NEW_LINE> print() <NEW_LINE> dbg1 = R.DebugTransformation('wsum') <NEW_LINE> dbg1.debug.source(ws.sum.sum) <NEW_LINE> flvalue = 2.0 <NEW_LINE> fl = C.FillLike(flvalue) <NEW_LINE> fl.fill.inputs[0](dbg1.debug.target) <NEW_LINE> dbg2 = R.DebugTransformation('fill') <NEW_LINE> dbg2.debug.source(fl.fill.outputs[0]) <NEW_LINE> data=dbg2.debug.target.data() <NEW_LINE> print('data:', data) <NEW_LINE> print() <NEW_LINE> compare_filllike(data, [flvalue]*size, 'Data output failed') <NEW_LINE> print('Change parameter') <NEW_LINE> p1.set(-1.0) <NEW_LINE> taintflag = dbg2.debug.tainted() <NEW_LINE> data=dbg2.debug.target.data() <NEW_LINE> print('data:', data) <NEW_LINE> print('taintflag:', taintflag) <NEW_LINE> compare_filllike(data, [flvalue]*size, 'Data output failed') <NEW_LINE> compare_filllike(taintflag, False, 'Taintflag should be false')
|
Initialize inputs
|
625941bb7d43ff24873a2b66
|
def save(**params): <NEW_LINE> <INDENT> user_id, game_id, prediction = get_many(params, [], ['user_id', 'game_id', 'prediction']) <NEW_LINE> game = Session.query(Game).get(game_id) <NEW_LINE> time_now = datetime.datetime.utcnow() <NEW_LINE> if game.game_time < time_now: <NEW_LINE> <INDENT> raise ValueError("You cannot make a prediction for this game anymore.") <NEW_LINE> <DEDENT> q = Session.query(Prediction) <NEW_LINE> q = q.filter(Prediction.user_id==user_id) <NEW_LINE> q = q.filter(Prediction.game_id==game_id) <NEW_LINE> p = q.first() <NEW_LINE> if not p: <NEW_LINE> <INDENT> p = Prediction() <NEW_LINE> p.user_id = user_id <NEW_LINE> p.game_id = game_id <NEW_LINE> <DEDENT> p.prediction = prediction <NEW_LINE> Session.add(p) <NEW_LINE> Session.commit() <NEW_LINE> return p
|
:param user_id
:param game_id
:param prediction
|
625941bb9b70327d1c4e0c9c
|
@app.route('/groups/<group_name>', methods=['DELETE']) <NEW_LINE> @auth_required <NEW_LINE> def delete_group(group_name): <NEW_LINE> <INDENT> group = _get_group_by_name(group_name) <NEW_LINE> if not group.can_edit(identity.current.user): <NEW_LINE> <INDENT> raise Forbidden403('Cannot edit group') <NEW_LINE> <DEDENT> if group.is_protected_group(): <NEW_LINE> <INDENT> raise BadRequest400("Group '%s' is predefined and cannot be deleted" % group.group_name) <NEW_LINE> <DEDENT> if group.jobs: <NEW_LINE> <INDENT> raise BadRequest400('Cannot delete a group which has associated jobs') <NEW_LINE> <DEDENT> for rule in group.system_access_policy_rules: <NEW_LINE> <INDENT> rule.record_deletion() <NEW_LINE> <DEDENT> pools = SystemPool.query.filter_by(owning_group=group) <NEW_LINE> for pool in pools: <NEW_LINE> <INDENT> pool.change_owner(user=identity.current.user, service=u'HTTP') <NEW_LINE> <DEDENT> session.delete(group) <NEW_LINE> activity = Activity(identity.current.user, u'HTTP', u'Removed', u'Group', group.display_name) <NEW_LINE> session.add(activity) <NEW_LINE> return '', 204
|
Deletes a group.
:status 204: Group was successfully deleted.
:status 400: Group cannot be deleted because it is a predefined group, or
because it has associated jobs.
|
625941bbc4546d3d9de728fa
|
def _rune_expected_efficiency(self, include_grind): <NEW_LINE> <INDENT> primary_score_12 = self._forecast_primary_score(exp_level=12) <NEW_LINE> primary_score_15 = self._forecast_primary_score(exp_level=15) <NEW_LINE> innate_score = self._compute_innate_score() <NEW_LINE> substats_roll_score = self._compute_roll_score(include_grind) <NEW_LINE> owned_stat_upgrade_score = self._forecast_owned_stat_upgrade_score() <NEW_LINE> new_stat_upgrade_score = self._forecast_new_stat_upgrade_score() <NEW_LINE> exp_eff_at_12 = Rune._compute_final_score(primary_score_12, innate_score, substats_roll_score, new_stat_upgrade_score, owned_stat_upgrade_score) <NEW_LINE> exp_eff_at_15 = Rune._compute_final_score(primary_score_15, innate_score, substats_roll_score, new_stat_upgrade_score, owned_stat_upgrade_score) <NEW_LINE> return exp_eff_at_12, exp_eff_at_15
|
Find the rune's expected efficiency at +12 (4 times roll) and at +15.
:param include_grind: if set True, applied grind will be counted in efficiency
:type include_grind: bool
:return: the rune's expected efficiency at +12 and at +15
:rtype: (float, float)
|
625941bb3c8af77a43ae3666
|
def java_pids(self, match): <NEW_LINE> <INDENT> cmd = """jcmd | awk '/%s/ { print $1 }'""" % match <NEW_LINE> return [int(pid) for pid in self.ssh_capture(cmd, allow_fail=True)]
|
Get all the Java process IDs matching 'match'.
:param match: The AWK expression to match
|
625941bbd58c6744b4257b29
|
def test_stinespring_compose_other_reps(self): <NEW_LINE> <INDENT> chan = Stinespring(self.UI) <NEW_LINE> self._check_compose_other_reps(chan)
|
Test compose of Stinespring works with other reps.
|
625941bb656771135c3eb734
|
def _process_done(self, result): <NEW_LINE> <INDENT> self._current_deferred = None <NEW_LINE> self._try_dispatch_top() <NEW_LINE> return result
|
Called when a calculation completes. Returns the result value unchanged.
|
625941bbd6c5a10208143f11
|
def create_super_user(self, superuser_name: str, superuser_email: str, superuser_password: str, project_id: str, instance_name: str, cloud_sql_proxy_path: str = 'cloud_sql_proxy', region: str = 'us-west1', port: Optional[int] = 5432): <NEW_LINE> <INDENT> with self.with_cloud_sql_proxy(project_id, instance_name, cloud_sql_proxy_path, region, port): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> from django.contrib.auth.models import User <NEW_LINE> User.objects.create_superuser( username=superuser_name, email=superuser_email, password=superuser_password) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> raise crash_handling.UserError( 'Not able to create super user.') from e
|
Create a super user in the cloud sql database.
This function should be called after we did the following:
1. Generated the Django project source files.
2. Setup Django environment so that it is using configuration files
of the newly generated project.
3. Created the Cloud SQL instance and database user.
4. Migrated database. Otherwise the schema for superuser does not
exist.
Args:
superuser_name: Name of the super user you want to create.
superuser_email: Email of the super user you want to create.
superuser_password: Password of the super user you want to create.
project_id: GCP project id.
instance_name: The Cloud SQL instance name in which you want to
create the super user.
cloud_sql_proxy_path: The command to run your cloud sql proxy.
region: The region the Cloud SQL instance is in.
port: The port being forwarded by cloud sql proxy.
|
625941bbfbf16365ca6f6086
|
def run_checkers(segment): <NEW_LINE> <INDENT> segment = slice_checker(segment) <NEW_LINE> segment = unique_checker(segment) <NEW_LINE> segment = pair_checker(segment) <NEW_LINE> segment = hidden_pairs(segment) <NEW_LINE> return segment
|
Run the various slice-based checkers. These all check 9 cells at once, either in squares, columns or rows.
|
625941bb23849d37ff7b2f5a
|
def get_pid_hash(problem, short=False): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> problem_schema(problem) <NEW_LINE> <DEDENT> except MultipleInvalid as e: <NEW_LINE> <INDENT> logger.critical("Error validating problem object!") <NEW_LINE> logger.critical(e) <NEW_LINE> raise FatalException <NEW_LINE> <DEDENT> input = "{}-{}-{}-{}".format( problem["name"], problem["author"], problem["organization"], problem["event"] ) <NEW_LINE> output = md5(input.encode("utf-8")).hexdigest() <NEW_LINE> if short: <NEW_LINE> <INDENT> return output[:7] <NEW_LINE> <DEDENT> return output
|
Returns a hash of a given problem.
Args:
problem: a valid problem object.
short: shorten the return value (first 7 characters)
Returns:
Hex digest of the MD5 hash
|
625941bb6e29344779a624de
|
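The digest is just an MD5 over the four dash-joined fields; a worked example (the problem values here are made up) reproduces both forms:

```python
from hashlib import md5

problem = {"name": "rsa-1", "author": "alice",
           "organization": "ctf", "event": "2023"}
raw = "{}-{}-{}-{}".format(problem["name"], problem["author"],
                           problem["organization"], problem["event"])
digest = md5(raw.encode("utf-8")).hexdigest()
print(digest)      # full hash
print(digest[:7])  # the short=True form
```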
def WritePresAb(paDic, outfile): <NEW_LINE> <INDENT> with open(outfile, 'w') as out: <NEW_LINE> <INDENT> for k in paDic.keys(): <NEW_LINE> <INDENT> if k != 'loci': <NEW_LINE> <INDENT> out.write('%s\t%s\n' %(k, ' '.join(paDic[k]))) <NEW_LINE> <DEDENT> <DEDENT> out.write('\nList of loci:\n%s' % ' '.join(paDic['loci']))
|
Writes the presence/absence matrix (paDic) to a text file
|
625941bb9c8ee82313fbb63d
|
def __init__( self, *, id: Optional[str] = None, offer: Optional[str] = None, publisher: Optional[str] = None, sku: Optional[str] = None, version: Optional[str] = None, **kwargs ): <NEW_LINE> <INDENT> super(ImageReference, self).__init__(**kwargs) <NEW_LINE> self.id = id <NEW_LINE> self.offer = offer <NEW_LINE> self.publisher = publisher <NEW_LINE> self.sku = sku <NEW_LINE> self.version = version <NEW_LINE> self.exact_version = None
|
:keyword id: Image resource ID.
:paramtype id: str
:keyword offer: The image offer if applicable.
:paramtype offer: str
:keyword publisher: The image publisher.
:paramtype publisher: str
:keyword sku: The image SKU.
:paramtype sku: str
:keyword version: The image version specified on creation.
:paramtype version: str
|
625941bbcc0a2c11143dcd59
|
def to_json(self, strip=None): <NEW_LINE> <INDENT> prep = { "token": self.token, "refresh_token": self.refresh_token, "token_uri": self.token_uri, "client_id": self.client_id, "client_secret": self.client_secret, "scopes": self.scopes, } <NEW_LINE> if self.expiry: <NEW_LINE> <INDENT> prep["expiry"] = self.expiry.isoformat() + "Z" <NEW_LINE> <DEDENT> prep = {k: v for k, v in prep.items() if v is not None} <NEW_LINE> if strip is not None: <NEW_LINE> <INDENT> prep = {k: v for k, v in prep.items() if k not in strip} <NEW_LINE> <DEDENT> return json.dumps(prep)
|
Utility function that creates a JSON representation of a Credentials
object.
Args:
strip (Sequence[str]): Optional list of members to exclude from the
generated JSON.
Returns:
str: A JSON representation of this instance. When converted into
a dictionary, it can be passed to from_authorized_user_info()
to create a new credential instance.
|
625941bb1f5feb6acb0c4a1d
|
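A usage sketch, assuming `creds` is an instance of the Credentials class above; `strip` lets you drop secret fields before persisting:

```python
blob = creds.to_json(strip=["token", "client_secret"])
with open("authorized_user.json", "w") as fh:
    fh.write(blob)
```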
def launch(self): <NEW_LINE> <INDENT> self.second = tkinter.Toplevel() <NEW_LINE> self.second.geometry('200x200')
|
Method that opens a new window.
|
625941bbd164cc6175782c16
|
def h_ss_pmz_consv_k(Nx, Ny, kx, ky, l): <NEW_LINE> <INDENT> mat = _lib.k_h_ss_pmz(Nx, Ny, kx, ky, l) <NEW_LINE> with CoordMatrix(mat) as coordmat: <NEW_LINE> <INDENT> H = coordmat.to_csr() <NEW_LINE> <DEDENT> return H
|
construct the H_pmz matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
|
625941bbcad5886f8bd26eab
|
def format_sent(s: str) -> str: <NEW_LINE> <INDENT> s = caps(s) <NEW_LINE> if s and s[-1].isalnum(): <NEW_LINE> <INDENT> return s + '.' <NEW_LINE> <DEDENT> return s
|
this is a test -> This is a test.
|
625941bb7047854f462a12d5
|
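A self-contained sketch; `caps` is not shown in the cell, so it is assumed here to upper-case the first letter:

```python
def caps(s: str) -> str:
    return s[:1].upper() + s[1:]

def format_sent(s: str) -> str:
    s = caps(s)
    if s and s[-1].isalnum():
        return s + '.'
    return s

print(format_sent("this is a test"))  # -> This is a test.
print(format_sent("already done!"))   # -> Already done! (no extra period)
```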
def post(server, topic, username, token, https_verify): <NEW_LINE> <INDENT> url = "https://%s/v1/topic/%s" % (server, topic) <NEW_LINE> data = json.dumps([{ "id": str(uuid.uuid4()), "session": SESSION, "schema": SCHEMA, "version": 1, "data": { "timestamp": int(time.time()), "hostname": HOSTNAME, "foo": "bar", "bar": 42 * random.random() } }]) <NEW_LINE> return requests.post(url, auth=(username, token), headers={"content-type": "application/json"}, data=data, verify=https_verify)
|
Post a single message to the API.
|
625941bb38b623060ff0acb8
|
def client_add(self, commands): <NEW_LINE> <INDENT> self.progress("gpsfake: client_add()\n") <NEW_LINE> newclient = gps.gps(port=self.port, verbose=self.verbose) <NEW_LINE> self.append(newclient) <NEW_LINE> newclient.id = self.client_id + 1 <NEW_LINE> self.client_id += 1 <NEW_LINE> self.progress("gpsfake: client %d has %s\n" % (self.client_id,newclient.device)) <NEW_LINE> if commands: <NEW_LINE> <INDENT> self.initialize(newclient, commands) <NEW_LINE> <DEDENT> return self.client_id
|
Initiate a client session and force connection to a fake GPS.
|
625941bb31939e2706e4cd37
|
def send(self, d): <NEW_LINE> <INDENT> if not d: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> req_id = self.proto.put(d) <NEW_LINE> self.req_ids[req_id] = d.get('name', '?') <NEW_LINE> return req_id
|
@return the request id
|
625941bb3617ad0b5ed67dc2
|
def play_alarm(ringtone, current_vol): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> subprocess.call(["ffplay", "-nodisp", "-loglevel", "quiet", "-autoexit", ringtone]) <NEW_LINE> <DEDENT> <DEDENT> except KeyboardInterrupt: <NEW_LINE> <INDENT> set_volume(current_vol)
|
ffplay: Portable Media Play using FFmpeg library & SDL library
-nodisp: Disable graphical display
-loglevel quiet: Set logging level & flags to quiet mode (display nothing)
-autoexit: Exit when file is done playing
|
625941bb5510c4643540f2b5
|
def clean(self): <NEW_LINE> <INDENT> duplicate = False <NEW_LINE> actors = [] <NEW_LINE> for form in self.forms: <NEW_LINE> <INDENT> if form.cleaned_data: <NEW_LINE> <INDENT> name = form.cleaned_data['name'] <NEW_LINE> surname = form.cleaned_data['surname'] <NEW_LINE> actors_dict = {"name": name, "surname": surname} <NEW_LINE> duplicate = duplicate or actors_dict in actors <NEW_LINE> actors.append(actors_dict) <NEW_LINE> <DEDENT> <DEDENT> if duplicate: <NEW_LINE> <INDENT> raise forms.ValidationError('No se pueden agregar actores repetidos')
|
Validations for the DayFunction form set
|
625941bb7047854f462a12d6
|
def heapsort_test(c, branch): <NEW_LINE> <INDENT> print("starting heapSort test") <NEW_LINE> t = time.process_time() <NEW_LINE> heapsort(c, branch) <NEW_LINE> print("Heapsort took %f seconds" % ((time.process_time()-t))) <NEW_LINE> print("====") <NEW_LINE> return c
|
Test for heap sort. Prints time taken and returns
a sorted array.
|
625941bb50812a4eaa59c1ee
|
def match_alphabet(self, pattern): <NEW_LINE> <INDENT> s = {} <NEW_LINE> for char in pattern: <NEW_LINE> <INDENT> s[char] = 0 <NEW_LINE> <DEDENT> for i in xrange(len(pattern)): <NEW_LINE> <INDENT> s[pattern[i]] |= 1 << (len(pattern) - i - 1) <NEW_LINE> <DEDENT> return s
|
Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
|
625941bb283ffb24f3c557d5
|
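The returned dict is the Bitap character mask: each character gets one bit per position it occupies, counted from the right end of the pattern. A Python 3 sketch of the same construction (the original uses Python 2's `xrange`):

```python
def match_alphabet(pattern):
    s = {}
    for i, char in enumerate(pattern):
        # bit 0 corresponds to the last position in the pattern
        s[char] = s.get(char, 0) | (1 << (len(pattern) - i - 1))
    return s

print(match_alphabet("aba"))  # {'a': 5, 'b': 2}, i.e. 0b101 and 0b010
```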
def test_content_and_content_comments_extractor_blocks(self): <NEW_LINE> <INDENT> content = content_extractor.analyze(self._html, blocks=True) <NEW_LINE> content_comments = content_comments_extractor.analyze( self._html, blocks=True) <NEW_LINE> passed_content = False <NEW_LINE> passed_content_comments = False <NEW_LINE> for i in xrange(5): <NEW_LINE> <INDENT> actual_content, actual_content_comments = content_and_content_comments_extractor.analyze( self._html, blocks=True) <NEW_LINE> passed_content = ( [blk.text for blk in actual_content] == [blk.text for blk in content] ) <NEW_LINE> passed_content_comments = ( [blk.text for blk in actual_content_comments] == [blk.text for blk in content_comments] ) <NEW_LINE> if passed_content and passed_content_comments: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> self.assertTrue(passed_content) <NEW_LINE> self.assertTrue(passed_content_comments)
|
The content and content/comments extractor should return proper blocks
|
625941bb2eb69b55b151c775
|
def update(self): <NEW_LINE> <INDENT> import mpd <NEW_LINE> try: <NEW_LINE> <INDENT> self.status = self.client.status() <NEW_LINE> self.currentsong = self.client.currentsong() <NEW_LINE> <DEDENT> except (mpd.ConnectionError, BrokenPipeError, ValueError): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.client.disconnect() <NEW_LINE> <DEDENT> except mpd.ConnectionError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> self.client.connect(self.server, self.port) <NEW_LINE> if self.password is not None: <NEW_LINE> <INDENT> self.client.password(self.password) <NEW_LINE> <DEDENT> self.status = self.client.status() <NEW_LINE> self.currentsong = self.client.currentsong()
|
Get the latest data and update the state.
|
625941bbab23a570cc250049
|
def proximalIteration(y, gamma, A, Z, b): <NEW_LINE> <INDENT> options['show_progress'] = False <NEW_LINE> n, r = A.shape <NEW_LINE> P = 2 * gamma * spdiag(list(ones(n)) + [0]) <NEW_LINE> q = hstack((-2. * gamma * y.T, array([1.]))) <NEW_LINE> G = hstack((A.T, -1.*ones((r, 1)))) <NEW_LINE> h = array([ai.dot(zi) - bi for bi, ai, zi in zip(b, A.T, Z.T)]) <NEW_LINE> resdict = coneqp(P, matrix(q), G=matrix(G), h=matrix(h)) <NEW_LINE> return array(resdict['x'])[:-1].reshape(n)
|
Columns of A give gradients of linear functions having the values in b
at the locations of the columns of Z. The maximum of these functions is F,
and G is the squared distance from y scaled by gamma.
Then return the minimum of F+G.
|
625941bb7b180e01f3dc46cd
|
def save(self, force_insert=False, force_update=False): <NEW_LINE> <INDENT> if not self.id: <NEW_LINE> <INDENT> if self.user: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> Rating.objects.get(target_ct=self.target_ct, target_id=self.target_id, user=self.user) <NEW_LINE> return <NEW_LINE> <DEDENT> except Rating.DoesNotExist: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> elif (self.ip_address and Rating.objects.filter( target_ct=self.target_ct, target_id=self.target_id, user__isnull=True, ip_address=self.ip_address , time__gte=(self.time or datetime.now()) - timedelta(seconds=MINIMAL_ANONYMOUS_IP_DELAY) ).count() > 0): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> <DEDENT> super(Rating, self).save(force_insert, force_update)
|
Modified save() method that checks for duplicate entries.
|
625941bbd164cc6175782c17
|
def path_to_tones(path, tempo=120, chord_types=None, root_octave=0, double_root=False, equal_temperament=False, timings=False): <NEW_LINE> <INDENT> envelope = piano_envelope() <NEW_LINE> sample_rate = DEFAULT_SAMPLE_RATE <NEW_LINE> beat_length = 60.0 / tempo <NEW_LINE> if timings: <NEW_LINE> <INDENT> root_times = path <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> time = Fraction(0) <NEW_LINE> root_times = [] <NEW_LINE> for root,length in path: <NEW_LINE> <INDENT> root_times.append((root,time)) <NEW_LINE> time += length <NEW_LINE> <DEDENT> <DEDENT> def _root_at_time(time): <NEW_LINE> <INDENT> current_root = root_times[0][0] <NEW_LINE> for root,rtime in root_times[1:]: <NEW_LINE> <INDENT> if rtime > time: <NEW_LINE> <INDENT> return current_root <NEW_LINE> <DEDENT> current_root = root <NEW_LINE> <DEDENT> return current_root <NEW_LINE> <DEDENT> if chord_types is None: <NEW_LINE> <INDENT> chord_types = [('prime',length) for __,length in path] <NEW_LINE> <DEDENT> if equal_temperament: <NEW_LINE> <INDENT> _pitch_ratio = tonal_space_et_pitch <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> _pitch_ratio = tonal_space_pitch_2d <NEW_LINE> <DEDENT> matrix = ToneMatrix(sample_rate=sample_rate) <NEW_LINE> time = Fraction(0) <NEW_LINE> for ctype,length in chord_types: <NEW_LINE> <INDENT> coord = _root_at_time(time) <NEW_LINE> pitch_ratio = _pitch_ratio(coord) <NEW_LINE> duration = beat_length * float(length) <NEW_LINE> if not equal_temperament and coordinate_to_et_2d(coord) == 0 and pitch_ratio > 1.5: <NEW_LINE> <INDENT> pitch_ratio /= 2.0 <NEW_LINE> <DEDENT> tone = SineChordEvent(220*pitch_ratio, chord_type=ctype, duration=duration, envelope=envelope, root_octave=root_octave, root_weight=1.2, double_root=double_root) <NEW_LINE> matrix.add_tone(beat_length * float(time), tone) <NEW_LINE> time += length <NEW_LINE> <DEDENT> return matrix
|
Takes a tonal space path, given as a list of coordinates, and
generates the tones of the roots.
@type path: list of (3d-coordinate,length) tuples
@param path: coordinates of the points in the sequence and the length
of each, in beats
@type tempo: int
@param tempo: speed in beats per minute (Maelzel's metronome)
@type chord_types: list of (string,length)
@param chord_types: the type of chord to use for each tone and the
time spent on that chord type, in beats. See
L{CHORD_TYPES} keys for possible values.
@type equal_temperament: bool
@param equal_temperament: render all the pitches as they would be
played in equal temperament.
@rtype: L{ToneMatrix}
@return: a tone matrix that can be used to render the sound
|
625941bb4e696a04525c9315
|
def test_codec_merge_remove(self): <NEW_LINE> <INDENT> merge_codec = codec.PytorchCodec('a') <NEW_LINE> new_codec, del_labels = self.o2o_codec.merge(merge_codec) <NEW_LINE> self.assertEqual(del_labels, {2}) <NEW_LINE> self.assertEqual(new_codec.c2l, {'a': [1]})
|
Test merging of a codec removing code points
|
625941bb92d797404e304052
|
def write(self, *a, **kw): <NEW_LINE> <INDENT> self.response.out.write(*a, **kw)
|
Writes directly to the http request
|
625941bb31939e2706e4cd38
|
def buddyStrings(self, A, B): <NEW_LINE> <INDENT> index = [] <NEW_LINE> for i, v in enumerate(zip(A,B)): <NEW_LINE> <INDENT> if v[0] != v[1]: <NEW_LINE> <INDENT> index.append(i) <NEW_LINE> <DEDENT> <DEDENT> if len(set(A)) == len(set(B)) == 1 and len(A) == len(B) >= 2: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif (len(index) == 2) and (A[index[0]] == B[index[1]] and A[index[1]] == B[index[0]]): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif len(index) > 2: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> elif (len(A) == len(B)) and (len(set(A)) == len(set(B)) != len(B)): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
|
:type A: str
:type B: str
:rtype: bool
|
625941bb56ac1b37e626409e
|
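Expected behaviour on the classic cases (the `Solution` wrapper class is hypothetical): two strings are buddies when swapping exactly one pair of letters in A yields B.

```python
sol = Solution()
print(sol.buddyStrings("ab", "ba"))  # True  ('a' and 'b' swap)
print(sol.buddyStrings("ab", "ab"))  # False (any swap would change the string)
print(sol.buddyStrings("aa", "aa"))  # True  (swap the two equal letters)
```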
def get_unique_bitcoin_addresses(transaction_list): <NEW_LINE> <INDENT> bitcoin_addresses = [] <NEW_LINE> for tx in transaction_list: <NEW_LINE> <INDENT> for addr in tx['vout'][0]['scriptPubKey']['addresses']: <NEW_LINE> <INDENT> if addr not in bitcoin_addresses: <NEW_LINE> <INDENT> bitcoin_addresses.append(addr) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return tuple(bitcoin_addresses)
|
Simple function to return a tuple of all unique
bitcoin addresses from a transaction list
:param transaction_list:
:return:
|
625941bbe5267d203edcdb69
|
def loadLocal(self, local=None): <NEW_LINE> <INDENT> data = self.keep.loadLocalData() <NEW_LINE> if data and self.keep.verifyLocalData(data): <NEW_LINE> <INDENT> self.local = lotting.Lot(stack=self, uid=data['uid'], name=data['name'], ha=data['ha']) <NEW_LINE> self.name = self.local.name <NEW_LINE> <DEDENT> elif local: <NEW_LINE> <INDENT> local.stack = self <NEW_LINE> self.local = local <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.local = lotting.Lot(stack=self)
|
Load self.local from the keep file if present, else from the given local, else create a new one
|
625941bb498bea3a759b9979
|
def _build_examplars(self, loader, n_examplars=None): <NEW_LINE> <INDENT> n_examplars = n_examplars or self._m <NEW_LINE> lo, hi = self._task * self._task_size, self._n_classes <NEW_LINE> print("Building examplars for classes {} -> {}.".format(lo, hi)) <NEW_LINE> for class_idx in range(lo, hi): <NEW_LINE> <INDENT> loader.dataset.set_classes_range(class_idx, class_idx) <NEW_LINE> self._examplars[class_idx] = self._build_class_examplars(loader, n_examplars)
|
Builds new examplars.
:param loader: A DataLoader.
:param n_examplars: Maximum number of examplars to create.
|
625941bbb7558d58953c4de3
|
def abvcalc_main(): <NEW_LINE> <INDENT> import argparse <NEW_LINE> parser = argparse.ArgumentParser() <NEW_LINE> parser.add_argument('og', type=float, help='Original Gravity') <NEW_LINE> parser.add_argument('fg', type=float, help='Final Gravity') <NEW_LINE> args = parser.parse_args() <NEW_LINE> abv = 100. * abv_calc(args.og, args.fg) <NEW_LINE> att = 100.0 * attenuation(args.og, args.fg) <NEW_LINE> print('{0:.02f}% ABV'.format(abv)) <NEW_LINE> print('{0:.0f}% Attenuation'.format(att))
|
Entry point for abvcalc command line script.
|
625941bb8e71fb1e9831d676
|
def arrange_by_x(self, points): <NEW_LINE> <INDENT> points.sort(key = lambda point : point.x)
|
sort the points by their x co-ordinate, in ascending order
|
625941bb3d592f4c4ed1cf47
|
def test_assert_not_equal_values(self): <NEW_LINE> <INDENT> self.assertNotEqual(__, 1 + 1)
|
Sometimes we will ask you to fill in the values
|
625941bb377c676e91272074
|
def fetch_query(opts): <NEW_LINE> <INDENT> q = { "sessionID": opts.sessionID, "minutes": 1440, "maxCount": 1 } <NEW_LINE> url = Defaults.LatestGlucose_url + '?' + urllib.urlencode(q) <NEW_LINE> return url
|
Build the api query for the data fetch
|
625941bb6fece00bbac2d605
|
def _grab_impl(self, monitor): <NEW_LINE> <INDENT> core = self.core <NEW_LINE> rect = CGRect( (monitor["left"], monitor["top"]), (monitor["width"], monitor["height"]) ) <NEW_LINE> image_ref = core.CGWindowListCreateImage(rect, 1, 0, 0) <NEW_LINE> if not image_ref: <NEW_LINE> <INDENT> raise ScreenShotError("CoreGraphics.CGWindowListCreateImage() failed.") <NEW_LINE> <DEDENT> width = core.CGImageGetWidth(image_ref) <NEW_LINE> height = core.CGImageGetHeight(image_ref) <NEW_LINE> prov = copy_data = None <NEW_LINE> try: <NEW_LINE> <INDENT> prov = core.CGImageGetDataProvider(image_ref) <NEW_LINE> copy_data = core.CGDataProviderCopyData(prov) <NEW_LINE> data_ref = core.CFDataGetBytePtr(copy_data) <NEW_LINE> buf_len = core.CFDataGetLength(copy_data) <NEW_LINE> raw = ctypes.cast(data_ref, POINTER(c_ubyte * buf_len)) <NEW_LINE> data = bytearray(raw.contents) <NEW_LINE> bytes_per_row = core.CGImageGetBytesPerRow(image_ref) <NEW_LINE> bytes_per_pixel = core.CGImageGetBitsPerPixel(image_ref) <NEW_LINE> bytes_per_pixel = (bytes_per_pixel + 7) // 8 <NEW_LINE> if bytes_per_pixel * width != bytes_per_row: <NEW_LINE> <INDENT> cropped = bytearray() <NEW_LINE> for row in range(height): <NEW_LINE> <INDENT> start = row * bytes_per_row <NEW_LINE> end = start + width * bytes_per_pixel <NEW_LINE> cropped.extend(data[start:end]) <NEW_LINE> <DEDENT> data = cropped <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> if prov: <NEW_LINE> <INDENT> core.CGDataProviderRelease(prov) <NEW_LINE> <DEDENT> if copy_data: <NEW_LINE> <INDENT> core.CFRelease(copy_data) <NEW_LINE> <DEDENT> <DEDENT> return self.cls_image(data, monitor, size=Size(width, height))
|
Retrieve all pixels from a monitor. Pixels have to be RGB.
|
625941bb15fb5d323cde09d4
|
def pipeline_name(self, name): <NEW_LINE> <INDENT> if name is None: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> self._pipeline_name = name
|
Set the user defined name for the pipeline this node belongs to
|
625941bbfb3f5b602dac3559
|
def cache(fn): <NEW_LINE> <INDENT> cache_dict = {} <NEW_LINE> def fn_cached(arg0): <NEW_LINE> <INDENT> if arg0 in cache_dict: <NEW_LINE> <INDENT> return cache_dict[arg0] <NEW_LINE> <DEDENT> res = fn(arg0) <NEW_LINE> cache_dict[arg0] = res <NEW_LINE> return res <NEW_LINE> <DEDENT> return fn_cached
|
Cache a function that receives one parameter.
|
625941bbfbf16365ca6f6087
|
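Usage sketch: memoize a one-argument function with the decorator above.

```python
@cache
def slow_square(n):
    print("computing", n)
    return n * n

slow_square(4)  # prints "computing 4", returns 16
slow_square(4)  # returns 16 straight from cache_dict, prints nothing
```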
def show_both(self, reveal_card=False): <NEW_LINE> <INDENT> print(f"\n== Turn #{self.turn} ==\n") <NEW_LINE> print("The Dealer's hand is:\n") <NEW_LINE> self._dealer.show_hand(reveal_card) <NEW_LINE> print("\nYour Hand is:\n") <NEW_LINE> self._player.show_hand()
|
Prints the cards of both players.
By default one of the dealer's cards is hidden.
Args:
reveal_card (bool): reveals all the dealer's cards.
|
625941bb4a966d76dd550ed6
|
def tags_id_designs_post(self, id, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> if kwargs.get('callback'): <NEW_LINE> <INDENT> return self.tags_id_designs_post_with_http_info(id, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> (data) = self.tags_id_designs_post_with_http_info(id, **kwargs) <NEW_LINE> return data
|
Creates a new instance in designs of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.tags_id_designs_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Tag id (required)
:param Design data:
:return: Design
If the method is called asynchronously,
returns the request thread.
|
625941bbe64d504609d7470a
|
def parse_args(argv): <NEW_LINE> <INDENT> return [lib.CharArray(x) for x in argv]
|
:param argv: the system argv
:return: the argv as spl String objects
|
625941bbd7e4931a7ee9dde6
|
def remove_trade(self, trade_to_remove): <NEW_LINE> <INDENT> if trade_to_remove.stock_symbol in self.recorded_trades: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.recorded_trades[trade_to_remove.stock_symbol].remove(trade_to_remove) <NEW_LINE> if len(self.recorded_trades[trade_to_remove.stock_symbol]) == 0: <NEW_LINE> <INDENT> self.recorded_trades.pop(trade_to_remove.stock_symbol) <NEW_LINE> <DEDENT> logger.info('Removed trade for stock: {}, from stock_exchange.'.format(trade_to_remove.stock_symbol)) <NEW_LINE> return True <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> logger.warning('Could not remove trade. It was not found in the stock_exchange.') <NEW_LINE> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.warning('Could not remove trade. No trades in the stock_exchange for stock: {}.'.format( trade_to_remove.stock_symbol)) <NEW_LINE> return False
|
A method to remove a trade for a specific stock from the stock exchange.
:param trade_to_remove: The trade object to remove from the stock exchange
:type trade_to_remove: TradeRecord
:return: True || False, whether the trade was removed successfully from the stock exchange.
:rtype: bool
|
625941bb91af0d3eaac9b8df
|
@app.route('/Average-Time') <NEW_LINE> def chart_data(): <NEW_LINE> <INDENT> def generate_data(): <NEW_LINE> <INDENT> blocks_to_update = blockchain.blocks_to_update <NEW_LINE> while True: <NEW_LINE> <INDENT> if blockchain.chain[-1].index == (blocks_to_update - 1): <NEW_LINE> <INDENT> first_block_secs = blockchain.chain[-1 * blocks_to_update ].timestamp <NEW_LINE> last_block_secs = blockchain.chain[-1].timestamp <NEW_LINE> time_span_secs = last_block_secs - first_block_secs <NEW_LINE> avg_time_block= time_span_secs / (blocks_to_update - 1) <NEW_LINE> json_data = json.dumps({ 'time': datetime.now().strftime('%H:%M:%S'), 'value': avg_time_block }) <NEW_LINE> yield f"data:{json_data}\n\n" <NEW_LINE> <DEDENT> elif ((blockchain.chain[-1].index + 1) >= blocks_to_update): <NEW_LINE> <INDENT> first_block_secs = blockchain.chain[-1 * (blocks_to_update + 1)].timestamp <NEW_LINE> last_block_secs = blockchain.chain[-1].timestamp <NEW_LINE> time_span_secs = last_block_secs - first_block_secs <NEW_LINE> avg_time_block= time_span_secs / blocks_to_update <NEW_LINE> json_data = json.dumps({ 'time': datetime.now().strftime('%H:%M:%S'), 'value': avg_time_block }) <NEW_LINE> yield f"data:{json_data}\n\n" <NEW_LINE> <DEDENT> time.sleep(1) <NEW_LINE> <DEDENT> <DEDENT> return Response(generate_data(), mimetype='text/event-stream')
|
Sends a live data graph displaying the moving average mining time of the last few blocks
|
625941bbfb3f5b602dac355a
|
def remove_permission(self, perm): <NEW_LINE> <INDENT> if self.has_permission(perm): <NEW_LINE> <INDENT> self.permissions -= perm
|
Custom method for removing a permission.
|
625941bbe1aae11d1e749b7e
|
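A minimal sketch of the bit-flag pattern this method assumes; the permission names and companion methods are illustrative, not the original code:

```python
READ, WRITE, ADMIN = 0x01, 0x02, 0x04

class Role:
    def __init__(self):
        self.permissions = 0

    def has_permission(self, perm):
        # every bit of `perm` must be set
        return self.permissions & perm == perm

    def add_permission(self, perm):
        if not self.has_permission(perm):
            self.permissions += perm

    def remove_permission(self, perm):
        if self.has_permission(perm):
            self.permissions -= perm

r = Role()
r.add_permission(READ | WRITE)
r.remove_permission(WRITE)
print(r.has_permission(READ), r.has_permission(WRITE))  # True False
```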
def test_01_StoreStatus_StoreStatusModified(self): <NEW_LINE> <INDENT> value = pykerio.enums.StoreStatus(name='StoreStatusModified') <NEW_LINE> self.assertEquals(value.dump(), 'StoreStatusModified') <NEW_LINE> self.assertEquals(value.get_name(), 'StoreStatusModified') <NEW_LINE> self.assertEquals(value.get_value(), 1)
|
Test StoreStatus with StoreStatusModified
|
625941bb32920d7e50b28097
|
def has_deleted_revisions(self) -> bool: <NEW_LINE> <INDENT> if not hasattr(self, '_has_deleted_revisions'): <NEW_LINE> <INDENT> gen = self.site.deletedrevs(self, total=1, prop=['ids']) <NEW_LINE> self._has_deleted_revisions = bool(list(gen)) <NEW_LINE> <DEDENT> return self._has_deleted_revisions
|
Return True if the page has deleted revisions.
*New in version 4.2.*
|
625941bb596a897236089994
|
def add_typing_import(self, name: str) -> None: <NEW_LINE> <INDENT> self.import_tracker.require_name(name)
|
Add a name to be imported from typing, unless it's imported already.
The import will be internal to the stub.
|
625941bb099cdd3c635f0b26
|
def MA_P2O6_s_HG_FA(lipid, adduct, intensity): <NEW_LINE> <INDENT> for tail in lipid.tails: <NEW_LINE> <INDENT> if tail.type in ['Acyl']: <NEW_LINE> <INDENT> yield MA_P2O6_s_HG_FAx(lipid, adduct, intensity, MA_P2O6_s_HG_FA, tail)
|
[ MA - Headgroup + H2O - (ROOH) ]
Fragment for a 'clean' headgroup neutral loss and loss of free-fatty acid
i.e. loss of the headgroup, including phosphate, from the adducted molecular ion
Method used to generate multiple objects
|
625941bba8370b771705276b
|
def extract_hooks(config): <NEW_LINE> <INDENT> hooks = {} <NEW_LINE> config_items = config.serialize() <NEW_LINE> for section, name, value in config_items: <NEW_LINE> <INDENT> if section != 'hooks': <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> hooks[name] = value <NEW_LINE> <DEDENT> return hooks
|
Return a dictionary with the hook entries of the given config.
|
625941bb63b5f9789fde6fb0
|
@ssl_required <NEW_LINE> def aaq_step2(request, product_key): <NEW_LINE> <INDENT> return aaq(request, product_key=product_key, step=1)
|
Step 2: The product is selected.
|
625941bb7c178a314d6ef324
|
def setUp(self): <NEW_LINE> <INDENT> self.hass = get_test_home_assistant() <NEW_LINE> dt_util.set_default_time_zone(dt_util.get_time_zone('America/Regina'))
|
Set up things to be run when tests are started.
|
625941bb8a43f66fc4b53f32
|
def get_signature_revocation_lists(self, gid='', path='/attestation/sgx/v2/sigrl'): <NEW_LINE> <INDENT> path = '{}/{}'.format(path, gid) if gid else path <NEW_LINE> url = urljoin(self._ias_url, path) <NEW_LINE> LOGGER.debug("Fetching SigRL from: %s", url) <NEW_LINE> result = requests.get(url, cert=self._cert, timeout=self._timeout) <NEW_LINE> if result.status_code != requests.codes.ok: <NEW_LINE> <INDENT> LOGGER.error("get_signature_revocation_lists HTTP Error code : %d", result.status_code) <NEW_LINE> result.raise_for_status() <NEW_LINE> <DEDENT> return str(result.text)
|
@param gid: Hex, base16 encoded
@param path: URL path for sigrl request
@return: Base 64-encoded SigRL for EPID
group identified by {gid} parameter.
|
625941bb7d847024c06be183
|
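A standalone sketch of the same certificate-authenticated GET, assuming the v2 IAS SigRL path used above (the host name, gid, and certificate paths are illustrative):

import requests
from urllib.parse import urljoin

ias_url = 'https://as.sgx.trustedservices.intel.com'   # assumed host
url = urljoin(ias_url, '/attestation/sgx/v2/sigrl/00000b2d')
resp = requests.get(url, cert=('client.crt', 'client.key'), timeout=10)
resp.raise_for_status()
sigrl_b64 = resp.text   # base64-encoded SigRL; may be empty for the group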
def create_gift_card(set_configuration_account=True): <NEW_LINE> <INDENT> pool = Pool() <NEW_LINE> Company = pool.get('company.company') <NEW_LINE> GiftCard = pool.get('gift_card.gift_card') <NEW_LINE> Configuration = pool.get('gift_card.configuration') <NEW_LINE> Sequence = pool.get('ir.sequence') <NEW_LINE> Currency = pool.get('currency.currency') <NEW_LINE> Account = pool.get('account.account') <NEW_LINE> gateway = create_payment_gateway() <NEW_LINE> gateway.save() <NEW_LINE> company, = Company.search([]) <NEW_LINE> with set_company(company): <NEW_LINE> <INDENT> revenue, = Account.search([ ('type.revenue', '=', True), ]) <NEW_LINE> <DEDENT> gift_card_sequence, = Sequence.search([ ('code', '=', 'gift_card.gift_card'), ]) <NEW_LINE> with Transaction().set_context(company=company.id): <NEW_LINE> <INDENT> configuration = Configuration(1) <NEW_LINE> configuration.number_sequence = gift_card_sequence <NEW_LINE> if set_configuration_account: <NEW_LINE> <INDENT> configuration.liability_account = revenue <NEW_LINE> <DEDENT> configuration.save() <NEW_LINE> <DEDENT> currencies = Currency.search([ ('code', '=', 'USD'), ]) <NEW_LINE> if currencies: <NEW_LINE> <INDENT> usd = currencies[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> usd = create_currency('USD') <NEW_LINE> try: <NEW_LINE> <INDENT> add_currency_rate(usd, Decimal('1')) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> gift_card, = GiftCard.create([{ 'currency': usd, 'amount': Decimal('20'), }]) <NEW_LINE> return gift_card
|
Create gift card
|
625941bb1f5feb6acb0c4a1e
|
def _cpy(self, arg, immediate): <NEW_LINE> <INDENT> v = self.memory.read(arg) if not immediate else arg <NEW_LINE> self._compare(self.Y, v)
|
Sets flags as if the subtraction Y - v was performed (CPY compares the Y register). The N flag is valid iff the numbers are signed.
:return:
|
625941bb187af65679ca4fe8
|
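For context, the 6502 compare instructions (CMP/CPX/CPY) perform the subtraction without storing it and set C, Z, and N from the result; a sketch of what the _compare helper plausibly computes (the helper itself is not shown above):

def compare_flags(reg, value):
    result = (reg - value) & 0xFF
    carry = reg >= value              # C: set when no borrow is needed
    zero = reg == value               # Z: set when the operands are equal
    negative = bool(result & 0x80)    # N: bit 7 of the 8-bit result
    return carry, zero, negative

# CPY with Y=0x40 and operand 0x30 -> C=True, Z=False, N=False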
def train(self, doclist): <NEW_LINE> <INDENT> model = doc2vec.Doc2Vec(doclist, vector_size=self.vector_size, window=self.window, min_count=self.min_count, workers=self.workers) <NEW_LINE> for dummyepoch in range(self.epochs): <NEW_LINE> <INDENT> model.train(doclist, total_examples=model.corpus_count, epochs=1) <NEW_LINE> model.alpha -= 0.0002 <NEW_LINE> model.min_alpha = model.alpha <NEW_LINE> <DEDENT> return model
|
Trains a Doc2Vec model on the analysed documents.
Arguments:
doclist: list with the analysed papers (as TaggedDocument objects)
min_count: Only words that occur more than min_count will be used
epochs: The number of iterations over the data set
in order to train the model
Returns:
A doc2vec model
|
625941bb8a349b6b435e803d
|
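A hedged usage sketch with gensim (the corpus and hyperparameter values are illustrative):

from gensim.models import doc2vec

corpus = [
    doc2vec.TaggedDocument(words=['deep', 'learning', 'paper'], tags=[0]),
    doc2vec.TaggedDocument(words=['graph', 'theory', 'survey'], tags=[1]),
]
model = doc2vec.Doc2Vec(corpus, vector_size=50, window=5, min_count=1, workers=2)
vector = model.infer_vector(['deep', 'learning'])   # embed an unseen document

Manually decrementing alpha per epoch, as in the entry above, is the older gensim training idiom; recent gensim versions handle the learning-rate schedule inside a single train() call.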
def transpose(self, *dims): <NEW_LINE> <INDENT> if len(dims) == 0: <NEW_LINE> <INDENT> dims = self.dims[::-1] <NEW_LINE> <DEDENT> axes = self.get_axis_num(dims) <NEW_LINE> if len(dims) < 2: <NEW_LINE> <INDENT> return self.copy(deep=False) <NEW_LINE> <DEDENT> data = as_indexable(self._data).transpose(axes) <NEW_LINE> return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
|
Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
Although this operation returns a view of this variable's data, it is
not lazy -- the data will be fully loaded.
See Also
--------
numpy.transpose
|
625941bb94891a1f4081b972
|
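A usage sketch with xarray (the array contents are illustrative):

import numpy as np
import xarray as xr

var = xr.Variable(('x', 'y'), np.arange(6).reshape(2, 3))
by_name = var.transpose('y', 'x')   # dims ('y', 'x'), shape (3, 2)
reversed_ = var.transpose()         # no arguments: reverse all dimensions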
def get_classifier_model(model_name, num_att_layers): <NEW_LINE> <INDENT> if model_name == ModelName.text_cnn: <NEW_LINE> <INDENT> classifier = text_models.TextCNN(filter_sizes=(1, 2, 3), is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.hie_text: <NEW_LINE> <INDENT> classifier = text_models.HieText(is_trainable=True, is_primary_model=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.image_cnn: <NEW_LINE> <INDENT> classifier = image_models.ImageCNN(is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.image_cnn_v2: <NEW_LINE> <INDENT> classifier = image_models.ImageCNNV2(is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.resnet: <NEW_LINE> <INDENT> classifier = image_models.Resnet(is_trainable=True, train_last_block=False) <NEW_LINE> <DEDENT> elif model_name == ModelName.resnet_clf: <NEW_LINE> <INDENT> classifier = image_models.ResnetClf(is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.embedding_concat_semifreeze: <NEW_LINE> <INDENT> classifier = image_text_models.EmbeddingConcatWithSemiFreeze(is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.stacked_attention_with_semi_freeze_cnn: <NEW_LINE> <INDENT> classifier = image_text_models.StackedAttentionWithSemiFreezeCNN(nlayers=num_att_layers, is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.hie_co_att: <NEW_LINE> <INDENT> classifier = image_text_models.HieCoAtt(is_primary_model=True, is_trainable=True) <NEW_LINE> <DEDENT> elif model_name == ModelName.aux_task_model: <NEW_LINE> <INDENT> classifier = image_text_models.AuxTaskModel() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("Invalid model name=%s provided" % model_name) <NEW_LINE> <DEDENT> return classifier
|
Given the model name, construct the corresponding BaseModel derived object and return it
:param model_name: ModelName enum (from global_hyperparams)
:param num_att_layers: int or None, number of attention layers to be used for SAN
:return: BaseModel derived class object
:raises: ValueError, if model_name is not an implemented classifier model
|
625941bbd10714528d5ffbaa
|
def xml_control(self): <NEW_LINE> <INDENT> control_dict = self.control.copy() <NEW_LINE> jrcount = control_dict.get('jr:count') <NEW_LINE> if jrcount: <NEW_LINE> <INDENT> survey = self.get_root() <NEW_LINE> control_dict['jr:count'] = survey.insert_xpaths(jrcount) <NEW_LINE> <DEDENT> repeat_node = node(u"repeat", nodeset=self.get_xpath(), **control_dict) <NEW_LINE> for n in Section.xml_control(self): <NEW_LINE> <INDENT> repeat_node.appendChild(n) <NEW_LINE> <DEDENT> label = self.xml_label() <NEW_LINE> if label: <NEW_LINE> <INDENT> return node( u"group", self.xml_label(), repeat_node, ref=self.get_xpath() ) <NEW_LINE> <DEDENT> return node(u"group", repeat_node, ref=self.get_xpath())
|
<group>
<label>Fav Color</label>
<repeat nodeset="fav-color">
<select1 ref=".">
<label ref="jr:itext('fav')" />
<item><label ref="jr:itext('red')" /><value>red</value></item>
<item><label ref="jr:itext('green')" /><value>green</value></item>
<item><label ref="jr:itext('yellow')" /><value>yellow</value></item>
</select1>
</repeat>
</group>
|
625941bbd53ae8145f87a13f
|
def global_signal_regression(_data, _eye_mask_path): <NEW_LINE> <INDENT> eye_mask = nib.load(_eye_mask_path).get_data() <NEW_LINE> global_mask = np.array(eye_mask, dtype=bool) <NEW_LINE> regressor_map = {'constant': np.ones((_data.shape[3], 1))} <NEW_LINE> regressor_map['global'] = _data[global_mask].mean(0) <NEW_LINE> X = np.zeros((_data.shape[3], 1)) <NEW_LINE> csv_filename = '' <NEW_LINE> for rname, rval in regressor_map.items(): <NEW_LINE> <INDENT> X = np.hstack((X, rval.reshape(rval.shape[0], -1))) <NEW_LINE> csv_filename += '_' + rname <NEW_LINE> <DEDENT> X = X[:, 1:] <NEW_LINE> Y = _data[global_mask].T <NEW_LINE> B = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y) <NEW_LINE> Y_res = Y - X.dot(B) <NEW_LINE> _data[global_mask] = Y_res.T <NEW_LINE> print('GSR completed.') <NEW_LINE> return _data
|
Performs global signal regression
Parameters
----------
_data : numpy.ndarray
Data from an fMRI scan as a 4D numpy array
_eye_mask_path : str
Pathname for the eye mask NIfTI file (the standard MNI152 2mm FSL template is used for the linked preprint)
Returns
-------
_data :
4D numpy array containing fMRI data after global signal regression
|
625941bbd99f1b3c44c67460
|
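The regression step above is ordinary least squares against a constant plus the mean time series over the eye mask; a standalone sketch on synthetic data (the shapes are illustrative):

import numpy as np

T = 100                                   # timepoints
Y = np.random.rand(T, 500)                # time x masked voxels
g = Y.mean(axis=1)                        # global signal: mean over voxels
X = np.column_stack([np.ones(T), g])      # design: constant + global signal
B = np.linalg.inv(X.T @ X) @ X.T @ Y      # OLS coefficients, shape (2, voxels)
Y_res = Y - X @ B                         # data with the global signal regressed out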
def float_to_time(secs): <NEW_LINE> <INDENT> days = secs // 86400 <NEW_LINE> secs %= 86400 <NEW_LINE> hours = secs // 3600 <NEW_LINE> secs %= 3600 <NEW_LINE> mins = secs // 60 <NEW_LINE> secs %= 60 <NEW_LINE> secs = "{:05.2f}".format(secs) if (secs % 1 != 0) else "{:02.0f}".format(secs) <NEW_LINE> if days > 0: <NEW_LINE> <INDENT> return ( "{:02.0f}".format(days) + "-" + "{:02.0f}".format(hours) + ":" + "{:02.0f}".format(mins) + ":" + secs ) <NEW_LINE> <DEDENT> elif hours > 0: <NEW_LINE> <INDENT> return ( "{:02.0f}".format(hours) + ":" + "{:02.0f}".format(mins) + ":" + secs ) <NEW_LINE> <DEDENT> return "{:02.0f}".format(mins) + ":" + secs
|
Converts seconds to [dd-[hh:]]mm:ss, keeping two decimal places when the seconds are fractional
|
625941bb97e22403b379ce63
|
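A few worked values, traced by hand through the branches above:

float_to_time(65)       # -> '01:05'        (minutes and seconds only)
float_to_time(3725.5)   # -> '01:02:05.50'  (hours appear; fractional seconds kept)
float_to_time(90061)    # -> '01-01:01:01'  (days joined with '-')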
def getDetails(self, width): <NEW_LINE> <INDENT> if self._detailsCacheArgs != width: <NEW_LINE> <INDENT> self._detailsCache = self._getDetails(width) <NEW_LINE> self._detailsCacheArgs = width <NEW_LINE> <DEDENT> return self._detailsCache
|
Provides a list of (msg, attr) tuples with detailed information for this connection.
Arguments:
width - available space to display in
|
625941bb925a0f43d2549d3e
|
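The pattern above is single-slot memoization keyed on the last argument; a generic sketch of the same idea:

class SingleSlotCache:
    def __init__(self, compute):
        self._compute = compute
        self._value = None
        self._key = None

    def get(self, width):
        if self._key != width:   # recompute only when the argument changes
            self._value = self._compute(width)
            self._key = width
        return self._value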
def savePreferences(self): <NEW_LINE> <INDENT> self.preferences['voxel_coord'] = self.voxel_cb.isChecked() <NEW_LINE> self.preferences['link_mode'] = self.link_menu.currentIndex() <NEW_LINE> self.preferences['cm_under'] = self.gradient_underlay.item.name <NEW_LINE> self.preferences['cm_pos'] = self.gradient_overlay_pos.item.name <NEW_LINE> self.preferences['cm_neg'] = self.gradient_overlay_neg.item.name <NEW_LINE> self.preferences['clip_under_high'] = self.clip_cb_under_high.isChecked() <NEW_LINE> self.preferences['clip_under_low'] = self.clip_cb_under_low.isChecked() <NEW_LINE> self.preferences['clip_pos_high'] = self.clip_cb_over_high_pos.isChecked() <NEW_LINE> self.preferences['clip_pos_low'] = self.clip_cb_over_low_pos.isChecked() <NEW_LINE> self.preferences['clip_neg_high'] = self.clip_cb_over_high_neg.isChecked() <NEW_LINE> self.preferences['clip_neg_low'] = self.clip_cb_over_low_neg.isChecked() <NEW_LINE> self.preferences['interpolation'] = self.interp_menu.currentIndex() <NEW_LINE> self.preferences['res_method'] = self.method_box.currentIndex() <NEW_LINE> self.sigSaveSettings.emit() <NEW_LINE> self.close()
|
Get the values from the tools and update the preferences.
|
625941bbd6c5a10208143f12
|