| code (string, 4–4.48k chars) | docstring (string, 1–6.45k chars) | _id (string, 24 chars) |
|---|---|---|
def get_vertices(pos, angle, size): <NEW_LINE> <INDENT> theta = 360.0 - angle <NEW_LINE> diameter, height = size[0], size[1] <NEW_LINE> posx, posy = pos[0], pos[1] <NEW_LINE> temp_vertices = [] <NEW_LINE> temp_vertices.append((posx - diameter/2.0, posy + height/2.0)) <NEW_LINE> temp_vertices.append((posx + diameter/2.0, posy + height/2.0)) <NEW_LINE> temp_vertices.append((posx + diameter/2.0, posy - height/2.0)) <NEW_LINE> temp_vertices.append((posx - diameter/2.0, posy - height/2.0)) <NEW_LINE> vertices = [] <NEW_LINE> for v in temp_vertices: <NEW_LINE> <INDENT> x = np.round((v[0]-posx)*math.cos(math.radians(theta)) - (v[1]-posy)*math.sin(math.radians(theta)) + posx,decimals=2) <NEW_LINE> y = np.round((v[0]-posx)*math.sin(math.radians(theta)) + (v[1]-posy)*math.cos(math.radians(theta)) + posy,decimals=2) <NEW_LINE> vertices.append((x,y)) <NEW_LINE> <DEDENT> return vertices
|
Given position, rotation angle and size of a rectangle, returns the coordinates of the vertices
|
625941bb21bff66bcd684816
|
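A quick usage sketch of `get_vertices` above; the example values and printed output are ours, assuming `numpy` (as `np`) and `math` are imported as the snippet implies:

```python
import math
import numpy as np

# 2x4 rectangle centred at the origin, rotated by 90 degrees.
# Note theta = 360 - angle, so the rotation is clockwise by `angle`.
print(get_vertices(pos=(0, 0), angle=90, size=(2, 4)))
# [(2.0, 1.0), (2.0, -1.0), (-2.0, -1.0), (-2.0, 1.0)]
```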
def get_wait_time(self): <NEW_LINE> <INDENT> return random.randint(MIN_WAIT_TIME, MAX_WAIT_TIME)
|
@summary: interval between scan rounds for each batch of official accounts
---------
---------
@result:
|
625941bbff9c53063f47c0b6
|
def copyfile_ctf(src, dest): <NEW_LINE> <INDENT> copytree(src, dest) <NEW_LINE> file_types = ('.acq', '.eeg', '.hc', '.hist', '.infods', '.bak', '.meg4', '.newds', '.res4') <NEW_LINE> fnames = [f for f in os.listdir(dest) if f.endswith(file_types)] <NEW_LINE> bids_folder_name = op.splitext(op.split(dest)[-1])[0] <NEW_LINE> for fname in fnames: <NEW_LINE> <INDENT> ext = op.splitext(fname)[-1] <NEW_LINE> os.rename(op.join(dest, fname), op.join(dest, bids_folder_name + ext))
|
Copy and rename CTF files to a new location.
Parameters
----------
src : str | pathlib.Path
Path to the source raw .ds folder.
dest : str | pathlib.Path
Path to the destination of the new bids folder.
See Also
--------
copyfile_brainvision
copyfile_bti
copyfile_edf
copyfile_eeglab
copyfile_kit
|
625941bb3617ad0b5ed67dba
|
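A hypothetical usage sketch for `copyfile_ctf` (the paths are made up): it copies the whole `.ds` folder, then renames the CTF files inside it to match the destination folder name while preserving their extensions.

```python
copyfile_ctf('/data/raw/sub01_rest.ds',
             '/bids/sub-01/meg/sub-01_task-rest_meg.ds')
# e.g. sub01_rest.res4 inside the copy becomes sub-01_task-rest_meg.res4
```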
def SetInitialColour(List): <NEW_LINE> <INDENT> for vertex in List: <NEW_LINE> <INDENT> vertex.colornum = vertex.degree
|
"Set initial colour (e.g. degree of vertex is colour)
|
625941bb50812a4eaa59c1e6
|
def remove_punct(text): <NEW_LINE> <INDENT> return "".join (char for char in text if char not in string.punctuation)
|
This function is used to remove all punctuation
marks from a string. Spaces do not count as punctuation and should
not be removed. The function takes a string and returns a new string
which does not contain any punctuation. For example:
>>> remove_punct("Hello, World!")
'Hello World'
>>> remove_punct("-- ...Hey! -- Yes?!...")
' Hey  Yes'
>>> remove_punct(",go!So.?uTh")
'goSouTh'
|
625941bbbe383301e01b534d
|
def _build_model(self, name, hidden_layers, nodes): <NEW_LINE> <INDENT> with tf.variable_scope(name): <NEW_LINE> <INDENT> self.inputs_ = tf.placeholder(tf.float32, [None, self.state_size], name='inputs') <NEW_LINE> self.actions_ = tf.placeholder(tf.int32, [None], name='actions') <NEW_LINE> one_hot_actions = tf.one_hot(self.actions_, self.action_size) <NEW_LINE> self.targetQs_ = tf.placeholder(tf.float32, [None], name='target') <NEW_LINE> self.layers = list() <NEW_LINE> self.layers.append(fully_connected("hidden1", self.inputs_, nodes)) <NEW_LINE> for layer in range(hidden_layers): <NEW_LINE> <INDENT> self.layers.append(fully_connected(f"hidden{layer+2}", self.layers[layer], nodes)) <NEW_LINE> <DEDENT> self.output = fully_connected("output", self.layers[-1], self.action_size, activation=None) <NEW_LINE> self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1) <NEW_LINE> self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q)) <NEW_LINE> self.opt = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
|
Build the neural network structure
|
625941bbad47b63b2c509e46
|
def isValid(text): <NEW_LINE> <INDENT> return bool(re.search(r'\b(wiadomości|aktualności|informacje)\b', text, re.IGNORECASE))
|
Returns True if the input is related to the news.
Arguments:
text -- user-input, typically transcribed speech
|
625941bb009cb60464c6327c
|
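The regex keys on the Polish news words wiadomości, aktualności and informacje; a quick check (example inputs are ours, assuming `re` is imported):

```python
print(isValid('Poproszę najnowsze wiadomości'))  # True  - contains "wiadomości"
print(isValid('Zagraj jakąś muzykę'))            # False - no news keyword
```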
def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb, numTrials): <NEW_LINE> <INDENT> Pop = [] <NEW_LINE> resisPop = [] <NEW_LINE> for i in range(300): <NEW_LINE> <INDENT> Pop.append(0) <NEW_LINE> resisPop.append(0) <NEW_LINE> <DEDENT> for n in range(numTrials): <NEW_LINE> <INDENT> virusList = [] <NEW_LINE> for j in range(numViruses): <NEW_LINE> <INDENT> virusList.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb)) <NEW_LINE> <DEDENT> trePatient = TreatedPatient(virusList, maxPop) <NEW_LINE> for k in range(150): <NEW_LINE> <INDENT> Pop[k] += trePatient.update() <NEW_LINE> resisPop[k] += trePatient.getResistPop(['guttagonol']) <NEW_LINE> <DEDENT> trePatient.addPrescription('guttagonol') <NEW_LINE> for k in range(150, 300): <NEW_LINE> <INDENT> Pop[k] += trePatient.update() <NEW_LINE> resisPop[k] += trePatient.getResistPop(['guttagonol']) <NEW_LINE> <DEDENT> <DEDENT> avePop = [] <NEW_LINE> aveResisPop = [] <NEW_LINE> for m in range(300): <NEW_LINE> <INDENT> avePop.append(Pop[m]/float(numTrials)) <NEW_LINE> aveResisPop.append(resisPop[m]/float(numTrials)) <NEW_LINE> <DEDENT> pylab.plot(avePop, label = 'Average Total Pop') <NEW_LINE> pylab.plot(aveResisPop, label = 'Average Guttagonol-Resistant Pop') <NEW_LINE> pylab.xlabel('Number of Time Steps') <NEW_LINE> pylab.ylabel('Number of Virus Population') <NEW_LINE> pylab.title('simulation With Drug Additioned in the middle') <NEW_LINE> pylab.legend(loc = 0) <NEW_LINE> pylab.show()
|
Runs simulations and plots graphs for problem 5.
For each of numTrials trials, instantiates a patient, runs a simulation for
150 timesteps, adds guttagonol, and runs the simulation for an additional
150 timesteps. At the end plots the average virus population size
(for both the total virus population and the guttagonol-resistant virus
population) as a function of time.
numViruses: number of ResistantVirus to create for patient (an integer)
maxPop: maximum virus population for patient (an integer)
maxBirthProb: Maximum reproduction probability (a float between 0-1)
clearProb: maximum clearance probability (a float between 0-1)
resistances: a dictionary of drugs that each ResistantVirus is resistant to
(e.g., {'guttagonol': False})
mutProb: mutation probability for each ResistantVirus particle
(a float between 0-1).
numTrials: number of simulation runs to execute (an integer)
|
625941bb30dc7b766590182b
|
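A typical invocation sketch for this problem (the parameter values below are illustrative, not prescribed by the source; `ResistantVirus`, `TreatedPatient` and `pylab` must be available):

```python
simulationWithDrug(numViruses=100, maxPop=1000, maxBirthProb=0.1,
                   clearProb=0.05, resistances={'guttagonol': False},
                   mutProb=0.005, numTrials=100)
```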
def cutcredit(stdid, jobid): <NEW_LINE> <INDENT> pass
|
file = open('page_log', 'r') # specify file to open
data = file.readlines() # read lines in file and put into
if lastlog != data[len(data) - 1]:
lastlog = data[len(data) - 1]
print("Student ID : ", stdid, "Crdit -1 At job : ", jobid)
return True
else:
return False
# file.close() # good practice to close files after use
|
625941bbe64d504609d74701
|
def __init__( self, **kwargs ): <NEW_LINE> <INDENT> super(SuggestOptions, self).__init__(**kwargs) <NEW_LINE> self.filter = kwargs.get('filter', None) <NEW_LINE> self.use_fuzzy_matching = kwargs.get('use_fuzzy_matching', None) <NEW_LINE> self.highlight_post_tag = kwargs.get('highlight_post_tag', None) <NEW_LINE> self.highlight_pre_tag = kwargs.get('highlight_pre_tag', None) <NEW_LINE> self.minimum_coverage = kwargs.get('minimum_coverage', None) <NEW_LINE> self.order_by = kwargs.get('order_by', None) <NEW_LINE> self.search_fields = kwargs.get('search_fields', None) <NEW_LINE> self.select = kwargs.get('select', None) <NEW_LINE> self.top = kwargs.get('top', None)
|
:keyword filter: An OData expression that filters the documents considered for suggestions.
:paramtype filter: str
:keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the
suggestions query. Default is false. When set to true, the query will find terms even if
there's a substituted or missing character in the search text. While this provides a better
experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are
slower and consume more resources.
:paramtype use_fuzzy_matching: bool
:keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
highlightPreTag. If omitted, hit highlighting of suggestions is disabled.
:paramtype highlight_post_tag: str
:keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
highlightPostTag. If omitted, hit highlighting of suggestions is disabled.
:paramtype highlight_pre_tag: str
:keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
that must be covered by a suggestions query in order for the query to be reported as a success.
This parameter can be useful for ensuring search availability even for services with only one
replica. The default is 80.
:paramtype minimum_coverage: float
:keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
expression can be either a field name or a call to either the geo.distance() or the
search.score() functions. Each expression can be followed by asc to indicate ascending, or desc
to indicate descending. The default is ascending order. Ties will be broken by the match scores
of documents. If no $orderby is specified, the default sort order is descending by document
match score. There can be at most 32 $orderby clauses.
:paramtype order_by: list[str]
:keyword search_fields: The list of field names to search for the specified search text. Target
fields must be included in the specified suggester.
:paramtype search_fields: list[str]
:keyword select: The list of fields to retrieve. If unspecified, only the key field will be
included in the results.
:paramtype select: list[str]
:keyword top: The number of suggestions to retrieve. The value must be a number between 1 and
100. The default is 5.
:paramtype top: int
|
625941bb8c0ade5d55d3e881
|
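A hypothetical construction mirroring the keyword arguments documented above:

```python
options = SuggestOptions(
    use_fuzzy_matching=True,         # tolerate a substituted/missing character
    highlight_pre_tag='<b>',         # pre and post tags must be set together
    highlight_post_tag='</b>',
    minimum_coverage=80.0,
    order_by=['search.score() desc'],
    top=10,
)
```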
def add_features(self, df_with_features): <NEW_LINE> <INDENT> self._train_set = self._train_set.join(df_with_features, on=self._id) <NEW_LINE> self._test_set = self._test_set.join(df_with_features, on=self._id)
|
Adds features from dataframe df_with_features to our data. The dataframe has to have the unique id column used for the join.
|
625941bb15fb5d323cde09cc
|
def list_vlans(self, datacenter=None, vlan_number=None, name=None, **kwargs): <NEW_LINE> <INDENT> _filter = utils.NestedDict(kwargs.get('filter') or {}) <NEW_LINE> if vlan_number: <NEW_LINE> <INDENT> _filter['networkVlans']['vlanNumber'] = ( utils.query_filter(vlan_number)) <NEW_LINE> <DEDENT> if name: <NEW_LINE> <INDENT> _filter['networkVlans']['name'] = utils.query_filter(name) <NEW_LINE> <DEDENT> if datacenter: <NEW_LINE> <INDENT> _filter['networkVlans']['primaryRouter']['datacenter']['name'] = ( utils.query_filter(datacenter)) <NEW_LINE> <DEDENT> kwargs['filter'] = _filter.to_dict() <NEW_LINE> if 'mask' not in kwargs: <NEW_LINE> <INDENT> kwargs['mask'] = DEFAULT_VLAN_MASK <NEW_LINE> <DEDENT> return self.account.getNetworkVlans(**kwargs)
|
Display a list of all VLANs on the account.
This provides a quick overview of all VLANs including information about
data center residence and the number of devices attached.
:param string datacenter: If specified, the list will only contain
VLANs in the specified data center.
:param int vlan_number: If specified, the list will only contain the
VLAN matching this VLAN number.
:param string name: If specified, the list will only contain the
VLAN matching this VLAN name.
:param dict \*\*kwargs: response-level options (mask, limit, etc.)
|
625941bbf7d966606f6a9ec3
|
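Usage might look like the following; `mgr` (a SoftLayer-style network manager instance) and the result keys `vlanNumber`/`name` are assumptions based on the filter paths in the code, not confirmed by the source:

```python
for vlan in mgr.list_vlans(datacenter='dal05'):
    print(vlan['vlanNumber'], vlan.get('name'))
```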
def print_dictionary(dictionary): <NEW_LINE> <INDENT> list_dict = [ele for ele in dictionary.keys()] <NEW_LINE> list_dict.sort() <NEW_LINE> for ele in list_dict: <NEW_LINE> <INDENT> print(ele, "-", dictionary[ele])
|
Prints the dictionary's key-value pairs in sorted key order.
|
625941bb73bcbd0ca4b2bf3f
|
def map(self, row): <NEW_LINE> <INDENT> return_dict = collections.defaultdict(list) <NEW_LINE> if self.match_row(row): <NEW_LINE> <INDENT> record_id = row[sv.VARS.ID] <NEW_LINE> field_value = self.extract_value(row) <NEW_LINE> return_dict[field_value] = [record_id] <NEW_LINE> <DEDENT> return {qs.QRY_QID : self._qid, qs.QRY_VALID : True, qs.QRY_FISHING_MATCHES_FOUND : return_dict}
|
Return a dictionary where DBF_MATCHINGRECORDIDS either
contains a set with the matching row number or an empty set if
there is no match, and QRY_FISHING_MATCHES_FOUND is a dictionary
indexed by the matching field value containing a set of matching
row ids or an empty dictionary if there is no match.
|
625941bb91af0d3eaac9b8d7
|
def get_subscription_owner(self, subscription): <NEW_LINE> <INDENT> pass
|
Returns the user who owns the subscription.
|
625941bb498bea3a759b9972
|
def get_raw_values(self, pydict, recovery_name=True): <NEW_LINE> <INDENT> new_dict = {"id": pydict["id"]} <NEW_LINE> for field in self: <NEW_LINE> <INDENT> raw_key = "%s_raw" % field.key <NEW_LINE> if raw_key in pydict: <NEW_LINE> <INDENT> if recovery_name: <NEW_LINE> <INDENT> new_dict[field.name] = pydict[raw_key] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_dict[field.key] = pydict[raw_key] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return new_dict
|
Convert a naive GET response to a human-readable field-name format,
using the raw data format.
|
625941bb6aa9bd52df036c64
|
def quicksort(arr, first, last): <NEW_LINE> <INDENT> stack = [] <NEW_LINE> stack.append((first, last)) <NEW_LINE> while stack: <NEW_LINE> <INDENT> pos = stack.pop() <NEW_LINE> left, right = pos[0], pos[1] <NEW_LINE> piv = partition(arr, left, right) <NEW_LINE> if piv - 1 > left: <NEW_LINE> <INDENT> stack.append((left, piv - 1)) <NEW_LINE> <DEDENT> if piv + 1 < right: <NEW_LINE> <INDENT> stack.append((piv + 1, right))
|
Iterative quick sort using an explicit stack instead of recursion.
|
625941bb99cbb53fe6792aa9
|
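The iterative quicksort above relies on a `partition` helper that is not shown; a minimal Lomuto-style sketch matching its calling convention (partition `arr[left..right]` in place, return the final pivot index) could be:

```python
def partition(arr, left, right):
    pivot = arr[right]            # last element as pivot
    i = left - 1                  # boundary of the <= pivot region
    for j in range(left, right):
        if arr[j] <= pivot:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[right] = arr[right], arr[i + 1]
    return i + 1
```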
def _saveAllCombinations(self, tempBuffer, s, position, prefix, prefixLength): <NEW_LINE> <INDENT> max1 = 1 << position <NEW_LINE> for i in range(1, max1): <NEW_LINE> <INDENT> newPrefixLength = prefixLength <NEW_LINE> for j in range(position): <NEW_LINE> <INDENT> isSet = i & (1 << j) <NEW_LINE> if isSet > 0: <NEW_LINE> <INDENT> prefix.insert(newPrefixLength, tempBuffer[j].itemId) <NEW_LINE> newPrefixLength += 1 <NEW_LINE> <DEDENT> <DEDENT> self._saveItemSet(prefix, newPrefixLength, s)
|
Generates all the combinations of items in a single branch of the frequent-pattern tree
:param tempBuffer: items in a list
:type tempBuffer: list
:param s: support at the leaf node of a branch
:param position: the length of tempBuffer
:type position: int
:param prefix: it represents the list of leaf nodes
:type prefix: list
:param prefixLength: the length of prefix
:type prefixLength: int
|
625941bbcdde0d52a9e52ef1
|
def setAnalogIOBlock2(self, *args): <NEW_LINE> <INDENT> return _AriaPy.ArMTXIO_setAnalogIOBlock2(self, *args)
|
setAnalogIOBlock2(self, int analog, unsigned short val) -> bool
|
625941bbe1aae11d1e749b76
|
def test_empty(ai): <NEW_LINE> <INDENT> for symbol in 'ox': <NEW_LINE> <INDENT> result = ai.tah_pocitace('-' * 20, symbol) <NEW_LINE> assert len(result) == 20 <NEW_LINE> assert result.count('-') == 19 <NEW_LINE> assert result.count(symbol) == 1
|
Playing on an empty board
|
625941bb32920d7e50b2808f
|
def getDICOM_fpath(self, D, force_unique=0): <NEW_LINE> <INDENT> if not os.path.isdir(os.path.join(self.rpath,D.PatientID)): <NEW_LINE> <INDENT> os.mkdir(os.path.join(self.rpath,D.PatientID)) <NEW_LINE> <DEDENT> hSeriesNumber = str(D.SeriesNumber) <NEW_LINE> try: <NEW_LINE> <INDENT> hSeriesNumber='%04i' % int(hSeriesNumber) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> hSeriesNumber='0'*(4-len(hSeriesNumber)) + hSeriesNumber <NEW_LINE> <DEDENT> series_dir = hSeriesNumber+'_'+D.SeriesDescription.replace('*','_') <NEW_LINE> if not os.path.isdir(os.path.join(self.rpath,D.PatientID,series_dir)): <NEW_LINE> <INDENT> os.mkdir(os.path.join(self.rpath,D.PatientID,series_dir)) <NEW_LINE> <DEDENT> slice_num = D.InstanceNumber <NEW_LINE> try: <NEW_LINE> <INDENT> slice_num = '%03i' % int(slice_num) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> slice_num = '0'*(3-len(slice_num)) + slice_num <NEW_LINE> <DEDENT> slice_num = D.SOPInstanceUID+'_'+slice_num <NEW_LINE> if(force_unique): <NEW_LINE> <INDENT> slice_num = slice_num+ ('%05i' % random.randint(0,99999)) <NEW_LINE> <DEDENT> fname = D.PatientID+'__'+D.StudyDate+'__'+D.StudyID+'__'+D.Modality+'__'+D.StudyDescription+'__'+hSeriesNumber+'__'+D.SeriesDescription+'_'+slice_num+'.dcm' <NEW_LINE> fname = fname.replace('*','_') <NEW_LINE> fpath = os.path.join(self.rpath,D.PatientID,series_dir,fname) <NEW_LINE> return fpath
|
Returns the intended file path for DICOM dataset D
Parameters
----------
D : Image level Dataset
(with SOPInstanceUID, InstanceNumber, etc.)
force_unique : bool, default=False,
True => adds a random int to the filename
Returns
-------
fpath : path to file
|
625941bb2ae34c7f2600cff3
|
def render(self): <NEW_LINE> <INDENT> portal_state = self.context.unrestrictedTraverse('@@plone_portal_state') <NEW_LINE> root = getNavigationRootObject(self.context, portal_state.portal()) <NEW_LINE> ppath = self.context.getPhysicalPath() <NEW_LINE> relative = ppath[len(root.getPhysicalPath()):] <NEW_LINE> path = "/".join(relative) <NEW_LINE> filename = self.request.file.filename <NEW_LINE> obj = createContentInContainer(self.context, 'CloudFile', id=filename, title=filename) <NEW_LINE> filename = obj.id <NEW_LINE> content = self.request.file.read() <NEW_LINE> obj.fileid = create_file_in_owncloud(filename, path, content) <NEW_LINE> transaction.commit()
|
AJAX callback for Uploadify.
|
625941bb462c4b4f79d1d592
|
def is_connected(G): <NEW_LINE> <INDENT> V = vertices(G) <NEW_LINE> BFS(G, V[0]) <NEW_LINE> for v in V: <NEW_LINE> <INDENT> if v.distance == INFINITY: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True
|
Function that determines whether each node in the given graph is connected, directly or indirectly, with every other
node.
Parameters
----------
G : Graph
The graph to be evaluated.
Return
------
bool :
Returns True if the graph is connected and False otherwise.
|
625941bb66656f66f7cbc06c
|
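The helpers used above (`vertices`, `BFS`, `INFINITY`) are not shown; for comparison, a self-contained BFS connectivity check over a plain adjacency-list dict could look like:

```python
from collections import deque

def is_connected_adj(adj):
    # adj maps each node to an iterable of its neighbours
    if not adj:
        return True
    start = next(iter(adj))
    seen = {start}
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v in adj[u]:
            if v not in seen:
                seen.add(v)
                queue.append(v)
    return len(seen) == len(adj)
```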
def create_insert_query (field_values, table): <NEW_LINE> <INDENT> fields, values = '', '' <NEW_LINE> for fv in field_values: <NEW_LINE> <INDENT> if field_name_restricted(fv.field): <NEW_LINE> <INDENT> fv.field = '[{}]'.format(fv.field) <NEW_LINE> <DEDENT> <DEDENT> for fv in field_values[:-1]: <NEW_LINE> <INDENT> fields += '{}, '.format(fv.field) <NEW_LINE> <DEDENT> fields += field_values[-1].field <NEW_LINE> for fv in field_values[:-1]: <NEW_LINE> <INDENT> values += '{}, '.format(fv.value) <NEW_LINE> <DEDENT> values += field_values[-1].value <NEW_LINE> sql = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(table, fields, values) <NEW_LINE> return sql
|
Creates SQL INSERT query.
Args:
field_values (list[FieldValue]): Fields to be included in the INSERT
query.
table (str): Name of table in which data is to be inserted.
Returns:
str: SQL-ready INSERT statement.
|
625941bbc432627299f04b06
|
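An illustrative call; `FieldValue` as a mutable dataclass and `field_name_restricted` flagging SQL keywords such as `order` are our assumptions:

```python
from dataclasses import dataclass

@dataclass
class FieldValue:
    field: str
    value: str

fvs = [FieldValue('name', "'Alice'"), FieldValue('order', '3')]
print(create_insert_query(fvs, 'users'))
# INSERT INTO users (name, [order]) VALUES ('Alice', 3)
```

Note that values are interpolated directly into the SQL string, so callers must quote and escape them; parameterized queries would be safer.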
def __len__(self) -> int: <NEW_LINE> <INDENT> return len(self._callbacks)
|
Return the number of callbacks in the list.
|
625941bb5e10d32532c5edef
|
def get_attributes_by_entity_type(self, entity_type, **kwargs): <NEW_LINE> <INDENT> kwargs['_return_http_data_only'] = True <NEW_LINE> return self.get_attributes_by_entity_type_with_http_info(entity_type, **kwargs)
|
[beta] List all attributes by entity type # noqa: E501
Fetch all attributes in an organization associated with either drivers or assets. **Submit Feedback**: Likes, dislikes, and API feature requests should be filed as feedback in our <a href="https://forms.gle/zkD4NCH7HjKb7mm69" target="_blank">API feedback form</a>. If you encountered an issue or noticed inaccuracies in the API documentation, please <a href="https://www.samsara.com/help" target="_blank">submit a case</a> to our support team. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_attributes_by_entity_type(entity_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str entity_type: Denotes the type of entity, driver or asset. (required)
:param int limit: The limit for how many objects will be in the response. Default and max for this value is 512 objects.
:param str after: If specified, this should be the endCursor value from the previous page of results. When present, this request will return the next page of results that occur immediately after the previous page of results.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetAttributesByEntityTypeResponse
If the method is called asynchronously,
returns the request thread.
|
625941bb099cdd3c635f0b1e
|
def polling_subscription(user_id): <NEW_LINE> <INDENT> job = get_current_job() <NEW_LINE> if job: <NEW_LINE> <INDENT> fut_timeout = 30 <NEW_LINE> project_id = app.config['GCP_PROJECT_ID'] <NEW_LINE> subscription_name = app.config['SUBSCRIPTION_NAME'] <NEW_LINE> credentials_env = app.config['GOOGLE_APPLICATION_CREDENTIALS'] <NEW_LINE> if not credentials_env: <NEW_LINE> <INDENT> app.logger.error('Environment variable GOOGLE_APPLICATION_CREDENTIALS is empty') <NEW_LINE> return <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> service_account_info = json.loads(credentials_env) <NEW_LINE> credentials = service_account.Credentials.from_service_account_info(service_account_info) <NEW_LINE> <DEDENT> except json.JSONDecodeError: <NEW_LINE> <INDENT> credentials = service_account.Credentials.from_service_account_file(credentials_env) <NEW_LINE> <DEDENT> subscriber = pubsub_v1.SubscriberClient(credentials=credentials) <NEW_LINE> subscription_path = subscriber.subscription_path(project_id, subscription_name) <NEW_LINE> future = None <NEW_LINE> terminate = False <NEW_LINE> _set_task_progress(job, 'Polling', terminate) <NEW_LINE> while True: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> future = subscriber.subscribe(subscription_path, callback=partial(_process_msg, user_id=user_id, job_id=job.id)) <NEW_LINE> app.logger.info('Listening for messages on {}'.format(subscription_path)) <NEW_LINE> future.result(timeout=fut_timeout) <NEW_LINE> <DEDENT> except TimeoutError: <NEW_LINE> <INDENT> with app.app_context(): <NEW_LINE> <INDENT> task = Task.query.get(job.id) <NEW_LINE> <DEDENT> terminate = task.complete <NEW_LINE> if terminate: <NEW_LINE> <INDENT> app.logger.info('Stopped listening for messages on {}'.format(subscription_name)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> app.logger.info('Renewing listener for messages on {}'.format(subscription_name)) <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> terminate = True <NEW_LINE> app.logger.error('Stopped listening for messages on {} - Exception thrown: {}' .format(subscription_name, e)) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> if future is not None: <NEW_LINE> <INDENT> future.cancel() <NEW_LINE> <DEDENT> if terminate: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> _set_task_progress(job, 'Finished', terminate)
|
This will continuously poll a Pull-subscription from Google Cloud.
The app is deployed on Heroku where we have only one available worker in the
free-tier, so our queue will only have the task of continuously listening for
new messages in the Pull subscription. When a message is received it will be
processed in the callback.
ref: https://googleapis.github.io/google-cloud-python/latest/pubsub/subscriber/index.html#creating-a-subscription
|
625941bb4e696a04525c930e
|
def set_services(hass, config, newstatus): <NEW_LINE> <INDENT> host = config[CONF_HOST] <NEW_LINE> port = config[CONF_PORT] <NEW_LINE> user = config[CONF_USERNAME] <NEW_LINE> password = config[CONF_PASSWORD] <NEW_LINE> error = False <NEW_LINE> auth = None <NEW_LINE> if user or password: <NEW_LINE> <INDENT> auth = HTTPBasicAuth(user, password) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> response = requests.get("http://" + host + ":" + port + "/cgi-bin/service.sh?name=all&action=" + newstatus, timeout=HTTP_TIMEOUT, auth=auth) <NEW_LINE> if response.status_code >= 300: <NEW_LINE> <INDENT> _LOGGER.error("Failed to set status of services to device %s", host) <NEW_LINE> error = True <NEW_LINE> <DEDENT> <DEDENT> except requests.exceptions.RequestException as e: <NEW_LINE> <INDENT> _LOGGER.error("Error setting status of services to %s: error %s", host, e) <NEW_LINE> error = True <NEW_LINE> <DEDENT> if error: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True
|
Set status of services.
|
625941bb6fece00bbac2d5fe
|
def settle(self, settle_data=None): <NEW_LINE> <INDENT> if self.running_environment == RUNNING_ENVIRONMENT.TZERO and self.hold_available.sum( ) != 0: <NEW_LINE> <INDENT> raise RuntimeError( 'QAACCOUNT: 该T0账户未当日仓位,请平仓 {}'.format( self.hold_available.to_dict() ) ) <NEW_LINE> <DEDENT> if self.market_type == MARKET_TYPE.FUTURE_CN: <NEW_LINE> <INDENT> self.static_balance['frozen'].append( sum( [ rx['money'] * rx['amount'] for var in self.frozen.values() for rx in var.values() ] ) ) <NEW_LINE> self.static_balance['cash'].append(self.cash[-1]) <NEW_LINE> self.static_balance['hold'].append(self.hold.to_dict()) <NEW_LINE> self.static_balance['date'].append(self.date) <NEW_LINE> self.static_balance['static_assets'].append( self.static_balance['cash'][-1] + self.static_balance['frozen'][-1] ) <NEW_LINE> <DEDENT> self.sell_available = self.hold <NEW_LINE> self.buy_available = self.hold <NEW_LINE> self.cash_available = self.cash[-1] <NEW_LINE> self.datetime = '{} 09:30:00'.format( QA_util_get_next_day(self.date) ) if self.date is not None else None <NEW_LINE> for item in self.positions.values(): <NEW_LINE> <INDENT> item.settle()
|
Daily settlement for stocks/futures.
Stock settlement: carry over the sellable stock quota.
T0 settlement: carry over the T0 quota.
Futures settlement: carry over the static funds.
@2019-02-25 yutiansut
hold will undergo a major change below:
from tracking quantity only ==> quantity + cost + buy price (carrying more information)
computing hold from history ==> last_settle + today_pos_change
|
625941bb30bbd722463cbc85
|
def compute_gradient(self, molecule): <NEW_LINE> <INDENT> if self.engine == 'dftd3': <NEW_LINE> <INDENT> jobrec = intf_dftd3.run_dftd3_from_arrays( molrec=molecule.to_dict(np_out=False), name_hint=self.fctldash, level_hint=self.dashlevel, param_tweaks=self.dashparams, dashcoeff_supplement=self.dashcoeff_supplement, ptype='gradient', verbose=1) <NEW_LINE> dashd_part = core.Matrix.from_array(jobrec['qcvars']['DISPERSION CORRECTION GRADIENT'].data) <NEW_LINE> for k, qca in jobrec['qcvars'].items(): <NEW_LINE> <INDENT> if not isinstance(qca.data, np.ndarray): <NEW_LINE> <INDENT> core.set_variable(k, qca.data) <NEW_LINE> <DEDENT> <DEDENT> if self.fctldash in ['hf3c', 'pbeh3c']: <NEW_LINE> <INDENT> gcp_part = gcp.run_gcp(molecule, self.fctldash, verbose=False, dertype=1) <NEW_LINE> dashd_part.add(gcp_part) <NEW_LINE> <DEDENT> return dashd_part <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.disp.compute_gradient(molecule)
|
Compute dispersion gradient based on engine, dispersion level, and parameters in `self`.
Parameters
----------
molecule : psi4.core.Molecule
System for which to compute empirical dispersion correction.
Returns
-------
psi4.core.Matrix
(nat, 3) dispersion gradient [Eh/a0].
|
625941bb5fdd1c0f98dc00f4
|
def gradPotentialEnergy(self, x): <NEW_LINE> <INDENT> return 2.*self.scale*x*self.potentialEnergy(x)
|
Required Inputs
x :: np.matrix :: column vector
Optional Inputs
idx :: tuple(int) :: an index for the n-dim SHO
Notes
discard just stores extra arguments passed for compatibility
with the lattice versions
|
625941bb187af65679ca4fe0
|
def buildCaption(self, text): <NEW_LINE> <INDENT> font = pygame.font.Font(None, 14) <NEW_LINE> self.image = font.render(text,True, self.foregroundColour, ) <NEW_LINE> (w,h) = self.image.get_size() <NEW_LINE> if self.size is None: <NEW_LINE> <INDENT> self.size = (w + 2*self.margin, h + 2*self.margin) <NEW_LINE> <DEDENT> self.imagePosition = (self.margin, self.margin)
|
Pre-render the text to go on the button label.
|
625941bb55399d3f05588575
|
def show(self, widget=None, event=None, reload_from_backend=False): <NEW_LINE> <INDENT> if not reload_from_backend: <NEW_LINE> <INDENT> self.update_note() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.populate_menu() <NEW_LINE> <DEDENT> self.winMain.destroy() <NEW_LINE> self.build_note()
|
Shows the stickynotes window
|
625941bb627d3e7fe0d68d10
|
def test_create_activity_as_context_check_not_duplicated_activity(self): <NEW_LINE> <INDENT> from .mockers import user_status_as_context <NEW_LINE> from .mockers import create_context <NEW_LINE> from hashlib import sha1 <NEW_LINE> self.create_context(create_context) <NEW_LINE> url_hash = sha1(create_context['url']).hexdigest() <NEW_LINE> self.testapp.post('/contexts/%s/activities' % url_hash, json.dumps(user_status_as_context), oauth2Header(test_manager), status=201) <NEW_LINE> self.testapp.post('/contexts/%s/activities' % url_hash, json.dumps(user_status_as_context), oauth2Header(test_manager), status=200)
|
Given an admin user
When I post an activity in the name of a context
And I try to post the same content twice in less than a minute
Then the activity is posted only once
|
625941bbcc40096d61595814
|
def load_bert(word2ix, tokenizer=None, model_name="roberta", model_class="seq2seq", target_size=0, target=None): <NEW_LINE> <INDENT> if model_class == "seq2seq": <NEW_LINE> <INDENT> bert_model = Seq2SeqModel(word2ix, model_name=model_name, tokenizer=tokenizer) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "cls": <NEW_LINE> <INDENT> if target_size == 0: <NEW_LINE> <INDENT> raise Exception("必须传入参数 target_size,才能确定预测多少分类") <NEW_LINE> <DEDENT> bert_model = BertClsClassifier(word2ix, target_size, model_name=model_name) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "sequence_labeling": <NEW_LINE> <INDENT> if target_size == 0: <NEW_LINE> <INDENT> raise Exception("必须传入参数 target_size,才能确定预测多少分类") <NEW_LINE> <DEDENT> bert_model = BertSeqLabeling(word2ix, target_size, model_name=model_name) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "sequence_labeling_crf": <NEW_LINE> <INDENT> if target_size == 0: <NEW_LINE> <INDENT> raise Exception("必须传入参数 target_size,才能确定预测多少分类") <NEW_LINE> <DEDENT> bert_model = BertSeqLabelingCRF(word2ix, target_size, model_name=model_name) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "relation_extrac": <NEW_LINE> <INDENT> if target_size == 0: <NEW_LINE> <INDENT> raise Exception("必须传入参数 target_size 表示预测predicate的种类") <NEW_LINE> <DEDENT> bert_model = BertRelationExtrac(word2ix, target_size, model_name=model_name) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "simbert": <NEW_LINE> <INDENT> bert_model = SimBertModel(word2ix, model_name=model_name, tokenizer=tokenizer) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "multi_label_cls": <NEW_LINE> <INDENT> bert_model = BertClsMultiClassifier(word2ix, target_size, model_name=model_name) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "multi_label_cls_seq2seq": <NEW_LINE> <INDENT> bert_model = ClsMultiSeq2SeqModel(word2ix, target, model_name=model_name) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> elif model_class == "embedding": <NEW_LINE> <INDENT> bert_model = BasicBert(word2ix, model_name=model_name, tokenizer=tokenizer) <NEW_LINE> return bert_model <NEW_LINE> <DEDENT> else : <NEW_LINE> <INDENT> raise Exception("model_name_err")
|
model_path: location of the model
This is a unified interface used to load models.
model_class: seq2seq or encoder
|
625941bba8ecb033257d2f98
|
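A hypothetical usage of the unified loader (a bert_seq2seq-style API is assumed; `word2ix` would come from the accompanying vocabulary loader):

```python
# Sequence classification over 14 classes; target_size is mandatory for "cls".
model = load_bert(word2ix, model_name='roberta',
                  model_class='cls', target_size=14)
```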
def migrate_oldest_version_to_base(self): <NEW_LINE> <INDENT> assert self.base_version == self.versions[0] <NEW_LINE> if not self.versions: <NEW_LINE> <INDENT> ABUNDANT_LOGGER.warning('No base version found') <NEW_LINE> <DEDENT> elif len(self.versions) == 1: <NEW_LINE> <INDENT> ABUNDANT_LOGGER.warning('Cannot migrate when only base version exists') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.base_version.migrate_to_next_version() <NEW_LINE> self.load_versions()
|
Migrate the oldest version to the base version.
But underneath, it migrates the base version to the oldest version.
|
625941bb7d847024c06be17b
|
def _assert_pipeline_read_files_record_count_equal( self, input_pattern, expected_count, use_read_all=False): <NEW_LINE> <INDENT> pipeline = TestPipeline() <NEW_LINE> if use_read_all: <NEW_LINE> <INDENT> pcoll = (pipeline | 'Create' >> beam.Create([input_pattern]) | 'Read' >> ReadAllFromVcf()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pcoll = pipeline | 'Read' >> ReadFromVcf(input_pattern) <NEW_LINE> <DEDENT> assert_that(pcoll, asserts.count_equals_to(expected_count)) <NEW_LINE> pipeline.run()
|
Helper method for verifying total records read.
Args:
input_pattern (str): Input file pattern to read.
expected_count (int): Expected number of records that were read.
use_read_all (bool): Whether to use the scalable ReadAllFromVcf transform
instead of ReadFromVcf.
|
625941bb4f6381625f114900
|
def notify(self, event): <NEW_LINE> <INDENT> for command in self.commands: <NEW_LINE> <INDENT> command.on_event(event, self)
|
Notifies all commands of a change in the state of
the interpreter.
|
625941bb8a43f66fc4b53f2b
|
def search_rotated_array(arr, key): <NEW_LINE> <INDENT> start, end = 0, len(arr) - 1 <NEW_LINE> while start <= end: <NEW_LINE> <INDENT> mid = int(start + (end - start) // 2) <NEW_LINE> if arr[mid] == key: <NEW_LINE> <INDENT> return mid <NEW_LINE> <DEDENT> if arr[start] <= arr[mid]: <NEW_LINE> <INDENT> if arr[start] <= key < arr[mid]: <NEW_LINE> <INDENT> end = mid - 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> start = mid + 1 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if arr[mid] < key <= arr[end]: <NEW_LINE> <INDENT> start = mid + 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> end = mid - 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return -1
|
Time: O(logN)
Space: O(1)
|
625941bbbd1bec0571d904fa
|
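A quick sanity check of the rotated-array binary search (example values are ours):

```python
arr = [4, 5, 6, 7, 0, 1, 2]           # sorted array rotated at index 4
print(search_rotated_array(arr, 0))   # 4
print(search_rotated_array(arr, 3))   # -1 (absent)
```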
def load_globals(self): <NEW_LINE> <INDENT> self.code_ops.extend([ (bp.LOAD_GLOBAL, 'globals'), (bp.CALL_FUNCTION, 0x0000), ])
|
Load the globals onto the TOS.
|
625941bb8e7ae83300e4ae8e
|
def itkVectorUL2_GetVectorDimension(): <NEW_LINE> <INDENT> return _itkVectorPython.itkVectorUL2_GetVectorDimension()
|
itkVectorUL2_GetVectorDimension() -> unsigned int
|
625941bb3346ee7daa2b2c2c
|
@register.filter <NEW_LINE> def number_new(this_object, user): <NEW_LINE> <INDENT> if type(this_object).__name__ == 'Document': <NEW_LINE> <INDENT> total_unread = 0 <NEW_LINE> for question in this_object.question_set.all(): <NEW_LINE> <INDENT> answers = question.question_answer <NEW_LINE> if not user.userquestionlastvisit_set.first(): <NEW_LINE> <INDENT> total_unread += answers.count() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> visit_datetime = user.userquestionlastvisit_set.first().created_on <NEW_LINE> unseen_answers = answers.filter(~Q(created_by=user), created_on__gt=visit_datetime) <NEW_LINE> total_unread += unseen_answers.count() <NEW_LINE> <DEDENT> <DEDENT> return str(total_unread) <NEW_LINE> <DEDENT> elif type(this_object).__name__ == 'Question': <NEW_LINE> <INDENT> answers = this_object.question_answer <NEW_LINE> if not user.userquestionlastvisit_set.first(): <NEW_LINE> <INDENT> return str(answers.count()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> visit_datetime = user.userquestionlastvisit_set.first().created_on <NEW_LINE> unseen_answers = answers.filter(~Q(created_by=user), created_on__gt=visit_datetime) <NEW_LINE> return str(unseen_answers.count())
|
Given the object and the user, find the number of answer posts
that the user has not seen based on the user's last visit datetime
|
625941bb4428ac0f6e5ba6b4
|
def load_dataset(opt): <NEW_LINE> <INDENT> print('Reading dataset ', opt.dataset) <NEW_LINE> if opt.dataset == 'fashioniq': <NEW_LINE> <INDENT> trainset = datasets.FashionIQ( anno_dir=opt.root_dir.format('annotation'), image_dir=opt.root_dir.format('images'), split_dir=opt.root_dir.format('public_split'), split='train', transform=torchvision.transforms.Compose([ torchvision.transforms.Resize(256), torchvision.transforms.RandomCrop(224), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ])) <NEW_LINE> testset = datasets.FashionIQ( anno_dir=opt.root_dir.format('annotation'), image_dir=opt.root_dir.format('images'), split_dir=opt.root_dir.format('public_split'), split='val', transform=torchvision.transforms.Compose([ torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224), torchvision.transforms.ToTensor(), torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ])) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Invalid dataset', opt.dataset) <NEW_LINE> sys.exit() <NEW_LINE> <DEDENT> print('trainset size:', len(trainset)) <NEW_LINE> print('testset size:', len(testset)) <NEW_LINE> return trainset, testset
|
Loads the input datasets.
|
625941bbd6c5a10208143f0a
|
def get_return_page(self,prior=False): <NEW_LINE> <INDENT> siteHistory = self.request.session.get('SITE_HISTORY',{}) <NEW_LINE> return getReturnPage(siteHistory,prior=prior)
|
This is just a wrapper for the getReturnPage helper function.
|
625941bb56b00c62f0f1451a
|
@admin.route('/manage_assessors_ajax/<int:id>') <NEW_LINE> @roles_required('root') <NEW_LINE> def manage_assessors_ajax(id): <NEW_LINE> <INDENT> if not validate_using_assessment(): <NEW_LINE> <INDENT> return jsonify({}) <NEW_LINE> <DEDENT> data: PresentationAssessment = PresentationAssessment.query.get_or_404(id) <NEW_LINE> state_filter = request.args.get('state_filter') <NEW_LINE> if state_filter == 'confirm': <NEW_LINE> <INDENT> attached_q = data.assessor_list.subquery() <NEW_LINE> assessors = db.session.query(AssessorAttendanceData) .join(attached_q, attached_q.c.id == AssessorAttendanceData.id) .filter(AssessorAttendanceData.confirmed == True).all() <NEW_LINE> <DEDENT> elif state_filter == 'not-confirm': <NEW_LINE> <INDENT> attached_q = data.assessor_list.subquery() <NEW_LINE> assessors = db.session.query(AssessorAttendanceData) .join(attached_q, attached_q.c.id == AssessorAttendanceData.id) .filter(AssessorAttendanceData.confirmed == False).all() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assessors = data.assessor_list.all() <NEW_LINE> <DEDENT> return ajax.admin.presentation_assessors_data(data, assessors, editable=not data.is_deployed)
|
AJAX data point for managing faculty assessors
:param id:
:return:
|
625941bbcdde0d52a9e52ef2
|
def reset(self, cleanup_only=False): <NEW_LINE> <INDENT> log.info("resetting Blivet (version %s) instance %s", __version__, self) <NEW_LINE> if flags.installer_mode: <NEW_LINE> <INDENT> self.encryption_passphrase = None <NEW_LINE> for device in self.devices: <NEW_LINE> <INDENT> if device.format.type == "luks" and device.format.exists: <NEW_LINE> <INDENT> self.save_passphrase(device) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if self.ksdata: <NEW_LINE> <INDENT> self.config.update(self.ksdata) <NEW_LINE> <DEDENT> if flags.installer_mode and not flags.image_install: <NEW_LINE> <INDENT> iscsi.startup() <NEW_LINE> fcoe.startup() <NEW_LINE> zfcp.startup() <NEW_LINE> <DEDENT> self.devicetree.reset(conf=self.config, passphrase=self.encryption_passphrase, luks_dict=self.__luks_devs) <NEW_LINE> self.devicetree.populate(cleanup_only=cleanup_only) <NEW_LINE> self.fsset = FSSet(self.devicetree) <NEW_LINE> self.edd_dict = get_edd_dict(self.partitioned) <NEW_LINE> self.devicetree.edd_dict = self.edd_dict <NEW_LINE> if self.bootloader: <NEW_LINE> <INDENT> self.bootloader.reset() <NEW_LINE> <DEDENT> self.roots = [] <NEW_LINE> if flags.installer_mode: <NEW_LINE> <INDENT> self.roots = find_existing_installations(self.devicetree) <NEW_LINE> self.dump_state("initial") <NEW_LINE> <DEDENT> if not flags.installer_mode: <NEW_LINE> <INDENT> self.devicetree.handle_nodev_filesystems() <NEW_LINE> <DEDENT> self.update_boot_loader_disk_list()
|
Reset storage configuration to reflect actual system state.
This will cancel any queued actions and rescan from scratch but not
clobber user-obtained information like passphrases, iscsi config, &c
:keyword cleanup_only: prepare the tree only to deactivate devices
:type cleanup_only: bool
See :meth:`devicetree.Devicetree.populate` for more information
about the cleanup_only keyword argument.
|
625941bb0a50d4780f666d52
|
def load_embeddings(filename, dim): <NEW_LINE> <INDENT> token_to_ix = dict() <NEW_LINE> ix_to_token = dict() <NEW_LINE> embeds = list() <NEW_LINE> with open(filename) as f: <NEW_LINE> <INDENT> i = 0 <NEW_LINE> for line in f: <NEW_LINE> <INDENT> line = line.split(' ') <NEW_LINE> token = line[0] <NEW_LINE> vals = list(map(float, line[1:dim+1])) <NEW_LINE> token_to_ix[token] = i <NEW_LINE> ix_to_token[i] = token <NEW_LINE> embeds.append(vals) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> <DEDENT> token_to_ix['TRACE'] = i <NEW_LINE> ix_to_token[i] = 'TRACE' <NEW_LINE> embeds.append([np.random.uniform(low=-0.1, high=0.1) for _ in range(dim)]) <NEW_LINE> i += 1 <NEW_LINE> token_to_ix['NUMBER'] = i <NEW_LINE> ix_to_token[i] = 'NUMBER' <NEW_LINE> embeds.append([np.random.uniform(low=-0.1, high=0.1) for _ in range(dim)]) <NEW_LINE> i += 1 <NEW_LINE> token_to_ix['UNK'] = i <NEW_LINE> ix_to_token[i] = 'UNK' <NEW_LINE> embeds.append([np.random.uniform(low=-0.1, high=0.1) for _ in range(dim)]) <NEW_LINE> i += 1 <NEW_LINE> return token_to_ix, ix_to_token, np.array(embeds)
|
Load word embeddings of the specified dimension from a file
|
625941bbd4950a0f3b08c214
|
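A hypothetical usage with a GloVe-style text file, where each line is `<token> v1 v2 ... vN` with N >= dim (the filename is an assumption):

```python
token_to_ix, ix_to_token, embeds = load_embeddings('glove.6B.50d.txt', dim=50)
print(embeds.shape)          # (vocab_size + 3, 50): +3 for TRACE, NUMBER, UNK
print(token_to_ix['UNK'])    # index of the randomly initialised UNK vector
```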
def __init__(self): <NEW_LINE> <INDENT> self.__src = '' <NEW_LINE> self.__dst = '' <NEW_LINE> self.__chunks = []
|
! Initializer
@param self The current instance
|
625941bb090684286d50eba4
|
def get_market_status_messages(self): <NEW_LINE> <INDENT> return self.__market_status_messages
|
Get market messages
|
625941bb8e05c05ec3eea234
|
def get_sse_tuple_list(self): <NEW_LINE> <INDENT> tuple_list = [ (chain, start_res, end_res, 'H') for (chain, start_res, end_chainid, end_res, htype) in self.helix_list ] <NEW_LINE> tuple_list += [ (chain, start_res, end_res, 'E') for (chain, start_res, end_chainid, end_res) in self.strand_list ] <NEW_LINE> tuple_list.sort(cmp=tuplecmp) <NEW_LINE> return tuple_list
|
Build an SSE tuple list of (chain, start_res, end_res, type) and sort it;
type is 'H' or 'E'.
Parameters: None
Return value: list of (chain, start_res, end_res, type)
|
625941bb63f4b57ef0000fe3
|
def pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB, p_min=0): <NEW_LINE> <INDENT> pipe_collapse_uty = (p_e - p_min) * gamma_m * gamma_SCLB / p_c <NEW_LINE> return pipe_collapse_uty
|
Calculate pipe collapse unity value.
Local buckling – system collapse (external over pressure only).
Reference:
DNVGL-ST-F101 (2017-12)
sec:5.4.4.1 eq:5.10 p:95 $p_{lt}$
|
625941bbd7e4931a7ee9dddf
|
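A worked example with illustrative numbers (not taken from the standard); the check passes when the unity value is at most 1.0:

```python
p_e = 25.0e6        # external pressure [Pa]
p_c = 40.0e6        # characteristic collapse pressure [Pa]
gamma_m = 1.15      # material resistance factor
gamma_SCLB = 1.14   # safety class resistance factor (local buckling)
print(pipe_collapse_unity(p_e, p_c, gamma_m, gamma_SCLB))   # ~0.82 -> OK
```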
def __getattr__(self, item): <NEW_LINE> <INDENT> if item in self.data: <NEW_LINE> <INDENT> return self.data[item] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise AttributeError
|
Get stored item with .-notation if not defined as a class member.
:param item: name, string of item compatible
with Python class member name.
:return: value of item.
|
625941bbd6c5a10208143f0b
|
def pairs_to_SNP_Pairs(pairs, populations): <NEW_LINE> <INDENT> results = {} <NEW_LINE> for snp1, info in pairs.items(): <NEW_LINE> <INDENT> results[snp1] = [] <NEW_LINE> if info['matches']: <NEW_LINE> <INDENT> for snp2, data in info['matches'].items(): <NEW_LINE> <INDENT> results[snp1].append( SNP_Pair( plink = { snp1: { 'chrom': info['chrom'], 'loc' : info['loc'], 'matches': { snp2: data } } }, populations = populations ) ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return results
|
Convert output of filter_by_ld() to dict of SNP_Pairs.
|
625941bb167d2b6e31218a59
|
@app.route('/build') <NEW_LINE> @auth.build_access_required <NEW_LINE> def view_build(): <NEW_LINE> <INDENT> build = g.build <NEW_LINE> page_size = 20 <NEW_LINE> offset = request.args.get('offset', 0, type=int) <NEW_LINE> candidate_list = ( models.Release.query .filter_by(build_id=build.id) .order_by(models.Release.created.desc()) .offset(offset) .limit(page_size + 1) .all()) <NEW_LINE> has_next_page = len(candidate_list) > page_size <NEW_LINE> if has_next_page: <NEW_LINE> <INDENT> candidate_list = candidate_list[:-1] <NEW_LINE> <DEDENT> release_dict = {} <NEW_LINE> created_dict = {} <NEW_LINE> run_stats_dict = {} <NEW_LINE> for candidate in candidate_list: <NEW_LINE> <INDENT> release_list = release_dict.setdefault(candidate.name, []) <NEW_LINE> release_list.append(candidate) <NEW_LINE> max_created = created_dict.get(candidate.name, candidate.created) <NEW_LINE> created_dict[candidate.name] = max(candidate.created, max_created) <NEW_LINE> run_stats_dict[candidate.id] = dict( runs_total=0, runs_complete=0, runs_successful=0, runs_failed=0, runs_baseline=0) <NEW_LINE> <DEDENT> for release_list in release_dict.itervalues(): <NEW_LINE> <INDENT> release_list.sort(key=lambda x: x.number, reverse=True) <NEW_LINE> <DEDENT> release_age_list = [ (value, key) for key, value in created_dict.iteritems()] <NEW_LINE> release_age_list.sort(reverse=True) <NEW_LINE> release_name_list = [key for _, key in release_age_list] <NEW_LINE> if run_stats_dict: <NEW_LINE> <INDENT> stats_counts = ( db.session.query( models.Run.release_id, models.Run.status, sqlalchemy.func.count(models.Run.id)) .join(models.Release) .filter(models.Release.id.in_(run_stats_dict.keys())) .group_by(models.Run.status, models.Run.release_id) .all()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stats_counts = [] <NEW_LINE> <DEDENT> for candidate_id, status, count in stats_counts: <NEW_LINE> <INDENT> stats_dict = run_stats_dict[candidate_id] <NEW_LINE> if status in (models.Run.DIFF_APPROVED, models.Run.DIFF_NOT_FOUND): <NEW_LINE> <INDENT> stats_dict['runs_successful'] += count <NEW_LINE> stats_dict['runs_complete'] += count <NEW_LINE> stats_dict['runs_total'] += count <NEW_LINE> <DEDENT> elif status == models.Run.DIFF_FOUND: <NEW_LINE> <INDENT> stats_dict['runs_failed'] += count <NEW_LINE> stats_dict['runs_complete'] += count <NEW_LINE> stats_dict['runs_total'] += count <NEW_LINE> <DEDENT> elif status == models.Run.NO_DIFF_NEEDED: <NEW_LINE> <INDENT> stats_dict['runs_baseline'] += count <NEW_LINE> <DEDENT> elif status == models.Run.NEEDS_DIFF: <NEW_LINE> <INDENT> stats_dict['runs_total'] += count <NEW_LINE> <DEDENT> <DEDENT> return render_template( 'view_build.html', build=build, release_name_list=release_name_list, release_dict=release_dict, run_stats_dict=run_stats_dict, has_next_page=has_next_page, current_offset=offset, next_offset=offset + page_size, last_offset=max(0, offset - page_size))
|
Page for viewing all releases in a build.
|
625941bb091ae35668666e27
|
def process_create_log_group_event(event): <NEW_LINE> <INDENT> if os.environ['AUTO_SUBSCRIBE_LOG_GROUPS'] == 'true': <NEW_LINE> <INDENT> log_group_name = event["detail"]["requestParameters"]["logGroupName"] <NEW_LINE> try: <NEW_LINE> <INDENT> log_group_options = json.loads(os.environ['LOG_GROUP_OPTIONS']) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> LOGGER.exception(f"Error loading LogGroupOptions") <NEW_LINE> raise <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> LOGGER.info(f"Loaded LogGroupOptions: " + json.dumps(log_group_options)) <NEW_LINE> <DEDENT> matched_group_options = match_log_groups([log_group_name], log_group_options) <NEW_LINE> for log_group_name, options in matched_group_options.items(): <NEW_LINE> <INDENT> put_subscription_filter(log_group_name, options, os.environ['DESTINATION_ARN'])
|
Processes a CloudWatch CreateLogGroup event
Subscribes the new log group.
@param event: The AWS event containing the CloudWatch event metadata
@type event: dict
|
625941bb26238365f5f0ed2d
|
def isCashEnough( self, price, volume, commision ): <NEW_LINE> <INDENT> return self.cash > price * volume + commision
|
Check whether the cash is enough to buy the instrument.
Parameters
----------
price : float
Price to buy the instrument;
volume : int
amount of instrument to buy;
commision : float
commision fee to complete the trade.
Returns
-------
isEnough : bool
An indicator whether the cash is enough.
|
625941bb76e4537e8c35153a
|
def change_phone_number(self, number): <NEW_LINE> <INDENT> if not fullmatch(self.__MATCH_PHONE_NUMBER, number): <NEW_LINE> <INDENT> raise InvalidCustomerPhoneNumberException(number) <NEW_LINE> <DEDENT> self.phone_number = number
|
Change the customer's phone number; raise an exception if an invalid
number is passed.
:param number: the new phone number
:returns: None
|
625941bbd18da76e23532395
|
@review_bp.route('/', methods=['POST']) <NEW_LINE> @oauth.require_auth('review') <NEW_LINE> @crossdomain() <NEW_LINE> def review_post_handler(user): <NEW_LINE> <INDENT> def fetch_params(): <NEW_LINE> <INDENT> is_draft = Parser.bool('json', 'is_draft', optional=True) or False <NEW_LINE> if is_draft: <NEW_LINE> <INDENT> REVIEW_MIN_LENGTH = None <NEW_LINE> <DEDENT> release_group = Parser.uuid('json', 'release_group') <NEW_LINE> text = Parser.string('json', 'text', min=REVIEW_MIN_LENGTH, max=REVIEW_MAX_LENGTH) <NEW_LINE> license_choice = Parser.string('json', 'license_choice') <NEW_LINE> language = Parser.string('json', 'language', min=2, max=3, optional=True) or 'en' <NEW_LINE> if language and language not in supported_languages: <NEW_LINE> <INDENT> raise InvalidRequest(desc='Unsupported language') <NEW_LINE> <DEDENT> if Review.query.filter_by(user=user, release_group=release_group).count(): <NEW_LINE> <INDENT> raise InvalidRequest(desc='You have already published a review for this album') <NEW_LINE> <DEDENT> return release_group, text, license_choice, language, is_draft <NEW_LINE> <DEDENT> if user.is_review_limit_exceeded: <NEW_LINE> <INDENT> raise LimitExceeded('You have exceeded your limit of reviews per day.') <NEW_LINE> <DEDENT> release_group, text, license_choice, language, is_draft = fetch_params() <NEW_LINE> review = Review.create(user=user, release_group=release_group, text=text, license_id=license_choice, language=language, is_draft=is_draft) <NEW_LINE> return jsonify(message='Request processed successfully', id=review.id)
|
Publish a review.
**OAuth scope:** review
:reqheader Content-Type: *application/json*
:json uuid release_group: UUID of the release group that is being reviewed
:json string text: review contents, min length is 25, max is 5000
:json string license_choice: license ID
:json string lang: language code (ISO 639-1), default is ``en`` **(optional)**
:json boolean is_draft: whether the review should be saved as a draft or not, default is ``False`` **(optional)**
:resheader Content-Type: *application/json*
|
625941bbfbf16365ca6f6080
|
def getAllCycles(): <NEW_LINE> <INDENT> cycles = [] <NEW_LINE> for n in graphNodes.keys(): <NEW_LINE> <INDENT> tmp = getCycles(n) <NEW_LINE> for c in tmp: <NEW_LINE> <INDENT> norm = normalizeCycle(c) <NEW_LINE> if not norm in cycles: <NEW_LINE> <INDENT> cycles.append(norm) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> result = [] <NEW_LINE> for c in cycles: <NEW_LINE> <INDENT> result.append((c, amountOf(c))) <NEW_LINE> <DEDENT> return result
|
Returns all cycles in the graph and their amounts as tuples <cycle, amount>
|
625941bb85dfad0860c3ad1c
|
def getAction(self, gameState): <NEW_LINE> <INDENT> def maximizer(state, depth, a, b): <NEW_LINE> <INDENT> if state.isLose() or state.isWin() or depth == 0: <NEW_LINE> <INDENT> return self.evaluationFunction(state) <NEW_LINE> <DEDENT> val = float("-inf") <NEW_LINE> legalActions = state.getLegalActions() <NEW_LINE> succState = [state.generateSuccessor(0,x) for x in legalActions] <NEW_LINE> for each in succState: <NEW_LINE> <INDENT> val = max(val, minimizer(each, depth, state.getNumAgents()-1, a, b)) <NEW_LINE> if val > b: <NEW_LINE> <INDENT> return val <NEW_LINE> <DEDENT> a = max(a, val) <NEW_LINE> <DEDENT> return val <NEW_LINE> <DEDENT> def minimizer(state, depth, index, a, b): <NEW_LINE> <INDENT> if state.isLose() or state.isWin() or depth == 0: <NEW_LINE> <INDENT> return self.evaluationFunction(state) <NEW_LINE> <DEDENT> val = float("inf") <NEW_LINE> legalActions = state.getLegalActions(index) <NEW_LINE> succState = [state.generateSuccessor(index, x) for x in legalActions] <NEW_LINE> for each in succState: <NEW_LINE> <INDENT> if index > 1: <NEW_LINE> <INDENT> val = min(val, minimizer(each, depth, index-1, a, b)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> val = min(val, maximizer(each, depth-1, a, b)) <NEW_LINE> <DEDENT> if val < a: <NEW_LINE> <INDENT> return val <NEW_LINE> <DEDENT> b = min(b, val) <NEW_LINE> <DEDENT> return val <NEW_LINE> <DEDENT> legalActions = gameState.getLegalActions() <NEW_LINE> move = Directions.STOP <NEW_LINE> val = float("-inf") <NEW_LINE> a = float("-inf") <NEW_LINE> b = float("inf") <NEW_LINE> for action in legalActions: <NEW_LINE> <INDENT> tmp = minimizer(gameState.generateSuccessor(0,action), self.depth, gameState.getNumAgents()-1, a, b) <NEW_LINE> if tmp>val: <NEW_LINE> <INDENT> val = tmp <NEW_LINE> move = action <NEW_LINE> <DEDENT> if val > b: <NEW_LINE> <INDENT> return move <NEW_LINE> <DEDENT> a = max(a,val) <NEW_LINE> <DEDENT> return move
|
Returns the minimax action using self.depth and self.evaluationFunction
|
625941bbf7d966606f6a9ec4
|
@app.route('/wishlists/<int:id>', methods=['PUT']) <NEW_LINE> def update_wishlist(id): <NEW_LINE> <INDENT> data = request.get_json() <NEW_LINE> if is_valid(data, 'wishlist'): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return make_response(db.update_wishlist(id, **data), status.HTTP_200_OK) <NEW_LINE> <DEDENT> except WishlistException: <NEW_LINE> <INDENT> message = { 'error' : 'Wishlist %s was not found' % id } <NEW_LINE> return make_response(jsonify(message), status.HTTP_404_NOT_FOUND) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> message = {'error' : 'Wishlist data was not valid'} <NEW_LINE> return make_response(jsonify(message), status.HTTP_400_BAD_REQUEST)
|
The route for modifying a wishlist's user_id or name.
|
625941bb656771135c3eb72f
|
def close(self): <NEW_LINE> <INDENT> pass
|
A method to clean up anything that needs to be cleaned up.
Sub-classes should use super to call up the MRO stack and then
do any class-specific clean up
|
625941bbcb5e8a47e48b7971
|
def details(self, job_id): <NEW_LINE> <INDENT> data = {'api_key': self.api_key} <NEW_LINE> return self.get(self.base_url + '/%s' % str(job_id), data=data)
|
Gets details for the given job
|
625941bb711fe17d82542234
|
def add_command_formatting(self, command): <NEW_LINE> <INDENT> if command.description: <NEW_LINE> <INDENT> self.paginator.add_line(command.description, empty=True) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> signature = self.get_command_signature(command) <NEW_LINE> self.paginator.add_line(signature, empty=True) <NEW_LINE> if command.help: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> self.paginator.add_line(command.help.replace('[p]', self.clean_prefix)) <NEW_LINE> <DEDENT> except RuntimeError: <NEW_LINE> <INDENT> for line in command.help.replace('[p]', self.clean_prefix).splitlines(): <NEW_LINE> <INDENT> self.paginator.add_line(line) <NEW_LINE> <DEDENT> self.paginator.add_line()
|
A utility function to format the non-indented block of commands and groups.
Parameters
------------
command: :class:`Command`
The command to format.
|
625941bb45492302aab5e183
|
def has_view(self, view_name): <NEW_LINE> <INDENT> return lib.appnet_application_has_view(self._as_parameter_, view_name)
|
Checks if this view name exists in this application.
|
625941bb44b2445a33931f62
|
def _check_Z_for_division(Z,eps): <NEW_LINE> <INDENT> ixNonZero = Z!=0 <NEW_LINE> assert np.all(np.abs(Z[ixNonZero]) > eps*10), 'values to small. might introduce some error since close to zero division'
|
Z might be zero sometimes, so we add a small constant epsilon to it; make sure this doesn't change it too much.
|
625941bbd164cc6175782c10
|
def beautify(r): <NEW_LINE> <INDENT> return BeautifulSoup(r.text, "html.parser")
|
:param r: result of requests.get()
|
625941bb23849d37ff7b2f55
|
def test_html(self): <NEW_LINE> <INDENT> tags = (('<form', 1), ('<select', 1), ('<input', 4), ('type="button"', 1), ('type="submit"', 1), ('type="text"', 1)) <NEW_LINE> for texto, quant in tags: <NEW_LINE> <INDENT> with self.subTest(): <NEW_LINE> <INDENT> self.assertContains(self.resp, texto, quant)
|
must contain input tags
|
625941bbe8904600ed9f1dec
|
def get_filters(conv_layers=DEFAULT_CONV_LAYERS): <NEW_LINE> <INDENT> prompt = "\nGive the filters (default = {} for all) for each Convolutional Layer as " "a list (e.g.: 16 32 64 ..): " <NEW_LINE> prompt = prompt.format(DEFAULT_FILTERS) <NEW_LINE> filters = input(prompt) <NEW_LINE> while filters != "": <NEW_LINE> <INDENT> split_filters = filters.split() <NEW_LINE> try: <NEW_LINE> <INDENT> if len(split_filters) == conv_layers: <NEW_LINE> <INDENT> filters = [int(filter) for filter in split_filters] <NEW_LINE> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise UnequalLenError <NEW_LINE> <DEDENT> <DEDENT> except ValueError: <NEW_LINE> <INDENT> print("The filters must be positive integers. Please try again.") <NEW_LINE> filters = input(prompt) <NEW_LINE> <DEDENT> except UnequalLenError: <NEW_LINE> <INDENT> total_filters = len(split_filters) <NEW_LINE> print("The amount of filters passed ({}) is not equal to the number of Convolutional " "Layers ({}). Please try again.".format(total_filters, conv_layers)) <NEW_LINE> filters = input(prompt) <NEW_LINE> <DEDENT> <DEDENT> if filters == "": <NEW_LINE> <INDENT> filters = [DEFAULT_FILTERS for filter in range(conv_layers)] <NEW_LINE> <DEDENT> return filters
|
function used to read the number of filters per convolutional layer
|
625941bbbe8e80087fb20b0b
|
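The validation loop in get_filters is easier to exercise without input(); a non-interactive sketch of the same parse-and-validate step (parse_filters and its default value are assumptions for illustration):

def parse_filters(text, conv_layers, default=32):
    # Empty input falls back to the default filter count for every layer.
    if not text.strip():
        return [default] * conv_layers
    values = [int(tok) for tok in text.split()]  # ValueError on non-integer tokens
    if len(values) != conv_layers:
        raise ValueError("expected {} filters, got {}".format(conv_layers, len(values)))
    return values

print(parse_filters("16 32 64", 3))  # [16, 32, 64]
print(parse_filters("", 3))          # [32, 32, 32]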
def to_sql(self, dialect: Any, aliases: AliasesDictType, current_alias: AliasType) -> Any: <NEW_LINE> <INDENT> self.validate() <NEW_LINE> return self.value
|
Return the value.
|
625941bb21bff66bcd684818
|
def eventMassiveDelete(self, document, entitys): <NEW_LINE> <INDENT> self.deleteEntity(entitys)
|
Massive delete of all entity events.
|
625941bb0fa83653e4656e80
|
def save_to_config(save_config, config_obj, verbose=False): <NEW_LINE> <INDENT> parser = SafeConfigParser() <NEW_LINE> parser.read(save_config) <NEW_LINE> if not os.path.exists(save_config): <NEW_LINE> <INDENT> parser.add_section('settings') <NEW_LINE> <DEDENT> for k, v in viewitems(config_obj._asdict()): <NEW_LINE> <INDENT> if v is not None and isinstance(v, str): <NEW_LINE> <INDENT> parser.set('settings', k, v) <NEW_LINE> <DEDENT> <DEDENT> with open(save_config, 'w') as f: <NEW_LINE> <INDENT> parser.write(f) <NEW_LINE> if verbose: <NEW_LINE> <INDENT> click.echo("Config file written to {}".format(save_config))
|
Write all the values in the config object to the given file in the user's home folder.
:param save_config: Name of the config file to store in the user's home folder
:param config_obj: The config object to export to the config file
:param verbose: Specify whether stdout should display a message
:return:
|
625941bb287bf620b61d3930
|
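A small round-trip sketch of the same pattern as save_to_config, using the modern configparser module rather than the deprecated SafeConfigParser; the Config namedtuple and file name are assumptions for illustration.

from collections import namedtuple
from configparser import ConfigParser

Config = namedtuple('Config', ['host', 'port'])

def save(path, cfg):
    # Persist only the string-valued fields, mirroring the function above.
    parser = ConfigParser()
    parser['settings'] = {k: v for k, v in cfg._asdict().items() if isinstance(v, str)}
    with open(path, 'w') as f:
        parser.write(f)

save('app.ini', Config(host='example.com', port='8080'))
reader = ConfigParser()
reader.read('app.ini')
print(reader['settings']['host'])  # example.com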
def __str__(self): <NEW_LINE> <INDENT> return str('Movie Title: ' + self.title + '\n' 'Movie Story: ' + self.mov_story)
|
String representation of the Movie instance
|
625941bbadb09d7d5db6c655
|
def test_mine_get_dict_list(self, tgt_type_key='tgt_type'): <NEW_LINE> <INDENT> self.funcs.cache.store('minions/webserver', 'mine', dict(ip_addr='2001:db8::1:3', ip4_addr='127.0.0.1')) <NEW_LINE> with patch('salt.utils.minions.CkMinions._check_compound_minions', MagicMock(return_value=(dict( minions=['webserver'], missing=[])))): <NEW_LINE> <INDENT> ret = self.funcs._mine_get( { 'id': 'requester_minion', 'tgt': 'G@roles:web', 'fun': ['ip_addr', 'ip4_addr'], tgt_type_key: 'compound', } ) <NEW_LINE> <DEDENT> self.assertDictEqual(ret, dict(ip_addr=dict(webserver='2001:db8::1:3'), ip4_addr=dict(webserver='127.0.0.1')))
|
Asserts that ``mine_get`` gives the expected results when the request
is a list.
Actually this only tests that:
- the correct check minions method is called
- the correct cache key is subsequently used
|
625941bb711fe17d82542235
|
def is_exclusive_match(logic, data, match_all='all'): <NEW_LINE> <INDENT> if isinstance(logic, six.string_types): <NEW_LINE> <INDENT> logic = set(parse_list(logic)) <NEW_LINE> <DEDENT> if not logic: <NEW_LINE> <INDENT> return not data <NEW_LINE> <DEDENT> if not isinstance(logic, (list, tuple, set)): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> matched = False <NEW_LINE> for entry in logic: <NEW_LINE> <INDENT> if not isinstance(entry, (six.string_types, list, tuple, set)): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> entries = set(parse_list(entry)) <NEW_LINE> if not entries: <NEW_LINE> <INDENT> return not data <NEW_LINE> <DEDENT> if len(entries.intersection(data.union({match_all}))) == len(entries): <NEW_LINE> <INDENT> matched = True <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return matched
|
The data variable should always be a set of strings that the logic can be
compared against; if it isn't already a set, it will be converted to one.
These identify the tags themselves.
Our logic should be a list as well:
- top level entries are treated as an 'or'
- second level (or more) entries are treated as 'and'
examples:
logic="tagA, tagB" = tagA or tagB
logic=['tagA', 'tagB'] = tagA or tagB
logic=[('tagA', 'tagC'), 'tagB'] = (tagA and tagC) or tagB
logic=[('tagB', 'tagC')] = tagB and tagC
|
625941bb3617ad0b5ed67dbc
|
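The or/and semantics documented for is_exclusive_match can be checked with a simplified, self-contained sketch; this reimplements only the documented matching rules, without the six and parse_list helpers the original depends on.

def matches(logic, data, match_all='all'):
    # Top-level entries are OR'ed together; nested lists/tuples are AND'ed.
    if isinstance(logic, str):
        logic = [t.strip() for t in logic.split(',')]
    data = set(data) | {match_all}
    for entry in logic:
        entries = {entry} if isinstance(entry, str) else set(entry)
        if entries and entries.issubset(data):
            return True
    return False

assert matches('tagA, tagB', {'tagB'})                        # tagA or tagB
assert matches([('tagA', 'tagC'), 'tagB'], {'tagA', 'tagC'})  # (tagA and tagC) or tagB
assert not matches([('tagB', 'tagC')], {'tagB'})              # tagB and tagC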
def _copyFieldsFromParent(self, parent): <NEW_LINE> <INDENT> self.overrides = parent
|
Copy across field values from the recurring event parent.
|
625941bb4e696a04525c930f
|
def __init__(self): <NEW_LINE> <INDENT> self.general_params = GeneralParams() <NEW_LINE> self.__defaults_general = {'w': 10, 'k': 100, 'sigma': 0.2, 'sample_period': 500, 'ensemble_num': 1} <NEW_LINE> self.general_params.set_from_dictionary(self.__defaults_general) <NEW_LINE> self.pair_params = {} <NEW_LINE> self.__names = []
|
The full set of metadata for a single EBMetaD run includes both the general parameters
and the pair-specific parameters.
|
625941bb56ac1b37e6264098
|
def __len__(self): <NEW_LINE> <INDENT> return len(self._dataframe)
|
Return the number of cities in the Cities object.
:returns:
Number of cities in the Cities object.
|
625941bbd164cc6175782c11
|
def init_providers(): <NEW_LINE> <INDENT> GoogleServerData.set_to_google_server() <NEW_LINE> MicrosoftServerData.set_to_microsoft_server() <NEW_LINE> PcloudServerData.set_to_pcloud_server() <NEW_LINE> YandexServerData.set_to_yandex_server() <NEW_LINE> BoxServerData.set_to_box_server()
|
Just sets up provider metadata for general operation.
:return: None
|
625941bba79ad161976cc008
|
def one_step(self): <NEW_LINE> <INDENT> if self._param.test_interval and self._iter % self._param.test_interval == 0: <NEW_LINE> <INDENT> if (self._iter == 0 and self._param.test_initialization) or self._iter != 0: <NEW_LINE> <INDENT> for test_id in range(len(self.tests)): self.Test(test_id) <NEW_LINE> <DEDENT> <DEDENT> run_time, stats = 0., {'loss': {'total': 0.}, 'iter': self.iter} <NEW_LINE> for i in range(self._param.iter_size): <NEW_LINE> <INDENT> tic = time.time() <NEW_LINE> self.train(return_outputs=False) <NEW_LINE> run_time += (time.time() - tic) <NEW_LINE> for cost in self._net._costs: <NEW_LINE> <INDENT> cost_value = ws.FetchTensor(cost) <NEW_LINE> if cost_value.size == 1: <NEW_LINE> <INDENT> stats['loss']['total'] += cost_value[0] <NEW_LINE> <DEDENT> <DEDENT> for idx, net_output in enumerate(self._net.outputs): <NEW_LINE> <INDENT> values = ws.FetchTensor(self._net.blobs[net_output].data) <NEW_LINE> if values.size != 1: continue <NEW_LINE> if net_output not in stats['loss']: stats['loss'][net_output] = 0. <NEW_LINE> stats['loss'][net_output] += values[0] <NEW_LINE> <DEDENT> <DEDENT> self.GetLearningRate() <NEW_LINE> tic = time.time() <NEW_LINE> self.update() <NEW_LINE> run_time += (time.time() - tic) <NEW_LINE> self._iter = self._iter + 1 <NEW_LINE> if self._param.snapshot: <NEW_LINE> <INDENT> if self._iter % self._param.snapshot == 0: self.snapshot() <NEW_LINE> <DEDENT> for k in stats['loss'].keys(): <NEW_LINE> <INDENT> stats['loss'][k] /= self._param.iter_size <NEW_LINE> <DEDENT> stats['lr'] = self.optimizer.base_lr <NEW_LINE> stats['time'] = run_time <NEW_LINE> return stats
|
Run the train net for one step.
Returns
-------
dict
The stats.
|
625941bba4f1c619b28aff03
|
def get_kth(self, nums1, start1, end1, nums2, start2, end2, k): <NEW_LINE> <INDENT> len1 = end1 - start1 + 1 <NEW_LINE> len2 = end2 - start2 + 1 <NEW_LINE> if len1 > len2: <NEW_LINE> <INDENT> return self.get_kth(nums2, start2, end2, nums1, start1, end1, k) <NEW_LINE> <DEDENT> if len1 == 0: <NEW_LINE> <INDENT> return nums2[start2 + k - 1] <NEW_LINE> <DEDENT> if k == 1: <NEW_LINE> <INDENT> return min(nums1[start1], nums2[start2]) <NEW_LINE> <DEDENT> i = start1 + min(len1, k // 2) - 1 <NEW_LINE> j = start2 + min(len2, k // 2) - 1 <NEW_LINE> if nums1[i] > nums2[j]: <NEW_LINE> <INDENT> return self.get_kth(nums1, start1, end1, nums2, j + 1, end2, k - (j - start2 + 1)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.get_kth(nums1, i + 1, end1, nums2, start2, end2, k - (i - start1 + 1))
|
Args:
nums1: list[int]
start1: int
end1: int
nums2: list[int]
start2: int
end2: int
k: int
Return:
int
|
625941bbb7558d58953c4ddd
|
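get_kth is the standard O(log k) helper behind the median-of-two-sorted-arrays problem. A self-contained, slice-based sketch of the same idea, with a usage example:

def get_kth(a, b, k):
    # Return the k-th (1-based) smallest element of two sorted lists.
    if len(a) > len(b):
        return get_kth(b, a, k)          # keep a as the shorter list
    if not a:
        return b[k - 1]
    if k == 1:
        return min(a[0], b[0])
    i = min(len(a), k // 2)
    j = min(len(b), k // 2)
    if a[i - 1] > b[j - 1]:
        return get_kth(a, b[j:], k - j)  # b's first j items cannot hold the answer
    return get_kth(a[i:], b, k - i)

nums1, nums2 = [1, 3], [2, 4, 5]
total = len(nums1) + len(nums2)
print(get_kth(nums1, nums2, (total + 1) // 2))  # 3, the median of the merged lists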
def get_player_position(self): <NEW_LINE> <INDENT> for x, line in enumerate(self.structure): <NEW_LINE> <INDENT> for y, character in enumerate(line): <NEW_LINE> <INDENT> if character == 'P': <NEW_LINE> <INDENT> return x, y
|
Gets the player's position.
|
625941bba17c0f6771cbdf16
|
def parse_input_condition(lines): <NEW_LINE> <INDENT> write = int(lines[0].split("value")[1].strip('.')) <NEW_LINE> move = lines[1].split("the")[1][1] <NEW_LINE> state = lines[2].split("state")[1][1] <NEW_LINE> return (write, move, state)
|
>>> parse_input_condition([ " - Write the value 1.", " - Move one slot to the right.", " - Continue with state B.",])
(1, 'r', 'B')
|
625941bbbf627c535bc13099
|
def get_client_version(): <NEW_LINE> <INDENT> version = 'unknown' <NEW_LINE> try: <NEW_LINE> <INDENT> version = get_distribution('motuclient').version <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> version = "Undefined" <NEW_LINE> <DEDENT> return version
|
Return the version (as a string) of this client.
The value is automatically set by the maven processing build, so don't
touch it unless you know what you are doing.
|
625941bb96565a6dacc8f597
|
def _get_city_data(self, city_id, year): <NEW_LINE> <INDENT> response = self.session.post( self._base_address, data={ 'MunicipioID': city_id, 'Ano': year } ) <NEW_LINE> if response.status_code != 200: <NEW_LINE> <INDENT> raise RequestError(response.status_code) <NEW_LINE> <DEDENT> return self._parse_download_table( BeautifulSoup(response.content, 'lxml') )
|
Requests and parses city links
|
625941bbde87d2750b85fc52
|
def make_instance(self, include_optional): <NEW_LINE> <INDENT> if include_optional : <NEW_LINE> <INDENT> return Gtccancel( gtcOrderNo = '0' ) <NEW_LINE> <DEDENT> else : <NEW_LINE> <INDENT> return Gtccancel( )
|
Test Gtccancel
include_optional is a boolean: when False only required
params are included; when True both required and
optional params are included.
|
625941bb796e427e537b0486
|
def __init__(self, jac, tol): <NEW_LINE> <INDENT> self._orig_set_abs = jac._set_abs <NEW_LINE> self._jac = jac <NEW_LINE> self._tol = tol
|
Initialize the function replacement.
Parameters
----------
jac : Jacobian
The Jacobian having its _set_abs method replaced.
tol : float
Values between -tol and tol will be shifted away from 0.
|
625941bb7cff6e4e81117849
|
def test_service_to_rdf_without_identifier_should_raise_error( minimal_spec: str, ) -> None: <NEW_LINE> <INDENT> with pytest.raises(RequiredFieldMissingError): <NEW_LINE> <INDENT> catalog = Catalog() <NEW_LINE> catalog.identifier = "http://example.com/catalogs/1" <NEW_LINE> url = "http://example.com/specifications/1" <NEW_LINE> oas = yaml.safe_load(minimal_spec) <NEW_LINE> oas_spec = OASDataService(url, oas, "") <NEW_LINE> for dataservice in oas_spec.dataservices: <NEW_LINE> <INDENT> catalog.services.append(dataservice) <NEW_LINE> <DEDENT> catalog.to_rdf()
|
It raises a RequiredFieldMissingError.
|
625941bb15baa723493c3e37
|
def test_firstitem(self, neighborlist): <NEW_LINE> <INDENT> assert neighborlist.firstitem() == 1 <NEW_LINE> assert neighborlist._idx == 0
|
Test firstitem().
|
625941bb82261d6c526ab366
|
def tr_to(self, state): <NEW_LINE> <INDENT> return [t for t in self.transitions if t.fstate == state]
|
Obtains the list of transitions finishing in a given state.
|
625941bbfb3f5b602dac3554
|
def proxy_options_namespaced_node_28(self, name, path, **kwargs): <NEW_LINE> <INDENT> all_params = ['name', 'path'] <NEW_LINE> all_params.append('callback') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method proxy_options_namespaced_node_28" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('name' not in params) or (params['name'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `name` when calling `proxy_options_namespaced_node_28`") <NEW_LINE> <DEDENT> if ('path' not in params) or (params['path'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `path` when calling `proxy_options_namespaced_node_28`") <NEW_LINE> <DEDENT> resource_path = '/api/v1/proxy/nodes/{name}/{path}'.replace('{format}', 'json') <NEW_LINE> path_params = {} <NEW_LINE> if 'name' in params: <NEW_LINE> <INDENT> path_params['name'] = params['name'] <NEW_LINE> <DEDENT> if 'path' in params: <NEW_LINE> <INDENT> path_params['path'] = params['path'] <NEW_LINE> <DEDENT> query_params = {} <NEW_LINE> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client.select_header_accept(['*/*']) <NEW_LINE> if not header_params['Accept']: <NEW_LINE> <INDENT> del header_params['Accept'] <NEW_LINE> <DEDENT> header_params['Content-Type'] = self.api_client.select_header_content_type(['*/*']) <NEW_LINE> auth_settings = [] <NEW_LINE> response = self.api_client.call_api(resource_path, 'OPTIONS', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='str', auth_settings=auth_settings, callback=params.get('callback')) <NEW_LINE> return response
|
proxy OPTIONS requests to Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.proxy_options_namespaced_node_28(name, path, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the Node (required)
:param str path: path to the resource (required)
:return: str
If the method is called asynchronously,
returns the request thread.
|
625941bbf7d966606f6a9ec5
|
def reverseList(self, head): <NEW_LINE> <INDENT> prev = None <NEW_LINE> current = head <NEW_LINE> nxt = None <NEW_LINE> while current is not None: <NEW_LINE> <INDENT> nxt = current.next <NEW_LINE> current.next = prev <NEW_LINE> prev = current <NEW_LINE> current = nxt <NEW_LINE> <DEDENT> return prev
|
:type head: ListNode
:rtype: ListNode
|
625941bbe8904600ed9f1ded
|
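A runnable sketch of the same iterative reversal as reverseList, with a minimal ListNode so the round trip can be verified:

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def reverse_list(head):
    prev = None
    while head:
        # Relink one node per step; the tuple RHS is evaluated before assignment.
        head.next, prev, head = prev, head, head.next
    return prev

node = reverse_list(ListNode(1, ListNode(2, ListNode(3))))  # was 1 -> 2 -> 3
values = []
while node:
    values.append(node.val)
    node = node.next
print(values)  # [3, 2, 1]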
def GetNumberOfArguments(self): <NEW_LINE> <INDENT> return _lldb.SBTypeMemberFunction_GetNumberOfArguments(self)
|
GetNumberOfArguments(self) -> uint32_t
|
625941bb090684286d50eba5
|
def rsa(public_key, signature, message): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> public_rsa = load_pem_public_key(bytes(public_key), backend=default_backend()) <NEW_LINE> hashed = util.sha256(message) <NEW_LINE> public_rsa.verify( binascii.unhexlify(signature), hashed, padding.PSS( mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH ), hashes.SHA256() ) <NEW_LINE> <DEDENT> except InvalidSignature: <NEW_LINE> <INDENT> raise Exception('Invalid signature')
|
Verifies an RSA signature.
Args:
public_key (str): Public key with BEGIN and END sections.
signature (str): Hex value of the signature with its leading 0x stripped.
message (str): Message that was signed, unhashed.
|
625941bb91af0d3eaac9b8d9
|
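A self-contained PSS sign/verify round trip with the cryptography package, sketched without the util.sha256 and hex-signature conventions of the rsa verifier above; the key size and message are illustrative assumptions.

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
message = b'payload to protect'

# PSS parameters must match on both sides of the round trip.
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH)
signature = private_key.sign(message, pss, hashes.SHA256())

try:
    private_key.public_key().verify(signature, message, pss, hashes.SHA256())
    print('signature valid')
except InvalidSignature:
    print('invalid signature')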
def kmers(dna, k): <NEW_LINE> <INDENT> counts = {} <NEW_LINE> for i in range(len(dna)-k+1): <NEW_LINE> <INDENT> subseq=dna[i:i+k] <NEW_LINE> counts[subseq] = counts.get(subseq, 0) + 1 <NEW_LINE> <DEDENT> return counts
|
Given a dna sequence, return a hash with all kmers of length k and their frequency.
This method does NOT use the reverse complement, it only checks the strand you supply.
:param dna: the dna sequence
:param k: the length of the kmer
:return: a hash of kmers and abundance
|
625941bb57b8e32f52483363
|
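With the off-by-one in kmers fixed (the range needs len(dna) - k + 1 so the final window is counted), a quick usage check; collections.Counter is an equivalent idiom:

from collections import Counter

def kmers(dna, k):
    # Count every window, including the one ending at the last base.
    return Counter(dna[i:i + k] for i in range(len(dna) - k + 1))

print(kmers('ATATAT', 2))  # Counter({'AT': 3, 'TA': 2})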
def delete_note(self, note_id): <NEW_LINE> <INDENT> note = self.filter_by_id(self.notes, note_id) <NEW_LINE> if note: <NEW_LINE> <INDENT> index = self.notes.index(note[0]) <NEW_LINE> self.notes.pop(index) <NEW_LINE> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
|
Removes the note with `note_id(str)` from the stub EdxNotes service.
|
625941bb50485f2cf553cc5c
|
def doesClear(self): <NEW_LINE> <INDENT> ran_p = random.random() <NEW_LINE> if ran_p <= self.clearProb: return True <NEW_LINE> else: return False
|
Stochastically determines whether this virus particle is cleared from the
patient's body at a time step.
returns: True with probability self.clearProb and otherwise returns
False.
|
625941bba8370b7717052764
|
def feedArticleParsing(feed): <NEW_LINE> <INDENT> global i <NEW_LINE> for entry in feed: <NEW_LINE> <INDENT> toParse = Article(entry.link) <NEW_LINE> toParse.download() <NEW_LINE> toParse.parse() <NEW_LINE> entry = ParsedEntry(toParse.title, toParse.text, entry.link) <NEW_LINE> articleSet.append(entry) <NEW_LINE> topTags = articleSet[i].getTopTags(30) <NEW_LINE> initTags(topTags, tagSet, entry) <NEW_LINE> i+=1 <NEW_LINE> del toParse <NEW_LINE> if i % 100 == 0: <NEW_LINE> <INDENT> print("You have parsed " + str(i) + " articles.")
|
Procedure for reading in articles from a given feed.
For each article in the RSS feed, grab it, process it, and find its tags.
|
625941bb004d5f362079a1fa
|
def filter_path_elements(elements): <NEW_LINE> <INDENT> for i, element in enumerate(elements): <NEW_LINE> <INDENT> element = element.replace('/', '_') <NEW_LINE> if element.startswith('.'): <NEW_LINE> <INDENT> element = '_%s' % element[1:] <NEW_LINE> <DEDENT> if element.endswith('.'): <NEW_LINE> <INDENT> element = '%s_' % element[:-1] <NEW_LINE> <DEDENT> elements[i] = element <NEW_LINE> <DEDENT> return filter_fname(os.path.join(*elements))
|
Wrapper function for filter_fname that also handles the artist/album/song title
containing forward slashes.
basedir is the top-level directory (assumed not to contain forbidden characters);
elements is the list of path elements that may contain slashes: artist, album, title, etc.
|
625941bb596a89723608998e
|
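A self-contained sketch of the slash-and-dot sanitisation step in filter_path_elements; since filter_fname is external, this version (sanitize_elements, a hypothetical name) just joins the cleaned elements:

import os

def sanitize_elements(elements):
    # Replace path separators and guard leading/trailing dots before joining.
    cleaned = []
    for element in elements:
        element = element.replace('/', '_')
        if element.startswith('.'):
            element = '_' + element[1:]
        if element.endswith('.'):
            element = element[:-1] + '_'
        cleaned.append(element)
    return os.path.join(*cleaned)

print(sanitize_elements(['AC/DC', 'Back in Black', 'Hells Bells.']))
# AC_DC/Back in Black/Hells Bells_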
def __init__(self): <NEW_LINE> <INDENT> self.FromEmailAddress = None <NEW_LINE> self.Destination = None <NEW_LINE> self.Subject = None <NEW_LINE> self.ReplyToAddresses = None <NEW_LINE> self.Template = None <NEW_LINE> self.Simple = None
|
:param FromEmailAddress: Sender email address, e.g. noreply@mail.qcloud.com.
:type FromEmailAddress: str
:param Destination: Recipient email addresses
:type Destination: list of str
:param ReplyToAddresses: The "reply-to" email address. You may enter any address at which you can receive mail, including a personal mailbox. If left empty, recipients will reply to Tencent Cloud.
:type ReplyToAddresses: str
:param Subject: Email subject
:type Subject: str
:param Template: Template parameters to fill in when sending with a template
:type Template: :class:`tencentcloud.ses.v20201002.models.Template`
:param Simple: Email content to fill in when sending content directly via the API
:type Simple: :class:`tencentcloud.ses.v20201002.models.Simple`
|
625941bb293b9510aa2c315c
|