| code (string, 4-4.48k chars) | docstring (string, 1-6.45k chars) | _id (string, 24 chars) |
|---|---|---|
def index(request): <NEW_LINE> <INDENT> logger.debug(request) <NEW_LINE> link_latest_list = Link.objects.order_by('-updated_at').all() <NEW_LINE> context = {'link_latest_list': link_latest_list} <NEW_LINE> return render(request, "link/index.html", context, status=200)
|
view link's list page
|
625941bc92d797404e304056
|
def test_api__get_workspaces__ok_200__nominal_case(self): <NEW_LINE> <INDENT> dbsession = get_tm_session(self.session_factory, transaction.manager) <NEW_LINE> admin = dbsession.query(models.User) .filter(models.User.email == 'admin@admin.admin') .one() <NEW_LINE> workspace_api = WorkspaceApi( session=dbsession, current_user=admin, config=self.app_config, ) <NEW_LINE> workspace_api.create_workspace('test', save_now=True) <NEW_LINE> workspace_api.create_workspace('test2', save_now=True) <NEW_LINE> workspace_api.create_workspace('test3', save_now=True) <NEW_LINE> transaction.commit() <NEW_LINE> self.testapp.authorization = ( 'Basic', ( 'admin@admin.admin', 'admin@admin.admin' ) ) <NEW_LINE> res = self.testapp.get('/api/v2/workspaces', status=200) <NEW_LINE> res = res.json_body <NEW_LINE> assert len(res) == 3 <NEW_LINE> workspace = res[0] <NEW_LINE> assert workspace['label'] == 'test' <NEW_LINE> assert workspace['slug'] == 'test' <NEW_LINE> workspace = res[1] <NEW_LINE> assert workspace['label'] == 'test2' <NEW_LINE> assert workspace['slug'] == 'test2' <NEW_LINE> workspace = res[2] <NEW_LINE> assert workspace['label'] == 'test3' <NEW_LINE> assert workspace['slug'] == 'test3'
|
Check that all workspaces reachable by the user are returned, using user auth.
|
625941bcfff4ab517eb2f307
|
@curry <NEW_LINE> def get_by_uuid(uuid, path='.'): <NEW_LINE> <INDENT> return pipe( path, dtr.discover, list, filter(lambda x: uuid in x.uuid), list, get(0, default=None) )
|
Get a Treant by short ID
Args:
uuid: a portion of the uuid
path: the search path for Treants
Returns:
a Treant
|
625941bc711fe17d8254223e
|
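Because `get_by_uuid` is wrapped in a toolz-style `@curry`, the search path can be supplied later; a minimal usage sketch (the uuid fragment and path below are made up):
```python
# Partial application: fix the uuid fragment now, supply the path later.
find_deadbeef = get_by_uuid('deadbeef')
treant = find_deadbeef('/data/treants')
# Equivalent single call:
treant = get_by_uuid('deadbeef', '/data/treants')
```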
def test_binary_small(self): <NEW_LINE> <INDENT> input_data = "101b" <NEW_LINE> exp_data = 0b101 <NEW_LINE> exp_tokens = [ self.lex_token('binaryValue', exp_data, 1, 0), ] <NEW_LINE> self.run_assert_lexer(input_data, exp_tokens)
|
Test a small binary number.
|
625941bc3539df3088e2e218
|
def client_status(self, mac): <NEW_LINE> <INDENT> for client in self._client(SERVER_GETSTATUS, mac)['clients']: <NEW_LINE> <INDENT> if client.get('host').get('mac') == mac: <NEW_LINE> <INDENT> return client <NEW_LINE> <DEDENT> <DEDENT> raise ValueError('No client at given mac')
|
Client status.
'System.GetStatus' with a 'client' parameter
should probably just return the client record,
but instead we get a full system status, so we
have to extract just the relevant client record.
|
625941bc56ac1b37e62640a2
|
def _text_set ( self ): <NEW_LINE> <INDENT> self._refresh()
|
Handles the 'text' facet being changed.
|
625941bcbf627c535bc130a3
|
def __ne__(self, other): <NEW_LINE> <INDENT> if not isinstance(other, TextAvailableSource): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return self.to_dict() != other.to_dict()
|
Returns true if both objects are not equal
|
625941bc15baa723493c3e40
|
def initialize(self, *args, **kwargs): <NEW_LINE> <INDENT> app = QApplication.instance() <NEW_LINE> if app is None: <NEW_LINE> <INDENT> if not args: <NEW_LINE> <INDENT> args = ([''],) <NEW_LINE> <DEDENT> app = QApplication(*args, **kwargs) <NEW_LINE> <DEDENT> self._qt_app = app
|
Initializes the underlying QApplication object. It does
*not* start the event loop. If the application object is already
created, it is a no-op.
|
625941bc30c21e258bdfa369
|
def __init__(self, vertices=None, edges=None, adjacency_matrix=None): <NEW_LINE> <INDENT> super(WeightedDigraph, self).__init__(vertices, edges, adjacency_matrix) <NEW_LINE> self._vertices = None <NEW_LINE> self._edges = None <NEW_LINE> self._adjacency_matrix = None <NEW_LINE> self.vertices = vertices <NEW_LINE> self.edges = edges <NEW_LINE> self.adjacency_matrix = adjacency_matrix
|
Constructor
:param vertices: Set of vertices
:param edges: Set of tuple entries (vertex1, vertex2, weight)
:param adjacency_matrix:
:type vertices: set(Vertex)
:type edges: set(tuple)
|
625941bcfbf16365ca6f608b
|
def target_sound_quality(target, distortion_factor_target=0.2, distortion_factor_noise=0.99, lowpass_cutoff_target=3500, lowpass_cutoff_noise=3500, num_points=2048, window_type='hann', sample_rate=None): <NEW_LINE> <INDENT> if not isinstance(target, audio.Wave): <NEW_LINE> <INDENT> target = audio.Wave(target, sample_rate) <NEW_LINE> <DEDENT> signals_to_sum = [ distorted_target(target, distortion_factor_target, lowpass_cutoff_target, num_points, window_type, sample_rate), musical_noise(target, distortion_factor_noise, lowpass_cutoff_noise, num_points, window_type, sample_rate), ] <NEW_LINE> for signal in signals_to_sum: <NEW_LINE> <INDENT> signal.loudness = -23 <NEW_LINE> <DEDENT> target_sound_quality_anchor = sum(signals_to_sum)[:target.num_frames] <NEW_LINE> target_sound_quality_anchor.loudness = target.loudness <NEW_LINE> return target_sound_quality_anchor
|
Generates a target sound quality anchor, defined as the sum of the
distorted target and an artefacts signal, both equally loud.
Default parameters based on [3].
Parameters
----------
target : np.ndarray or Wave, shape=(num_samples, num_channels)
The true target source.
distortion_factor_target : float, optional
Proportion of time frames to zero out (default 0.2).
distortion_factor_noise : float, optional
Proportion of time-frequency bins to zero out (default 0.99). This
determines the amount and timbre of musical noise.
lowpass_cutoff_target : float, optional
Cutoff frequency of the lowpass filter applied to the target in Hz
(default 3500).
lowpass_cutoff_noise : float, optional
Cutoff frequency of the lowpass filter applied to the musical noise in
Hz (default 3500).
num_points : int, optional
Number of points to use for the FFT (default 2048).
window_type : str, optional
Type of window to use for the FFT (default hann).
sample_rate : int, optional
Only needed if Wave objects not provided (default None).
Returns
-------
target_sound_quality_anchor : Wave, shape=(num_samples, num_channels)
Lowpass filtered and time-distorted target source plus musical_noise.
|
625941bc4a966d76dd550eda
|
def onClick(self, layer, pos5d, pos): <NEW_LINE> <INDENT> label = self.editor.brushingModel.drawnNumber <NEW_LINE> if label == self.editor.brushingModel.erasingNumber: <NEW_LINE> <INDENT> label = 0 <NEW_LINE> <DEDENT> topLevelOp = self.topLevelOperatorView.viewed_operator() <NEW_LINE> imageIndex = topLevelOp.LabelInputs.index( self.topLevelOperatorView.LabelInputs ) <NEW_LINE> operatorAxisOrder = self.topLevelOperatorView.SegmentationImagesOut.meta.getAxisKeys() <NEW_LINE> assert operatorAxisOrder == list('txyzc'), "Need to update onClick() if the operator no longer expects volumina axis order. Operator wants: {}".format( operatorAxisOrder ) <NEW_LINE> self.topLevelOperatorView.assignObjectLabel(imageIndex, pos5d, label)
|
Extracts the object index that was clicked on and updates
that object's label.
|
625941bc63d6d428bbe443bd
|
@tf_export('profiler.experimental.client.monitor', v1=[]) <NEW_LINE> def monitor(service_addr, duration_ms, level=1): <NEW_LINE> <INDENT> return _pywrap_profiler.monitor( _strip_prefix(service_addr, _GRPC_PREFIX), duration_ms, level, True)
|
Sends grpc requests to profiler server to perform on-demand monitoring.
The monitoring result is a light weight performance summary of your model
execution. This method will block the caller thread until it receives the
monitoring result. This method currently supports Cloud TPU only.
Args:
service_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466.
duration_ms: Duration of monitoring in ms.
level: Choose a monitoring level between 1 and 2 to monitor your job. Level
2 is more verbose than level 1 and shows more metrics.
Returns:
A string of monitoring output.
Example usage:
```python
# Continuously send gRPC requests to the Cloud TPU to monitor the model
# execution.
for query in range(0, 100):
  print(tf.profiler.experimental.client.monitor('grpc://10.0.0.2:8466', 1000))
```
|
625941bc498bea3a759b997e
|
def list_tab(user): <NEW_LINE> <INDENT> data = raw_cron(user) <NEW_LINE> ret = {'crons': [], 'special': []} <NEW_LINE> for line in data.split('\n'): <NEW_LINE> <INDENT> if line.startswith('@'): <NEW_LINE> <INDENT> dat = {} <NEW_LINE> comps = line.split() <NEW_LINE> if len(comps) < 2: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> dat['spec'] = comps[0] <NEW_LINE> dat['cmd'] = ' '.join(comps[1:]) <NEW_LINE> ret['special'].append(dat) <NEW_LINE> <DEDENT> if len(line.split()) > 5: <NEW_LINE> <INDENT> comps = line.split() <NEW_LINE> dat = {} <NEW_LINE> dat['min'] = comps[0] <NEW_LINE> dat['hour'] = comps[1] <NEW_LINE> dat['daymonth'] = comps[2] <NEW_LINE> dat['month'] = comps[3] <NEW_LINE> dat['dayweek'] = comps[4] <NEW_LINE> dat['cmd'] = ' '.join(comps[5:]) <NEW_LINE> ret['crons'].append(dat) <NEW_LINE> <DEDENT> <DEDENT> return ret
|
Return the contents of the specified user's crontab
CLI Example:
salt '*' cron.list_tab root
|
625941bc26068e7796caeba7
|
def _gap_init_(self): <NEW_LINE> <INDENT> if not self.is_absolute(): <NEW_LINE> <INDENT> raise NotImplementedError("Currently, only simple algebraic extensions are implemented in gap") <NEW_LINE> <DEDENT> G = sage.interfaces.gap.gap <NEW_LINE> q = self.polynomial() <NEW_LINE> if q.variable_name()!='E': <NEW_LINE> <INDENT> return 'CallFuncList(function() local %s,E; %s:=Indeterminate(%s,"%s"); E:=AlgebraicExtension(%s,%s,"%s"); return E; end,[])'%(q.variable_name(),q.variable_name(),G(self.base_ring()).name(),q.variable_name(),G(self.base_ring()).name(),self.polynomial().__repr__(),str(self.gen())) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 'CallFuncList(function() local %s,F; %s:=Indeterminate(%s,"%s"); F:=AlgebraicExtension(%s,%s,"%s"); return F; end,[])'%(q.variable_name(),q.variable_name(),G(self.base_ring()).name(),q.variable_name(),G(self.base_ring()).name(),self.polynomial().__repr__(),str(self.gen()))
|
Create a gap object representing self and return its name
EXAMPLE::
sage: z = QQ['z'].0
sage: K.<zeta> = NumberField(z^2 - 2)
sage: K._gap_init_() # the following variable name $sage1 represents the F.base_ring() in gap and is somehow random
'CallFuncList(function() local z,E; z:=Indeterminate($sage1,"z"); E:=AlgebraicExtension($sage1,z^2 - 2,"zeta"); return E; end,[])'
sage: k = gap(K)
sage: k
<algebraic extension over the Rationals of degree 2>
sage: k.GeneratorsOfDivisionRing()
[ zeta ]
The following tests that it is possible to use a defining
polynomial in the variable ``E``, even though by default
``E`` is used as a local variable in the above GAP
``CallFuncList``::
sage: P.<E> = QQ[]
sage: L.<tau> = NumberField(E^3 - 2)
sage: l = gap(L); l
<algebraic extension over the Rationals of degree 3>
sage: l.GeneratorsOfField()
[ tau ]
sage: gap(tau)^3
!2
|
625941bcfb3f5b602dac355e
|
def decode_spike(cam_res, key): <NEW_LINE> <INDENT> data_shift = np.uint8(np.log2(cam_res)) <NEW_LINE> col = key >> (data_shift + 1) <NEW_LINE> row = (key >> 1) & ((0b1 << data_shift) -1) <NEW_LINE> polarity = key & 1 <NEW_LINE> return row, col, polarity
|
Decode DVS emulator output
|
625941bc5fdd1c0f98dc00ff
|
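A round-trip sketch of the bit layout the decoder implies (column in the high bits, then row, polarity in bit 0); the packing step below is inferred from `decode_spike`, not taken from the emulator source:
```python
import numpy as np

cam_res = 128                          # power-of-two sensor resolution
shift = int(np.log2(cam_res))          # 7 address bits per coordinate
row, col, polarity = 3, 5, 1
key = (col << (shift + 1)) | (row << 1) | polarity
print(decode_spike(cam_res, key))      # -> (3, 5, 1)
```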
def can_run_now(self): <NEW_LINE> <INDENT> for preq in self.prerequisites: <NEW_LINE> <INDENT> if preq.is_done(): <NEW_LINE> <INDENT> if preq.was_invalidated and not preq.was_run and not preq.is_loadable(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> elif preq._pruned: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True
|
Can this job run right now?
|
625941bcaad79263cf39090a
|
def disconnect(self): <NEW_LINE> <INDENT> self.connection.close()
|
Close connection to APNS server
|
625941bc2ae34c7f2600cfff
|
def update(self, dets): <NEW_LINE> <INDENT> self.frame_count += 1 <NEW_LINE> trks = np.zeros((len(self.trackers), 5)) <NEW_LINE> to_del = [] <NEW_LINE> ret = [] <NEW_LINE> for t, trk in enumerate(trks): <NEW_LINE> <INDENT> pos = self.trackers[t].predict()[0] <NEW_LINE> trk[:] = [pos[0], pos[1], pos[2], pos[3], 0] <NEW_LINE> if np.any(np.isnan(pos)): <NEW_LINE> <INDENT> to_del.append(t) <NEW_LINE> <DEDENT> <DEDENT> trks = np.ma.compress_rows(np.ma.masked_invalid(trks)) <NEW_LINE> for t in reversed(to_del): <NEW_LINE> <INDENT> self.trackers.pop(t) <NEW_LINE> <DEDENT> matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks) <NEW_LINE> for t, trk in enumerate(self.trackers): <NEW_LINE> <INDENT> if t not in unmatched_trks: <NEW_LINE> <INDENT> d = matched[np.where(matched[:, 1] == t)[0], 0] <NEW_LINE> trk.update(dets[d, :][0]) <NEW_LINE> <DEDENT> <DEDENT> for i in unmatched_dets: <NEW_LINE> <INDENT> trk = KalmanBoxTracker(dets[i, :]) <NEW_LINE> self.trackers.append(trk) <NEW_LINE> <DEDENT> i = len(self.trackers) <NEW_LINE> for trk in reversed(self.trackers): <NEW_LINE> <INDENT> d = trk.get_state()[0] <NEW_LINE> if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits): <NEW_LINE> <INDENT> ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1)) <NEW_LINE> <DEDENT> i -= 1 <NEW_LINE> if trk.time_since_update > self.max_age: <NEW_LINE> <INDENT> self.trackers.pop(i) <NEW_LINE> <DEDENT> <DEDENT> if len(ret) > 0: <NEW_LINE> <INDENT> return np.concatenate(ret) <NEW_LINE> <DEDENT> return np.empty((0, 5))
|
Params:
dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
Requires: this method must be called once for each frame even with empty detections.
Returns a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
|
625941bc45492302aab5e18e
|
def createDatabase(self): <NEW_LINE> <INDENT> conn = self.connectMysql() <NEW_LINE> sql = "create database if not exists " + self.db <NEW_LINE> cur = conn.cursor() <NEW_LINE> cur.execute(sql) <NEW_LINE> cur.close() <NEW_LINE> conn.close()
|
Because creating the database only requires changing the MYSQL_DBNAME setting in settings, there is no need to pass in an SQL statement.
|
625941bc07f4c71912b11355
|
def MsgPop(self): <NEW_LINE> <INDENT> if (self.msgIndentLevel>0): self.msgIndentLevel -= 1
|
Decrement the message-nesting level (see ''MsgPush'').
|
625941bc9f2886367277a75e
|
def feature_layer(cfg): <NEW_LINE> <INDENT> layers = [] <NEW_LINE> for i in range(len(cfg)): <NEW_LINE> <INDENT> layers.append([]) <NEW_LINE> for j in range(len(cfg[i]) - 1): <NEW_LINE> <INDENT> layers[i].append(nn.Conv1d(cfg[i][j], cfg[i][j+1], kernel_size=3, stride = (2, 1)[i % 2== 1], padding =1)) <NEW_LINE> <DEDENT> <DEDENT> return layers
|
Args:
cfg: the input configuration
Returns:
the feature extraction network
|
625941bc4d74a7450ccd4090
|
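A quick shape check under an assumed `cfg` (the real config format is not shown); note that even-indexed stages get stride 2 and odd-indexed stages stride 1 via `(2, 1)[i % 2 == 1]`:
```python
import torch

cfg = [[1, 16, 32], [32, 64]]   # hypothetical per-stage channel counts
layers = feature_layer(cfg)

x = torch.randn(8, 1, 100)      # (batch, channels, length)
for conv in layers[0]:          # stage 0: each stride-2 conv halves length
    x = conv(x)
print(x.shape)                  # torch.Size([8, 32, 25])
```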
def test_post_request_viewed(self): <NEW_LINE> <INDENT> self.create_request_data() <NEW_LINE> self.client.post( '/requests/requestsData/', {'data': json.dumps([{'request_id': 1, 'viewed': False}, {'request_id': 2, 'viewed': True}])}, HTTP_X_REQUESTED_WITH='XMLHttpRequest', ) <NEW_LINE> self.assertEqual(RequestData.objects.filter(viewed=True).count(), 1)
|
Test that POSTed 'viewed' flags update the requests data
:return:
|
625941bc73bcbd0ca4b2bf4b
|
def handle(self, args): <NEW_LINE> <INDENT> from lifoid.config import settings <NEW_LINE> try: <NEW_LINE> <INDENT> app_settings_module = import_module( settings.lifoid_settings_module ) <NEW_LINE> <DEDENT> except ModuleNotFoundError: <NEW_LINE> <INDENT> return color.format("No settings module found. Have you initialized your project with `lifoid init` command? ", color.RED) <NEW_LINE> <DEDENT> path = args.path or app_settings_module.TEMPLATES_PATH <NEW_LINE> try: <NEW_LINE> <INDENT> load_template(args.lifoid_id, path, args.lang) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> print(traceback.format_exc()) <NEW_LINE> return color.format(" * Error while loading templates", color.RED) <NEW_LINE> <DEDENT> return color.format(" * Conversational model loaded", color.GREEN)
|
CLI command to load the conversational model templates.
|
625941bc92d797404e304057
|
def grando_transform_shift(image, shift): <NEW_LINE> <INDENT> return scipy.ndimage.shift(image, shift)
|
input: matrix (ndarray); shift (float), responsible for an identical shift of the figure along the OX and OY axes;
output: matrix (ndarray)
|
625941bce1aae11d1e749b82
|
def parse_tuple( self, simplified=False, with_condexpr=True, extra_end_rules=None, explicit_parentheses=False, ): <NEW_LINE> <INDENT> lineno = self.stream.current.lineno <NEW_LINE> if simplified: <NEW_LINE> <INDENT> parse = self.parse_primary <NEW_LINE> <DEDENT> elif with_condexpr: <NEW_LINE> <INDENT> parse = self.parse_expression <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> def parse(): <NEW_LINE> <INDENT> return self.parse_expression(with_condexpr=False) <NEW_LINE> <DEDENT> <DEDENT> args = [] <NEW_LINE> is_tuple = False <NEW_LINE> while 1: <NEW_LINE> <INDENT> if args: <NEW_LINE> <INDENT> self.stream.expect("comma") <NEW_LINE> <DEDENT> if self.is_tuple_end(extra_end_rules): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> args.append(parse()) <NEW_LINE> if self.stream.current.type == "comma": <NEW_LINE> <INDENT> is_tuple = True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> lineno = self.stream.current.lineno <NEW_LINE> <DEDENT> if not is_tuple: <NEW_LINE> <INDENT> if args: <NEW_LINE> <INDENT> return args[0] <NEW_LINE> <DEDENT> if not explicit_parentheses: <NEW_LINE> <INDENT> self.fail( "Expected an expression, got '%s'" % describe_token(self.stream.current) ) <NEW_LINE> <DEDENT> <DEDENT> return nodes.Tuple(args, "load", lineno=lineno)
|
Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
|
625941bc32920d7e50b2809b
|
def _get_affinity_matrix(self, X, Y=None): <NEW_LINE> <INDENT> if self.affinity == 'precomputed': <NEW_LINE> <INDENT> self.affinity_matrix_ = X <NEW_LINE> return self.affinity_matrix_ <NEW_LINE> <DEDENT> if self.affinity == 'nearest_neighbors': <NEW_LINE> <INDENT> if sparse.issparse(X): <NEW_LINE> <INDENT> warnings.warn("Nearest neighbors affinity currently does " "not support sparse input, falling back to " "rbf affinity") <NEW_LINE> self.affinity = "rbf" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.n_neighbors_ = (self.n_neighbors if self.n_neighbors is not None else max(int(X.shape[0] / 10), 1)) <NEW_LINE> self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_, include_self=True) <NEW_LINE> self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ + self.affinity_matrix_.T) <NEW_LINE> return self.affinity_matrix_ <NEW_LINE> <DEDENT> <DEDENT> if self.affinity == 'rbf': <NEW_LINE> <INDENT> self.gamma_ = (self.gamma if self.gamma is not None else 1.0 / X.shape[1]) <NEW_LINE> self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_) <NEW_LINE> return self.affinity_matrix_ <NEW_LINE> <DEDENT> self.affinity_matrix_ = self.affinity(X) <NEW_LINE> return self.affinity_matrix_
|
Calculate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
|
625941bc099cdd3c635f0b2a
|
def test_parser_output_input(): <NEW_LINE> <INDENT> inputfile = 'inputfile' <NEW_LINE> outputfile = 'outputfile' <NEW_LINE> args = standardcitations.parse_args(['-o', outputfile, '-i', inputfile]) <NEW_LINE> assert args.input == inputfile <NEW_LINE> assert args.output == outputfile <NEW_LINE> assert not args.xelatex
|
Assert that the input and output filenames are read correctly,
when the order is flipped and that the XeLaTeX flag is not set.
|
625941bc4f6381625f11490b
|
def GetCommandLineFiles(command_line_file_list, recursive): <NEW_LINE> <INDENT> return _FindFiles(command_line_file_list, recursive)
|
Return the list of files specified on the command line.
|
625941bc7d847024c06be187
|
def save_occurs(self,outfile,format='mm'): <NEW_LINE> <INDENT> if format == 'mm': <NEW_LINE> <INDENT> mmwrite(outfile,self.occurs) <NEW_LINE> <DEDENT> elif format == 'graphlab': <NEW_LINE> <INDENT> f = open(outfile,'w') <NEW_LINE> occurs = self.occurs.tocoo() <NEW_LINE> for i in range(occurs.row.shape[0]): <NEW_LINE> <INDENT> f.write('%s %s %s\n' % (occurs.row[i]+1,occurs.col[i]+1,occurs.data[i])) <NEW_LINE> <DEDENT> f.close() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception('format must be mm or graphlab')
|
Save the occurrence matrix to disk ('mm' = Matrix Market, 'graphlab' = edge-list text).
|
625941bc6fece00bbac2d60a
|
def public_ticker(self, market_symbol): <NEW_LINE> <INDENT> return self.get(f'markets/{market_symbol}/ticker')
|
Get the current tick values for a market.
Args:
market_symbol (str):
String literal (ie. BTC-LTC).
Returns:
list
.. seealso:: https://bittrex.github.io/api/v3#/definitions/Ticker
|
625941bc8a43f66fc4b53f36
|
def private_task(job_id): <NEW_LINE> <INDENT> if job_id == 'register': <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> job = qdb.processing_job.ProcessingJob(job_id) <NEW_LINE> job.update_heartbeat_state() <NEW_LINE> task_name = job.command.name <NEW_LINE> try: <NEW_LINE> <INDENT> TASK_DICT[task_name](job) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> log_msg = "Error on job %s: %s" % ( job.id, ''.join(traceback.format_exception(*exc_info()))) <NEW_LINE> le = qdb.logger.LogEntry.create('Runtime', log_msg) <NEW_LINE> job.complete(False, error="Error (log id: %d): %s" % (le.id, e))
|
Completes a Qiita private task
Parameters
----------
job_id : str
The job id
|
625941bc293b9510aa2c3167
|
def find_sums(self,lst_sqrs,x_arr,y_arr,sigmas,fit_type): <NEW_LINE> <INDENT> sigma_nonzero = np.nonzero(sigmas) <NEW_LINE> x_arr = x_arr[sigma_nonzero] <NEW_LINE> y_arr = y_arr[sigma_nonzero] <NEW_LINE> sigmas = sigmas[sigma_nonzero] <NEW_LINE> sigma_2 = sigmas**2 <NEW_LINE> if fit_type == 'linear': <NEW_LINE> <INDENT> lst_sqrs.S = np.sum(np.reciprocal(sigma_2)) <NEW_LINE> lst_sqrs.S_x = np.sum(x_arr/sigma_2) <NEW_LINE> lst_sqrs.S_y = np.sum(y_arr/sigma_2) <NEW_LINE> lst_sqrs.S_xx = np.sum((x_arr * x_arr)/sigma_2) <NEW_LINE> lst_sqrs.S_xy = np.sum((x_arr * y_arr)/sigma_2) <NEW_LINE> <DEDENT> elif fit_type == 'quadratic': <NEW_LINE> <INDENT> x_2 = x_arr**2 <NEW_LINE> lst_sqrs.S = np.sum(np.reciprocal(sigma_2)) <NEW_LINE> lst_sqrs.S_x = np.sum(x_2/sigma_2) <NEW_LINE> lst_sqrs.S_y = np.sum(y_arr/sigma_2) <NEW_LINE> lst_sqrs.S_xx = np.sum((x_2**2)/sigma_2) <NEW_LINE> lst_sqrs.S_xy = np.sum((x_2 * y_arr)/sigma_2) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> error_msg = "Invalid fit type {0} in find_sums()".format(fit_type) <NEW_LINE> raise ValueError(error_msg)
|
Computes the sums needed for linear least squares equations
:param lst_sqrs: LeastSquaresValues object to fill
:param x_arr: array with x-axis data
:param y_arr: array with y-axis data
:param sigmas: weighting for y-axis data
:param fit_type: selects between 'linear' and 'quadratic' fits
|
625941bc187af65679ca4fec
|
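For context, these are the standard accumulators for a weighted least-squares line fit y = a + b*x; the closed-form coefficients follow from the normal equations. A standalone sketch with illustrative data (names here are not from the original class):
```python
import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([1.0, 3.1, 4.9, 7.2])
sigmas = np.ones_like(y)

w = 1.0 / sigmas**2
S, S_x, S_y = w.sum(), (w * x).sum(), (w * y).sum()
S_xx, S_xy = (w * x * x).sum(), (w * x * y).sum()

delta = S * S_xx - S_x**2
a = (S_xx * S_y - S_x * S_xy) / delta   # intercept, ~0.99
b = (S * S_xy - S_x * S_y) / delta      # slope, ~2.04
print(round(a, 2), round(b, 2))
```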
def obj(*components): <NEW_LINE> <INDENT> return build('obj/', *components)
|
Given components for a project source path relative to `B8_PROJDIR`,
return the corresponding object path, which is the same path under
``.build/obj/``.
|
625941bc2ae34c7f2600d000
|
def apply_mechanisms(self, cell): <NEW_LINE> <INDENT> modified = False <NEW_LINE> for i in range(len(self.mechanisms)): <NEW_LINE> <INDENT> modified = True <NEW_LINE> if self.mechanisms[i] == "imprinting": <NEW_LINE> <INDENT> self.imprinting_mechanism(cell, self.epi_probs[i]) <NEW_LINE> <DEDENT> elif self.mechanisms[i] == "reprogramming": <NEW_LINE> <INDENT> self.reprograming1(cell, self.epi_probs[i]) <NEW_LINE> <DEDENT> elif self.mechanisms[i] == "paramutation": <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> elif self.mechanisms[i] == "position": <NEW_LINE> <INDENT> self.position_mechanism(cell, self.epi_probs[i]) <NEW_LINE> <DEDENT> elif self.mechanisms[i] == "inactivation": <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> elif self.mechanisms[i] == "bookmarking": <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> elif self.mechanisms[i] == "silencing": <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> if modified: <NEW_LINE> <INDENT> self.evaluate_cell(cell) <NEW_LINE> <DEDENT> return cell
|
This function applies the epigenetic mechanisms to a given cell
with some probability.
Mechanisms already implemented in the code:
- "imprinting"
- "reprogramming"
- "position"
Placeholders for future mechanisms:
- "paramutation"
- "inactivation"
- "bookmarking"
- "silencing"
Inputs:
- mechanisms: List of mechanisms to be applied.
- cell: The cell in which we will apply a mechanism.
- epiProb: List of probabilities for every listed mechanism.
Output:
The new modified cell.
|
625941bca8ecb033257d2fa4
|
def __init__(self, fee, _error_use_class_method=True): <NEW_LINE> <INDENT> if _error_use_class_method: <NEW_LINE> <INDENT> raise TypeError("Please use SecondSignature.generate(args) or SecondSignature.from_dict(args) to construct me.") <NEW_LINE> <DEDENT> super().__init__(TRANSACTION_TYPE.SECOND_SIGNATURE, fee)
|
Creates a Second Signature transaction
:param fee: fee for transaction
:param _error_use_class_method: boolean flag, used to indicate if the transaction
was created from generate or from_dict
|
625941bccb5e8a47e48b797c
|
@app.task(base=CollectionDunyaTask) <NEW_LINE> def load_musicbrainz_collection(collectionid): <NEW_LINE> <INDENT> coll = models.Collection.objects.get(collectionid=collectionid) <NEW_LINE> coll.set_state_scanning() <NEW_LINE> coll.add_log_message("Starting collection scan") <NEW_LINE> update_collection(collectionid) <NEW_LINE> scan_and_link(collectionid) <NEW_LINE> coll.set_state_scanned() <NEW_LINE> coll.add_log_message("Collection scan finished") <NEW_LINE> return collectionid
|
Load a musicbrainz collection into the dashboard database
and scan collection root to match directories to releases.
|
625941bc7cff6e4e81117854
|
def onConnect(self): <NEW_LINE> <INDENT> print('onConnect')
|
Connection callback.
|
625941bc4c3428357757c1f8
|
def bootstrap_pair_counts(self, N_trials): <NEW_LINE> <INDENT> Nfld = self.N_fields <NEW_LINE> resample = np.random.randint( 0, Nfld, N_trials*Nfld ).reshape(N_trials, Nfld) <NEW_LINE> hists = np.apply_along_axis(np.bincount, 1, resample, minlength=Nfld) <NEW_LINE> matrix = lambda x: np.multiply(*np.meshgrid(x, x)) <NEW_LINE> freqs = np.apply_along_axis(matrix, 1, hists) <NEW_LINE> matrix = lambda x, y: np.multiply(*np.meshgrid(x, y)) <NEW_LINE> cts = [arr / np.sum(matrix(asize, bsize)).astype('float64') for arr,(asize,bsize) in zip(self.count_arrays,self.count_sizes)] <NEW_LINE> cts = [np.tensordot(arr, freqs, axes=((1,2),(1,2))) for arr in cts] <NEW_LINE> return cts
|
Perform bootstrap resampling on the results, and return the
estimator value.
Parameters
----------
N_trials (int): The number of resamples to perform.
Returns
-------
A 2D numpy array of shape (R, N_trials) where R is the number of radii
bins, i.e. the number of radii values minus one.
|
625941bc21bff66bcd684823
|
def analyse_maps(self): <NEW_LINE> <INDENT> log.info("Analysing the maps ...")
|
This function ...
:return:
|
625941bcd53ae8145f87a143
|
def scale_ma(ori_score=0, pro_score=0, pro_score_add_k=0): <NEW_LINE> <INDENT> temp_t = [11, 13, 21, 22, 59, 64, 73, 97, 100, 109, 127, 134, 143, 156, 157, 167, 181, 194, 212, 222, 226, 228, 232, 233, 238, 240, 250, 251, 263, 266, 268, 271, 277, 279, 298] <NEW_LINE> temp_f = [101, 105, 111, 119, 120, 148, 166, 171, 180, 267, 289] <NEW_LINE> for i in temp_t: <NEW_LINE> <INDENT> ori_score += is_true(Ans[i]) <NEW_LINE> <DEDENT> for j in temp_f: <NEW_LINE> <INDENT> ori_score += is_false(Ans[j]) <NEW_LINE> <DEDENT> k, ignore = scale_k() <NEW_LINE> pro_score += trans_t(ori_score, Norm_M['Ma'], Norm_SD['Ma']) <NEW_LINE> pro_score_add_k += trans_t(ori_score + round(0.2 * k), Norm_M['Ma+0.2K'], Norm_SD['Ma+0.2K']) <NEW_LINE> return ori_score, pro_score, pro_score_add_k
|
Clinical scale 9: Hypomania (Ma)
the score of Ma (hypomania) scale
:param ori_score: original score
:param pro_score: processing score
:param pro_score_add_k: processing score added 0.2K
:return: ori_score, pro_score, pro_score_add_k
:rtype: int, int, int
|
625941bc0c0af96317bb80b7
|
def do_lock(verbose=False): <NEW_LINE> <INDENT> click.echo(crayons.yellow('Locking {0} dependencies...'.format( crayons.red('[dev-packages]')) ), err=True) <NEW_LINE> lockfile = project._lockfile <NEW_LINE> for section in ('default', 'develop'): <NEW_LINE> <INDENT> for k, v in lockfile[section].copy().items(): <NEW_LINE> <INDENT> if not hasattr(v, 'keys'): <NEW_LINE> <INDENT> del lockfile[section][k] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> deps = convert_deps_to_pip(project.dev_packages, r=False) <NEW_LINE> results = resolve_deps(deps, sources=project.sources, verbose=verbose) <NEW_LINE> for dep in results: <NEW_LINE> <INDENT> lockfile['develop'].update({dep['name']: {'version': '=={0}'.format(dep['version'])}}) <NEW_LINE> lockfile['develop'][dep['name']]['hashes'] = dep['hashes'] <NEW_LINE> <DEDENT> click.echo(crayons.yellow('Locking {0} dependencies...'.format(crayons.red('[packages]'))), err=True) <NEW_LINE> deps = convert_deps_to_pip(project.packages, r=False) <NEW_LINE> results = resolve_deps(deps, sources=project.sources) <NEW_LINE> for dep in results: <NEW_LINE> <INDENT> lockfile['default'].update({dep['name']: {'version': '=={0}'.format(dep['version'])}}) <NEW_LINE> lockfile['default'][dep['name']]['hashes'] = dep['hashes'] <NEW_LINE> <DEDENT> cmd = '"{0}" {1}'.format(which('python'), shellquote(pep508checker.__file__.rstrip('cdo'))) <NEW_LINE> c = delegator.run(cmd) <NEW_LINE> lockfile['_meta']['host-environment-markers'] = json.loads(c.out) <NEW_LINE> with open(project.lockfile_location, 'w') as f: <NEW_LINE> <INDENT> json.dump(lockfile, f, indent=4, separators=(',', ': '), sort_keys=True) <NEW_LINE> f.write('\n') <NEW_LINE> <DEDENT> click.echo('{0} Pipfile.lock{1}'.format(crayons.yellow('Updated'), crayons.yellow('!')), err=True)
|
Executes the freeze functionality.
|
625941bc8a43f66fc4b53f37
|
def start_oekaki(self): <NEW_LINE> <INDENT> self.start = True
|
Starts oekaki (drawing).
|
625941bcec188e330fd5a673
|
def __init__(self, n_actions, hidden=1024, learning_rate=0.00001, frame_height=84, frame_width=84, agent_history_length=4): <NEW_LINE> <INDENT> self.n_actions = n_actions <NEW_LINE> self.hidden = hidden <NEW_LINE> self.learning_rate = learning_rate <NEW_LINE> self.frame_height = frame_height <NEW_LINE> self.frame_width = frame_width <NEW_LINE> self.agent_history_length = agent_history_length <NEW_LINE> self.input = tf.placeholder(shape=[None, self.frame_height, self.frame_width, self.agent_history_length], dtype=tf.float32) <NEW_LINE> self.inputscaled = self.input/255 <NEW_LINE> self.conv1 = tf.layers.conv2d( inputs=self.inputscaled, filters=32, kernel_size=[8, 8], strides=4, kernel_initializer=tf.variance_scaling_initializer(scale=2), padding="valid", activation=tf.nn.relu, use_bias=False, name='conv1') <NEW_LINE> self.conv2 = tf.layers.conv2d( inputs=self.conv1, filters=64, kernel_size=[4, 4], strides=2, kernel_initializer=tf.variance_scaling_initializer(scale=2), padding="valid", activation=tf.nn.relu, use_bias=False, name='conv2') <NEW_LINE> self.conv3 = tf.layers.conv2d( inputs=self.conv2, filters=64, kernel_size=[3, 3], strides=1, kernel_initializer=tf.variance_scaling_initializer(scale=2), padding="valid", activation=tf.nn.relu, use_bias=False, name='conv3') <NEW_LINE> self.conv4 = tf.layers.conv2d( inputs=self.conv3, filters=hidden, kernel_size=[7, 7], strides=1, kernel_initializer=tf.variance_scaling_initializer(scale=2), padding="valid", activation=tf.nn.relu, use_bias=False, name='conv4') <NEW_LINE> self.valuestream, self.advantagestream = tf.split(self.conv4, 2, 3) <NEW_LINE> self.valuestream = tf.layers.flatten(self.valuestream) <NEW_LINE> self.advantagestream = tf.layers.flatten(self.advantagestream) <NEW_LINE> self.advantage = tf.layers.dense( inputs=self.advantagestream, units=self.n_actions, kernel_initializer=tf.variance_scaling_initializer(scale=2), name="advantage") <NEW_LINE> self.value = tf.layers.dense( inputs=self.valuestream, units=1, kernel_initializer=tf.variance_scaling_initializer(scale=2), name='value') <NEW_LINE> self.q_values = self.value + tf.subtract(self.advantage, tf.reduce_mean(self.advantage, axis=1, keepdims=True)) <NEW_LINE> self.best_action = tf.argmax(self.q_values, 1) <NEW_LINE> self.target_q = tf.placeholder(shape=[None], dtype=tf.float32) <NEW_LINE> self.action = tf.placeholder(shape=[None], dtype=tf.int32) <NEW_LINE> self.Q = tf.reduce_sum(tf.multiply(self.q_values, tf.one_hot(self.action, self.n_actions, dtype=tf.float32)), axis=1) <NEW_LINE> self.loss = tf.reduce_mean(tf.losses.huber_loss(labels=self.target_q, predictions=self.Q)) <NEW_LINE> self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate) <NEW_LINE> self.update = self.optimizer.minimize(self.loss)
|
Args:
n_actions: Integer, number of possible actions
hidden: Integer, Number of filters in the final convolutional layer.
This is different from the DeepMind implementation
learning_rate: Float, Learning rate for the Adam optimizer
frame_height: Integer, Height of a frame of an Atari game
frame_width: Integer, Width of a frame of an Atari game
agent_history_length: Integer, Number of frames stacked together to create a state
|
625941bc5f7d997b8717496a
|
def test_meal_post_incorrect_params(self): <NEW_LINE> <INDENT> count = Meal.objects.all().count() <NEW_LINE> response = self.client.post(reverse('meal'), {'meal_type_id': 'kanapka'}) <NEW_LINE> assert response.status_code == 400 <NEW_LINE> assert count == Meal.objects.all().count() <NEW_LINE> assert response.json() == {'meal_type_id': ['A valid integer is required.']}
|
Testing POST meal view with incorrect params
|
625941bc0a366e3fb873e6e6
|
def mark_as_hit(self): <NEW_LINE> <INDENT> self.char = 'X' <NEW_LINE> self.was_shot = True
|
Method to mark obj on board as hit by changing char attribute
|
625941bc925a0f43d2549d43
|
def export(filename): <NEW_LINE> <INDENT> tree = lxml.etree.parse(open(filename, encoding='utf-8')) <NEW_LINE> print('<dl>') <NEW_LINE> for template in tree.findall('.//template'): <NEW_LINE> <INDENT> print('<dt>%s</dt>' % (html.escape(template.get('name')))) <NEW_LINE> print('<dd>%s\n<pre class="prettyprint">%s\n</pre>\n</dd>\n' % ( template.get('description'), template.get('value'))) <NEW_LINE> <DEDENT> print('</dl>')
|
https://github.com/hoffmann/PyCharm-Python-Templates
|
625941bc5fcc89381b1e158b
|
def log_likelihood(self): <NEW_LINE> <INDENT> if self._log_likelihood is None: <NEW_LINE> <INDENT> self._log_likelihood = logpdf(x=self.y, cov=self.S) <NEW_LINE> <DEDENT> return self._log_likelihood
|
log-likelihood of the last measurement.
|
625941bca934411ee3751569
|
def execute(self): <NEW_LINE> <INDENT> stories = self.pt.get_backlog(self.project) <NEW_LINE> output = StringIO() <NEW_LINE> for story in stories: <NEW_LINE> <INDENT> if story.state in [Story.STATE_UNSCHEDULED, Story.STATE_UNSTARTED] and (self.namespace.mywork is False or self.owner in story.owners): <NEW_LINE> <INDENT> id = colored.yellow(str(story.id)) <NEW_LINE> if story.estimate is None: <NEW_LINE> <INDENT> if story.type == Story.TYPE_FEATURE: <NEW_LINE> <INDENT> type = "{0} (?)".format(story.type.upper()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> type = story.type.upper() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> type = "{0} ({1:d})".format(story.type.upper(), story.estimate) <NEW_LINE> <DEDENT> name = story.name <NEW_LINE> if story.owners: <NEW_LINE> <INDENT> initials = [] <NEW_LINE> for member in self.project.members: <NEW_LINE> <INDENT> if member in story.owners: <NEW_LINE> <INDENT> initials.append(member.initials) <NEW_LINE> <DEDENT> <DEDENT> name = u"{0} ({1})".format(story.name, ', '.join(initials)) <NEW_LINE> <DEDENT> message = u"{0} {1}: {2}\n".format(id, type, name) <NEW_LINE> output.write(message) <NEW_LINE> <DEDENT> <DEDENT> less(output)
|
Execute this backlog command.
|
625941bcdc8b845886cb5402
|
def parse_args(): <NEW_LINE> <INDENT> parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo') <NEW_LINE> parser.add_argument('--train_proto','-tp', dest='train_proto',type=str, help='JPEGImages path to read', default='None') <NEW_LINE> parser.add_argument('--train_model','-tm', dest='train_model',type=str, help='JPEGImages path to save', default='None') <NEW_LINE> parser.add_argument('--deploy_proto','-dp', dest='deploy_proto', help='xml path to read', default='None') <NEW_LINE> parser.add_argument('--save_model','-sm', dest='save_model', help='xml path to save', default='None') <NEW_LINE> args = parser.parse_args() <NEW_LINE> return args
|
Parse input arguments.
|
625941bc1f037a2d8b9460cd
|
def getNumberOfTuples(self, *args): <NEW_LINE> <INDENT> return _MEDCalculator.MEDCouplingFieldDiscretizationP0_getNumberOfTuples(self, *args)
|
getNumberOfTuples(self, MEDCouplingMesh mesh) -> int
|
625941bc7d43ff24873a2b6c
|
def getDefaultValue(self): <NEW_LINE> <INDENT> pass
|
Returns the media storage property's default value.
|
625941bc9b70327d1c4e0ca2
|
def get_token_summary(tok): <NEW_LINE> <INDENT> return '('+str(tok.pos)+')' + tok.type + '_'
|
This function returns a formatted string for a token.
The returned string has the following form:
::
(pos)TokenType_
Strings of this form are required by the
:class:`~MethodSearch` class.
Args:
tok ([Token]): the token to be queried
Returns:
[String]: a string of the form :code:`(pos)TokenType_`
|
625941bc3c8af77a43ae366c
|
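A tiny stand-in token for illustration (the real Token class is only assumed to expose `pos` and `type`):
```python
from collections import namedtuple

Token = namedtuple('Token', ['pos', 'type'])
print(get_token_summary(Token(3, 'IDENT')))   # -> (3)IDENT_
```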
def test_clear_user_api_key(self): <NEW_LINE> <INDENT> user = self.login() <NEW_LINE> user.generate_api_key() <NEW_LINE> user.save() <NEW_LINE> self.assertIsNotNone(user.apikey) <NEW_LINE> response = self.post(url_for('users.apikey_settings', user=self.user), {'action': 'clear'}) <NEW_LINE> self.assert200(response) <NEW_LINE> user.reload() <NEW_LINE> self.assertIsNone(user.apikey)
|
It should clear the API Key
|
625941bc7d847024c06be188
|
def isValidSerialization(self, preorder): <NEW_LINE> <INDENT> stack = [] <NEW_LINE> index = -1 <NEW_LINE> preorder = preorder.split(",") <NEW_LINE> for s in preorder: <NEW_LINE> <INDENT> stack.append(s) <NEW_LINE> index += 1 <NEW_LINE> while self.endsWithTwoPoundSign(stack, index): <NEW_LINE> <INDENT> stack.pop() <NEW_LINE> index -= 1 <NEW_LINE> stack.pop() <NEW_LINE> index -= 1 <NEW_LINE> if index < 0: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> stack.pop() <NEW_LINE> stack.append("#") <NEW_LINE> <DEDENT> <DEDENT> if len(stack) == 1 and stack[0] == "#": <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> return False
|
:type preorder: str
:rtype: bool
|
625941bc76e4537e8c351546
|
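A usage sketch on the classic inputs, assuming the method (and its `endsWithTwoPoundSign` helper, not shown in this row) live on a `Solution` class:
```python
s = Solution()
print(s.isValidSerialization("9,3,4,#,#,1,#,#,2,#,6,#,#"))  # True
print(s.isValidSerialization("1,#"))                        # False
print(s.isValidSerialization("9,#,#,1"))                    # False
```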
def scale_spreading_data(data, tau=1., R=1.): <NEW_LINE> <INDENT> def scale_series(view, tau, R): <NEW_LINE> <INDENT> new = view().copy() <NEW_LINE> new.index /= tau <NEW_LINE> new /= R <NEW_LINE> return new <NEW_LINE> <DEDENT> view = [d.view for d in data] <NEW_LINE> try: <NEW_LINE> <INDENT> bc = np.broadcast(view, tau, R) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise TypeError("Could not broadcast scaling factors to data.") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> scaled_data = [scale_series(v, t, r) for v, t, r in bc] <NEW_LINE> <DEDENT> return scaled_data
|
Scale spreading times and radii.
The data is scaled by dividing with the input factors, i.e. t* = t/tau
where t is the non-scaled time, tau the scaling factor and t* the
scaled time of the returned data.
Input scaling factors are broadcast to the data. This means that
the input factors must be either a single factor scaling all data,
or a list with separate factors for the entire data set.
Args:
data (pd.Series): List of data to scale.
tau (float): Time scaling factors.
R (float): Radius scaling factors.
Returns:
pd.Series: List of scaled data.
Raises:
TypeError: If scaling factors can not be broadcast to data.
|
625941bcfbf16365ca6f608c
|
def err( self, msg ): <NEW_LINE> <INDENT> if self.lock: <NEW_LINE> <INDENT> self.DispLock.acquire() <NEW_LINE> <DEDENT> self.set_color( 'Red' ) <NEW_LINE> sys.stderr.write( "ERROR: " + msg ) <NEW_LINE> sys.stderr.flush() <NEW_LINE> self.set_color( 'SwitchOffAttributes' ) <NEW_LINE> if self.lock: <NEW_LINE> <INDENT> self.DispLock.release()
|
Print messages with an error. If locking is available, use it.
|
625941bcd6c5a10208143f17
|
def send_and_receive(self, receiver, message: bytes) -> bytes: <NEW_LINE> <INDENT> key = DHEntity.session_key_to_16_aes_bytes( self._session_key ) <NEW_LINE> ciphertext, iv = matasano.blocks.aes_cbc( key=key, b=message, random_iv=True ) <NEW_LINE> ciphertext = receiver.receive_and_send_back(ciphertext + iv) <NEW_LINE> plaintext = DHEntity.decipher_received_message(key, ciphertext) <NEW_LINE> return plaintext
|
Send an encrypted message as follows:
- key = SHA1(session_key)[:16]
- iv = random IV
- message
send: AES_CBC(message) || iv
:param receiver: The receiver.
:param message: The message to be sent.
:return: The received answer (if any).
|
625941bc507cdc57c6306ba3
|
def get_sharpe( nav_data, benchmark_nav_data, risk_free_rate=None, window=250 * 3, annualiser=250, tail=True, ): <NEW_LINE> <INDENT> nav_dataframe = _transform_df(nav_data) <NEW_LINE> benchmark_nav_dataframe = _transform_df(benchmark_nav_data) <NEW_LINE> df = RatioCalculator( nav_dataframe, benchmark_nav_dataframe=benchmark_nav_dataframe, risk_free_rate=risk_free_rate, annualiser=annualiser, ).get_sharpe(window) <NEW_LINE> return float(df["sharpe"][-1]) if tail else df
|
The Sharpe ratio was developed by Nobel laureate William F. Sharpe and is used to help
investors understand the return of an investment compared to its risk. The ratio is the
average return earned in excess of the risk-free rate per unit of volatility or total risk.
Volatility is a measure of the price fluctuations of an asset or portfolio. (Investopedia)
:param nav_data:
:param benchmark_nav_data:
:param risk_free_rate: float
:param window: int
:param annualiser: int
:param tail: bool
:return:
|
625941bcd7e4931a7ee9ddeb
|
def test_brk_will_load_the_program_counter_from_the_interrupt_vector(cpu): <NEW_LINE> <INDENT> cpu.reset_to(0xFF00) <NEW_LINE> cpu.Memory[0xFF00] = OpCodes.INS_BRK <NEW_LINE> cpu.Memory[0xFFFE] = 0x00 <NEW_LINE> cpu.Memory[0xFFFF] = 0x80 <NEW_LINE> expected_cycles = 7 <NEW_LINE> cycles_used = cpu.execute(expected_cycles) <NEW_LINE> AssertThat(expected_cycles).IsEqualTo(cycles_used) <NEW_LINE> AssertThat(cpu.program_counter).IsEqualTo(0x8000)
|
BRKWillLoadTheProgramCounterFromTheInterruptVector
|
625941bc656771135c3eb73a
|
def cross_validation(x, y, inputs, model, folds=5): <NEW_LINE> <INDENT> out = [] <NEW_LINE> mode = inputs['prediction_type'] <NEW_LINE> kx, ky = kfold(x, y, folds, True) <NEW_LINE> for i in range(folds): <NEW_LINE> <INDENT> model._make_predict_function() <NEW_LINE> y_pred = model.predict(kx[i], verbose=0) <NEW_LINE> if mode == 'binary': <NEW_LINE> <INDENT> y_pred = y_pred >= .5 <NEW_LINE> scores = f1_score(y_pred, ky[i], average='binary') <NEW_LINE> <DEDENT> elif mode == 'multiclass': <NEW_LINE> <INDENT> y_pred = y_pred.argmax(axis=-1) <NEW_LINE> scores = f1_score(y_pred, ky[i], average='macro') <NEW_LINE> <DEDENT> elif mode == 'multilabel': <NEW_LINE> <INDENT> y_pred = model.predict(kx[i]).argmax(axis=1) <NEW_LINE> scores = f1_score(y_pred, ky[i].argmax(axis=1), average='macro') <NEW_LINE> <DEDENT> elif mode == 'continuous': <NEW_LINE> <INDENT> y_pred = model.predict(kx[i]) <NEW_LINE> scores = mean_absolute_error(y_pred, ky[i]) <NEW_LINE> <DEDENT> out.append(scores) <NEW_LINE> <DEDENT> return np.mean(out), np.sum(out), out
|
Performs cross_validation for n folds
|
625941bc24f1403a92600a38
|
def set_properties(self): <NEW_LINE> <INDENT> self.exh.rho_array = np.empty(np.size(self.exh.T_array)) <NEW_LINE> self.exh.mu_array = np.empty(np.size(self.exh.T_array)) <NEW_LINE> for i in range(np.size(self.exh.T_array)): <NEW_LINE> <INDENT> self.exh.T = self.exh.T_array[i] <NEW_LINE> self.exh.set_TempPres_dependents() <NEW_LINE> self.exh.rho_array[i] = self.exh.rho <NEW_LINE> self.exh.mu_array[i] = self.exh.mu
|
Sets array of temperature and pressure dependent properties
based on average temperature in HX.
|
625941bc1f037a2d8b9460ce
|
def k_dom_by_point(p, kValue, aSkyline): <NEW_LINE> <INDENT> numWorstDim = 0 <NEW_LINE> numQualifyDim = 0 <NEW_LINE> isKDom = False <NEW_LINE> domSK = None <NEW_LINE> for d in range(len(p)): <NEW_LINE> <INDENT> if p[d] >= aSkyline[d]: <NEW_LINE> <INDENT> numQualifyDim = numQualifyDim + 1 <NEW_LINE> <DEDENT> if p[d] > aSkyline[d]: <NEW_LINE> <INDENT> numWorstDim = numWorstDim + 1 <NEW_LINE> <DEDENT> if numQualifyDim >= kValue and numWorstDim > 0: <NEW_LINE> <INDENT> isKDom = True <NEW_LINE> domSK = aSkyline <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return isKDom, domSK
|
Test whether point p is k-dominated by point aSkyline.
Returns (True, aSkyline) if p is k-dominated by aSkyline; otherwise (False, None).
|
625941bc5e10d32532c5edf6
|
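A worked 3-dimensional example (the `>=` comparisons imply lower values are better): the skyline point matches or beats p in k=2 dimensions and is strictly better in one, so p is k-dominated:
```python
p = (4, 2, 7)
a_skyline = (3, 2, 9)
print(k_dom_by_point(p, 2, a_skyline))   # -> (True, (3, 2, 9))
```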
def raise_exists(*_args, **_kwargs): <NEW_LINE> <INDENT> raise kazoo.client.NodeExistsError()
|
zk.create side effect, raising appropriate exception.
|
625941bc6fb2d068a760ef69
|
def get_session(self): <NEW_LINE> <INDENT> headers = { 'Authorization': 'GatewayLogin token="{}"'.format(self.token), } <NEW_LINE> session = requests.Session() <NEW_LINE> session.headers.update(headers) <NEW_LINE> return session
|
Returns a session object to make authenticated requests
|
625941bc29b78933be1e5587
|
def prepare_ligand_zmat(complex_pdb_data, ligand_resname, MCPRO_path, BOSS_path): <NEW_LINE> <INDENT> print('Preparing Ligand Z matrix') <NEW_LINE> ligand_pdb_filename, ligand_resnumber, ligand_original_resnumber = generate_ligand_pdb(complex_pdb_data, ligand_resname) <NEW_LINE> protonated_ligand_pdb_filename = protonate_ligand_with_Babel(ligand_pdb_filename) <NEW_LINE> convert_pdb_to_zmat(protonated_ligand_pdb_filename, BOSS_path) <NEW_LINE> ligand_zmat_filename = fix_dummy_atom_names(protonated_ligand_pdb_filename) <NEW_LINE> optimize_zmat(ligand_zmat_filename, MCPRO_path) <NEW_LINE> return ligand_zmat_filename, ligand_resnumber, ligand_original_resnumber
|
Prepares the ligand zmat, starting from the complex pdb
The process involves extracting the ligand pdb info from the complex pdb,
then protonating it and converting it to a z-matrix. Lastly, it's optimized.
Args:
complex_pdb_data: List. Data from the complex pdb file
ligand_resname: String. Residue name of desired ligand
BOSS_path: Full path to BOSSdir as defined in .bashrc
MCPRO_path: Full path to MCPROdir as defined in .bashrc
Returns:
ligand_zmat_filename: String. Filename of the ligand zmat
ligand_resnumber: String. The resnumber of the lig after changes (100)
ligand_original_resnumber: String. Resnumber of lig in complex_pdb_data
|
625941bcbe8e80087fb20b17
|
def _clean_token(self, token): <NEW_LINE> <INDENT> token = re.sub(r"[']+", ' ', token.lower()) <NEW_LINE> token = re.sub(r"[^a-z\s]", '', token) <NEW_LINE> return token
|
Remove everything but whitespace and lowercase letters; replace apostrophes with spaces so stopwords match.
|
625941bc50812a4eaa59c1f3
|
def __init__(self, start, end, string): <NEW_LINE> <INDENT> self.start = start <NEW_LINE> self.end = end <NEW_LINE> self.string = string
|
Parameters
----------
start
end
string
|
625941bca79ad161976cc014
|
def data_type(val): <NEW_LINE> <INDENT> for json_type, py_types in _type_map.items(): <NEW_LINE> <INDENT> if type(val) in py_types: <NEW_LINE> <INDENT> return json_type <NEW_LINE> <DEDENT> <DEDENT> raise ValueError('{} not a valid type (int, float, ndarray, str)'.format(val))
|
Determine data-type of val.
Returns
-------
str
One of ('number', 'array', 'string'), else raises ValueError
|
625941bc31939e2706e4cd3d
|
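A minimal sketch of the module-level `_type_map` the function consults; its exact contents in the original module are an assumption:
```python
import numpy as np

_type_map = {
    'number': (int, float),
    'array': (np.ndarray,),
    'string': (str,),
}

print(data_type(3.5))            # number
print(data_type(np.zeros(2)))    # array
print(data_type('abc'))          # string
```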
def get_utility_combinations(agents, singleton_utilities, sum_utils): <NEW_LINE> <INDENT> utility_values = list(singleton_utilities.values()) <NEW_LINE> n = len(agents) <NEW_LINE> all_utility_combinations = [] <NEW_LINE> for i in range(sum_utils + 1): <NEW_LINE> <INDENT> for j in range(sum_utils + 1 - i): <NEW_LINE> <INDENT> all_utility_combinations.append((i, j, sum_utils + 1 - i - j - 1)) <NEW_LINE> <DEDENT> <DEDENT> in_core_candidates = list(); out_of_core_candidates = list(); <NEW_LINE> for i in range(len(all_utility_combinations)): <NEW_LINE> <INDENT> if sum(all_utility_combinations[i]) == sum_utils: <NEW_LINE> <INDENT> for j in range(n): <NEW_LINE> <INDENT> if all_utility_combinations[i][j] >= utility_values[j]: <NEW_LINE> <INDENT> if j == n - 1: <NEW_LINE> <INDENT> in_core_candidates.append((all_utility_combinations[i])) <NEW_LINE> <DEDENT> continue <NEW_LINE> <DEDENT> out_of_core_candidates.append((all_utility_combinations[i])) <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if len(in_core_candidates) == 0: <NEW_LINE> <INDENT> print('Empty core') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('Non-empty core') <NEW_LINE> print('Core length:', len(in_core_candidates)) <NEW_LINE> print('Core candidates:') <NEW_LINE> print(in_core_candidates[:10]) <NEW_LINE> <DEDENT> print() <NEW_LINE> print('Non-core length:', len(out_of_core_candidates)) <NEW_LINE> print('Out-of-core candidates:', out_of_core_candidates[:10])
|
Enumerate all integer utility allocations of sum_utils among the agents and report which allocations lie in the core (each agent receives at least its singleton utility).
|
625941bc4e696a04525c931b
|
def vcs_virtual_fabric_vfab_enable(self, **kwargs): <NEW_LINE> <INDENT> config = ET.Element("config") <NEW_LINE> vcs = ET.SubElement(config, "vcs", xmlns="urn:brocade.com:mgmt:brocade-vcs") <NEW_LINE> virtual_fabric = ET.SubElement(vcs, "virtual-fabric") <NEW_LINE> vfab_enable = ET.SubElement(virtual_fabric, "vfab-enable") <NEW_LINE> callback = kwargs.pop('callback', self._callback) <NEW_LINE> return callback(config)
|
Auto Generated Code
|
625941bc7b25080760e3932a
|
def annot_max_min(x, y, ax=None): <NEW_LINE> <INDENT> xmax = x[np.argmax(y)] <NEW_LINE> ymax = y.max() <NEW_LINE> xmin = x[np.argmin(y)] <NEW_LINE> ymin = y.min() <NEW_LINE> textmax = "x={:.2f}, y={:.8f}".format(xmax, ymax) <NEW_LINE> textmin = "x={:.2f}, y={:.13f}".format(xmin, ymin) <NEW_LINE> if not ax: <NEW_LINE> <INDENT> ax = plt.gca() <NEW_LINE> <DEDENT> bbox_props = dict(boxstyle="square, pad=0.3", fc="w", ec="k", lw=0.72) <NEW_LINE> arrowprops = dict(arrowstyle="->", connectionstyle="angle, angleA=0, angleB=60") <NEW_LINE> kw1 = dict(xycoords='data', textcoords="axes fraction", arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top") <NEW_LINE> kw2 = dict(xycoords='data', textcoords="axes fraction", arrowprops=arrowprops, bbox=bbox_props, ha="left", va="bottom") <NEW_LINE> ax.annotate(textmin, xy=(xmin, ymin), xytext=(0.94, 0.96), **kw1) <NEW_LINE> ax.annotate(textmax, xy=(xmax, ymax), xytext=(0.94, 0.96), **kw2)
|
Mark max and min point on the plot.
|
625941bc55399d3f05588582
|
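A minimal usage sketch: annotate the extrema of a damped sine (assumes matplotlib and numpy are available, as the function itself does):
```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 10, 500)
y = np.exp(-x / 5) * np.sin(2 * x)
plt.plot(x, y)
annot_max_min(x, y)   # boxes the max and min with arrows
plt.show()
```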
def get_basis_functions(self, reshape=True, symbolic=True, use_tensor_factorisation=False): <NEW_LINE> <INDENT> raise NotImplementedError()
|
Get the basis functions of the element.
|
625941bc3c8af77a43ae366d
|
def nextDay(year, month, day): <NEW_LINE> <INDENT> day = day + 1 <NEW_LINE> if day > 30: <NEW_LINE> <INDENT> month = month + 1 <NEW_LINE> day = day - 30 <NEW_LINE> <DEDENT> if month > 12: <NEW_LINE> <INDENT> year = year + 1 <NEW_LINE> month = month - 12 <NEW_LINE> <DEDENT> return year, month, day
|
Returns the year, month, day of the next day.
Simple version: assume every month has 30 days.
|
625941bcac7a0e7691ed3fa8
|
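A quick check of the 30-day simplification, including the year rollover:
```python
print(nextDay(2012, 6, 17))    # (2012, 6, 18)
print(nextDay(2012, 12, 30))   # (2013, 1, 1)
```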
def test_publish_db(self): <NEW_LINE> <INDENT> (out, err) = self.call(['-c', self.config_dir, '-a', '/dev/zero', '-d', os.path.join(self.workdir, 'dup.db'), '-vl', self.lock_file, '--publish-db', os.path.join(self.workdir, 'dupdb')]) <NEW_LINE> self.assertEqual(err, '', 'no error messages:\n' + err) <NEW_LINE> self.assertIn('retracing #0', out) <NEW_LINE> self.assertTrue(os.path.isdir(os.path.join(self.workdir, 'dupdb', 'sig')))
|
Duplicate database publishing
|
625941bc711fe17d82542240
|
def dumpstruct(id0, node,structs): <NEW_LINE> <INDENT> name = id0.name(node) <NEW_LINE> packed = id0.blob(node, 'M') <NEW_LINE> spec = idblib.idaunpack(packed) <NEW_LINE> entsize = 5 if id0.wordsize == 4 else 8 <NEW_LINE> k=[] <NEW_LINE> for i in range(spec[1]): <NEW_LINE> <INDENT> member=dumpstructmember(id0, spec[entsize * i + 2:entsize * (i + 1) + 2]) <NEW_LINE> k.append(member) <NEW_LINE> <DEDENT> structs[name]=k <NEW_LINE> return structs
|
dump all info for the struct defined by `node`
|
625941bc8e7ae83300e4ae9b
|
def __repr__(self) -> str: <NEW_LINE> <INDENT> return self.__str__()
|
Return a string representation of the Some().
|
625941bc9b70327d1c4e0ca3
|
def clientConnectionFailed(self, connector, reason): <NEW_LINE> <INDENT> moduleCoordinator.ModuleCoordinator().putError("Error connecting to " + self.config['botnet'], self.module)
|
Called on failed connection to server
|
625941bcb7558d58953c4de9
|
def normalization_function(x, mean, std): <NEW_LINE> <INDENT> assert len(mean) == 3, 'Custom norm function is for 3 channel images. Expected 3 elements for mean, got {}'.format(len(mean)) <NEW_LINE> assert len(std) == 3, 'Custom norm function is for 3 channel images. Expected 3 elements for std, got {}'.format(len(std)) <NEW_LINE> img_dims = x.size()[1:] <NEW_LINE> mean_expanded = torch.cat((torch.ones((1, img_dims[1], img_dims[2]))*mean[0], torch.ones((1, img_dims[1], img_dims[2]))*mean[1], torch.ones((1, img_dims[1], img_dims[2]))*mean[2] ), dim = 0).cuda() <NEW_LINE> std_expanded = torch.cat((torch.ones((1, img_dims[1], img_dims[2]))*std[0], torch.ones((1, img_dims[1], img_dims[2]))*std[1], torch.ones((1, img_dims[1], img_dims[2]))*std[2] ), dim = 0).cuda() <NEW_LINE> normalized_tensor = x.sub(mean_expanded.expand_as(x)).div(std_expanded.expand_as(x)) <NEW_LINE> return normalized_tensor
|
Normalizes input variable with a mean and std.
output = (input-mean)/std
:param x: input data, a 3-channel image
:param mean: mean of all the input channels, a list of 3 floats
:param std: standard deviation of all the input channels, a list of 3 floats
|
625941bc99fddb7c1c9de262
|
def get_style_defs(self, arg=""): <NEW_LINE> <INDENT> return self.tktags
|
Called by the client (a Tk Text object) to learn what styles to use.
|
625941bce64d504609d7470f
|
@register.filter(is_safe=True) <NEW_LINE> def escape_commas(text): <NEW_LINE> <INDENT> return mark_safe(text.replace(',', '\\,'))
|
Escapes all commas with a backslash.
|
625941bc3d592f4c4ed1cf4d
|
def expand_path(self, path, role='File'): <NEW_LINE> <INDENT> if not os.path.isabs(path): <NEW_LINE> <INDENT> ret = os.path.abspath(path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ret = os.path.normpath(path) <NEW_LINE> <DEDENT> if not ret.startswith(self.root): <NEW_LINE> <INDENT> raise FileOutOfProject(ret, self.root, role) <NEW_LINE> <DEDENT> return ret
|
Return full path.
normalized path if path is absolute
user expanded path if path starts with ~ or ~user
project expanded path if path starts with @
current dir expanded path otherwise
|
625941bc090684286d50ebb1
|
def _setV(self, V): <NEW_LINE> <INDENT> self.V = V <NEW_LINE> self.Vhist.append(V)
|
Pre-allocating a numpy array for histories would be good,
but then we'd have to keep track of our current index in that array,
and we'd still have to extend it if we integrated past our expected
maximum time. Still, this remains a potential source of speedup if
any is later needed.
|
625941bcfbf16365ca6f608d
|
def render_authkeys(authkeys, config, key_type=None, key_value=None): <NEW_LINE> <INDENT> output = [] <NEW_LINE> ssh_key_attr = config['ldap']['attributes']['ssh_key'] <NEW_LINE> for username in authkeys.keys(): <NEW_LINE> <INDENT> user_entry = authkeys[username] <NEW_LINE> for k in user_entry[ssh_key_attr]: <NEW_LINE> <INDENT> kt, kv = k.split(' ')[0:2] <NEW_LINE> if (key_type is None or key_type == kt) and (key_value is None or key_value == kv): <NEW_LINE> <INDENT> output.append(print_authkey(kt, kv, username, config)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return '\n'.join(output)
|
Render a hash of authorized keys that came either from a YAML cache file or
fetch_ldap_authkeys(). Returns a string.
|
625941bc63d6d428bbe443bf
|
def handle(self, *args, **options): <NEW_LINE> <INDENT> if len(args) != 1: <NEW_LINE> <INDENT> raise CommandError('Please specify JSON file to import!') <NEW_LINE> <DEDENT> data = json.load(open(args[0])) <NEW_LINE> for line in data: <NEW_LINE> <INDENT> if 'fields' in line: <NEW_LINE> <INDENT> line = line['fields'] <NEW_LINE> <DEDENT> if 'is_active' in line and not line['is_active']: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if not line['email'] or not line['username']: <NEW_LINE> <INDENT> self.stderr.write( 'Skipping {}, has blank username or email'.format(line) ) <NEW_LINE> continue <NEW_LINE> <DEDENT> if User.objects.filter(username=line['username']).exists(): <NEW_LINE> <INDENT> self.stderr.write( 'Skipping {}, username exists'.format(line['username']) ) <NEW_LINE> continue <NEW_LINE> <DEDENT> if User.objects.filter(email=line['email']).exists(): <NEW_LINE> <INDENT> self.stderr.write( 'Skipping {}, email exists'.format(line['email']) ) <NEW_LINE> continue <NEW_LINE> <DEDENT> if line['last_name'] not in line['first_name']: <NEW_LINE> <INDENT> full_name = u'{0} {1}'.format( line['first_name'], line['last_name'] ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> full_name = line['first_name'] <NEW_LINE> <DEDENT> if not options['check']: <NEW_LINE> <INDENT> User.objects.create( username=line['username'], first_name=full_name, last_name='', password=line['password'], email=line['email'] )
|
Imports users from a JSON file, skipping inactive entries, records with a
blank username or email, and duplicates; unless the check option is set,
the remaining users are created.
|
625941bc32920d7e50b2809c
|
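The shape of one record the command above accepts, inferred from the fields the code reads (values are made up; the optional 'fields' wrapper and 'is_active' flag are also honored):

line = {
    "username": "jane",
    "email": "jane@example.com",
    "first_name": "Jane",
    "last_name": "Doe",
    "password": "!",        # stored as-is by User.objects.create, so it should already be a Django password hash
    "is_active": True,
}
|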
def _get_background(self, empty_ws, cadmium_ws, transmission_ws, transmission_corr, max_empty_entry, max_cadmium_entry, entry_no, tof_background=""): <NEW_LINE> <INDENT> background_ws = "background_ws" <NEW_LINE> nMeasurements = self._data_structure_helper() <NEW_LINE> measurement_technique = self.getPropertyValue('MeasurementTechnique') <NEW_LINE> tmp_names = [] <NEW_LINE> if empty_ws != "": <NEW_LINE> <INDENT> empty_no = entry_no <NEW_LINE> if max_empty_entry == nMeasurements: <NEW_LINE> <INDENT> empty_no = entry_no % nMeasurements <NEW_LINE> <DEDENT> elif entry_no >= max_empty_entry: <NEW_LINE> <INDENT> empty_no = entry_no % max_empty_entry <NEW_LINE> <DEDENT> empty_entry = mtd[empty_ws][empty_no].name() <NEW_LINE> if measurement_technique != "TOF": <NEW_LINE> <INDENT> empty_corr = empty_entry + '_corr' <NEW_LINE> tmp_names.append(empty_corr) <NEW_LINE> Multiply(LHSWorkspace=transmission_ws, RHSWorkspace=empty_entry, OutputWorkspace=empty_corr) <NEW_LINE> <DEDENT> <DEDENT> if cadmium_ws != "": <NEW_LINE> <INDENT> cadmium_no = entry_no <NEW_LINE> if max_cadmium_entry == nMeasurements: <NEW_LINE> <INDENT> cadmium_no = entry_no % nMeasurements <NEW_LINE> <DEDENT> elif entry_no >= max_cadmium_entry: <NEW_LINE> <INDENT> cadmium_no = entry_no % max_cadmium_entry <NEW_LINE> <DEDENT> cadmium_entry = mtd[cadmium_ws][cadmium_no].name() <NEW_LINE> cadmium_corr = cadmium_entry + '_corr' <NEW_LINE> tmp_names.append(cadmium_corr) <NEW_LINE> Multiply(LHSWorkspace=transmission_corr, RHSWorkspace=cadmium_entry, OutputWorkspace=cadmium_corr) <NEW_LINE> <DEDENT> if measurement_technique == "TOF": <NEW_LINE> <INDENT> background_ws = mtd[tof_background][empty_no].name() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if max_empty_entry != 0 and max_cadmium_entry != 0: <NEW_LINE> <INDENT> Plus(LHSWorkspace=empty_corr, RHSWorkspace=cadmium_corr, OutputWorkspace=background_ws) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if max_empty_entry != 0: <NEW_LINE> <INDENT> tmp_names.pop() <NEW_LINE> RenameWorkspace(InputWorkspace=empty_corr, OutputWorkspace=background_ws) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tmp_names.pop() <NEW_LINE> RenameWorkspace(InputWorkspace=cadmium_corr, OutputWorkspace=background_ws) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> tmp_names.append(background_ws) <NEW_LINE> return background_ws, tmp_names
|
Provides the background to be subtracted from the currently reduced sample.
Takes into account whether empty-container and cadmium measurements are
provided, and whether the measurement technique is TOF or powder/single crystal.
|
625941bcaad79263cf39090c
|
def import_binary(file_name: str, data_width: int, supersample: int) -> np.array(int): <NEW_LINE> <INDENT> with open(file_name, "r") as text_file: <NEW_LINE> <INDENT> data = [] <NEW_LINE> lines = text_file.readlines() <NEW_LINE> for line in lines: <NEW_LINE> <INDENT> r_line = line.strip() <NEW_LINE> for i in range(supersample): <NEW_LINE> <INDENT> r_data = r_line[-data_width:] <NEW_LINE> r_line = r_line[:-data_width] <NEW_LINE> data.append(slv_to_int(r_data)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return np.array(data)
|
Import data from the output of an HDL simulation and convert it to an np.array.
Parameters
----------
file_name : str
name of the target file.
data_width : int
width of each std_logic_vector in the output file.
supersample : int
supersampling factor (number of values packed per line).
Returns
-------
data : np.array of int.
|
625941bc30bbd722463cbc92
|
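A hypothetical round-trip (file name and contents invented for illustration; assumes slv_to_int parses a binary string into an int). With data_width=4 and supersample=2, each 8-character line yields two values, rightmost slice first:

# sample.txt contains the single line "00010010"
data = import_binary("sample.txt", data_width=4, supersample=2)
# slices are taken right to left: "0010" -> 2, then "0001" -> 1
# data == array([2, 1])
|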
def test_intersection(): <NEW_LINE> <INDENT> result = [["Number", "Surname", "Age"], [7432, "O'Malley", 39], [9824, "Darkes", 38]] <NEW_LINE> assert is_equal(result, intersection(GRADUATES, MANAGERS))
|
Test intersection operation.
|
625941bc99cbb53fe6792ab7
|
def logical_cpu_cores(): <NEW_LINE> <INDENT> return psutil.cpu_count()
|
:return: The number of logical CPU cores.
|
625941bc73bcbd0ca4b2bf4d
|
def AllValues(self, list_unset=False, include_hidden=False, properties_file=None, only_file_contents=False): <NEW_LINE> <INDENT> properties_file = properties_file or PropertiesFile.Load() <NEW_LINE> result = {} <NEW_LINE> for prop in self: <NEW_LINE> <INDENT> if (prop.is_hidden and not include_hidden and _GetPropertyWithoutCallback(prop, properties_file) is None): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if only_file_contents: <NEW_LINE> <INDENT> value = properties_file.Get(prop) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> value = _GetProperty(prop, properties_file, required=False) <NEW_LINE> <DEDENT> if value is None: <NEW_LINE> <INDENT> if not list_unset: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if prop.is_hidden and not include_hidden: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> result[prop.name] = value <NEW_LINE> <DEDENT> return result
|
Gets all the properties and their values for this section.
Args:
list_unset: bool, If True, include unset properties in the result.
include_hidden: bool, True to include hidden properties in the result.
If a property has a value set but is hidden, it will be included
regardless of this setting.
properties_file: PropertiesFile, the file to read settings from. If None
the active property file will be used.
only_file_contents: bool, True if values should be taken only from
the properties file, false if flags, env vars, etc. should
be consulted too. Mostly useful for listing file contents.
Returns:
{str:str}, The dict of {property:value} for this section.
|
625941bc32920d7e50b2809d
|
def white_elephant(gifts): <NEW_LINE> <INDENT> gift_list = [gift for gift in gifts.values()] <NEW_LINE> new_gifts = {person: None for person in gifts.keys()} <NEW_LINE> for person, gift in new_gifts.items(): <NEW_LINE> <INDENT> new_gift = choice(gift_list) <NEW_LINE> gift_list.remove(new_gift) <NEW_LINE> new_gifts[person] = new_gift <NEW_LINE> <DEDENT> return new_gifts
|
Plays a white elephant game with a given dictionary of guests and gifts
>>> white_elephant({})
{}
>>> white_elephant({'Leslie': 'stuffed dog'})
{'Leslie': 'stuffed dog'}
>>> len({'Leslie': 'stuffed dog', 'Joel': 'crossword puzzle', 'Meggie': 'candy', 'Bonnie': 'cat food', 'Katie': 'rubiks cube', 'Anges': 'starbucks gift card', 'Henry': 'graphic t-shirt', 'Sarah': 'christmas mug'}) == len(white_elephant({'Leslie': 'stuffed dog', 'Joel': 'crossword puzzle', 'Meggie': 'candy', 'Bonnie': 'cat food', 'Katie': 'rubiks cube', 'Anges': 'starbucks gift card', 'Henry': 'graphic t-shirt', 'Sarah': 'christmas mug'}))
True
|
625941bccc40096d61595822
|
def update(self, data: "Observation") -> None: <NEW_LINE> <INDENT> old = self.Q[self.last_state][self.last_action] <NEW_LINE> if data.new_state.agent_location in self.Q: <NEW_LINE> <INDENT> new = ( self.gamma * self.Q[data.new_state.agent_location][ max(self.Q[data.new_state.agent_location], key=self.Q[data.new_state.agent_location].get) ] ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new = 0 <NEW_LINE> <DEDENT> self.Q[self.last_state][self.last_action] = (1 - self.learning_rate) * old + self.learning_rate * ( data.reward + new ) <NEW_LINE> return None
|
Updates the learner's Q-table from the observed reward and new state (Q-learning update)
|
625941bcdd821e528d63b07a
|
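Written out, the assignment above is the standard Q-learning update (with α = learning_rate and γ = gamma; the max term is zero when the new state is unseen):

Q(s_t, a_t) \leftarrow (1 - \alpha)\, Q(s_t, a_t) + \alpha \left( r + \gamma \max_{a'} Q(s_{t+1}, a') \right)
|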
def setUp(self): <NEW_LINE> <INDENT> self.inst = MoransI() <NEW_LINE> self.morans_i_results_str1 = morans_i_results_str1.split('\n') <NEW_LINE> self.morans_i_results_str2 = morans_i_results_str2.split('\n')
|
Define some sample data that will be used by the tests.
|
625941bc462c4b4f79d1d5a0
|
def __init__(self, file_name): <NEW_LINE> <INDENT> self.aut_stack = [] <NEW_LINE> self.aut_to_push = [] <NEW_LINE> self.operations = [] <NEW_LINE> self._read(file_name)
|
Class initialization.
:param file_name: name of the file
containing the stack description
|
625941bc4527f215b584c32a
|
def a_mystery_function_2(binary_string): <NEW_LINE> <INDENT> binary_list=[int(i) for i in binary_string] <NEW_LINE> binary_list.reverse() <NEW_LINE> a,b=binary_list[0:2] <NEW_LINE> return a and not b
|
binary_string is a string of 1s and 0s, at least 4 characters long, with the rightmost character representing the 0th bit. Returns truthy iff bit 0 is set and bit 1 is clear.
|
625941bcf7d966606f6a9ed1
|
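A worked check of that behavior (after the reversal, index 0 is bit 0 and index 1 is bit 1, so the function is truthy exactly when the value is congruent to 1 mod 4):

assert a_mystery_function_2("1101") is True   # bit 0 set, bit 1 clear -> True
assert not a_mystery_function_2("1111")       # bit 1 set -> False
assert not a_mystery_function_2("1100")       # bit 0 clear -> 0 (falsy)
|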
def choose_rsnapshot_config(self): <NEW_LINE> <INDENT> fname = QFileDialog.getOpenFileName(self, self.translate( 'MainWindow', 'Open file'), os.path.expanduser('~')) <NEW_LINE> if fname[0]: <NEW_LINE> <INDENT> self.settings.setValue('rsnapshot_config_path', fname[0]) <NEW_LINE> if self.settings.value('rsnapshot_bin_path'): <NEW_LINE> <INDENT> self.dispatcher.rsnapshot_firstset.emit()
|
Dialog to choose the rsnapshot config file
|
625941bc187af65679ca4fee
|
def clean_dict(dct): <NEW_LINE> <INDENT> return dict((key, val) for key, val in dct.items() if val is not None)
|
Returns a dict where items with a None value are removed
|
625941bce5267d203edcdb70
|
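Usage is one line; note that falsy-but-not-None values survive:

clean_dict({"a": 1, "b": None, "c": 0})   # -> {'a': 1, 'c': 0}
|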
def init_extra(self): <NEW_LINE> <INDENT> pass
|
Initializes the extra_widget attribute, a QWidget used to display
rig-specific properties (e.g. length, segment)
|
625941bc2ae34c7f2600d002
|
def retrieve_output(self): <NEW_LINE> <INDENT> return self.result
|
Method for retrieving output after a model run
|
625941bc4527f215b584c32b
|
def _set_attr_reg(self): <NEW_LINE> <INDENT> tmos_v = self._meta_data['bigip']._meta_data['tmos_version'] <NEW_LINE> attributes = self._meta_data['attribute_registry'] <NEW_LINE> v12kind = 'tm:asm:policies:blocking-settings:blocking-settingcollectionstate' <NEW_LINE> v11kind = 'tm:asm:policies:blocking-settings' <NEW_LINE> builderv11 = 'tm:asm:policies:policy-builder:pbconfigstate' <NEW_LINE> builderv12 = 'tm:asm:policies:policy-builder:policy-builderstate' <NEW_LINE> if LooseVersion(tmos_v) < LooseVersion('12.0.0'): <NEW_LINE> <INDENT> attributes[v11kind] = Blocking_Settings <NEW_LINE> attributes[builderv11] = Policy_Builder <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> attributes[v12kind] = Blocking_Settings <NEW_LINE> attributes[builderv12] = Policy_Builder
|
Helper method.
Registers the correct attribute classes for the running TMOS version.
|
625941bc94891a1f4081b978
|
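The version gate relies on LooseVersion comparing dotted version strings numerically; a minimal check (distutils is deprecated in recent Pythons, where packaging.version is the usual replacement):

from distutils.version import LooseVersion

assert LooseVersion('11.6.1') < LooseVersion('12.0.0')
assert LooseVersion('12.1.0') >= LooseVersion('12.0.0')
|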
def sliderplot2D(ZZZ, XX=None, YY=None, slidervals=None, *args, **kwargs): <NEW_LINE> <INDENT> if 'linewidth' and 'lw' not in kwargs.keys(): <NEW_LINE> <INDENT> kwargs['linewidth'] = 2 <NEW_LINE> <DEDENT> fig = plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI) <NEW_LINE> ZZZ = np.asarray(ZZZ, dtype=np.float) <NEW_LINE> if slidervals is None: <NEW_LINE> <INDENT> slidervals = range(ZZZ.shape[2]) <NEW_LINE> <DEDENT> slidervals = np.asarray(slidervals, dtype=np.float) <NEW_LINE> if XX is None: <NEW_LINE> <INDENT> XX = range(ZZZ.shape[1]) <NEW_LINE> <DEDENT> if YY is None: <NEW_LINE> <INDENT> YY = range(ZZZ.shape[0]) <NEW_LINE> <DEDENT> XX = np.asarray(XX, dtype=np.float) <NEW_LINE> YY = np.asarray(YY, dtype=np.float) <NEW_LINE> if XX.ndim < 2: <NEW_LINE> <INDENT> XX, YY = np.meshgrid(XX, YY) <NEW_LINE> <DEDENT> p = plt.pcolormesh(XX, YY, ZZZ[:, :, 0]) <NEW_LINE> plt.subplots_adjust(bottom=0.2) <NEW_LINE> ax = plt.gca() <NEW_LINE> ax.set_aspect('equal') <NEW_LINE> ax.autoscale(tight=True) <NEW_LINE> " Create slider on plot" <NEW_LINE> axsldr = plt.axes([0.15, 0.05, 0.65, 0.03], axisbg='lightgoldenrodyellow') <NEW_LINE> sldr = plt.Slider(axsldr, '', 0, len(slidervals) - 1) <NEW_LINE> txt = axsldr.set_xlabel('{} [{}]'.format(slidervals[0], 0), fontsize=18) <NEW_LINE> plt.sca(ax) <NEW_LINE> " Slider update function" <NEW_LINE> def update(val): <NEW_LINE> <INDENT> pno = int(np.round(sldr.val)) <NEW_LINE> p.set_array(ZZZ[:-1, :-1, pno].ravel()) <NEW_LINE> txt.set_text('{} [{}]'.format(slidervals[pno], pno)) <NEW_LINE> plt.draw() <NEW_LINE> plt.gcf().canvas.draw() <NEW_LINE> <DEDENT> sldr.on_changed(update)
|
Shortcut for creating an image plot with a slider to step through a third dimension
ZZZ = [n x m x o]: z-axis data
XX = [n x m] or [n]: x-axis data
YY = [n x m] or [m]: y-axis data
slidervals = None or [o]: values to show on the slider
if XX and/or YY have a single dimension, the 2D values are generated via meshgrid
E.G.
sliderplot2D(np.random.rand(4, 3, 5), slidervals=[1, 2, 3, 4, 5])
|
625941bcd486a94d0b98e015
|