_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q40200 | GenProcess.run_snr | train | def run_snr(self):
"""Run the snr calculation.
Takes results from ``self.set_parameters`` and other inputs and inputs these
into the snr calculator.
The resulting dictionary is stored on ``self.final_dict``; nothing is
returned.
"""
if self.ecc:
# Eccentric binaries take an initial point, eccentricity and observation
# time instead of component spins and start/end times.
required_kwargs = {'dist_type': self.dist_type,
'initial_cond_type': self.initial_cond_type,
'ecc': True}
input_args = [self.m1, self.m2, self.z_or_dist, self.initial_point,
self.eccentricity, self.observation_time]
else:
required_kwargs = {'dist_type': self.dist_type}
input_args = [self.m1, self.m2, self.spin_1, self.spin_2,
self.z_or_dist, self.start_time, self.end_time]
# Later dicts win on key collisions, so the user-supplied kwarg dicts can
# override the required defaults above.
input_kwargs = {**required_kwargs,
**self.general,
**self.sensitivity_input,
**self.snr_input,
**self.parallel_input}
self.final_dict = snr(*input_args, **input_kwargs)
return | python | {
"resource": ""
} |
q40201 | BaseHandler.event_payment | train | def event_payment(self, date, time, pid, commerce_id, transaction_id, request_ip, token, webpay_server):
'''Record the payment event
Official handler writes this information to TBK_EVN%Y%m%d file.
Abstract hook: concrete logging handlers must override this method.
'''
raise NotImplementedError("Logging Handler must implement event_payment") | python | {
"resource": ""
} |
q40202 | BaseHandler.event_confirmation | train | def event_confirmation(self, date, time, pid, commerce_id, transaction_id, request_ip, order_id):
'''Record the confirmation event.
Official handler writes this information to TBK_EVN%Y%m%d file.
Abstract hook: concrete logging handlers must override this method.
'''
raise NotImplementedError("Logging Handler must implement event_confirmation") | python | {
"resource": ""
} |
q40203 | JSHostManager.stop | train | def stop(self):
"""
If the manager is running, tell it to stop its process.
Raises ``UnexpectedResponse`` if the manager answers with anything
other than HTTP 200.
"""
res = self.send_request('manager/stop', post=True)
if res.status_code != 200:
raise UnexpectedResponse(
'Attempted to stop manager. {res_code}: {res_text}'.format(
res_code=res.status_code,
res_text=res.text,
)
)
if settings.VERBOSITY >= verbosity.PROCESS_STOP:
print('Stopped {}'.format(self.get_name()))
# The request will end just before the process stops, so there is a tiny
# possibility of a race condition. We delay as a precaution so that we
# can be reasonably confident of the system's state.
time.sleep(0.05) | python | {
"resource": ""
} |
q40204 | JSHostManager.stop_host | train | def stop_host(self, config_file):
"""
Stops a managed host specified by `config_file`.
Returns the manager's JSON response; raises ``UnexpectedResponse`` on
any non-200 status.
"""
res = self.send_json_request('host/stop', data={'config': config_file})
if res.status_code != 200:
raise UnexpectedResponse(
'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format(
res_code=res.status_code,
res_text=res.text,
)
)
return res.json() | python | {
"resource": ""
} |
def get_data(self, params=None):
    """
    Make the request and return the deserialized JSON from the response.

    :param params: Dictionary mapping (string) query parameters to values
        (``None`` means "no parameters")
    :type params: dict or None
    :return: JSON object with the data fetched from that URL as a
        JSON-format object.
    :rtype: (dict or array)
    :raises PVWattsError: if the server responds with HTTP 403.
    """
    # Fix: the original used a mutable default argument (params={}),
    # which is shared across calls and can be silently mutated.
    if params is None:
        params = {}
    # requests treats proxies=None the same as "no proxies configured",
    # so the original's two duplicated request branches collapse into one.
    proxies = getattr(self, 'proxies', None) if self else None
    response = requests.get(
        url=PVWatts.PVWATTS_QUERY_URL,
        params=params,
        headers={'User-Agent': 'pypvwatts/{0} (Python)'.format(VERSION)},
        proxies=proxies)
    if response.status_code == 403:
        raise PVWattsError("Forbidden, 403")
    return response.json()
q40206 | FiniteStateLogger.receive | train | def receive(self, input):
"""
Add logging of state transitions to the wrapped state machine.
@see: L{IFiniteStateMachine.receive}
"""
# NOTE: uses the Python 2 built-in ``unicode``; not Python 3 compatible.
if IRichInput.providedBy(input):
richInput = unicode(input)
symbolInput = unicode(input.symbol())
else:
richInput = None
symbolInput = unicode(input)
action = LOG_FSM_TRANSITION(
self.logger,
fsm_identifier=self.identifier,
fsm_state=unicode(self.state),
fsm_rich_input=richInput,
fsm_input=symbolInput)
# Run the wrapped transition inside the logging action so that failures
# are attributed to this transition.
with action as theAction:
output = super(FiniteStateLogger, self).receive(input)
theAction.addSuccessFields(
fsm_next_state=unicode(self.state), fsm_output=[unicode(o) for o in output])
# When a terminal state is reached, close out the machine-lifetime action.
if self._action is not None and self._isTerminal(self.state):
self._action.addSuccessFields(
fsm_terminal_state=unicode(self.state))
self._action.finish()
self._action = None
return output | python | {
"resource": ""
} |
q40207 | ParallelContainer.prep_parallel | train | def prep_parallel(self, binary_args, other_args):
"""Prepare the parallel calculations
Prepares the arguments to be run in parallel.
It will divide up arrays according to num_splits.
Args:
binary_args (list): List of binary arguments for input into the SNR function.
other_args (tuple of obj): tuple of other args for input into parallel snr function.
"""
if self.length < 100:
raise Exception("Run this across 1 processor by setting num_processors kwarg to None.")
# -1 means "use every available core".
if self.num_processors == -1:
self.num_processors = mp.cpu_count()
split_val = int(np.ceil(self.length/self.num_splits))
split_inds = [self.num_splits*i for i in np.arange(1, split_val)]
inds_split_all = np.split(np.arange(self.length), split_inds)
self.args = []
for i, ind_split in enumerate(inds_split_all):
trans_args = []
for arg in binary_args:
try:
trans_args.append(arg[ind_split])
# Scalars are not indexable; broadcast them unchanged to every chunk.
except TypeError:
trans_args.append(arg)
self.args.append((i, tuple(trans_args)) + other_args)
return | python | {
"resource": ""
} |
q40208 | ParallelContainer.run_parallel | train | def run_parallel(self, para_func):
"""Run parallel calulation
This will run the parallel calculation on self.num_processors.
Args:
para_func (obj): Function object to be used in parallel.
Returns:
(dict): Dictionary with parallel results, with per-chunk arrays
concatenated back together key by key.
"""
if self.timer:
start_timer = time.time()
# for testing
# check = parallel_snr_func(*self.args[10])
# import pdb
# pdb.set_trace()
with mp.Pool(self.num_processors) as pool:
print('start pool with {} processors: {} total processes.\n'.format(
self.num_processors, len(self.args)))
results = [pool.apply_async(para_func, arg) for arg in self.args]
out = [r.get() for r in results]
# Re-assemble: each worker returns a dict of arrays; concatenate per key.
out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()}
if self.timer:
print("SNR calculation time:", time.time()-start_timer)
return out | python | {
"resource": ""
} |
q40209 | RawParserUnparserFactory | train | def RawParserUnparserFactory(parser_name, parse_callable, *unparse_callables):
"""
Produces a callable object that also has callable attributes that
passes its first argument to the parent callable.
Calling the returned instance parses ``source``; each attribute named
after an unparse callable parses first, then unparses the result.
"""
def build_unparse(f):
@wraps(f)
def unparse(self, source, *a, **kw):
# Parse the raw source, then hand the AST to the wrapped unparser.
node = parse_callable(source)
return f(node, *a, **kw)
# a dumb and lazy docstring replacement
unparse.__doc__ = f.__doc__.replace(
'ast\n    The AST ',
'source\n    The source ',
)
return unparse
def build_parse(f):
@wraps(f)
def parse(self, source):
return f(source)
parse.__name__ = parser_name
parse.__qualname__ = parser_name
return parse
# Build a one-off type whose methods are the wrapped unparsers, and whose
# __call__ is the parser; return a singleton instance of it.
callables = {f.__name__: build_unparse(f) for f in unparse_callables}
callables['__call__'] = build_parse(parse_callable)
callables['__module__'] = PKGNAME
return type(parser_name, (object,), callables)() | python | {
"resource": ""
} |
def downsample_trajectories(trajectories, downsampler, *args, **kwargs):
    '''Downsamples all points together, then re-splits into original trajectories.
    trajectories : list of 2-d arrays, each representing a trajectory
    downsampler(X, *args, **kwargs) : callable that returns indices into X
    '''
    stacked = np.vstack(trajectories)
    kept = np.sort(downsampler(stacked, *args, **kwargs))
    result = []
    boundary = 0
    for traj in trajectories:
        boundary += len(traj)
        # Kept indices below the running boundary belong to this trajectory.
        split = np.searchsorted(kept, boundary)
        result.append(stacked[kept[:split]])
        kept = kept[split:]
    return result
q40211 | epsilon_net | train | def epsilon_net(points, close_distance):
'''Selects a subset of `points` to preserve graph structure while minimizing
the number of points used, by removing points within `close_distance`.
Returns the downsampled indices.'''
num_points = points.shape[0]
indices = set(range(num_points))
selected = []
# Greedy cover: take an arbitrary remaining point, keep it, and discard
# every point within close_distance of it.
while indices:
idx = indices.pop()
nn_inds, = nearest_neighbors(points[idx], points, epsilon=close_distance)
indices.difference_update(nn_inds)
selected.append(idx)
return selected | python | {
"resource": ""
} |
q40212 | transport_from_url | train | def transport_from_url(url):
""" Create a transport for the given URL.
Bare "host:port" strings are treated as SCGI endpoints. Raises
URLError for schemes not present in TRANSPORTS.
"""
# "host:1234" has no slash and a numeric tail: assume scgi://host:1234.
if '/' not in url and ':' in url and url.rsplit(':')[-1].isdigit():
url = 'scgi://' + url
url = urlparse.urlsplit(url, scheme="scgi", allow_fragments=False)  # pylint: disable=redundant-keyword-arg
try:
transport = TRANSPORTS[url.scheme.lower()]
except KeyError:
if not any((url.netloc, url.query)) and url.path.isdigit():
# Support simplified "domain:port" URLs
return transport_from_url("scgi://%s:%s" % (url.scheme, url.path))
else:
raise URLError("Unsupported scheme in URL %r" % url.geturl())
else:
return transport(url) | python | {
"resource": ""
} |
q40213 | _encode_payload | train | def _encode_payload(data, headers=None):
"Wrap data in an SCGI request (netstring-framed header block + body)."
# CONTENT_LENGTH must come first and SCGI=1 is mandatory per the SCGI spec.
prolog = "CONTENT_LENGTH\0%d\0SCGI\x001\0" % len(data)
if headers:
prolog += _encode_headers(headers)
return _encode_netstring(prolog) + data | python | {
"resource": ""
} |
q40214 | _parse_headers | train | def _parse_headers(headers):
"Get headers dict from header string."
try:
return dict(line.rstrip().split(": ", 1)
for line in headers.splitlines()
if line
)
except (TypeError, ValueError) as exc:
raise SCGIException("Error in SCGI headers %r (%s)" % (headers, exc,)) | python | {
"resource": ""
} |
q40215 | _parse_response | train | def _parse_response(resp):
""" Get xmlrpc response from scgi response
Returns a (payload, headers_dict) tuple; raises SCGIException when the
header/body delimiter is missing.
"""
# Assume they care for standards and send us CRLF (not just LF)
try:
headers, payload = resp.split("\r\n\r\n", 1)
except (TypeError, ValueError) as exc:
raise SCGIException("No header delimiter in SCGI response of length %d (%s)" % (len(resp), exc,))
headers = _parse_headers(headers)
clen = headers.get("Content-Length")
if clen is not None:
# Check length, just in case the transport is bogus
assert len(payload) == int(clen)
return payload, headers | python | {
"resource": ""
} |
q40216 | scgi_request | train | def scgi_request(url, methodname, *params, **kw):
""" Send a XMLRPC request over SCGI to the given URL.
@param url: Endpoint URL.
@param methodname: XMLRPC method name.
@param params: Tuple of simple python objects.
@keyword deserialize: Parse XML result? (default is True)
@return: XMLRPC response, or the equivalent Python data.
"""
# NOTE: uses the Python 2 ``xmlrpclib`` module.
xmlreq = xmlrpclib.dumps(params, methodname)
xmlresp = SCGIRequest(url).send(xmlreq)
if kw.get("deserialize", True):
# This fixes a bug with the Python xmlrpclib module
# (has no handler for <i8> in some versions)
xmlresp = xmlresp.replace("<i8>", "<i4>").replace("</i8>", "</i4>")
# Return deserialized data
return xmlrpclib.loads(xmlresp)[0][0]
else:
# Return raw XML
return xmlresp | python | {
"resource": ""
} |
q40217 | SCGIRequest.send | train | def send(self, data):
""" Send data over scgi to URL and get response.
Records wall-clock round-trip time in ``self.latency`` (even on error)
and the parsed response headers in ``self.resp_headers``.
"""
start = time.time()
try:
scgi_resp = ''.join(self.transport.send(_encode_payload(data)))
finally:
self.latency = time.time() - start
resp, self.resp_headers = _parse_response(scgi_resp)
return resp | python | {
"resource": ""
} |
q40218 | Rollback._frames | train | def _frames(traceback):
'''
Returns generator that iterates over frames in a traceback
'''
# NOTE(review): this is called as ``self._frames(traceback)`` elsewhere,
# so it is presumably decorated with @staticmethod in the full source
# (otherwise ``traceback`` would bind to self) -- confirm upstream.
frame = traceback
# Walk to the innermost traceback entry, yielding each frame on the way.
while frame.tb_next:
frame = frame.tb_next
yield frame.tb_frame
return | python | {
"resource": ""
} |
q40219 | Rollback._methodInTraceback | train | def _methodInTraceback(self, name, traceback):
'''
Returns boolean whether traceback contains method from this instance
'''
foundMethod = False
for frame in self._frames(traceback):
# Match both the bound instance and the executing code object's name.
this = frame.f_locals.get('self')
if this is self and frame.f_code.co_name == name:
foundMethod = True
break
return foundMethod | python | {
"resource": ""
} |
def addStep(self, callback, *args, **kwargs):
    '''
    Register a rollback step together with the arguments it should be
    invoked with. If a rollback is triggered, steps run in LIFO order.
    '''
    step = (callback, args, kwargs)
    self.steps.append(step)
def doRollback(self):
    '''
    Execute every registered rollback step, most recently added first.
    '''
    while self.steps:
        step, positional, named = self.steps.pop()
        step(*positional, **named)
q40222 | describe | train | def describe(db, zip, case_insensitive):
"""Show .dbf file statistics (record count and field list)."""
# NOTE: ``zip`` shadows the builtin; kept for CLI-option compatibility.
with open_db(db, zip, case_sensitive=not case_insensitive) as dbf:
click.secho('Rows count: %s' % (dbf.prolog.records_count))
click.secho('Fields:')
for field in dbf.fields:
click.secho('  %s: %s' % (field.type, field)) | python | {
"resource": ""
} |
q40223 | clone_repo | train | def clone_repo(pkg, dest, repo, repo_dest, branch):
"""Clone the Playdoh repo into a custom path.
Note: ``pkg`` and ``dest`` are unused here; the signature matches the
other setup steps so they can be called uniformly.
"""
git(['clone', '--recursive', '-b', branch, repo, repo_dest]) | python | {
"resource": ""
} |
q40224 | create_virtualenv | train | def create_virtualenv(pkg, repo_dest, python):
"""Creates a virtualenv within which to install your new application.
Returns the path of the created virtualenv. Honors $WORKON_HOME
(virtualenvwrapper) when set; otherwise uses <repo_dest>/.virtualenv.
Raises EnvironmentError when the requested python or the virtualenv
tool cannot be found.
"""
workon_home = os.environ.get('WORKON_HOME')
venv_cmd = find_executable('virtualenv')
python_bin = find_executable(python)
if not python_bin:
raise EnvironmentError('%s is not installed or not '
'available on your $PATH' % python)
if workon_home:
# Can't use mkvirtualenv directly here because relies too much on
# shell tricks. Simulate it:
venv = os.path.join(workon_home, pkg)
else:
venv = os.path.join(repo_dest, '.virtualenv')
if venv_cmd:
if not verbose:
log.info('Creating virtual environment in %r' % venv)
args = ['--python', python_bin, venv]
if not verbose:
args.insert(0, '-q')
subprocess.check_call([venv_cmd] + args)
else:
raise EnvironmentError('Could not locate the virtualenv. Install with '
'pip install virtualenv.')
return venv | python | {
"resource": ""
} |
q40225 | install_reqs | train | def install_reqs(venv, repo_dest):
"""Installs all compiled requirements that can't be shipped in vendor."""
# Run pip from inside the virtualenv so packages land there.
with dir_path(repo_dest):
args = ['-r', 'requirements/compiled.txt']
if not verbose:
args.insert(0, '-q')
subprocess.check_call([os.path.join(venv, 'bin', 'pip'), 'install'] +
args) | python | {
"resource": ""
} |
def find_executable(name):
    """
    Finds the actual path to a named command.
    The first one on $PATH wins.
    """
    search_dirs = os.environ.get('PATH', '').split(':')
    for directory in search_dirs:
        full_path = os.path.join(directory, name)
        if os.path.exists(full_path):
            return full_path
q40227 | _my_pdf_formatter | train | def _my_pdf_formatter(data, format, ordered_alphabets) :
""" Generate a logo in PDF format.
Modified from weblogo version 3.4 source code.
Renders to EPS first, then converts via Ghostscript.
"""
eps = _my_eps_formatter(data, format, ordered_alphabets).decode()
gs = weblogolib.GhostscriptAPI()
return gs.convert('pdf', eps, format.logo_width, format.logo_height) | python | {
"resource": ""
} |
q40228 | Molecule.pruneToAtoms | train | def pruneToAtoms(self, atoms):
"""Prune the molecule to the specified atoms.
Bonds will be removed automatically by ``remove_atom``."""
# Iterate over a copy because remove_atom mutates self.atoms.
_atoms = self.atoms[:]
for atom in _atoms:
if atom not in atoms:
self.remove_atom(atom) | python | {
"resource": ""
} |
q40229 | poll | train | def poll(connection: connection, timeout: float=1.0) -> Iterable[Event]:
"""Poll the connection for notification events.
This method operates as an iterable. It will keep returning events until
all events have been read.
Parameters
----------
connection: psycopg2.extensions.connection
Active connection to a PostGreSQL database.
timeout: float
Number of seconds to block for an event before timing out.
A non-positive value makes the call non-blocking.
Returns
-------
event: Event or None
If an event is available, an Event is returned.
If no event is available, None is returned.
Examples
--------
>>> events = [evt for evt in connection.poll()]
>>> for evt in connection.poll():
print(evt)
"""
if timeout > 0.0:
log('Polling for events (Blocking, {} seconds)...'.format(timeout), logger_name=_LOGGER_NAME)
else:
log('Polling for events (Non-Blocking)...', logger_name=_LOGGER_NAME)
# select() returns three empty lists on timeout, i.e. nothing to read.
if select.select([connection], [], [], timeout) == ([], [], []):
log('...No events found', logger_name=_LOGGER_NAME)
return
else:
log('Events', logger_name=_LOGGER_NAME)
log('------', logger_name=_LOGGER_NAME)
# Drain all queued NOTIFY payloads and yield them as Event objects.
connection.poll()
while connection.notifies:
event = connection.notifies.pop(0)
log(str(event), logger_name=_LOGGER_NAME)
yield Event.fromjson(event.payload) | python | {
"resource": ""
} |
q40230 | Event.fromjson | train | def fromjson(cls, json_string: str) -> 'Event':
"""Create a new Event from a from a psycopg2-pgevent event JSON.
Parameters
----------
json_string: str
Valid psycopg2-pgevent event JSON.
Returns
-------
Event
Event created from JSON deserialization.
Raises
------
json.JSONDecodeError / KeyError / ValueError on malformed input.
"""
obj = json.loads(json_string)
return cls(
UUID(obj['event_id']),
obj['event_type'],
obj['schema_name'],
obj['table_name'],
obj['row_id']
) | python | {
"resource": ""
} |
def tojson(self) -> str:
    """Serialize this Event into a JSON string.

    Returns
    -------
    str
        JSON-serialized Event.
    """
    payload = {
        'event_id': str(self.id),
        'event_type': self.type,
        'schema_name': self.schema_name,
        'table_name': self.table_name,
        'row_id': self.row_id,
    }
    return json.dumps(payload)
def setup_figure(self):
    """Sets up the initial figure on to which every plot is added.

    Builds the subplot grid, instantiates one colorbar class per distinct
    plot type, applies subplot spacing, and adds any figure-level labels
    and title. Results are stored on ``self.fig`` and ``self.ax``.
    """
    # declare figure and axes environments
    fig, ax = plt.subplots(nrows=int(self.num_rows),
                           ncols=int(self.num_cols),
                           sharex=self.sharex,
                           sharey=self.sharey)
    fig.set_size_inches(self.figure_width, self.figure_height)
    # create list of ax. Catch error if it is a single plot.
    try:
        ax = ax.ravel()
    except AttributeError:
        ax = [ax]
    # create list of plot types
    self.plot_types = [self.plot_info[str(i)]['plot_type'] for i in range(len(ax))]
    # A single plot gets a default colorbar position of 5 unless overridden.
    if len(self.plot_types) == 1:
        if self.plot_types[0] not in self.colorbars:
            self.colorbars[self.plot_types[0]] = {'cbar_pos': 5}
        elif 'cbar_pos' not in self.colorbars[self.plot_types[0]]:
            self.colorbars[self.plot_types[0]]['cbar_pos'] = 5
    # prepare colorbar classes (one per distinct plot type; Horizon has none)
    self.colorbar_classes = {}
    for plot_type in self.plot_types:
        if plot_type in self.colorbar_classes:
            continue
        if plot_type == 'Horizon':
            self.colorbar_classes[plot_type] = None
        elif plot_type in self.colorbars:
            self.colorbar_classes[plot_type] = FigColorbar(fig, plot_type,
                                                           **self.colorbars[plot_type])
        else:
            self.colorbar_classes[plot_type] = FigColorbar(fig, plot_type)
    # set subplots_adjust settings
    # BUG FIX: the original tested ``... or 'Waterfall'`` -- a non-empty
    # string literal is always truthy, so the right margin was shrunk
    # unconditionally. Test membership in plot_types instead.
    if 'Ratio' in self.plot_types or 'Waterfall' in self.plot_types:
        self.subplots_adjust_kwargs['right'] = 0.79
    # adjust figure sizes
    fig.subplots_adjust(**self.subplots_adjust_kwargs)
    if 'fig_y_label' in self.__dict__:
        fig.text(self.fig_y_label_x,
                 self.fig_y_label_y,
                 r'{}'.format(self.fig_y_label),
                 **self.fig_y_label_kwargs)
    if 'fig_x_label' in self.__dict__:
        fig.text(self.fig_x_label_x,
                 self.fig_x_label_y,
                 r'{}'.format(self.fig_x_label),
                 **self.fig_x_label_kwargs)
    if 'fig_title' in self.__dict__:
        fig.text(self.fig_title_kwargs['x'],
                 self.fig_title_kwargs['y'],
                 r'{}'.format(self.fig_title),
                 **self.fig_title_kwargs)
    self.fig, self.ax = fig, ax
    return
q40233 | MakePlotProcess.create_plots | train | def create_plots(self):
"""Creates plots according to each plotting class.
For every axis, instantiates the plot class named by its plot_type and
runs make_plot()/setup_plot() on it.
"""
for i, axis in enumerate(self.ax):
# plot everything. First check general dict for parameters related to plots.
# The plot class is looked up by name in the module globals.
trans_plot_class_call = globals()[self.plot_types[i]]
trans_plot_class = trans_plot_class_call(self.fig, axis,
self.value_classes[i].x_arr_list,
self.value_classes[i].y_arr_list,
self.value_classes[i].z_arr_list,
colorbar=(
self.colorbar_classes[self.plot_types[i]]),
**{**self.general,
**self.figure,
**self.plot_info[str(i)],
**self.plot_info[str(i)]['limits'],
**self.plot_info[str(i)]['label'],
**self.plot_info[str(i)]['extra'],
**self.plot_info[str(i)]['legend']})
# create the plot
trans_plot_class.make_plot()
# setup the plot
trans_plot_class.setup_plot()
# print("Axis", i, "Complete")
return | python | {
"resource": ""
} |
def create_ngram_table(self, cardinality):
    """
    Creates a table for n-gram of a give cardinality. The table name is
    constructed from this parameter, for example for cardinality `2` there
    will be a table `_2_gram` created.
    Parameters
    ----------
    cardinality : int
        The cardinality to create a table for.
    """
    column_parts = []
    unique_parts = []
    # Columns run word_{n-1} ... word_1, word; the same order forms the
    # UNIQUE constraint.
    for position in reversed(range(cardinality)):
        if position != 0:
            column_parts.append("word_{0} TEXT, ".format(position))
            unique_parts.append("word_{0}, ".format(position))
        else:
            unique_parts.append("word")
    query = "CREATE TABLE IF NOT EXISTS _{0}_gram (".format(cardinality)
    query += "".join(column_parts)
    query += "word TEXT, count INTEGER, UNIQUE({0}) );".format(
        "".join(unique_parts))
    self.execute_sql(query)
q40235 | DatabaseConnector.ngrams | train | def ngrams(self, with_counts=False):
"""
Returns all ngrams that are in the table.
Parameters
----------
with_counts : bool
When True, append each ngram's count as the last tuple element.
Returns
-------
ngrams : generator
A generator for ngram tuples.
"""
# Select columns word_{n-1} ... word_1, word (matching table layout).
query = "SELECT "
for i in reversed(range(self.cardinality)):
if i != 0:
query += "word_{0}, ".format(i)
elif i == 0:
query += "word"
if with_counts:
query += ", count"
query += " FROM _{0}_gram;".format(self.cardinality)
result = self.execute_sql(query)
for row in result:
yield tuple(row) | python | {
"resource": ""
} |
q40236 | DatabaseConnector.ngram_count | train | def ngram_count(self, ngram):
"""
Gets the count for a given ngram from the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
Returns
-------
count : int
The count of the ngram (0 when the ngram is absent).
"""
query = "SELECT count FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
result = self.execute_sql(query)
return self._extract_first_integer(result) | python | {
"resource": ""
} |
q40237 | DatabaseConnector.insert_ngram | train | def insert_ngram(self, ngram, count):
"""
Inserts a given n-gram with count into the database.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The count for the given n-gram.
"""
# SQL is assembled via _build_values_clause; values are not parameterized
# here, so escaping is that helper's responsibility.
query = "INSERT INTO _{0}_gram {1};".format(len(ngram),
self._build_values_clause(ngram, count))
self.execute_sql(query) | python | {
"resource": ""
} |
q40238 | DatabaseConnector.update_ngram | train | def update_ngram(self, ngram, count):
"""
Updates a given ngram in the database. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
count : int
The new count for the given n-gram.
"""
query = "UPDATE _{0}_gram SET count = {1}".format(len(ngram), count)
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query) | python | {
"resource": ""
} |
q40239 | DatabaseConnector.remove_ngram | train | def remove_ngram(self, ngram):
"""
Removes a given ngram from the databae. The ngram has to be in the
database, otherwise this method will stop with an error.
Parameters
----------
ngram : iterable of str
A list, set or tuple of strings.
"""
query = "DELETE FROM _{0}_gram".format(len(ngram))
query += self._build_where_clause(ngram)
query += ";"
self.execute_sql(query) | python | {
"resource": ""
} |
def execute_sql(self, query):
    """
    Execute a query string on the open sqlite database and return all
    fetched rows.
    """
    cursor = self.con.cursor()
    cursor.execute(query)
    return cursor.fetchall()
q40241 | PostgresDatabaseConnector.create_database | train | def create_database(self):
"""
Creates an empty database if not exists.
When ``self.normalize`` is set, also installs plperlu and a
``normalize(text)`` function based on Text::Unidecode.
"""
if not self._database_exists():
# CREATE DATABASE cannot run inside a transaction; use autocommit.
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database() | python | {
"resource": ""
} |
q40242 | PostgresDatabaseConnector.reset_database | train | def reset_database(self):
"""
Re-create an empty database (drop if present, then create).
"""
if self._database_exists():
# DROP DATABASE cannot run inside a transaction; use autocommit.
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "DROP DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
self.create_database() | python | {
"resource": ""
} |
q40243 | PostgresDatabaseConnector.delete_index | train | def delete_index(self, cardinality):
"""
Delete index for the table with the given cardinality.
Drops the base-class indexes plus every postgres-specific varchar /
lower / normalized variant.
Parameters
----------
cardinality : int
The cardinality of the index to delete.
"""
DatabaseConnector.delete_index(self, cardinality)
query = "DROP INDEX IF EXISTS idx_{0}_gram_varchar;".format(cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_normalized_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_varchar;".format(
cardinality)
self.execute_sql(query)
query = "DROP INDEX IF EXISTS idx_{0}_gram_lower_normalized_varchar;".\
format(cardinality)
self.execute_sql(query)
# Per-column lower() indexes exist for every word column except word_0.
for i in reversed(range(cardinality)):
if i != 0:
query = "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
cardinality, i)
self.execute_sql(query) | python | {
"resource": ""
} |
q40244 | PostgresDatabaseConnector.open_database | train | def open_database(self):
"""
Opens the postgres database connection (no-op when already open).
Connection errors are printed, not raised.
"""
if not self.con:
try:
self.con = psycopg2.connect(host=self.host,
database=self.dbname, user=self.user,
password=self.password, port=self.port)
except psycopg2.Error as e:
print("Error while opening database:")
print(e.pgerror) | python | {
"resource": ""
} |
q40245 | PostgresDatabaseConnector.execute_sql | train | def execute_sql(self, query):
"""
Executes a given query string on an open postgres database.
Returns the fetched rows, or an empty list for statements that
produce no result set.
"""
c = self.con.cursor()
c.execute(query)
result = []
if c.rowcount > 0:
try:
result = c.fetchall()
# Non-SELECT statements have rowcount > 0 but nothing to fetch.
except psycopg2.ProgrammingError:
pass
return result | python | {
"resource": ""
} |
q40246 | PostgresDatabaseConnector._database_exists | train | def _database_exists(self):
"""
Check if the database exists; returns True/False.
Connects to the maintenance database "postgres" to query the catalog.
"""
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
query_check = "select datname from pg_catalog.pg_database"
query_check += " where datname = '{0}';".format(self.dbname)
c = con.cursor()
c.execute(query_check)
result = c.fetchall()
if len(result) > 0:
return True
return False | python | {
"resource": ""
} |
q40247 | log_cef | train | def log_cef(name, severity=logging.INFO, env=None, username='none',
signature=None, **kwargs):
"""
Wraps cef logging function so we don't need to pass in the config
dictionary every time. See bug 707060. ``env`` can be either a request
object or just the request.META dictionary.
Extra ``kwargs`` are forwarded as the CEF ``data`` payload.
"""
cef_logger = commonware.log.getLogger('cef')
c = {'product': settings.CEF_PRODUCT,
'vendor': settings.CEF_VENDOR,
'version': settings.CEF_VERSION,
'device_version': settings.CEF_DEVICE_VERSION}
# The CEF library looks for some things in the env object like
# REQUEST_METHOD and any REMOTE_ADDR stuff. Django not only doesn't send
# half the stuff you'd expect, but it specifically doesn't implement
# readline on its FakePayload object so these things fail. I have no idea
# if that's outdated code in Django or not, but andym made this
# <strike>awesome</strike> less crappy so the tests will actually pass.
# In theory, the last part of this if() will never be hit except in the
# test runner. Good luck with that.
if isinstance(env, HttpRequest):
r = env.META.copy()
elif isinstance(env, dict):
r = env
else:
r = {}
# Drop kwargs into CEF config array, then log.
c['environ'] = r
c.update({
'username': username,
'signature': signature,
'data': kwargs,
})
cef_logger.log(severity, name, c) | python | {
"resource": ""
} |
def remove_signals_listeners(instance):
    """
    Utility function that disconnects all listeners from all signals on an
    object.
    """
    if not hasattr(instance, "__listeners__"):
        return
    registry = instance.__listeners__
    # Copy the keys: disconnect() mutates the registry while we iterate.
    for listener in list(registry):
        for sig in registry[listener]:
            sig.disconnect(listener)
q40249 | signal.connect | train | def connect(self, listener, pass_signal=False):
"""
Connect a new listener to this signal
:param listener:
The listener (callable) to add
:param pass_signal:
An optional argument that controls if the signal object is
explicitly passed to this listener when it is being fired.
If enabled, a ``signal=`` keyword argument is passed to the
listener function.
:returns:
None
The listener will be called whenever :meth:`fire()` or
:meth:`__call__()` are called. The listener is appended to the list of
listeners. Duplicates are not checked and if a listener is added twice
it gets called twice.
"""
info = listenerinfo(listener, pass_signal)
self._listeners.append(info)
_logger.debug("connect %r to %r", str(listener), self._name)
# Track listeners in the instances only
if inspect.ismethod(listener):
listener_object = listener.__self__
# Ensure that the instance has __listeners__ property
if not hasattr(listener_object, "__listeners__"):
listener_object.__listeners__ = collections.defaultdict(list)
# Append the signals a listener is connected to
listener_object.__listeners__[listener].append(self) | python | {
"resource": ""
} |
q40250 | signal.disconnect | train | def disconnect(self, listener, pass_signal=False):
"""
Disconnect an existing listener from this signal
:param listener:
The listener (callable) to remove
:param pass_signal:
An optional argument that controls if the signal object is
explicitly passed to this listener when it is being fired.
If enabled, a ``signal=`` keyword argument is passed to the
listener function.
Here, this argument simply aids in disconnecting the right
listener. Make sure to pass the same value as was passed to
:meth:`connect()`
:raises ValueError:
If the listener (with the same value of pass_signal) is not present
:returns:
None
"""
info = listenerinfo(listener, pass_signal)
self._listeners.remove(info)
_logger.debug(
"disconnect %r from %r", str(listener), self._name)
# Mirror the bookkeeping done by connect() for bound methods.
if inspect.ismethod(listener):
listener_object = listener.__self__
if hasattr(listener_object, "__listeners__"):
listener_object.__listeners__[listener].remove(self)
# Remove the listener from the list if any signals connected
if (len(listener_object.__listeners__[listener])) == 0:
del listener_object.__listeners__[listener] | python | {
"resource": ""
} |
q40251 | signal.fire | train | def fire(self, args, kwargs):
"""
Fire this signal with the specified arguments and keyword arguments.
Typically this is used by using :meth:`__call__()` on this object which
is more natural as it does all the argument packing/unpacking
transparently.
"""
# Iterate over a copy so listeners may connect/disconnect during firing.
for info in self._listeners[:]:
if info.pass_signal:
info.listener(*args, signal=self, **kwargs)
else:
info.listener(*args, **kwargs) | python | {
"resource": ""
} |
q40252 | SignalInterceptorMixIn.watchSignal | train | def watchSignal(self, signal):
"""
Setup provisions to watch a specified signal
:param signal:
The :class:`Signal` to watch for.
After calling this method you can use :meth:`assertSignalFired()`
and :meth:`assertSignalNotFired()` with the same signal.
"""
self._extend_state()
# Record every firing as a (signal, args, kwargs) event tuple.
def signal_handler(*args, **kwargs):
self._events_seen.append((signal, args, kwargs))
signal.connect(signal_handler)
# Auto-disconnect at test teardown when running under unittest.
if hasattr(self, 'addCleanup'):
self.addCleanup(signal.disconnect, signal_handler) | python | {
"resource": ""
} |
def assertSignalOrdering(self, *expected_events):
    """
    Assert that a signals were fired in a specific sequence.

    :param expected_events:
        A (varadic) list of events describing the signals that were fired
        Each element is a 3-tuple (signal, args, kwargs) that describes
        the event.

    .. note::
        If you are using :meth:`assertSignalFired()` then the return value
        of that method is a single event that can be passed to this method
    """
    # Map each expected event to the position at which it was actually
    # observed; the observed ordering matches the expected one iff
    # that list of positions is already sorted.
    expected_order = [self._events_seen.index(event)
                      for event in expected_events]
    actual_order = sorted(expected_order)
    self.assertEqual(
        expected_order, actual_order,
        "\nExpected order of fired signals:\n{}\n"
        "Actual order observed:\n{}".format(
            "\n".join(
                "\t{}: {}".format(i, event)
                for i, event in enumerate(expected_events, 1)),
            "\n".join(
                "\t{}: {}".format(i, event)
                for i, event in enumerate(
                    (self._events_seen[idx] for idx in actual_order), 1))))
"resource": ""
} |
def url(viewname, *args, **kwargs):
    """Helper for Django's ``reverse`` in templates.

    :param viewname: name of the URL pattern to resolve.
    :param args: positional arguments forwarded to ``reverse``.
    :param kwargs: keyword arguments forwarded to ``reverse``.
    :returns: the resolved URL path as a string.
    """
    return reverse(viewname, args=args, kwargs=kwargs)
"resource": ""
} |
def _urlencode(items):
    """A Unicode-safe URLencoder.

    NOTE(review): relies on ``urllib.urlencode``, which is Python 2
    only (moved to ``urllib.parse`` in Python 3).
    """
    try:
        return urllib.urlencode(items)
    except UnicodeEncodeError:
        # Python 2's urlencode chokes on non-ASCII unicode values;
        # retry with every value coerced to a UTF-8 byte string.
        return urllib.urlencode([(k, smart_str(v)) for k, v in items])
"resource": ""
} |
def urlencode(txt):
    """Url encode a path.

    NOTE(review): Python 2 only — uses the ``unicode`` builtin and
    ``urllib.quote_plus``.
    """
    # Encode to UTF-8 bytes first so quote_plus escapes byte-wise.
    if isinstance(txt, unicode):
        txt = txt.encode('utf-8')
    return urllib.quote_plus(txt)
"resource": ""
} |
def token_handler_str_default(
        token, dispatcher, node, subnode, sourcepath_stack=(None,)):
    """
    Standard token handler that will return the value, ignoring any
    tokens or strings that have been remapped.

    Yields a single StreamFragment carrying the emitted text, its
    source position (when known) and the current source path.
    """
    # Synthetic nodes carry no integer position; emit the fragment
    # without location information in that case.
    if isinstance(token.pos, int):
        _, lineno, colno = node.getpos(subnode, token.pos)
    else:
        lineno, colno = None, None
    yield StreamFragment(subnode, lineno, colno, None, sourcepath_stack[-1])
"resource": ""
} |
def token_handler_unobfuscate(
        token, dispatcher, node, subnode, sourcepath_stack=(None,)):
    """
    A token handler that will resolve and return the original identifier
    value.
    """
    # Record the pre-obfuscation name only when the identifier was
    # actually remapped (its value differs from the emitted subnode).
    original = (
        node.value
        if isinstance(node, Identifier) and node.value != subnode else
        None
    )
    # Synthetic nodes carry no integer position; omit location info.
    if isinstance(token.pos, int):
        _, lineno, colno = node.getpos(original or subnode, token.pos)
    else:
        lineno, colno = None, None
    yield StreamFragment(
        subnode, lineno, colno, original, sourcepath_stack[-1])
"resource": ""
} |
def read(parser, stream):
    """
    Return an AST from the input ES5 stream.

    Arguments

    parser
        A parser instance.
    stream
        Either a stream object or a callable that produces one. The
        stream object to read from; its 'read' method will be invoked.
        If a callable was provided, the 'close' method on its return
        value will be called to close the stream.
    """
    # Streams produced here via a callable are closed by this
    # function; caller-provided stream objects are left open.
    source = stream() if callable(stream) else stream
    try:
        text = source.read()
        stream_name = getattr(source, 'name', None)
        try:
            result = parser(text)
        except ECMASyntaxError as e:
            # Re-raise the same exception type with the origin of the
            # source appended for easier debugging.
            error_name = repr_compat(stream_name or source)
            raise type(e)('%s in %s' % (str(e), error_name))
    finally:
        if callable(stream):
            source.close()
    # Record where the tree came from so downstream tooling (e.g.
    # sourcemap generation) can reference it.
    result.sourcepath = stream_name
    return result
"resource": ""
} |
def write(
        unparser, nodes, output_stream, sourcemap_stream=None,
        sourcemap_normalize_mappings=True,
        sourcemap_normalize_paths=True,
        source_mapping_url=NotImplemented):
    """
    Write out the node using the unparser into an output stream, and
    optionally the sourcemap using the sourcemap stream.

    Ideally, file objects should be passed to the *_stream arguments, so
    that the name resolution built into the sourcemap builder function
    will be used. Also, if these file objects are opened using absolute
    path arguments, enabling the sourcemap_normalize_paths flag will
    have all paths normalized to their relative form.

    If the provided streams are not anchored on the filesystem, or that
    the provide node was generated from a string or in-memory stream,
    the generation of the sourcemap should be done using the lower level
    `write` function provided by the sourcemap module, which this method
    wraps. Alternatively, the top level node should have its sourcepath
    set to path that this node originated from.

    Arguments

    unparser
        An unparser instance.
    nodes
        The Node or list of Nodes to stream to the output stream with
        the unparser.
    output_stream
        Either a stream object or a callable that produces one. The
        stream object to write to; its 'write' method will be invoked.
        If a callable was provided, the 'close' method on its return
        value will be called to close the stream.
    sourcemap_stream
        If one is provided, the sourcemap will be written out to it.
        Like output_stream, it could also be a callable and be handled
        in the same manner.
        If this argument is the same as output_stream (note: the return
        value of any callables are not compared), the stream object that
        is the same as the output_stream will be used for writing out
        the source map, and the source map will instead be encoded as a
        'data:application/json;base64,' URL.
    sourcemap_normalize_mappings
        Flag for the normalization of the sourcemap mappings; Defaults
        to True to enable a reduction in output size.
    sourcemap_normalize_paths
        If set to true, all absolute paths will be converted to the
        relative form when the sourcemap is generated, if all paths
        provided are in the absolute form.
        Defaults to True to enable a reduction in output size.
    source_mapping_url
        If unspecified, the default derived path will be written as a
        sourceMappingURL comment into the output stream. If explicitly
        specified with a value, that will be written instead. Set to
        None to disable this.
    """
    # Close callbacks for streams this function created via callables.
    closer = []

    def get_stream(stream):
        if callable(stream):
            result = stream()
            closer.append(result.close)
        else:
            result = stream
        return result

    def cleanup():
        # Close in reverse order of acquisition.
        for close in reversed(closer):
            close()

    chunks = None
    if isinstance(nodes, Node):
        chunks = unparser(nodes)
    elif isinstance(nodes, Iterable):
        # Non-Node entries in the iterable are silently skipped.
        raw = [unparser(node) for node in nodes if isinstance(node, Node)]
        if raw:
            chunks = chain(*raw)
    if not chunks:
        raise TypeError('must either provide a Node or list containing Nodes')

    try:
        out_s = get_stream(output_stream)
        # Identity comparison: when the same object was passed for both
        # streams, the sourcemap is embedded into the output stream.
        sourcemap_stream = (
            out_s if sourcemap_stream is output_stream else sourcemap_stream)
        mappings, sources, names = sourcemap.write(
            chunks, out_s, normalize=sourcemap_normalize_mappings)
        if sourcemap_stream:
            sourcemap_stream = get_stream(sourcemap_stream)
            sourcemap.write_sourcemap(
                mappings, sources, names, out_s, sourcemap_stream,
                normalize_paths=sourcemap_normalize_paths,
                source_mapping_url=source_mapping_url,
            )
    finally:
        cleanup()
"resource": ""
} |
def dof(self, index=None):
    """The number of degrees of freedom.

    When ``index`` is None the degrees of freedom of all terms are
    summed; otherwise only the term at ``index`` is counted.  Each
    term contributes ``A.shape[0] * F.shape[1]``.
    """
    if index is not None:
        return self.A[index].shape[0] * self.F[index].shape[1]
    return sum(
        self.A[i].shape[0] * self.F[i].shape[1] for i in range(self.len))
"resource": ""
} |
def beta_hat(self):
    """compute ML beta

    Returns the maximum-likelihood fixed-effect weights as a pair
    ``(beta_hat, beta_hat_any)`` for the regular and the "any" design
    terms, by delegating the linear solve to ``Areml_solver``.
    """
    XKY = self.XKY()
    XanyKY = self.XanyKY()
    beta_hat, beta_hat_any = self.Areml_solver.solve(b_any=XanyKY,b=XKY,check_finite=True)
    return beta_hat, beta_hat_any
"resource": ""
} |
def XanyKXany(self):
    """
    compute self covariance for any

    Returns a (P, K, K) array, K being the number of columns of the
    "any" design; entry p holds ``Fstar_any.T * diag(D[:, p]) *
    Fstar_any``.
    """
    result = np.empty((self.P,self.F_any.shape[1],self.F_any.shape[1]), order='C')
    for p in range(self.P):
        # Weight rows by the diagonal entries for phenotype p, then
        # contract against the design itself.
        X1D = self.Fstar_any * self.D[:,p:p+1]
        X1X2 = X1D.T.dot(self.Fstar_any)
        result[p] = X1X2
    return result
"resource": ""
} |
def XanyKX(self):
    """
    compute cross covariance for any and rest

    Returns a (P, K_any, dof) array; for each phenotype p the "any"
    design is weighted by D[:, p] and contracted against each regular
    term's design, laid out block-by-block along the last axis.
    """
    result = np.empty((self.P,self.F_any.shape[1],self.dof), order='C')
    #This is trivially parallelizable:
    for p in range(self.P):
        FanyD = self.Fstar_any * self.D[:,p:p+1]
        start = 0
        #This is trivially parallelizable:
        for term in range(self.len):
            # Each term occupies cols(F) * rows(A) consecutive columns.
            stop = start + self.F[term].shape[1]*self.A[term].shape[0]
            result[p,:,start:stop] = self.XanyKX2_single_p_single_term(p=p, F1=FanyD, F2=self.Fstar[term], A2=self.Astar[term])
            start = stop
    return result
"resource": ""
} |
def XKX(self):
    """
    compute self covariance for rest

    Assembles the symmetric (dof x dof) fixed-effect covariance block
    matrix term-by-term: only the upper triangle of term blocks is
    computed and the lower triangle is filled in by transposition.
    """
    cov_beta = np.zeros((self.dof, self.dof))
    start_row = 0
    # This is trivially parallelizable:
    for term1 in range(self.len):
        stop_row = start_row + self.A[term1].shape[0] * self.F[term1].shape[1]
        start_col = start_row
        # This is trivially parallelizable:
        for term2 in range(term1, self.len):
            stop_col = start_col + self.A[term2].shape[0] * self.F[term2].shape[1]
            cov_beta[start_row:stop_row, start_col:stop_col] = compute_X1KX2(
                Y=self.Ystar(), D=self.D,
                X1=self.Fstar[term1], X2=self.Fstar[term2],
                A1=self.Astar[term1], A2=self.Astar[term2])
            if term1 != term2:
                # Mirror the off-diagonal block into the lower triangle.
                # Bug fix: the original referenced undefined names
                # (n_weights1/n_weights2) here, raising NameError for
                # any model with more than one term.
                cov_beta[start_col:stop_col, start_row:stop_row] = \
                    cov_beta[start_row:stop_row, start_col:stop_col].T
            start_col = stop_col
        start_row = stop_row
    return cov_beta
"resource": ""
} |
def lint():
    "report pylint results"
    # report according to file extension
    report_formats = {
        ".html": "html",
        ".log": "parseable",
        ".txt": "text",
    }
    lint_build_dir = easy.path("build/lint")
    lint_build_dir.exists() or lint_build_dir.makedirs() # pylint: disable=expression-not-assigned
    argv = []
    # Prefer an explicitly configured rcfile, fall back to pylint.cfg.
    rcfile = easy.options.lint.get("rcfile")
    if not rcfile and easy.path("pylint.cfg").exists():
        rcfile = "pylint.cfg"
    if rcfile:
        argv += ["--rcfile", os.path.abspath(rcfile)]
    if easy.options.lint.get("msg_only", False):
        argv += ["-rn"]
    argv += [
        "--import-graph", (lint_build_dir / "imports.dot").abspath(),
    ]
    argv += support.toplevel_packages()
    sys.stderr.write("Running %s::pylint '%s'\n" % (sys.argv[0], "' '".join(argv)))
    outfile = easy.options.lint.get("output", None)
    if outfile:
        outfile = os.path.abspath(outfile)
    try:
        # Run from the src/ layout directory when present.
        with easy.pushd("src" if easy.path("src").exists() else "."):
            if outfile:
                # Pick the pylint report format from the output file
                # extension (html / parseable / text).
                argv.extend(["-f", report_formats.get(easy.path(outfile).ext, "text")])
                sys.stderr.write("Writing output to %r\n" % (str(outfile),))
                outhandle = open(outfile, "w")
                try:
                    subprocess.check_call(["pylint"] + argv, stdout=outhandle)
                finally:
                    outhandle.close()
            else:
                subprocess.check_call(["pylint"] + argv, )
        sys.stderr.write("paver::lint - No problems found.\n")
    except subprocess.CalledProcessError as exc:
        if exc.returncode & 32:
            # usage error (internal error in this code)
            sys.stderr.write("paver::lint - Usage error, bad arguments %r?!\n" % (argv,))
            sys.exit(exc.returncode)
        else:
            # pylint's exit status is a bit mask of the message
            # categories that were emitted.
            bits = {
                1: "fatal",
                2: "error",
                4: "warning",
                8: "refactor",
                16: "convention",
            }
            sys.stderr.write("paver::lint - Some %s message(s) issued.\n" % (
                ", ".join([text for bit, text in bits.items() if exc.returncode & bit])
            ))
            # Only fatal (1) or error (2) messages abort the build.
            if exc.returncode & 3:
                sys.stderr.write("paver::lint - Exiting due to fatal / error message.\n")
                sys.exit(exc.returncode)
"resource": ""
} |
def generate_contour_data(pid):
    """
    Main function for this program.

    This will read in sensitivity_curves and binary parameters; calculate snrs
    with a matched filtering approach; and then read the contour data out to a file.

    Args:
        pid (obj or dict): GenInput class or dictionary containing all of the input information for
            the generation. See BOWIE documentation and example notebooks for usage of
            this class.
    """
    # check if pid is dictionary or GenInput class
    # if GenInput, change to dictionary
    if isinstance(pid, GenInput):
        pid = pid.return_dict()
    begin_time = time.time()
    WORKING_DIRECTORY = '.'
    # Default the working directory to the current directory when unset.
    if 'WORKING_DIRECTORY' not in pid['general'].keys():
        pid['general']['WORKING_DIRECTORY'] = WORKING_DIRECTORY
    # Generate the contour data.
    running_process = GenProcess(**{**pid, **pid['generate_info']})
    running_process.set_parameters()
    running_process.run_snr()
    # Read out
    file_out = FileReadOut(running_process.xvals, running_process.yvals,
                           running_process.final_dict,
                           **{**pid['general'], **pid['generate_info'], **pid['output_info']})
    print('outputing file:', pid['general']['WORKING_DIRECTORY'] + '/'
          + pid['output_info']['output_file_name'])
    # Dispatch to the writer matching the requested output type, e.g.
    # ``<type>_read_out`` on the FileReadOut instance.
    getattr(file_out, file_out.output_file_type + '_read_out')()
    print(time.time()-begin_time)
    return
"resource": ""
} |
def send_message(self, msg):
    """
    Internal method used to send messages through Clementine remote network protocol.

    Messages are serialized protobufs framed with a big-endian 32-bit
    length prefix.  No-op when the socket is not connected.
    """
    if self.socket is not None:
        msg.version = self.PROTOCOL_VERSION
        serialized = msg.SerializeToString()
        # Frame: 4-byte big-endian length, then the serialized payload.
        data = struct.pack(">I", len(serialized)) + serialized
        #print("Sending message: %s" % msg)
        try:
            self.socket.send(data)
        except Exception as e:
            # NOTE(review): send failures are deliberately swallowed;
            # the commented-out state change suggests disconnect
            # handling was intended here.
            #self.state = "Disconnected"
            pass
"resource": ""
} |
def _connect(self):
    """
    Connects to the server defined in the constructor.

    Opens a TCP connection and sends the initial CONNECT handshake
    carrying the (optional) auth code; playlist-song download and the
    downloader role are both declined.
    """
    self.first_data_sent_complete = False
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.socket.connect((self.host, self.port))
    msg = cr.Message()
    msg.type = cr.CONNECT
    # An unset auth code is transmitted as 0.
    msg.request_connect.auth_code = self.auth_code or 0
    msg.request_connect.send_playlist_songs = False
    msg.request_connect.downloader = False
    self.send_message(msg)
"resource": ""
} |
def playpause(self):
    """
    Toggle playback: send a "playpause" command to the remote player.
    """
    message = cr.Message()
    message.type = cr.PLAYPAUSE
    self.send_message(message)
"resource": ""
} |
def next(self):
    """
    Send a "next" command, asking the player to skip to the next track.
    """
    message = cr.Message()
    message.type = cr.NEXT
    self.send_message(message)
"resource": ""
} |
def previous(self):
    """
    Send a "previous" command, asking the player to go back one track.
    """
    message = cr.Message()
    message.type = cr.PREVIOUS
    self.send_message(message)
"resource": ""
} |
def step_impl06(context, count):
    """Execute fuzzer.

    :param count: number of string variants to generate.
    :param context: test context.
    """
    # Aggressiveness knob forwarded to fuzz_string (assumed: roughly
    # one byte in ``factor`` gets mutated — confirm against the fuzzer).
    factor = 11
    context.fuzzed_string_list = fuzz_string(context.seed, count, factor)
"resource": ""
} |
def step_impl08(context):
    """Create file list.

    :param context: test context.
    """
    assert context.table, "ENSURE: table is provided."
    rows = context.table.rows
    context.file_list = [entry['file_path'] for entry in rows]
"resource": ""
} |
def step_impl11(context, runs):
    """Execute multiple runs.

    :param runs: number of test runs to perform.
    :param context: test context.
    """
    executor = context.fuzz_executor
    executor.run_test(runs)
    stats = executor.stats
    count = stats.cumulated_counts()
    # Every requested run must be reflected in the cumulated stats.
    assert count == runs, "VERIFY: stats available."
"resource": ""
} |
def number_of_modified_bytes(buf, fuzzed_buf):
    """Determine the number of differing bytes.

    :param buf: original buffer.
    :param fuzzed_buf: fuzzed buffer.
    :return: number of different bytes.
    :rtype: int
    """
    return sum(
        1 for position, original in enumerate(buf)
        if original != fuzzed_buf[position])
"resource": ""
} |
def W(self,value):
    """ set fixed effect design

    ``None`` is interpreted as an empty (N x 0) design.  Setting the
    design updates the column count, invalidates cached in-sample
    predictions and residuals, and notifies registered observers.
    """
    # NOTE(review): ``sp`` is presumed to be scipy/numpy — confirm;
    # zeros((N, 0)) yields an empty design with the right row count.
    if value is None: value = sp.zeros((self._N, 0))
    assert value.shape[0]==self._N, 'Dimension mismatch'
    self._K = value.shape[1]
    self._W = value
    self._notify()
    self.clear_cache('predict_in_sample','Yres')
"resource": ""
} |
def obfuscate(
        obfuscate_globals=False, shadow_funcname=False, reserved_keywords=()):
    """
    An example, barebone name obfuscation ruleset

    obfuscate_globals
        If true, identifier names on the global scope will also be
        obfuscated. Default is False.
    shadow_funcname
        If True, obfuscated function names will be shadowed. Default is
        False.
    reserved_keywords
        A tuple of strings that should not be generated as obfuscated
        identifiers.

    Returns a zero-argument ruleset factory suitable for the unparser.
    """
    def name_obfuscation_rules():
        # A fresh Obfuscator per invocation keeps rulesets reusable
        # across independent unparse runs.
        inst = Obfuscator(
            obfuscate_globals=obfuscate_globals,
            shadow_funcname=shadow_funcname,
            reserved_keywords=reserved_keywords,
        )
        return {
            'token_handler': token_handler_unobfuscate,
            'deferrable_handlers': {
                Resolve: inst.resolve,
            },
            'prewalk_hooks': [
                inst.prewalk_hook,
            ],
        }
    return name_obfuscation_rules
"resource": ""
} |
def declared_symbols(self):
    """
    The set of all symbols declared in this scope together with every
    symbol declared by any ancestor scope.
    """
    inherited = self.parent.declared_symbols if self.parent else set()
    return self.local_declared_symbols | inherited
"resource": ""
} |
def global_symbols(self):
    """
    Symbols referenced in this scope that were never declared here or
    in any parent scope; from this scope's point of view they behave
    like globals.
    """
    declared = self.declared_symbols
    return {symbol for symbol in self.referenced_symbols
            if symbol not in declared}
"resource": ""
} |
def global_symbols_in_children(self):
    """
    Undeclared (implicitly global) symbols referenced anywhere inside
    the child scopes of this scope.

    The intended use case is to ban these symbols from being used as
    remapped symbol values.
    """
    collected = set()
    for child in self.children:
        collected.update(
            child.global_symbols, child.global_symbols_in_children)
    return collected
"resource": ""
} |
def close(self):
    """
    Mark the scope as closed, i.e. all symbols have been declared,
    and no further declarations should be done.

    :raises ValueError: if the scope was already closed.
    """
    if self._closed:
        raise ValueError('scope is already marked as closed')
    # By letting parent know which symbols this scope has leaked, it
    # will let them reserve all lowest identifiers first.
    if self.parent:
        for symbol, c in self.leaked_referenced_symbols.items():
            self.parent.reference(symbol, c)
    self._closed = True
"resource": ""
} |
def _reserved_symbols(self):
    """
    Helper property for the build_remap_symbols method. This
    property first resolves _all_ local references from parents,
    skipping all locally declared symbols as the goal is to generate
    a local mapping for them, but in a way not to shadow over any
    already declared symbols from parents, and also the implicit
    globals in all children.

    This is marked "private" as there are a number of computations
    involved, and is really meant for the build_remap_symbols to use
    for its standard flow.
    """
    # In practice, and as a possible optimisation, the parent's
    # remapped symbols table can be merged into this instance, but
    # this bloats memory use and cause unspecified reservations that
    # may not be applicable this or any child scope. So for clarity
    # and purity of references made, this somewhat more involved way
    # is done instead.
    remapped_parents_symbols = {
        self.resolve(v) for v in self.non_local_symbols}
    return (
        # block implicit children globals.
        self.global_symbols_in_children |
        # also not any global symbols
        self.global_symbols |
        # also all remapped parent symbols referenced here
        remapped_parents_symbols
    )
"resource": ""
} |
def build_remap_symbols(self, name_generator, children_only=True):
    """
    This builds the replacement table for all the defined symbols
    for all the children, and this scope, if the children_only
    argument is False.
    """
    if not children_only:
        replacement = name_generator(skip=(self._reserved_symbols))
        # Assign the shortest replacement names to the most frequently
        # referenced locally-declared symbols first (sorted by count,
        # then name, in descending order).
        for symbol, c in reversed(sorted(
                self.referenced_symbols.items(), key=itemgetter(1, 0))):
            if symbol not in self.local_declared_symbols:
                continue
            self.remapped_symbols[symbol] = next(replacement)
    # Children always get their own full remap pass.
    for child in self.children:
        child.build_remap_symbols(name_generator, False)
"resource": ""
} |
def nest(self, node, cls=None):
    """
    Create a nested scope inside this one, bind the provided node to
    it, and register it as a child.

    :param node: the AST node owning the new scope.
    :param cls: scope class to instantiate; defaults to this
        instance's own class.
    :returns: the newly created child scope.
    """
    scope_cls = type(self) if cls is None else cls
    child = scope_cls(node, self)
    self.children.append(child)
    return child
"resource": ""
} |
def declare(self, symbol):
    """
    A catch block owns no declarations of its own apart from the
    caught exception symbol, so everything else is forwarded to the
    parent scope.
    """
    if symbol == self.catch_symbol:
        return
    self.parent.declare(symbol)
"resource": ""
} |
def reference(self, symbol, count=1):
    """
    Track usage of the caught exception symbol locally; every other
    reference is forwarded to the parent scope.
    """
    if symbol != self.catch_symbol:
        self.parent.reference(symbol, count)
        return
    self.catch_symbol_usage += count
"resource": ""
} |
def build_remap_symbols(self, name_generator, children_only=None):
    """
    The children_only flag is inapplicable, but this is included as
    the Scope class is defined like so.

    Here this simply just place the catch symbol with the next
    replacement available.
    """
    replacement = name_generator(skip=(self._reserved_symbols))
    # Only one symbol lives in a catch scope: the caught exception.
    self.remapped_symbols[self.catch_symbol] = next(replacement)
    # also to continue down the children.
    for child in self.children:
        child.build_remap_symbols(name_generator, False)
"resource": ""
} |
def register_reference(self, dispatcher, node):
    """
    Register this identifier to the current scope, and mark it as
    referenced in the current scope.
    """
    # the identifier node itself will be mapped to the current scope
    # for the resolve to work
    # This should probably WARN about the node object being already
    # assigned to an existing scope that isn't current_scope.
    self.identifiers[node] = self.current_scope
    self.current_scope.reference(node.value)
"resource": ""
} |
def shadow_reference(self, dispatcher, node):
    """
    Only simply make a reference to the value in the current scope,
    specifically for the FuncBase type.
    """
    # as opposed to the previous one, only add the value of the
    # identifier itself to the scope so that it becomes reserved.
    # The node is deliberately NOT registered in self.identifiers, so
    # the function name will not be remapped.
    self.current_scope.reference(node.identifier.value)
"resource": ""
} |
def resolve(self, dispatcher, node):
    """
    Map the node back to the scope it was registered against and
    return its (possibly remapped) identifier value; fall back to the
    node's raw value when it was never registered.
    """
    scope = self.identifiers.get(node)
    return scope.resolve(node.value) if scope else node.value
"resource": ""
} |
def walk(self, dispatcher, node):
    """
    Walk through the node with a custom dispatcher for extraction of
    details that are required.
    """
    deferrable_handlers = {
        Declare: self.declare,
        Resolve: self.register_reference,
    }
    layout_handlers = {
        PushScope: self.push_scope,
        PopScope: self.pop_scope,
        PushCatch: self.push_catch,
        # should really be different, but given that the
        # mechanism is within the same tree, the only difference
        # would be sanity check which should have been tested in
        # the first place in the primitives anyway.
        PopCatch: self.pop_scope,
    }
    if not self.shadow_funcname:
        layout_handlers[ResolveFuncName] = self.shadow_reference
    # Clone the dispatcher definitions but swap in the scope-tracking
    # handlers; no tokens need to be emitted in this discovery pass.
    local_dispatcher = Dispatcher(
        definitions=dict(dispatcher),
        token_handler=None,
        layout_handlers=layout_handlers,
        deferrable_handlers=deferrable_handlers,
    )
    # Materialize the generator to force a full traversal so every
    # scope and reference is registered.
    return list(walk(local_dispatcher, node))
"resource": ""
} |
def finalize(self):
    """
    Finalize the run - build the name generator and use it to build
    the remap symbol tables.
    """
    # Closing the global scope propagates leaked references upward so
    # the shortest identifiers can be reserved correctly.
    self.global_scope.close()
    name_generator = NameGenerator(skip=self.reserved_keywords)
    self.global_scope.build_remap_symbols(
        name_generator,
        children_only=not self.obfuscate_globals,
    )
"resource": ""
} |
def prewalk_hook(self, dispatcher, node):
    """
    This is for the Unparser to use as a prewalk hook.

    Performs the discovery walk and builds the remap tables before the
    real unparse pass runs; returns the node unchanged.
    """
    self.walk(dispatcher, node)
    self.finalize()
    return node
"resource": ""
} |
def clean():
    "take out the trash"
    # Honour an explicit docs src_dir override; otherwise prefer a
    # conventional "src" layout, falling back to the project root.
    src_dir = easy.options.setdefault("docs", {}).get('src_dir', None)
    if src_dir is None:
        src_dir = 'src' if easy.path('src').exists() else '.'
    with easy.pushd(src_dir):
        # Sweep compiled/backup python files (*.pyc, *.pyo, *.py~)
        # from every top-level package plus the tests package.
        for pkg in set(easy.options.setup.packages) | set(("tests",)):
            for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
                easy.path(filename).remove()
"resource": ""
} |
q40296 | Generate._set_grid_info | train | def _set_grid_info(self, which, low, high, num, scale, name):
"""Set the grid values for x or y.
Create information for the grid of x and y values.
Args:
which (str): `x` or `y`.
low/high (float): Lowest/highest value for the axis.
num (int): Number of points on axis.
scale (str): Scale of the axis. Choices are 'log' or 'lin'.
name (str): Name representing the axis. See GenerateContainer documentation
for options for the name.
unit (str): Unit for this axis quantity. See GenerateContainer documentation
for options for the units.
Raises:
ValueError: If scale is not 'log' or 'lin'.
"""
setattr(self.generate_info, which + '_low', low)
setattr(self.generate_info, which + '_high', high)
setattr(self.generate_info, 'num_' + which, num)
setattr(self.generate_info, which + 'val_name', name)
if scale not in ['lin', 'log']:
raise ValueError('{} scale must be lin or log.'.format(which))
setattr(self.generate_info, which + 'scale', scale)
return | python | {
"resource": ""
} |
def set_y_grid_info(self, y_low, y_high, num_y, yscale, yval_name):
    """Set the grid values for y.

    Thin wrapper delegating to ``_set_grid_info`` with ``which='y'``.

    Args:
        num_y (int): Number of points on axis.
        y_low/y_high (float): Lowest/highest value for the axis.
        yscale (str): Scale of the axis. Choices are 'log' or 'lin'.
        yval_name (str): Name representing the axis. See
            GenerateContainer documentation for options for the name.
    """
    self._set_grid_info(
        'y', y_low, y_high, num_y, yscale, yval_name)
"resource": ""
} |
def set_x_grid_info(self, x_low, x_high, num_x, xscale, xval_name):
    """Set the grid values for x.

    Thin wrapper delegating to ``_set_grid_info`` with ``which='x'``.

    Args:
        num_x (int): Number of points on axis.
        x_low/x_high (float): Lowest/highest value for the axis.
        xscale (str): Scale of the axis. Choices are 'log' or 'lin'.
        xval_name (str): Name representing the axis. See
            GenerateContainer documentation for options for the name.
    """
    self._set_grid_info(
        'x', x_low, x_high, num_x, xscale, xval_name)
"resource": ""
} |
def add_noise_curve(self, name, noise_type='ASD', is_wd_background=False):
    """Add a noise curve for generation.

    Appends a noise curve for the SNR calculation to the
    ``sensitivity_curves`` list on the sensitivity input, or records
    it as the white-dwarf background noise.

    The file name (minus extension) becomes the curve's label in the
    final output dataset, so simple file names are recommended.

    Args:
        name (str): Name of noise curve including file extension
            inside input_folder.
        noise_type (str, optional): Type of noise. Choices are `ASD`,
            `PSD`, or `char_strain`. Default is ASD.
        is_wd_background (bool, optional): If True, this sensitivity
            is used as the white dwarf background noise. Default is
            False.
    """
    sens = self.sensitivity_input
    if is_wd_background:
        sens.wd_noise = name
        sens.wd_noise_type_in = noise_type
    else:
        # Lazily create the per-instance lists on first use.
        sens.__dict__.setdefault('sensitivity_curves', []).append(name)
        sens.__dict__.setdefault('noise_type_in', []).append(noise_type)
    return
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.