code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def execute_remote(self, remote_target, cmd, **kwargs):
    """
    Executes the given command (with the given arguments)
    on the given remote target of the connected machine.

    :param remote_target: remote target identifier the command is routed to
    :param cmd: the command to execute
    :param kwargs: the command's arguments, passed through to _build_command
    :return: the parsed response built from the transport's reply
    :raises CorruptResponse: when the reply lacks an expected element
    """
    data = self._build_command(cmd, kwargs, self._contexts[-1],
                               remote_target)
    # Hold the client lock while talking to the transport.
    with self._lock:
        rootelem = self.transport.send(data)
    try:
        return self._build_response(rootelem)
    except ElementNotFoundException:
        xlog.exception("XCLIClient.execute")
        raise chained(CorruptResponse(rootelem))
    except Exception:
        xlog.exception("XCLIClient.execute")
        # BUG FIX: was `raise e`, which discards the original traceback on
        # Python 2; a bare `raise` re-raises with the traceback intact.
        raise
def _starfeatures_worker(task):
    '''
    Multiprocessing worker shim around get_starfeatures.

    Unpacks a single task tuple, runs get_starfeatures with it, and
    returns None if anything goes wrong (so one bad light curve does not
    kill the whole pool).
    '''
    try:
        (lcfile, outdir, kdtree, objlist, lcflist,
         neighbor_radius_arcsec, deredden, custom_bandpasses,
         lcformat, lcformatdir) = task
        return get_starfeatures(lcfile, outdir, kdtree, objlist, lcflist,
                                neighbor_radius_arcsec,
                                deredden=deredden,
                                custom_bandpasses=custom_bandpasses,
                                lcformat=lcformat,
                                lcformatdir=lcformatdir)
    except Exception:
        # deliberate best-effort: signal failure with None
        return None
def visible(self):
    """
    Read/write. |True| if axis is visible, |False| otherwise.

    A missing ``delete_`` element means the axis is hidden; otherwise the
    axis is visible exactly when the delete flag is unset.
    """
    delete = self._element.delete_
    if delete is None:
        return False
    # was `return False if delete.val else True` — same truth table
    return not delete.val
async def iterUnivRows(self, prop):
    '''
    Iterate (buid, valu) rows for the given universal prop.
    '''
    penc = prop.encode()
    # scan every key that starts with <prop>\x00
    for _, pval in self.layrslab.scanByPref(penc + b'\x00', db=self.byuniv):
        buid = s_msgpack.un(pval)[0]
        byts = self.layrslab.get(buid + penc, db=self.bybuid)
        if byts is not None:
            valu, indx = s_msgpack.un(byts)
            yield buid, valu
def get_subsites(self):
    """ Returns a list of subsites defined for this site
    :rtype: list[Site]
    """
    endpoint = self._endpoints.get('get_subsites').format(id=self.object_id)
    response = self.con.get(self.build_url(endpoint))
    if not response:
        return []
    payload = response.json()
    # Everything received from cloud must be passed as self._cloud_data_key
    subsites = []
    for site in payload.get('value', []):
        subsites.append(self.__class__(parent=self, **{self._cloud_data_key: site}))
    return subsites
def get_reservations_for_booking_ids(self, booking_ids):
    """Gets booking information for a given list of booking ids.

    :param booking_ids: a booking id or a list of room ids (comma separated).
    :type booking_ids: string
    :return: the decoded JSON body of the API response
    :raises APIError: if the request fails with an HTTP error
    """
    try:
        resp = self._request("GET", "/1.1/space/booking/{}".format(booking_ids))
    # BUG FIX: the original caught `resp.exceptions.HTTPError`, but `resp`
    # is unbound when `_request` raises, so the handler itself crashed with
    # an UnboundLocalError. The intended exception type is requests'.
    # NOTE(review): assumes `requests` is imported at module level (the
    # client's `_request` presumably uses it) — confirm against the file.
    except requests.exceptions.HTTPError as error:
        raise APIError("Server Error: {}".format(error))
    return resp.json()
def derivative(xdata, ydata):
    """
    performs d(ydata)/d(xdata) with nearest-neighbor slopes
    must be well-ordered, returns new arrays [xdata, dydx_data]

    The two endpoints are dropped: each interior point gets the slope
    between its two neighbors.
    """
    mid_points = []
    slopes = []
    last = len(xdata) - 1
    for n in range(1, last):
        mid_points.append(xdata[n])
        span = xdata[n + 1] - xdata[n - 1]
        slopes.append((ydata[n + 1] - ydata[n - 1]) / span)
    return [mid_points, slopes]
def element_count(self):
    """Retrieve the number of elements in this type.

    Returns an int.
    If the Type is not an array or vector, this raises.
    """
    count = conf.lib.clang_getNumElements(self)
    # libclang signals "not an array/vector" with a negative value
    if count >= 0:
        return count
    raise Exception('Type does not have elements.')
def _model_abilities_two_components(self,beta):
    """ Creates the structure of the model - store abilities.

    Runs the score-driven ability recursion over every match, keeping a
    per-match history of both ability components.

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    state_vectors_store_1 : np.array
        History of the first component's ability state vectors, one row
        per appearance count, one column per team
    state_vectors_store_2 : np.array
        History of the second component's ability state vectors

    NOTE(review): the original docstring described (theta, Y, scores)
    return values, which this method does not return.
    """
    # transform latent variables back to their constrained space
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    scale, shape, skewness = self._get_scale_and_shape(parm)
    # running abilities for component 1 (teams) and component 2
    state_vectors = np.zeros(shape=(self.max_team+1))
    state_vectors_2 = np.zeros(shape=(self.max_team_2+1))
    # per-appearance history; +50 rows of slack beyond the observed counts
    state_vectors_store_1 = np.zeros(shape=(int(np.max(self.home_count)+50),int(self.max_team+1)))
    state_vectors_store_2 = np.zeros(shape=(int(np.max(self.home_2_count)+50),int(self.max_team_2+1)))
    theta = np.zeros(shape=(self.data.shape[0]))
    for t in range(0,self.data.shape[0]):
        # prediction: constant plus home-minus-away ability differences
        theta[t] = parm[0] + state_vectors_2[self.home_2_id[t]] - state_vectors_2[self.away_2_id[t]] + state_vectors[self.home_id[t]] - state_vectors[self.away_id[t]]
        # score-driven updates: winners gain ability, losers lose it symmetrically
        state_vectors[self.home_id[t]] += parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors[self.away_id[t]] += -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_2[self.home_2_id[t]] += parm[2]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_2[self.away_2_id[t]] += -parm[2]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        # record the post-update abilities at each team's appearance index
        state_vectors_store_1[int(self.home_count[t]), self.home_id[t]] = state_vectors_store_1[max(0,int(self.home_count[t])-1), self.home_id[t]] + parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_store_1[int(self.away_count[t]), self.away_id[t]] = state_vectors_store_1[max(0,int(self.away_count[t])-1), self.away_id[t]] -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        # NOTE(review): the two lines below scale by parm[1], while the
        # running state_vectors_2 updates above use parm[2] — this looks
        # like a copy-paste inconsistency; confirm against the model spec.
        state_vectors_store_2[int(self.home_2_count[t]), self.home_2_id[t]] = state_vectors_store_2[max(0,int(self.home_2_count[t])-1), self.home_2_id[t]] + parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
        state_vectors_store_2[int(self.away_2_count[t]), self.away_2_id[t]] = state_vectors_store_2[max(0,int(self.away_2_count[t])-1), self.away_2_id[t]] -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness)
    return state_vectors_store_1, state_vectors_store_2
def calc_directional_aop(self, report, parameter, parameter_dir):
    """
    Will calculate the directional AOP (only sub-surface rrs for now) if the direction is defined using @
    e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi>
    :param report: The planarrad report dictionary. should include the quadtables and the directional info
    :param parameter: parameter to calc. Currently only sub-surface reflectance 'rrs' or above-surface 'Rrs'.
    :param parameter_dir: direction string '<zenith>:<azimuth>' in degrees
    :return: the interpolated reflectance per band as a comma-separated string, prefixed with " ,"
    """
    lg.debug('calculating the directional ' + parameter)
    tmp_zenith = []
    # split '<zenith>:<azimuth>' into its two angle components (strings)
    param_zenith = parameter_dir.split(':')[0]
    param_azimuth = parameter_dir.split(':')[1]
    # --------------------------------------------------#
    # find the mean directions values
    # --------------------------------------------------#
    for i_iter in range(0, int(report['vn'][1])):
        # first comma-separated field of each quad row is the mean theta
        tmp_zenith.append(report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0]) #that was a pain!
    tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1]
    zenith = scipy.asarray(tmp_zenith, dtype=float)
    # NOTE(review): scipy.asarray/fromstring/zeros are deprecated aliases of
    # the numpy functions and are removed in modern SciPy — migrate to numpy.
    azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',')
    # --------------------------------------------------#
    # now grab the min and max index of the closest match
    # --------------------------------------------------#
    #min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()
    from scipy import interpolate
    lw = scipy.zeros(int(report['band_count'][1]))
    for j_iter in range(0, int(report['band_count'][1])):
        # NOTE(review): if parameter is neither 'rrs' nor 'Rrs', tmp_lw is
        # unbound below and this loop raises NameError — confirm callers
        # only pass these two values.
        if parameter == 'rrs':
            lg.info('Calculating directional rrs')
            tmp_lw = report['L_w_band_' + str(j_iter + 1)]
        elif parameter == 'Rrs':
            lg.info('Calculating directional Rrs')
            print(report.keys())
            tmp_lw = report['L_it_band_' + str(j_iter + 1)]
        lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1])))
        # for the fist and last line we have to replicate the top and bottom circle
        for i_iter in range(0, int(report['hn'][1])):
            lw_scal[0, i_iter] = tmp_lw[0].split(',')[0]
            lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0]
        for i_iter in range(1, int(report['vn'][1]) - 1):
            lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','), dtype=float)
        # to do, make an array of zeros and loop over each list an apply to eah line. bruteforce
        # NOTE(review): interp2d is deprecated in SciPy >= 1.10.
        f1 = interpolate.interp2d(zenith, azimuth, lw_scal)
        lw[j_iter] = f1(float(param_zenith), float(param_azimuth))
    # ----
    # Now we finally have L_w we calculate the rrs
    # ----
    if parameter == 'rrs':
        tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:]  # ignore the first val as that is depth of val
    elif parameter == 'Rrs':
        tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:]  # ignore the first val as that is depth of val
    # make rrs a string so it can be written to file.
    rrs = ",".join(map(str, tmp_rrs))
    return " ," + rrs
def check_native_jsonfield_postgres_engine(app_configs=None, **kwargs):
    """
    Check that the DJSTRIPE_USE_NATIVE_JSONFIELD isn't set unless Postgres is in use.
    """
    from . import settings as djstripe_settings

    messages = []
    if not djstripe_settings.USE_NATIVE_JSONFIELD:
        return messages

    error_msg = "DJSTRIPE_USE_NATIVE_JSONFIELD is not compatible with engine {engine} for database {name}"
    for db_name, db_config in settings.DATABASES.items():
        # Hi there.
        # You may be reading this because you are using Postgres, but
        # dj-stripe is not detecting that correctly. For example, maybe you
        # are using multiple databases with different engines, or you have
        # your own backend. As long as you are certain you can support jsonb,
        # you can use the SILENCED_SYSTEM_CHECKS setting to ignore this check.
        engine = db_config.get("ENGINE", "")
        if "postgresql" in engine or "postgis" in engine:
            continue
        messages.append(
            checks.Critical(
                error_msg.format(name=repr(db_name), engine=repr(engine)),
                hint="Switch to Postgres, or unset DJSTRIPE_USE_NATIVE_JSONFIELD",
                id="djstripe.C005",
            )
        )
    return messages
def _set_precision(self, precision):
    '''
    function that sets precision to an (hopefully) reasonable guess based
    on the length of the sequence if not explicitly set
    '''
    # minimal sensible branch length: ten mutations' worth when known
    if self.one_mutation:
        self.min_width = 10*self.one_mutation
    else:
        self.min_width = 0.001
    if precision in [0,1,2,3]:
        # an explicitly requested level always wins
        self.precision = precision
        if self.one_mutation and self.one_mutation < 1e-4 and precision < 2:
            self.logger("ClockTree._set_precision: FOR LONG SEQUENCES (>1e4) precision>=2 IS RECOMMENDED."
                        " \n\t **** precision %d was specified by the user"%precision, level=0)
    elif self.one_mutation:
        # otherwise guess from the minimal sensible branch length
        self.precision = 1 if self.one_mutation > 1e-4 else 2
    else:
        self.precision = 1
    self.logger("ClockTree: Setting precision to level %s"%self.precision, 2)
    # translate the chosen level into grid sizes / integration points
    if self.precision == 0:
        sizes = (ttconf.NODE_GRID_SIZE_ROUGH,
                 ttconf.BRANCH_GRID_SIZE_ROUGH,
                 ttconf.N_INTEGRAL_ROUGH)
    elif self.precision == 2:
        sizes = (ttconf.NODE_GRID_SIZE_FINE,
                 ttconf.BRANCH_GRID_SIZE_FINE,
                 ttconf.N_INTEGRAL_FINE)
    elif self.precision == 3:
        sizes = (ttconf.NODE_GRID_SIZE_ULTRA,
                 ttconf.BRANCH_GRID_SIZE_ULTRA,
                 ttconf.N_INTEGRAL_ULTRA)
    else:
        sizes = (ttconf.NODE_GRID_SIZE,
                 ttconf.BRANCH_GRID_SIZE,
                 ttconf.N_INTEGRAL)
    self.node_grid_points, self.branch_grid_points, self.n_integral = sizes
def error_handler(self, e, request, meth, em_format):
    """
    Override this method to add handling of errors customized for your
    needs
    """
    if isinstance(e, FormValidationError):
        return self.form_validation_response(e)
    if isinstance(e, TypeError):
        # A TypeError from the handler almost always means the caller's
        # parameters did not match the handler method's signature.
        result = rc.BAD_REQUEST
        sig = HandlerMethod(meth).signature
        msg = 'Method signature does not match.\n\n'
        if sig:
            msg += 'Signature should be: %s' % sig
        else:
            msg += 'Resource does not expect any parameters.'
        if self.display_errors:
            msg += '\n\nException was: %s' % str(e)
        result.content = format_error(msg)
        return result
    if isinstance(e, Http404):
        return rc.NOT_FOUND
    if isinstance(e, HttpStatusCode):
        return e.response
    # On errors (like code errors), we'd like to be able to give crash
    # reports to both admins and also the calling user. There's two
    # setting parameters for this:
    #  - `PISTON_EMAIL_ERRORS`: Will send a Django formatted error email
    #    to people in `settings.ADMINS`.
    #  - `PISTON_DISPLAY_ERRORS`: Will return a simple traceback to the
    #    caller, so he can tell you what error they got.
    # If `PISTON_DISPLAY_ERRORS` is not enabled, the caller will receive
    # a basic "500 Internal Server Error" message.
    exc_type, exc_value, tb = sys.exc_info()
    rep = ExceptionReporter(request, exc_type, exc_value, tb.tb_next)
    if self.email_errors:
        self.email_exception(rep)
    if not self.display_errors:
        raise
    return HttpResponseServerError(
        format_error('\n'.join(rep.format_exception())))
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace):
    # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool) -> None
    """Build the text of the file and write the file.

    Assembles the reST page for one package (heading, subpackage TOC,
    submodule sections) and writes it — plus one file per submodule when
    ``opts.separatemodules`` is set. When ``opts.templates`` is given the
    assembled text is replaced by a rendered Jinja template instead.
    """
    use_templates = False
    fullname = makename(master_package, subroot)
    if opts.templates:
        use_templates = True
        template_loader = FileSystemLoader(opts.templates)
        template_env = SandboxedEnvironment(loader=template_loader)
    text = format_heading(
        1, ('%s package' if not is_namespace else "%s namespace") % fullname)
    if opts.modulefirst and not is_namespace:
        text += format_directive(subroot, master_package)
        text += '\n'
    # build a list of directories that are subpackages (contain an INITPY file)
    subs = [sub for sub in subs if path.isfile(path.join(root, sub, INITPY))]
    # if there are some package directories, add a TOC for these subpackages
    if subs:
        text += format_heading(2, 'Subpackages')
        text += '.. toctree::\n\n'
        for sub in subs:
            text += ' %s.%s\n' % (makename(master_package, subroot), sub)
        text += '\n'
    submods = [path.splitext(sub)[0] for sub in py_files
               if not shall_skip(path.join(root, sub), opts) and
               sub != INITPY]
    if use_templates:
        try:
            package_ns = _get_mod_ns(name=subroot, fullname=fullname,
                                     includeprivate=opts.includeprivate)
            package_ns['subpackages'] = subs
            package_ns['submodules'] = submods
        except ImportError as e:
            # NOTE(review): if this import fails, `package_ns` stays unbound
            # and the template.render(**package_ns) below raises NameError —
            # confirm whether that is the intended failure mode.
            _warn('failed to import %r: %s' % (fullname, e))
    if submods:
        text += format_heading(2, 'Submodules')
        if opts.separatemodules:
            # each submodule gets its own page, referenced from a toctree
            text += '.. toctree::\n\n'
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                text += ' %s\n' % modfile
                # generate separate file for this module
                if not opts.noheadings:
                    filetext = format_heading(1, '%s module' % modfile)
                else:
                    filetext = ''
                filetext += format_directive(makename(subroot, submod),
                                             master_package)
                if use_templates:
                    try:
                        mod_ns = _get_mod_ns(
                            name=submod, fullname=modfile,
                            includeprivate=opts.includeprivate)
                        template = template_env.get_template('module.rst')
                        add_get_members_to_template_env(
                            template_env, modfile, opts)
                        filetext = template.render(**mod_ns)
                    except ImportError as e:
                        _warn('failed to import %r: %s' % (modfile, e))
                write_file(modfile, filetext, opts)
        else:
            # submodules documented inline on the package page
            for submod in submods:
                modfile = makename(master_package, makename(subroot, submod))
                if not opts.noheadings:
                    text += format_heading(2, '%s module' % modfile)
                text += format_directive(makename(subroot, submod),
                                        master_package)
                text += '\n'
        text += '\n'
    if use_templates:
        # the rendered template replaces all text assembled above
        template = template_env.get_template('package.rst')
        add_get_members_to_template_env(template_env, fullname, opts)
        text = template.render(**package_ns)
    else:
        if not opts.modulefirst and not is_namespace:
            text += format_heading(2, 'Module contents')
            text += format_directive(subroot, master_package)
    write_file(makename(master_package, subroot), text, opts)
def executed_block_set(trace):
    """
    Given an execution trace, returns a python set object containing the names of each block for which the user code
    was executed. Block names can be set via set_debug_name().
    """
    return {entry[get_trace_index('execute', 'BLOCK_NAME')]
            for entry in trace
            if entry[0] == 'execute'}
def find_line_containing(strings: Sequence[str], contents: str) -> int:
    """
    Finds the index of the line in ``strings`` that contains ``contents``,
    or ``-1`` if none is found.
    """
    for index, line in enumerate(strings):
        # `in` is the idiomatic equivalent of line.find(contents) != -1
        if contents in line:
            return index
    return -1
def trim_ordered_range_list(ranges, start, finish):
    """A function to help with slicing a mapping.

    Start with a list of ranges and get another list of ranges constrained
    by start (0-indexed) and finish (1-indexed).

    :param ranges: ordered non-overlapping ranges on the same chromosome
    :param start: start 0-indexed
    :param finish: ending 1-indexed
    :type ranges: GenomicRange []
    :type start: Int
    :type finish: Int
    :return: non-overlapping ranges on same chromosome constrained by start and finish
    :rtype: GenomicRange []
    """
    # BUG FIX: the original iterated `self.ranges` (this is a free function,
    # not a method) and read `index` before it was ever assigned — either
    # would crash on first call. `index` counts how many bases precede the
    # current range; it starts at zero.
    index = 0
    keep_ranges = []
    for inrng in ranges:
        original_rng = inrng
        rng = inrng.copy()  # we will be passing it along and possibly be cutting it
        done = False
        if start >= index and start < index + original_rng.length():
            # the slice begins inside this range: trim its start
            rng.start = original_rng.start + (start - index)
        if finish > index and finish <= index + original_rng.length():
            # the slice ends inside this range: trim its end
            rng.end = original_rng.start + (finish - index) - 1
            done = True
        if finish <= index + original_rng.length():
            # we are in the last range we need
            index += original_rng.length()
            keep_ranges.append(rng)
            break
        if index + original_rng.length() < start:
            # range lies entirely before the slice — skip it
            index += original_rng.length()
            continue
        keep_ranges.append(rng)
        index += original_rng.length()
        if index > finish:
            break
        if done:
            break
    return keep_ranges
def run(self):
    """Wrap _run method."""
    # Catch everything the running thread raises and report it to the
    # parent process through the action queue.
    try:
        self._run()
    except Exception:  # pylint: disable=broad-except
        failure = ServiceCheckDiedError(self.name, traceback.format_exc())
        self.action.put(failure)
def fix_config(self, options):
    """
    Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(CommandlineToAny, self).fix_config(options)
    opt = "wrapper"
    # setdefault inserts only when the key is missing — same as the
    # original `if opt not in ...` checks.
    options.setdefault(opt, "weka.core.classes.OptionHandler")
    self.help.setdefault(opt, "The name of the wrapper class to use (string).")
    return options
def external(name, value, dtype=tf.sg_floatx, summary=True, regularizer=None, trainable=True):
    r"""Creates a tensor variable of which initial values are `value`.

    For example,

    ```
    external("external", [3,3,1,2])
    => [3. 3. 1. 2.]
    ```

    Args:
      name: The name of new variable.
      value: A constant value (or list) of output type `dtype`.
      dtype: The type of the elements of the resulting tensor.
      summary: If True, add this constant to tensor board summary.
      regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable
        will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization
      trainable: If True, add this constant to trainable collection. Default is True.

    Returns:
      A `Variable`. Has the same contents as `value` of `dtype`.
    """
    # variable initialized from the constant value
    init = tf.constant(value, dtype=dtype)
    var = tf.get_variable(name,
                          initializer=init,
                          regularizer=regularizer,
                          trainable=trainable)
    # optionally expose the parameter on tensorboard
    if summary:
        tf.sg_summary_param(var)
    return var
def write_points(self, data, time_precision='s', *args, **kwargs):
    """Write to multiple time series names.

    An example data blob is:

    data = [
        {
            "points": [[12]],
            "name": "cpu_load_short",
            "columns": ["value"]
        }
    ]

    :param data: A list of dicts in InfluxDB 0.8.x data format.
    :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
        or 'u'.
    :param batch_size: [Optional] Value to write the points in batches
        instead of all at one time. Useful for when doing data dumps from
        one database to another or when doing a massive write operation
    :type batch_size: int
    :return: True on success.
    """
    def list_chunks(seq, size):
        """Yield successive size-sized chunks from seq."""
        # BUG FIX: was `xrange`, which is Python-2-only (NameError on
        # Python 3); `range` behaves identically here on both.
        for i in range(0, len(seq), size):
            yield seq[i:i + size]

    batch_size = kwargs.get('batch_size')
    if batch_size and batch_size > 0:
        for series in data:
            name = series.get('name')
            columns = series.get('columns')
            point_list = series.get('points', [])
            for batch in list_chunks(point_list, batch_size):
                self._write_points(
                    data=[{
                        "points": batch,
                        "name": name,
                        "columns": columns
                    }],
                    time_precision=time_precision)
        return True

    return self._write_points(data=data,
                              time_precision=time_precision)
def _init_virtual_io(self, file):
    """Initialize callback functions for sf_open_virtual().

    Builds the five libsndfile virtual-I/O callbacks over the given
    Python file-like object and returns a populated SF_VIRTUAL_IO struct.
    The callbacks are stored on self._virtual_io so they outlive this
    call — cffi callbacks are garbage-collected otherwise.
    """
    @_ffi.callback("sf_vio_get_filelen")
    def vio_get_filelen(user_data):
        # measure size by seeking to the end, then restore the position
        curr = file.tell()
        file.seek(0, SEEK_END)
        size = file.tell()
        file.seek(curr, SEEK_SET)
        return size

    @_ffi.callback("sf_vio_seek")
    def vio_seek(offset, whence, user_data):
        file.seek(offset, whence)
        return file.tell()

    @_ffi.callback("sf_vio_read")
    def vio_read(ptr, count, user_data):
        # first try readinto(), if not available fall back to read()
        try:
            buf = _ffi.buffer(ptr, count)
            data_read = file.readinto(buf)
        except AttributeError:
            data = file.read(count)
            data_read = len(data)
            buf = _ffi.buffer(ptr, data_read)
            buf[0:data_read] = data
        return data_read

    @_ffi.callback("sf_vio_write")
    def vio_write(ptr, count, user_data):
        buf = _ffi.buffer(ptr, count)
        data = buf[:]
        written = file.write(data)
        # write() returns None for file objects in Python <= 2.7:
        if written is None:
            written = count
        return written

    @_ffi.callback("sf_vio_tell")
    def vio_tell(user_data):
        return file.tell()

    # Note: the callback functions must be kept alive!
    self._virtual_io = {'get_filelen': vio_get_filelen,
                        'seek': vio_seek,
                        'read': vio_read,
                        'write': vio_write,
                        'tell': vio_tell}
    return _ffi.new("SF_VIRTUAL_IO*", self._virtual_io)
def fit_interval_censoring(
    self,
    df,
    lower_bound_col,
    upper_bound_col,
    event_col=None,
    ancillary_df=None,
    show_progress=False,
    timeline=None,
    weights_col=None,
    robust=False,
    initial_point=None,
    entry_col=None,
):
    """
    Fit the accelerated failure time model to an interval-censored dataset.

    (Note: the original docstring said "left-censored"; this method sets
    CensoringType.INTERVAL and uses the interval-censoring likelihood.)

    Parameters
    ----------
    df: DataFrame
        a Pandas DataFrame with necessary columns ``lower_bound_col``, ``upper_bound_col`` (see below),
        and any other covariates or weights.
    lower_bound_col: string
        the name of the column in DataFrame that contains the subjects'
        left-most observation.
    upper_bound_col: string
        the name of the column in DataFrame that contains the subjects'
        right-most observation. Values can be np.inf (and should be if the subject is right-censored).
    event_col: string, optional
        the name of the column in DataFrame that contains the subjects' death
        observation. If left as None, will be inferred from the start and stop columns (lower_bound==upper_bound means uncensored)
    show_progress: boolean, optional (default=False)
        since the fitter is iterative, show convergence
        diagnostics. Useful if convergence is failing.
    ancillary_df: None, boolean, or DataFrame, optional (default=None)
        Choose to model the ancillary parameters.
        If None or False, explicitly do not fit the ancillary parameters using any covariates.
        If True, model the ancillary parameters with the same covariates as ``df``.
        If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.
    timeline: array, optional
        Specify a timeline that will be used for plotting and prediction
    weights_col: string
        the column in DataFrame that specifies weights per observation.
    robust: boolean, optional (default=False)
        Compute the robust errors using the Huber sandwich estimator.
    initial_point: (d,) numpy array, optional
        initialize the starting point of the iterative
        algorithm. Default is the zero vector.
    entry_col: specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See
        the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__

    Returns
    -------
    self:
        self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more

    Examples
    --------
    >>> from lifelines import WeibullAFTFitter
    >>>
    >>> df = pd.DataFrame({
    >>>     'start': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>>     'stop':  [5, 3, 9, 8, 7, 4, 8, 5, 2, 5, 6, np.inf],  # last subject is right-censored
    >>>     'E':     [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
    >>>     'var':   [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
    >>>     'age':   [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
    >>> })
    >>>
    >>> aft = WeibullAFTFitter()
    >>> aft.fit_interval_censoring(df, 'start', 'stop', 'E')
    >>> aft.print_summary()
    >>> aft.predict_median(df)
    """
    self.lower_bound_col = lower_bound_col
    self.upper_bound_col = upper_bound_col
    self._time_cols = [lower_bound_col, upper_bound_col]
    self._censoring_type = CensoringType.INTERVAL
    df = df.copy()
    lower_bound = pass_for_numeric_dtypes_or_raise_array(df.pop(lower_bound_col)).astype(float)
    upper_bound = pass_for_numeric_dtypes_or_raise_array(df.pop(upper_bound_col)).astype(float)
    if event_col is None:
        # infer event observation: equal bounds mean an exactly-observed event
        event_col = "E"
        df["E"] = lower_bound == upper_bound
    # sanity-check consistency between the bounds and the event column
    if ((lower_bound == upper_bound) != df[event_col]).any():
        raise ValueError(
            "For all rows, lower_bound == upper_bound if and only if event observed = 1 (uncensored). Likewise, lower_bound < upper_bound if and only if event observed = 0 (censored)"
        )
    if (lower_bound > upper_bound).any():
        raise ValueError("All upper bound measurements must be greater than or equal to lower bound measurements.")
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    self._fit(
        self._log_likelihood_interval_censoring,
        df,
        # np.clip caps np.inf upper bounds at 1e25 — presumably to keep the
        # optimizer's arithmetic finite; TODO confirm the chosen cap.
        (lower_bound.values, np.clip(upper_bound.values, 0, 1e25)),
        event_col=event_col,
        ancillary_df=ancillary_df,
        show_progress=show_progress,
        timeline=timeline,
        weights_col=weights_col,
        robust=robust,
        initial_point=initial_point,
        entry_col=entry_col,
    )
    return self
def divideHosts(self, hosts, qty):
    """Divide `qty` worker processes among `hosts`.

    :param hosts: list of (hostname, worker_count) tuples; mutated in
        place and also returned.
    :param qty: total number of workers requested.
    :return: the adjusted hosts list whose worker counts sum to `qty`.
    :raises Exception: if the broker hostname is a loopback reference
        while multiple hosts are present and no SSH tunnel is used.
    """
    maximumWorkers = sum(host[1] for host in hosts)

    # If specified amount of workers is greater than sum of each specified:
    # hand out the surplus one worker at a time, round-robin over hosts.
    if qty > maximumWorkers:
        index = 0
        while qty > maximumWorkers:
            hosts[index] = (hosts[index][0], hosts[index][1] + 1)
            index = (index + 1) % len(hosts)
            maximumWorkers += 1

    # If specified amount of workers is lower than sum of each specified:
    # drop whole hosts from the tail, shrinking the last kept host so the
    # total matches qty exactly.
    elif qty < maximumWorkers:
        while qty < maximumWorkers:
            maximumWorkers -= hosts[-1][1]
            if qty > maximumWorkers:
                # Keep this host but resize it to exactly the remainder.
                hosts[-1] = (hosts[-1][0], qty - maximumWorkers)
                maximumWorkers += hosts[-1][1]
            else:
                del hosts[-1]

    # Checking if the broker is externally routable: a loopback broker
    # address cannot be reached by workers running on other machines.
    if self.externalHostname in utils.loopbackReferences and \
            len(hosts) > 1 and \
            not self.tunnel:
        raise Exception("\n"
                        "Could not find route from external worker to the "
                        "broker: Unresolvable hostname or IP address.\n "
                        "Please specify your externally routable hostname "
                        "or IP using the --external-hostname parameter or "
                        "use the --tunnel flag.")

    return hosts
def collect_variables(self, selections) -> None:
    """Apply method |ExchangeItem.collect_variables| of the base class
    |ExchangeItem| and determine the `ndim` attribute of the current
    |ChangeItem| object afterwards.

    The value of `ndim` depends on whether the values of the target
    variable or its time series is of interest:

    >>> from hydpy.core.examples import prepare_full_example_2
    >>> hp, pub, TestIO = prepare_full_example_2()
    >>> from hydpy.core.itemtools import SetItem
    >>> for target in ('states.lz', 'states.lz.series',
    ...                'states.sm', 'states.sm.series'):
    ...     item = GetItem('hland_v1', target)
    ...     item.collect_variables(pub.selections)
    ...     print(item, item.ndim)
    GetItem('hland_v1', 'states.lz') 0
    GetItem('hland_v1', 'states.lz.series') 1
    GetItem('hland_v1', 'states.sm') 1
    GetItem('hland_v1', 'states.sm.series') 2
    """
    super().collect_variables(selections)
    # Build a stable (name-sorted) mapping from each device to the
    # "<device>_<target>" label used when exchanging values.
    for device in sorted(self.device2target.keys(), key=lambda x: x.name):
        self._device2name[device] = f'{device.name}_{self.target}'
    # All targets share the same dimensionality, so only the first one is
    # inspected; a time series adds one (time) axis on top of the
    # variable's own NDIM.
    for target in self.device2target.values():
        self.ndim = target.NDIM
        if self.targetspecs.series:
            self.ndim += 1
        break
|ExchangeItem| and determine the `ndim` attribute of the current
|ChangeItem| object afterwards.
The value of `ndim` depends on whether the values of the target
variable or its time series is of interest:
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, TestIO = prepare_full_example_2()
>>> from hydpy.core.itemtools import SetItem
>>> for target in ('states.lz', 'states.lz.series',
... 'states.sm', 'states.sm.series'):
... item = GetItem('hland_v1', target)
... item.collect_variables(pub.selections)
... print(item, item.ndim)
GetItem('hland_v1', 'states.lz') 0
GetItem('hland_v1', 'states.lz.series') 1
GetItem('hland_v1', 'states.sm') 1
GetItem('hland_v1', 'states.sm.series') 2 |
def gt(self, v, limit=None, offset=None):
    """Return the members of the sorted set whose scores are strictly
    greater than ``v``.

    ``limit``/``offset`` paginate the result; an offset of 0 is implied
    whenever a limit is given without one.
    """
    if offset is None and limit is not None:
        offset = 0
    # "(<score>" is the Redis syntax for an exclusive lower bound.
    lower_bound = "(%f" % v
    return self.zrangebyscore(lower_bound, self._max_score,
                              start=offset, num=limit)
greater than v. |
def makeReadPacket(ID, reg, values=None):
    """
    Creates a packet that reads the register(s) of servo ID at location reg. Make
    sure the values are in little endian (use Packet.le() if necessary) for 16 b
    (word size) values.
    """
    # Delegate to the generic packet builder with the READ instruction.
    return makePacket(ID, xl320.XL320_READ, reg, values)
sure the values are in little endian (use Packet.le() if necessary) for 16 b
(word size) values. |
def get_column_def(self):
    """
    Return this column's definition clause for a CQL CREATE TABLE
    statement, e.g. ``"prefs map<int> static"``.
    """
    # Non-static columns keep the (empty) third field, so the trailing
    # separator space is preserved exactly as before.
    static_marker = "static" if self.static else ""
    concrete_type = self.db_type.format(self.value_type.db_type)
    return " ".join([self.cql, concrete_type, static_marker])
def GET_account_balance(self, path_info, account_addr, token_type):
    """
    Get the balance of a particular token for an account.

    :param path_info: request path info (part of the handler signature; unused here).
    :param account_addr: account address to query; validated before use.
    :param token_type: token type to query; validated before use.

    Replies JSON {'balance': ...} on success, or {'error': ...} with an
    appropriate HTTP status code (400 for bad input, upstream status or
    500 for backend failures).
    """
    if not check_account_address(account_addr):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)

    if not check_token_type(token_type):
        return self._reply_json({'error': 'Invalid token type'}, status_code=400)

    blockstackd_url = get_blockstackd_url()
    res = blockstackd_client.get_account_balance(account_addr, token_type, hostport=blockstackd_url)
    if json_is_error(res):
        log.error("Failed to get account balance for {} {}: {}".format(account_addr, token_type, res['error']))
        return self._reply_json({'error': 'Failed to get balance of {} for {}: {}'.format(token_type, account_addr, res['error'])}, status_code=res.get('http_status', 500))

    self._reply_json({'balance': str(res)})  # NOTE: use a string, since this can be too big for js clients to parse
    return
Returns {'balance': ...} |
def attributs(self):
    """
    The user attributes, defined as the fields on the :attr:`user` object.

    :return: a :class:`dict` mapping the :attr:`user` object's field names to
        their values. The ``user_permissions`` and ``groups`` many-to-many
        relations are unfolded into plain lists. If the user does not exist,
        the returned :class:`dict` is empty.
    :rtype: dict
    """
    if self.user:
        attr = {}
        # _meta.get_fields() is from the new documented _meta interface in django 1.8
        try:
            field_names = [
                field.attname for field in self.user._meta.get_fields()
                if hasattr(field, "attname")
            ]
        # backward compatibility with django 1.7
        except AttributeError:  # pragma: no cover (only used by django 1.7)
            field_names = self.user._meta.get_all_field_names()
        for name in field_names:
            attr[name] = getattr(self.user, name)
        # unfold user_permissions many to many relation into
        # (dotted model path, codename) pairs
        if 'user_permissions' in attr:
            attr['user_permissions'] = [
                (
                    u"%s.%s" % (
                        perm.content_type.model_class().__module__,
                        perm.content_type.model_class().__name__
                    ),
                    perm.codename
                ) for perm in attr['user_permissions'].filter()
            ]
        # unfold group many to many relation into a list of group names
        if 'groups' in attr:
            attr['groups'] = [group.name for group in attr['groups'].filter()]
        return attr
    else:
        return {}
:return: a :class:`dict` with the :attr:`user` object fields. Attributes may be
If the user do not exists, the returned :class:`dict` is empty.
:rtype: dict |
def stop_app(self):
    """Overrides superclass.

    Shuts down the SL4A app on the device: politely closes the session,
    tears down the socket connection and event dispatcher, then
    force-stops the app. The adb host port is always released, even if
    any shutdown step fails.
    """
    try:
        if self._conn:
            # Be polite; let the dest know we're shutting down.
            try:
                self.closeSl4aSession()
            except Exception:
                # Best-effort only: log and continue tearing down.
                # (Was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.)
                self.log.exception('Failed to gracefully shut down %s.',
                                   self.app_name)
            # Close the socket connection.
            self.disconnect()
        self.stop_event_dispatcher()
        # Terminate the app
        self._adb.shell('am force-stop com.googlecode.android_scripting')
    finally:
        # Always clean up the adb port
        self.clear_host_port()
def check_trademark_symbol(text):
    """Use the trademark symbol instead of (TM).

    Flags literal "(TM)" occurrences in ``text`` and suggests the ™
    symbol; returns whatever ``existence_check`` produces.
    """
    err = "typography.symbols.trademark"
    msg = u"(TM) is a goofy alphabetic approximation, use the symbol ™."
    # Raw string: "\(" in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    regex = r"\(TM\)"
    return existence_check(
        text, [regex], err, msg, max_errors=3, require_padding=False)
def qteKeyPress(self, msgObj):
    """
    Record the key presses reported by the key handler and interpret
    them as a repeat count for the next macro.

    Digits typed via 'self-insert' accumulate into ``self.repeat_cnt``;
    a second direct invocation of this macro terminates digit entry,
    after which the next completed macro is repeated accordingly.
    """
    # Unpack the data structure.
    (srcObj, keysequence, macroName) = msgObj.data
    key = keysequence.toQKeyEventList()[-1]

    # If the current key did not complete a macro ignore it.
    if macroName is None:
        return

    if self.input_complete:
        # The user has terminated reading digits by calling this
        # macro directly, ie. the 'macroName ==
        # self.qteMacroName()' branch below ran previously.
        self.qteRepeatTheMacro(msgObj)
    elif (macroName == 'self-insert') and (key.text().isdigit()):
        # User typed a digit.
        self.repeat_cnt += key.text()
    elif macroName == self.qteMacroName():
        # User called us again. This completes reading the
        # digits. The next macro is executed self.prefix_num
        # times.
        self.input_complete = True
    else:
        # If we got until here we know that a macro is supposed to
        # be executed, that it is not the self-insert macro for a
        # digit, and that it was not another call to this macro to
        # complete the input explicitly.
        self.qteRepeatTheMacro(msgObj)
def instance():
    """Return a PyVabamorf instance.

    Reuses the previously initialised singleton when one exists for this
    process; otherwise (first call, or the PID changed after a fork)
    builds a fresh ``Vabamorf`` and caches it on the class.
    """
    # A forked child inherits the parent's cached instance, which wraps
    # process-local native state, so the cache is keyed by PID.
    current_pid = os.getpid()
    if getattr(Vabamorf, 'pid', None) != current_pid:
        Vabamorf.pid = current_pid
        Vabamorf.morf = Vabamorf()
    return Vabamorf.morf
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates new instance in case the
process has been forked. |
def img(url, alt='', classes='', style=''):
    '''
    Image tag helper.

    Builds an ``<img>`` tag for ``url``. Relative paths (anything that
    is not an absolute http(s) URL or a root-relative path) are prefixed
    with ``settings.STATIC_URL``.
    '''
    # Fixed: https:// URLs were previously treated as relative and had
    # STATIC_URL prepended, producing broken links.
    if not url.startswith(('http://', 'https://')) and not url.startswith('/'):
        # add STATIC_URL for relative paths
        url = settings.STATIC_URL + url
    attr = {
        'class': classes,
        'alt': alt,
        'style': style,
        'src': url
    }
    return html.tag('img', '', attr)
def get_payload(self):
    """Return the payload: the software version bytes followed by the
    hardware version, product group and product type (one byte each)."""
    trailer = bytes([self.hardware_version,
                     self.product_group,
                     self.product_type])
    return self._software_version + trailer
def deleteFile(self, CorpNum, MgtKeyType, MgtKey, FileID, UserID=None):
    """ Delete an attached file from a tax invoice.

    args
        CorpNum : member company registration number
        MgtKeyType : management-key type, one of ['SELL','BUY','TRUSTEE']
        MgtKey : partner management key
        FileID : identifier of the file to delete
        UserID : Popbill member user ID
    return
        processing result, consisting of code and message
    raise
        PopbillException
    """
    # Input validation; error messages are user-facing (Korean) and must
    # stay as-is.
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "관리번호 형태가 올바르지 않습니다.")
    if MgtKey == None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
    if FileID == None or FileID == "":
        raise PopbillException(-99999999, "파일아이디가 입력되지 않았습니다.")

    postData = ''

    return self._httppost('/Taxinvoice/' + MgtKeyType + "/" + MgtKey + "/Files/" + FileID, postData, CorpNum,
                          UserID, 'DELETE')
args
CorpNum : 회원 사업자 번호
MgtKeyType : 관리번호 유형 one of ['SELL','BUY','TRUSTEE']
MgtKey : 파트너 관리번호
UserID : 팝빌 회원아이디
return
처리결과. consist of code and message
raise
PopbillException |
def to_fastq_str(self):
    """
    :return: string representation of this NGS read in FastQ format —
        four lines (@name, sequence, +name, qualities), no trailing
        newline.
    """
    lines = ["@" + self.name,
             self.sequenceData,
             "+" + self.name,
             self.seq_qual]
    return "\n".join(lines)
def analyze(self, input_directory, output_directory, **kwargs):
    """
    Run all the analysis saved in self._analyses, sorted by test_id.
    This is useful when Naarad() is used by other programs and multiple analyses are run.
    In naarad CLI mode, len(_analyses) == 1.

    :param: input_directory: location of log files
    :param: output_directory: root directory for analysis output
    :param: **kwargs: Optional keyword args ('config' to create an analysis
        on the fly, 'args' for CLI-mode argument processing)
    :return: int: status code (single analysis: its own status; multiple:
        CONSTANTS.ERROR if any failed, else CONSTANTS.OK).
    """
    is_api_call = True
    if len(self._analyses) == 0:
        # No pre-registered analyses: a 'config' kwarg is mandatory to
        # build one on the fly.
        if 'config' not in kwargs.keys():
            return CONSTANTS.ERROR
        self.create_analysis(kwargs['config'])
    if 'args' in kwargs:
        # Presence of 'args' indicates CLI mode rather than an API call.
        self._process_args(self._analyses[0], kwargs['args'])
        is_api_call = False
    error_count = 0
    self._input_directory = input_directory
    self._output_directory = output_directory
    for test_id in sorted(self._analyses.keys()):
        # Setup: fill in per-analysis directories that were not set
        # explicitly; multiple analyses get per-test-id subdirectories.
        if not self._analyses[test_id].input_directory:
            self._analyses[test_id].input_directory = input_directory
        if not self._analyses[test_id].output_directory:
            if len(self._analyses) > 1:
                self._analyses[test_id].output_directory = os.path.join(output_directory, str(test_id))
            else:
                self._analyses[test_id].output_directory = output_directory
        if ('config' in kwargs.keys()) and (not self._analyses[test_id].config):
            self._analyses[test_id].config = kwargs['config']
        self._create_output_directories(self._analyses[test_id])
        # Actually run analysis
        self._analyses[test_id].status = self.run(self._analyses[test_id], is_api_call, **kwargs)
        if self._analyses[test_id].status != CONSTANTS.OK:
            error_count += 1
    if len(self._analyses) == 1:
        return self._analyses[0].status
    elif error_count > 0:
        return CONSTANTS.ERROR
    else:
        return CONSTANTS.OK
This is useful when Naarad() is used by other programs and multiple analyses are run
In naarad CLI mode, len(_analyses) == 1
:param: input_directory: location of log files
:param: output_directory: root directory for analysis output
:param: **kwargs: Optional keyword args
:return: int: status code. |
def _get_chain_parent_symbol(self, symbol, fullsymbol):
    """Gets the code element object for the parent of the specified
    symbol in the fullsymbol chain.

    :param symbol: the trailing symbol being completed.
    :param fullsymbol: the full '%'-separated member-access chain.
    :return: a 2-tuple; on success, the result of ``tree_find()`` for the
        type of the second-to-last chain element. Returns ``([], None)``
        when the chain has fewer than two parts and ``(None, None)`` when
        the preceding element's type cannot be resolved.
    """
    #We are only interested in the type of the variable immediately preceding our symbol
    #in the chain so we can list its members.
    chain = fullsymbol.split("%")
    #We assume that if symbol != fullsymbol, we have at least a % at the end that
    #tricked the symbol regex.
    if len(chain) < 2:
        return ([], None)

    previous = chain[-2].lower()
    #Now we need to use the name of the variable to find the actual type name
    target_name = ""
    if previous in self.element.members:
        target_name = self.element.members[previous].kind
    #The contextual element could be a module, in which case it has no parameters.
    #A parameter match deliberately overrides a member match of the same name.
    if hasattr(self.element, "parameters") and previous in self.element.parameters:
        target_name = self.element.parameters[previous].kind

    if target_name == "":
        return (None, None)
    return self.context.parser.tree_find(target_name, self.context.module, "types")
symbol in the fullsymbol chain. |
def format_names(raw):
    """Format a string representing the names contained in the files.

    ``raw`` is a sequence of ``(header, funcs)`` pairs where each entry
    of ``funcs`` is a tuple whose first element is a name. Produces one
    "header:\\nname name ..." block per pair (headers lowercased), blocks
    joined by newlines; falsy input yields ''.
    """
    if not raw:
        return ''
    blocks = []
    for header, funcs in raw:
        names = ' '.join(entry[0] for entry in funcs)
        blocks.append('{}:\n{}'.format(header.lower(), names))
    return '\n'.join(blocks)
def __reg_query_value(handle, value_name):
    '''
    Calls RegQueryValueEx.
    If PY2, ensure a unicode string; expand REG_EXPAND_SZ before returning.
    Remember to catch not-found exceptions when calling.

    Args:
        handle (object): open registry handle.
        value_name (str): Name of the value you wish returned.

    Returns:
        tuple: value, type
    '''
    # item_value, item_type = win32api.RegQueryValueEx(self.__reg_uninstall_handle, value_name)
    item_value, item_type = win32api.RegQueryValueEx(handle, value_name)  # pylint: disable=no-member
    if six.PY2 and isinstance(item_value, six.string_types) and not isinstance(item_value, six.text_type):
        try:
            # mbcs is the Windows ANSI code page encoding.
            item_value = six.text_type(item_value, encoding='mbcs')
        except UnicodeError:
            pass
    if item_type == win32con.REG_EXPAND_SZ:
        # Fixed: the expanded string was previously computed and then
        # discarded; assign the result so callers actually receive the
        # value with %VAR% references expanded. Expects Unicode input.
        item_value = win32api.ExpandEnvironmentStrings(item_value)  # pylint: disable=no-member
        item_type = win32con.REG_SZ
    return item_value, item_type
If PY2 ensure unicode string and expand REG_EXPAND_SZ before returning
Remember to catch not found exceptions when calling.
Args:
handle (object): open registry handle.
value_name (str): Name of the value you wished returned
Returns:
tuple: type, value |
def _serialize_dict(cls, dict_):
    """
    Serialize each value of a dict via ``cls.serialize``, dropping
    entries whose serialized value is None and stripping key
    decoration markers.

    :type dict_ dict
    :rtype: dict
    """
    obj_serialized = {}
    for key in dict_.keys():
        item_serialized = cls.serialize(dict_[key])
        if item_serialized is not None:
            # NOTE(review): rstrip/lstrip remove *character sets*, not
            # fixed suffixes/prefixes — assumes the markers are single
            # characters that never legitimately repeat at the key's
            # edges; TODO confirm.
            key = key.rstrip(cls._SUFFIX_KEY_OVERLAPPING)
            key = key.lstrip(cls._PREFIX_KEY_PROTECTED)
            obj_serialized[key] = item_serialized
    return obj_serialized
:rtype: dict |
def create_model(schema, collection, class_name=None):
    """
    Main entry point to creating a new mongothon model. Both
    schema and Pymongo collection objects must be provided.

    Returns a new class which can be used as a model class.

    The class name of the model class by default is inferred
    from the provided collection (converted to camel case).
    Optionally, a class_name argument can be provided to
    override this.
    """
    if not class_name:
        class_name = camelize(str(collection.name))

    model_class = type(class_name,
                       (Model,),
                       dict(schema=schema, _collection_factory=staticmethod(lambda: collection)))
    # Since we are dynamically creating this class here, we modify __module__ on the
    # created class to point back to the module from which `create_model` was called.
    # NOTE: the depth argument (1) assumes create_model is invoked directly by the
    # client module; wrapping this function in another helper would break that.
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
schema and Pymongo collection objects must be provided.
Returns a new class which can be used as a model class.
The class name of the model class by default is inferred
from the provided collection (converted to camel case).
Optionally, a class_name argument can be provided to
override this. |
def snakecase(string):
    """Convert string into snake case.

    Dashes, dots and whitespace become underscores; every interior
    uppercase letter is prefixed with an underscore and lowercased.

    Args:
        string: String to convert.

    Returns:
        string: Snake cased string.
    """
    normalized = re.sub(r"[\-\.\s]", '_', str(string))
    if not normalized:
        return normalized
    head, tail = normalized[0], normalized[1:]
    converted_tail = re.sub(r"[A-Z]",
                            lambda match: '_' + lowercase(match.group(0)),
                            tail)
    return lowercase(head) + converted_tail
Join punctuation with underscore
Args:
string: String to convert.
Returns:
string: Snake cased string. |
def createService(self, createServiceParameter,
                  description=None,
                  tags="Feature Service",
                  snippet=None):
    """
    The Create Service operation allows users to create a hosted
    feature service. You can use the API to create an empty hosted
    feature service from feature service metadata JSON.

    Inputs:
        createServiceParameter - create service object
        description - optional item description
        tags - comma-separated item tags (default "Feature Service")
        snippet - optional short summary for the item

    Returns a UserItem for the created service on success, otherwise
    the raw JSON response.
    """
    url = "%s/createService" % self.location
    val = createServiceParameter.value
    params = {
        "f" : "json",
        "outputType" : "featureService",
        "createParameters" : json.dumps(val),
        "tags" : tags
    }
    if snippet is not None:
        params['snippet'] = snippet
    if description is not None:
        params['description'] = description
    res = self._post(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
    # Depending on the server version the new item id is reported either
    # as 'id' or 'serviceItemId'; wrap it in a UserItem when present.
    if 'id' in res or \
            'serviceItemId' in res:
        if 'id' in res:
            url = "%s/items/%s" % (self.location, res['id'])
        else:
            url = "%s/items/%s" % (self.location, res['serviceItemId'])
        return UserItem(url=url,
                        securityHandler=self._securityHandler,
                        proxy_url=self._proxy_url,
                        proxy_port=self._proxy_port)
    return res
feature service. You can use the API to create an empty hosted
feaure service from feature service metadata JSON.
Inputs:
createServiceParameter - create service object |
def MetricValueTypeFromPythonType(python_type):
    """Converts Python types to MetricMetadata.ValueType enum values.

    Args:
        python_type: a type object (``int``, ``float``, or Python 2 ``long``).

    Returns:
        The matching ``rdf_stats.MetricMetadata.ValueType`` enum value.

    Raises:
        ValueError: for any unsupported type.
    """
    # `long` only exists on Python 2; referencing it unconditionally made
    # this function raise NameError on Python 3. Match by name instead so
    # both int and the legacy long still map to INT.
    if python_type is int or getattr(python_type, '__name__', '') == 'long':
        return rdf_stats.MetricMetadata.ValueType.INT
    elif python_type is float:
        return rdf_stats.MetricMetadata.ValueType.FLOAT
    else:
        raise ValueError("Invalid value type: %s" % python_type)
def import_string(import_name, silent=False):
    """Imports an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    #XXX: py3 review needed
    assert isinstance(import_name, string_types)
    # force the import name to automatically convert to strings
    import_name = str(import_name)
    try:
        # Split "module:obj" or "module.obj" into module path and
        # attribute; a bare name is imported as a module directly.
        if ':' in import_name:
            module, obj = import_name.split(':', 1)
        elif '.' in import_name:
            module, obj = import_name.rsplit('.', 1)
        else:
            return __import__(import_name)
        # __import__ is not able to handle unicode strings in the fromlist
        # if the module is a package
        if PY2 and isinstance(obj, unicode):
            obj = obj.encode('utf-8')
        try:
            return getattr(__import__(module, None, None, [obj]), obj)
        except (ImportError, AttributeError):
            # support importing modules not yet set up by the parent module
            # (or package for that matter)
            modname = module + '.' + obj
            __import__(modname)
            return sys.modules[modname]
    except ImportError as e:
        # When silent, fall through and return None implicitly;
        # otherwise re-raise as ImportStringError with the original
        # traceback attached.
        if not silent:
            reraise(
                ImportStringError,
                ImportStringError(import_name, e),
                sys.exc_info()[2])
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object |
def Print(self, x, data, message, **kwargs):  # pylint: disable=invalid-name
    """Calls tf.Print.

    No-op on this mesh implementation: logs a warning and returns ``x``
    unchanged, discarding the print arguments.

    Args:
        x: LaidOutTensor.
        data: list of LaidOutTensor.
        message: str.
        **kwargs: keyword arguments to tf.print.

    Returns:
        LaidOutTensor.
    """
    # Printing is unsupported here; drop the unused arguments.
    del data, message, kwargs
    tf.logging.warning("Warning - mtf.Print not implemented for this mesh type")
    return x
Args:
x: LaidOutTensor.
data: list of LaidOutTensor.
message: str.
**kwargs: keyword arguments to tf.print.
Returns:
LaidOutTensor. |
def SendTextMessage(self, Text):
    """Sends a text message over channel.

    :Parameters:
      Text : unicode
        Text to send.

    :raise SkypeError: if the channel type supports neither stream
        writes nor datagrams.
    """
    if self.Type == cctReliable:
        self.Stream.Write(Text)
    elif self.Type == cctDatagram:
        self.Stream.SendDatagram(Text)
    else:
        # Fixed: the message previously used `&` (bitwise AND) instead of
        # the `%` string-formatting operator, which raised TypeError
        # instead of the intended SkypeError.
        raise SkypeError(0, 'Cannot send using %s channel type' % repr(self.Type))
:Parameters:
Text : unicode
Text to send. |
def refresh(self):
    """
    Re-populate this mapping with the current container names from the
    Docker client, each name (leading '/' stripped) keyed to its id.
    Does nothing when no client is attached.
    """
    if not self._client:
        return
    containers = self._client.containers(all=True)
    self.clear()
    for info in containers:
        names = info.get('Names')
        if not names:
            continue
        container_id = info['Id']
        # Docker reports names with a leading '/', which we strip.
        self.update({name[1:]: container_id for name in names})
def render(self):
    """Repeatedly draw spinner frames until the stop flag is set.

    Returns
    -------
    self
    """
    while True:
        if self._stop_spinner.is_set():
            break
        self._render_frame()
        # _interval is in milliseconds; time.sleep expects seconds.
        time.sleep(0.001 * self._interval)
    return self
Returns
-------
self |
def estimate(init_values,
             estimator,
             method,
             loss_tol,
             gradient_tol,
             maxiter,
             print_results,
             use_hessian=True,
             just_point=False,
             **kwargs):
    """
    Estimate the given choice model that is defined by `estimator`.

    Parameters
    ----------
    init_vals : 1D ndarray.
        Should contain the initial values to start the optimization process
        with.
    estimator : an instance of the EstimationObj class.
    method : str, optional.
        Should be a valid string for scipy.optimize.minimize. Determines
        the optimization algorithm that is used for this problem.
        Default `== 'bfgs'`.
    loss_tol : float, optional.
        Determines the tolerance on the difference in objective function
        values from one iteration to the next that is needed to determine
        convergence. Default `== 1e-06`.
    gradient_tol : float, optional.
        Determines the tolerance on the difference in gradient values from
        one iteration to the next which is needed to determine convergence.
        Default `== 1e-06`.
    maxiter : int, optional.
        Determines the maximum number of iterations used by the optimizer.
        Default `== 1000`.
    print_res : bool, optional.
        Determines whether the timing and initial and final log likelihood
        results will be printed as they are determined.
        Default `== True`.
    use_hessian : bool, optional.
        Determines whether the `calc_neg_hessian` method of the `estimator`
        object will be used as the hessian function during the estimation. This
        kwarg is used since some models (such as the Mixed Logit and Nested
        Logit) use a rather crude (i.e. the BHHH) approximation to the Fisher
        Information Matrix, and users may prefer to not use this approximation
        for the hessian during estimation.
    just_point : bool, optional.
        Determines whether or not calculations that are non-critical for
        obtaining the maximum likelihood point estimate will be performed.
        Default == False.

    Return
    ------
    results : dict.
        The dictionary of estimation results that is returned by
        scipy.optimize.minimize. It will also have (at minimum) the following
        keys:
        - "log-likelihood_null"
        - "final_log_likelihood"
        - "utility_coefs"
        - "intercept_params"
        - "shape_params"
        - "nest_params"
        - "chosen_probs"
        - "long_probs"
        - "residuals"
        - "ind_chi_squareds"
        - "rho_squared"
        - "rho_bar_squared"
        - "final_gradient"
        - "final_hessian"
        - "fisher_info"
    """
    if not just_point:
        # Perform preliminary calculations: the null (all-zero parameters)
        # and starting-point log-likelihoods, used for reporting and for
        # the rho-squared statistics later on.
        log_likelihood_at_zero =\
            estimator.convenience_calc_log_likelihood(estimator.zero_vector)

        initial_log_likelihood =\
            estimator.convenience_calc_log_likelihood(init_values)

        if print_results:
            # Print the log-likelihood at zero
            null_msg = "Log-likelihood at zero: {:,.4f}"
            print(null_msg.format(log_likelihood_at_zero))

            # Print the log-likelihood at the starting values
            init_msg = "Initial Log-likelihood: {:,.4f}"
            print(init_msg.format(initial_log_likelihood))
            sys.stdout.flush()

    # Get the hessian function for this estimation process
    hess_func = estimator.calc_neg_hessian if use_hessian else None

    # Estimate the actual parameters of the model
    start_time = time.time()

    # jac=True: the objective callable returns (loss, gradient) together.
    results = minimize(estimator.calc_neg_log_likelihood_and_neg_gradient,
                       init_values,
                       method=method,
                       jac=True,
                       hess=hess_func,
                       tol=loss_tol,
                       options={'gtol': gradient_tol,
                                "maxiter": maxiter},
                       **kwargs)

    if not just_point:
        if print_results:
            # Stop timing the estimation process and report the timing results
            end_time = time.time()
            elapsed_sec = (end_time - start_time)
            elapsed_min = elapsed_sec / 60.0
            if elapsed_min > 1.0:
                msg = "Estimation Time for Point Estimation: {:.2f} minutes."
                print(msg.format(elapsed_min))
            else:
                msg = "Estimation Time for Point Estimation: {:.2f} seconds."
                print(msg.format(elapsed_sec))
            print("Final log-likelihood: {:,.4f}".format(-1 * results["fun"]))
            sys.stdout.flush()

        # Store the log-likelihood at zero
        results["log_likelihood_null"] = log_likelihood_at_zero

        # Calculate and store the post-estimation results
        results = calc_and_store_post_estimation_results(results, estimator)

    return results
Parameters
----------
init_vals : 1D ndarray.
Should contain the initial values to start the optimization process
with.
estimator : an instance of the EstimationObj class.
method : str, optional.
Should be a valid string for scipy.optimize.minimize. Determines
the optimization algorithm that is used for this problem.
Default `== 'bfgs'`.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next that is needed to determine
convergence. Default `== 1e-06`.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default `== 1e-06`.
maxiter : int, optional.
Determines the maximum number of iterations used by the optimizer.
Default `== 1000`.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
results will be printed as they they are determined.
Default `== True`.
use_hessian : bool, optional.
Determines whether the `calc_neg_hessian` method of the `estimator`
object will be used as the hessian function during the estimation. This
kwarg is used since some models (such as the Mixed Logit and Nested
Logit) use a rather crude (i.e. the BHHH) approximation to the Fisher
Information Matrix, and users may prefer to not use this approximation
for the hessian during estimation.
just_point : bool, optional.
Determines whether or not calculations that are non-critical for
obtaining the maximum likelihood point estimate will be performed.
Default == False.
Return
------
results : dict.
The dictionary of estimation results that is returned by
scipy.optimize.minimize. It will also have (at minimum) the following
keys:
- "log-likelihood_null"
- "final_log_likelihood"
- "utility_coefs"
- "intercept_params"
- "shape_params"
- "nest_params"
- "chosen_probs"
- "long_probs"
- "residuals"
- "ind_chi_squareds"
- "rho_squared"
- "rho_bar_squared"
- "final_gradient"
- "final_hessian"
- "fisher_info" |
def _clean_data(cls, *args, **kwargs):
    """
    Convert raw data into a dictionary with plot-type specific methods.

    The result of the cleaning operation should be a dictionary.
    If the dictionary contains a 'data' field it will be passed directly
    (ensuring appropriate formatting). Otherwise, it should be a
    dictionary of data-type specific array data (e.g. 'points',
    'timeseries'), which will be labeled appropriately
    (see _check_unkeyed_arrays).
    """
    datadict = cls.clean(*args, **kwargs)

    if 'data' in datadict:
        # Pre-assembled payload: normalize its container type and pass
        # it through without per-key labeling.
        data = datadict['data']
        data = cls._ensure_dict_or_list(data)
    else:
        data = {}
        for key in datadict:
            if key == 'images':
                # Image payloads are passed through verbatim.
                data[key] = datadict[key]
            else:
                d = cls._ensure_dict_or_list(datadict[key])
                data[key] = cls._check_unkeyed_arrays(key, d)

    return data
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays). |
def wait_for_relation(service_name, relation_name, timeout=120):
    """Wait `timeout` seconds for a given relation to come up.

    Polls `unit_info` every SLEEP_AMOUNT seconds until the named
    relation reports state 'up'.

    :raises RuntimeError: if the relation is not up within `timeout`
        seconds.
    """
    start_time = time.time()
    while True:
        relation = unit_info(service_name, 'relations').get(relation_name)
        if relation is not None and relation['state'] == 'up':
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for relation to be up')
        time.sleep(SLEEP_AMOUNT)
def as_string(self):
    """Return the command as a single string for the docker file."""
    # A raw string instruction is passed through untouched.
    if type(self.instruction) is str:
        return self.instruction

    # FROM with a non-string command refers to another image object;
    # render its name plus any extra tag/stage suffix.
    if self.action == "FROM" and not isinstance(self.command, six.string_types):
        if self.extra is NotSpecified:
            suffix = ""
        else:
            suffix = " {0}".format(self.extra)
        return "{0} {1}{2}".format(self.action, self.command.from_name, suffix)

    return "{0} {1}".format(self.action, self.command)
def __getLogger(cls):
    """ Get the logger for this object, creating and caching it on the
    class the first time it is requested.

    :returns: (Logger) A Logger object.
    """
    if cls.__logger is None:
        cls.__logger = opf_utils.initLogger(cls)
    return cls.__logger
:returns: (Logger) A Logger object. |
def add_resource(self, name, file_path, ind_obj):
    """Link a resource to an individual and persist it.

    Creates a ``Resource`` record tied to ``ind_obj``, adds it to the
    session, saves, and returns the new record.
    """
    resource = Resource(name=name, individual=ind_obj, path=file_path)
    self.session.add(resource)
    self.save()
    return resource
def path_qs(self):
    """Decoded path of URL, including the query string when present."""
    query = self.query_string
    if query:
        return "{}?{}".format(self.path, query)
    return self.path
def is_ipfs_uri(value: str) -> bool:
    """
    Return a bool indicating whether or not the value is a valid IPFS URI.

    A valid IPFS URI uses the "ipfs" scheme and has at least one of a
    network location or a path component.
    """
    parsed = parse.urlparse(value)
    has_location = bool(parsed.netloc) or bool(parsed.path)
    return parsed.scheme == "ipfs" and has_location
def find_nearest_leaf(self, entry, search_node=None):
    """!
    @brief Search nearest leaf to the specified clustering feature.

    @param[in] entry (cfentry): Clustering feature.
    @param[in] search_node (cfnode): Node from which searching should be started; if None the search starts at the root.

    @return (leaf_node) Nearest node to the specified clustering feature.
    """
    # Idiom cleanup: removed Python-2-style trailing semicolons and
    # redundant parentheses; flattened the nesting with an early return.
    if search_node is None:
        search_node = self.__root

    # Leaves are returned directly.
    if search_node.type != cfnode_type.CFNODE_NONLEAF:
        return search_node

    # Greedy CF-tree descent: recurse into the child whose feature is
    # closest to `entry` under the configured distance measure.
    def distance_to(child_node):
        return child_node.feature.get_distance(entry, self.__type_measurement)

    nearest_child = min(search_node.successors, key=distance_to)
    return self.find_nearest_leaf(entry, nearest_child)
@brief Search nearest leaf to the specified clustering feature.
@param[in] entry (cfentry): Clustering feature.
@param[in] search_node (cfnode): Node from that searching should be started, if None then search process will be started for the root.
@return (leaf_node) Nearest node to the specified clustering feature. |
def createReference(self, fromnode, tonode, edge_data=None):
"""
Create a reference from fromnode to tonode
"""
if fromnode is None:
fromnode = self
fromident, toident = self.getIdent(fromnode), self.getIdent(tonode)
if fromident is None or toident is None:
return
self.msg(4, "createReference", fromnode, tonode, edge_data)
self.graph.add_edge(fromident, toident, edge_data=edge_data) | Create a reference from fromnode to tonode |
def set_basic_params(self, msg_size=None, cheap=None, anti_loop_timeout=None):
    """Configure basic alarm-subsystem options.

    :param int msg_size: Max size of an alarm message in bytes. Default: 8192.

    :param bool cheap: Run alarms on the main alarm thread instead of
        dedicated threads for curl-based alarms.

    :param int anti_loop_timeout: Anti-loop alarm timeout. Default: 3 seconds.
    """
    self._set('alarm-msg-size', msg_size)
    self._set('alarm-cheap', cheap, cast=bool)
    self._set('alarm-freq', anti_loop_timeout)

    return self._section
:param bool cheap: Use main alarm thread rather than create dedicated
threads for curl-based alarms
:param int anti_loop_timeout: Tune the anti-loop alarm system. Default: 3 seconds. |
def do_stored_procedure_check(self, instance, proc):
    """
    Fetch the metrics from the stored proc.

    Calls stored procedure *proc* on the instance's default DB connection
    and submits every returned row through ``self.proc_type_mapping``.
    Skips the call entirely when the instance's ``proc_only_if`` guard
    query is present and does not pass.
    """
    guardSql = instance.get('proc_only_if')
    custom_tags = instance.get("tags", [])
    # run only when there is no guard, or when the guard query passes
    if (guardSql and self.proc_check_guard(instance, guardSql)) or not guardSql:
        self.open_db_connections(instance, self.DEFAULT_DB_KEY)
        cursor = self.get_cursor(instance, self.DEFAULT_DB_KEY)
        try:
            self.log.debug("Calling Stored Procedure : {}".format(proc))
            if self._get_connector(instance) == 'adodbapi':
                cursor.callproc(proc)
            else:
                # pyodbc does not support callproc; use execute instead.
                # Reference: https://github.com/mkleehammer/pyodbc/wiki/Calling-Stored-Procedures
                call_proc = '{{CALL {}}}'.format(proc)
                cursor.execute(call_proc)
            rows = cursor.fetchall()
            self.log.debug("Row count ({}) : {}".format(proc, cursor.rowcount))
            for row in rows:
                # per-row tags (comma-separated) are merged with the instance tags
                tags = [] if row.tags is None or row.tags == '' else row.tags.split(',')
                tags.extend(custom_tags)
                # NOTE(review): membership test lowercases row.type but the
                # dispatch below indexes with the original casing -- confirm
                # proc_type_mapping keys cover both, else this can KeyError.
                if row.type.lower() in self.proc_type_mapping:
                    self.proc_type_mapping[row.type](row.metric, row.value, tags)
                else:
                    self.log.warning(
                        '{} is not a recognised type from procedure {}, metric {}'.format(
                            row.type, proc, row.metric
                        )
                    )
        except Exception as e:
            # best-effort: a failed proc call is logged, not raised
            self.log.warning("Could not call procedure {}: {}".format(proc, e))
        # cursor/connection cleanup runs on both success and failure paths
        self.close_cursor(cursor)
        self.close_db_connections(instance, self.DEFAULT_DB_KEY)
    else:
        self.log.info("Skipping call to {} due to only_if".format(proc))
def quadratic_forms(h1, h2):
    r"""Quadratic forms histogram distance (UNDER DEVELOPMENT).

    Computes ``sqrt((h1-h2)^T A (h1-h2))`` where ``A`` is the Euclidean
    similarity matrix of the two histograms.

    .. warning::
        This measure behaves strangely: the quadratic term can come out
        negative (e.g. ``[1, 0]`` vs ``[0.0, 1.0]`` yields ``-2.0``), which
        cannot be square-rooted, and it is exactly zero for some pairs of
        distinct histograms (e.g. ``[1, 0]`` vs ``[0.5, 0.5]``) -- clearly
        undesirable for a distance.
    """
    h1, h2 = __prepare_histogram(h1, h2)
    similarity = __quadratic_forms_matrix_euclidean(h1, h2)
    delta = h1 - h2
    return math.sqrt(delta.dot(similarity.dot(delta)))
Quadratic forms metric.
Notes
-----
UNDER DEVELOPMENT
This distance measure shows very strange behaviour. The expression
transpose(h1-h2) * A * (h1-h2) yields negative values that cannot be processed by the
square root. Some examples::
h1 h2 transpose(h1-h2) * A * (h1-h2)
[1, 0] to [0.0, 1.0] : -2.0
[1, 0] to [0.5, 0.5] : 0.0
[1, 0] to [0.6666666666666667, 0.3333333333333333] : 0.111111111111
[1, 0] to [0.75, 0.25] : 0.0833333333333
[1, 0] to [0.8, 0.2] : 0.06
[1, 0] to [0.8333333333333334, 0.16666666666666666] : 0.0444444444444
[1, 0] to [0.8571428571428572, 0.14285714285714285] : 0.0340136054422
[1, 0] to [0.875, 0.125] : 0.0267857142857
[1, 0] to [0.8888888888888888, 0.1111111111111111] : 0.0216049382716
[1, 0] to [0.9, 0.1] : 0.0177777777778
[1, 0] to [1, 0]: 0.0
It is clearly undesirable to receive negative values, and even worse to get a value
of zero for cases other than identical histograms.
def page_index(request):
    """Render an alphabetical index of all pages, grouped by first letter."""
    grouped = {}
    for page in Page.query.order_by(Page.name):
        first_letter = page.name.capitalize()[0]
        grouped.setdefault(first_letter, []).append(page)
    template = generate_template("page_index.html", letters=sorted(grouped.items()))
    return Response(template)
def create_apppool(name):
    '''
    Create an IIS application pool.

    .. note::
        Only the pool name is checked: True is returned even when a pool of
        the same name already exists with a different configuration, and an
        existing pool is never modified.

    Args:
        name (str): The name of the IIS application pool.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.create_apppool name='MyTestPool'
    '''
    apppool_path = r'IIS:\AppPools\{0}'.format(name)

    if name in list_apppools():
        log.debug("Application pool '%s' already present.", name)
        return True

    cmd_ret = _srvmgr(['New-Item', '-Path', r"'{0}'".format(apppool_path)])
    if cmd_ret['retcode'] != 0:
        raise CommandExecutionError(
            'Unable to create application pool: {0}\nError: {1}'
            ''.format(name, cmd_ret['stderr']))

    log.debug('Application pool created successfully: %s', name)
    return True
.. note::
This function only validates against the application pool name, and will
return True even if the application pool already exists with a different
configuration. It will not modify the configuration of an existing
application pool.
Args:
name (str): The name of the IIS application pool.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_apppool name='MyTestPool' |
def get_removed_obs_importance(self,obslist_dict=None,
                               reset_zero_weight=False):
    """get a dataframe of the posterior uncertainty
    as a result of losing some observations

    Parameters
    ----------
    obslist_dict : dict
        dictionary of groups of observations that are to be treated as
        lost.  key values become row labels in the returned dataframe.
        If None, then test every (nonzero weight -- see
        reset_zero_weight) observation
    reset_zero_weight : bool or float
        a flag to reset observations with zero weight in obslist_dict.
        If the value of reset_zero_weight can be cast to a float, then
        that value will be assigned to zero weight obs.  Otherwise, zero
        weight obs will be given a weight of 1.0

    Returns
    -------
    pandas.DataFrame
        a dataframe with index of obslist_dict.keys() and columns of
        forecast names.  The values are the posterior variances of the
        forecasts resulting from losing the information contained in
        obslist_dict[key value]

    Note
    ----
    all observations listed in obslist_dict with zero weights will be
    dropped unless reset_zero_weight is set

    Example
    -------
    ``>>>import pyemu``

    ``>>>sc = pyemu.Schur(jco="pest.jcb")``

    ``df = sc.get_removed_obs_importance()``
    """
    if obslist_dict is not None:
        # allow a plain list: each obs becomes its own single-entry case
        if type(obslist_dict) == list:
            obslist_dict = dict(zip(obslist_dict,obslist_dict))
    elif reset_zero_weight is False and self.pst.nnz_obs == 0:
        raise Exception("not resetting weights and there are no non-zero weight obs to remove")
    reset = False
    if reset_zero_weight is not False:
        if not self.obscov.isdiagonal:
            raise NotImplementedError("cannot reset weights for non-"+\
                                      "diagonal obscov")
        reset = True
        # reset_zero_weight doubles as the replacement weight when numeric
        try:
            weight = float(reset_zero_weight)
        except:
            weight = 1.0
        self.logger.statement("resetting zero weights to {0}".format(weight))
    # make copies of the original obscov and pst so they can be restored
    # after the weight-reset side effects below
    org_obscov = self.obscov.get(self.obscov.row_names)
    org_pst = self.pst.get()
    self.log("calculating importance of observations")
    if reset and obslist_dict is None:
        # give every zero-weight obs (known to the jco and obscov) the
        # replacement weight so it can be tested
        obs = self.pst.observation_data
        onames = [name for name in self.pst.zero_weight_obs_names
                  if name in self.jco.obs_names and name in self.obscov.row_names]
        obs.loc[onames,"weight"] = weight
    if obslist_dict is None:
        # default: test each nonzero-weight obs individually
        obslist_dict = dict(zip(self.pst.nnz_obs_names,
                                self.pst.nnz_obs_names))
    elif reset:
        self.pst.observation_data.index = self.pst.observation_data.obsnme
        for name,obslist in obslist_dict.items():
            self.log("resetting weights in obs in group {0}".format(name))
            self.pst._adjust_weights_by_list(obslist,weight)
            self.log("resetting weights in obs in group {0}".format(name))
    # normalize every case to a list of obs names
    for case,obslist in obslist_dict.items():
        if not isinstance(obslist,list):
            obslist = [obslist]
        obslist_dict[case] = obslist
    if reset:
        # rebuild obscov from the (possibly re-weighted) pst
        self.log("resetting self.obscov")
        self.reset_obscov(self.pst)
        self.log("resetting self.obscov")
    results = {}
    names = ["base"]
    # "base" row: posterior variance with all observations retained
    for forecast,pt in self.posterior_forecast.items():
        results[forecast] = [pt]
    for case_name,obslist in obslist_dict.items():
        if not isinstance(obslist,list):
            obslist = [obslist]
        names.append(case_name)
        self.log("calculating importance of observations by removing: " +
                 str(obslist) + '\n')
        # check for missing names
        missing_onames = [oname for oname in obslist if oname not in self.jco.obs_names]
        if len(missing_onames) > 0:
            raise Exception("case {0} has observation names ".format(case_name) + \
                            "not found: " + ','.join(missing_onames))
        # find the set difference between obslist and jco obs names
        #diff_onames = [oname for oname in self.jco.obs_names if oname not in obslist]
        diff_onames = [oname for oname in self.nnz_obs_names if oname not in obslist and oname not in self.forecast_names]
        # calculate the increase in forecast variance by not using the obs
        # in obslist
        case_post = self.get(par_names=self.jco.par_names,
                             obs_names=diff_onames).posterior_forecast
        for forecast,pt in case_post.items():
            results[forecast].append(pt)
    df = pd.DataFrame(results,index=names)
    self.log("calculating importance of observations by removing: " +
             str(obslist) + '\n')
    if reset:
        # restore the original obscov and pst copied above
        self.reset_obscov(org_obscov)
        self.reset_pst(org_pst)
    return df
as a result of losing some observations
Parameters
----------
obslist_dict : dict
dictionary of groups of observations
that are to be treated as lost. key values become
row labels in returned dataframe. If None, then test every
(nonzero weight - see reset_zero_weight) observation
reset_zero_weight : bool or float
a flag to reset observations with zero weight in obslist_dict.
If the value of reset_zero_weights can be cast to a float,
then that value will be assigned to zero weight obs. Otherwise,
zero weight obs will be given a weight of 1.0
Returns
-------
pandas.DataFrame : pandas.DataFrame
a dataframe with index of obslist_dict.keys() and columns
of forecast names. The values in the dataframe are the posterior
variances of the forecasts resulting from losing the information
contained in obslist_dict[key value]
Note
----
all observations listed in obslist_dict with zero
weights will be dropped unless reset_zero_weight is set
Example
-------
``>>>import pyemu``
``>>>sc = pyemu.Schur(jco="pest.jcb")``
``df = sc.get_removed_obs_importance()`` |
def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, **kwargs):
    """Draw random samples from a generalized negative binomial distribution.

    Samples follow a generalized negative binomial distribution parametrized
    by *mu* (mean) and *alpha* (dispersion), where *alpha* is ``1/k`` with
    *k* the (real-valued) failure limit on the number of unsuccessful
    experiments.  Samples are always returned as a floating point data type.

    Parameters
    ----------
    mu : float or Symbol, optional
        Mean of the negative binomial distribution.
    alpha : float or Symbol, optional
        Alpha (dispersion) parameter of the negative binomial distribution.
    shape : int or tuple of ints, optional
        Number of samples to draw per `(mu, alpha)` pair.  Scalar parameters
        with shape `(m, n)` give output shape `(m, n)`; Symbol parameters of
        shape `(x, y)` give output shape `(x, y, m, n)`.
    dtype : {'float16', 'float32', 'float64'}, optional
        Data type of output samples. Default is 'float32'.

    Returns
    -------
    Symbol
        A Symbol resolving to the sampled shape described above.
    """
    generator = _internal._random_generalized_negative_binomial
    sampler = _internal._sample_generalized_negative_binomial
    return _random_helper(generator, sampler, [mu, alpha], shape, dtype, kwargs)
Samples are distributed according to a generalized negative binomial
distribution parametrized by *mu* (mean) and *alpha* (dispersion).
*alpha* is defined as *1/k* where *k* is the failure limit of the
number of unsuccessful experiments (generalized to real numbers).
Samples will always be returned as a floating point data type.
Parameters
----------
mu : float or Symbol, optional
Mean of the negative binomial distribution.
alpha : float or Symbol, optional
Alpha (dispersion) parameter of the negative binomial distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
`alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
are Symbols with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `mu` and
`alpha` are scalars, returned Symbol will resolve to shape `(m, n)`. If `mu`
and `alpha` are Symbols with shape, e.g., `(x, y)`, returned Symbol will resolve
to shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[mu, alpha)` pair. |
def ConsultarTributos(self, sep="||"):
    """Return the list of tax codes with their descriptions (and sign).

    When *sep* is None a ``{code: description}`` dict is returned;
    otherwise a list of strings with the fields joined by *sep*.
    """
    response = self.client.consultarTributos(
        auth={
            'token': self.Token, 'sign': self.Sign,
            'cuit': self.Cuit, },
    )['respuesta']
    self.__analizar_errores(response)
    entries = response.get('tributo', [])
    if sep is None:
        return {entry['codigo']: entry['descripcion'] for entry in entries}
    template = "%s %%s %s %%s %s" % (sep, sep, sep)
    return [template % (entry['codigo'], entry['descripcion'])
            for entry in entries]
def insert(self, key, value):
    """Insert a key and value in the map if the key is not already present.

    :type key: :class: '~opencensus.tags.tag_key.TagKey'
    :param key: a tag key to insert into the map

    :type value: :class: '~opencensus.tags.tag_value.TagValue'
    :param value: the tag value to associate with the key

    :raises ValueError: if the key or value fails TagKey/TagValue validation.
    """
    if key in self.map:
        return
    # The previous try/except ValueError that only re-raised was a no-op
    # wrapper; let validation errors from TagKey/TagValue propagate directly.
    self.map[TagKey(key)] = TagValue(value)
contain the key.
:type key: :class: '~opencensus.tags.tag_key.TagKey'
:param key: a tag key to insert into the map
:type value: :class: '~opencensus.tags.tag_value.TagValue'
:param value: a tag value that is associated with the tag key and
the value to insert into the tag map |
def copy_security(source,
                  target,
                  obj_type='file',
                  copy_owner=True,
                  copy_group=True,
                  copy_dacl=True,
                  copy_sacl=True):
    r'''
    Copy the security descriptor of the Source to the Target. You can specify a
    specific portion of the security descriptor to copy using one of the
    `copy_*` parameters.

    .. note::
        At least one `copy_*` parameter must be ``True``

    .. note::
        The user account running this command must have the following
        privileges:

        - SeTakeOwnershipPrivilege
        - SeRestorePrivilege
        - SeSecurityPrivilege

    Args:

        source (str):
            The full path to the source. This is where the security info will be
            copied from

        target (str):
            The full path to the target. This is where the security info will be
            applied

        obj_type (str): file
            The type of object to query. This value changes the format of the
            ``obj_name`` parameter as follows:

            - file: indicates a file or directory
                - a relative path, such as ``FileName.txt`` or ``..\FileName``
                - an absolute path, such as ``C:\DirName\FileName.txt``
                - A UNC name, such as ``\\ServerName\ShareName\FileName.txt``
            - service: indicates the name of a Windows service
            - printer: indicates the name of a printer
            - registry: indicates a registry key
                - Uses the following literal strings to denote the hive:
                    - HKEY_LOCAL_MACHINE
                    - MACHINE
                    - HKLM
                    - HKEY_USERS
                    - USERS
                    - HKU
                    - HKEY_CURRENT_USER
                    - CURRENT_USER
                    - HKCU
                    - HKEY_CLASSES_ROOT
                    - CLASSES_ROOT
                    - HKCR
                - Should be in the format of ``HIVE\Path\To\Key``. For example,
                  ``HKLM\SOFTWARE\Windows``
            - registry32: indicates a registry key under WOW64. Formatting is
              the same as it is for ``registry``
            - share: indicates a network share

        copy_owner (bool): True
            ``True`` copies owner information. Default is ``True``

        copy_group (bool): True
            ``True`` copies group information. Default is ``True``

        copy_dacl (bool): True
            ``True`` copies the DACL. Default is ``True``

        copy_sacl (bool): True
            ``True`` copies the SACL. Default is ``True``

    Returns:
        bool: ``True`` if successful

    Raises:
        SaltInvocationError: When parameters are invalid
        CommandExecutionError: On failure to set security

    Usage:

    .. code-block:: python

        salt.utils.win_dacl.copy_security(
            source='C:\\temp\\source_file.txt',
            target='C:\\temp\\target_file.txt',
            obj_type='file')

        salt.utils.win_dacl.copy_security(
            source='HKLM\\SOFTWARE\\salt\\test_source',
            target='HKLM\\SOFTWARE\\salt\\test_target',
            obj_type='registry',
            copy_owner=False)
    '''
    obj_dacl = dacl(obj_type=obj_type)
    # registry paths use hive aliases that must be canonicalized first
    if 'registry' in obj_type.lower():
        source = obj_dacl.get_reg_name(source)
        log.info('Source converted to: %s', source)
        target = obj_dacl.get_reg_name(target)
        log.info('Target converted to: %s', target)

    # Set flags
    try:
        obj_type_flag = flags().obj_type[obj_type.lower()]
    except KeyError:
        raise SaltInvocationError(
            'Invalid "obj_type" passed: {0}'.format(obj_type))

    # build the SECURITY_INFORMATION bitmask from the copy_* switches
    security_flags = 0
    if copy_owner:
        security_flags |= win32security.OWNER_SECURITY_INFORMATION
    if copy_group:
        security_flags |= win32security.GROUP_SECURITY_INFORMATION
    if copy_dacl:
        security_flags |= win32security.DACL_SECURITY_INFORMATION
    if copy_sacl:
        security_flags |= win32security.SACL_SECURITY_INFORMATION

    if not security_flags:
        raise SaltInvocationError(
            'One of copy_owner, copy_group, copy_dacl, or copy_sacl must be '
            'True')

    # To set the owner to something other than the logged in user requires
    # SE_TAKE_OWNERSHIP_NAME and SE_RESTORE_NAME privileges
    # Enable them for the logged in user

    # Setup the privilege set
    new_privs = set()
    luid = win32security.LookupPrivilegeValue('', 'SeTakeOwnershipPrivilege')
    new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))
    luid = win32security.LookupPrivilegeValue('', 'SeRestorePrivilege')
    new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))
    luid = win32security.LookupPrivilegeValue('', 'SeSecurityPrivilege')
    new_privs.add((luid, win32con.SE_PRIVILEGE_ENABLED))

    # Get the current token
    p_handle = win32api.GetCurrentProcess()
    t_handle = win32security.OpenProcessToken(
        p_handle,
        win32security.TOKEN_ALL_ACCESS | win32con.TOKEN_ADJUST_PRIVILEGES)

    # Enable the privileges
    win32security.AdjustTokenPrivileges(t_handle, 0, new_privs)

    # Load object Security Info from the Source
    sec = win32security.GetNamedSecurityInfo(
        source, obj_type_flag, security_flags)

    # The following return None if the corresponding flag is not set
    sd_sid = sec.GetSecurityDescriptorOwner()
    sd_gid = sec.GetSecurityDescriptorGroup()
    sd_dacl = sec.GetSecurityDescriptorDacl()
    sd_sacl = sec.GetSecurityDescriptorSacl()

    # Set Security info on the target
    try:
        win32security.SetNamedSecurityInfo(
            target, obj_type_flag, security_flags, sd_sid, sd_gid, sd_dacl,
            sd_sacl)
    except pywintypes.error as exc:
        raise CommandExecutionError(
            'Failed to set security info: {0}'.format(exc.strerror))

    return True
Copy the security descriptor of the Source to the Target. You can specify a
specific portion of the security descriptor to copy using one of the
`copy_*` parameters.
.. note::
At least one `copy_*` parameter must be ``True``
.. note::
The user account running this command must have the following
privileges:
- SeTakeOwnershipPrivilege
- SeRestorePrivilege
- SeSecurityPrivilege
Args:
source (str):
The full path to the source. This is where the security info will be
copied from
target (str):
The full path to the target. This is where the security info will be
applied
obj_type (str): file
The type of object to query. This value changes the format of the
``obj_name`` parameter as follows:
- file: indicates a file or directory
- a relative path, such as ``FileName.txt`` or ``..\FileName``
- an absolute path, such as ``C:\DirName\FileName.txt``
- A UNC name, such as ``\\ServerName\ShareName\FileName.txt``
- service: indicates the name of a Windows service
- printer: indicates the name of a printer
- registry: indicates a registry key
- Uses the following literal strings to denote the hive:
- HKEY_LOCAL_MACHINE
- MACHINE
- HKLM
- HKEY_USERS
- USERS
- HKU
- HKEY_CURRENT_USER
- CURRENT_USER
- HKCU
- HKEY_CLASSES_ROOT
- CLASSES_ROOT
- HKCR
- Should be in the format of ``HIVE\Path\To\Key``. For example,
``HKLM\SOFTWARE\Windows``
- registry32: indicates a registry key under WOW64. Formatting is
the same as it is for ``registry``
- share: indicates a network share
copy_owner (bool): True
``True`` copies owner information. Default is ``True``
copy_group (bool): True
``True`` copies group information. Default is ``True``
copy_dacl (bool): True
``True`` copies the DACL. Default is ``True``
copy_sacl (bool): True
``True`` copies the SACL. Default is ``True``
Returns:
bool: ``True`` if successful
Raises:
SaltInvocationError: When parameters are invalid
CommandExecutionError: On failure to set security
Usage:
.. code-block:: python
salt.utils.win_dacl.copy_security(
source='C:\\temp\\source_file.txt',
target='C:\\temp\\target_file.txt',
obj_type='file')
salt.utils.win_dacl.copy_security(
source='HKLM\\SOFTWARE\\salt\\test_source',
target='HKLM\\SOFTWARE\\salt\\test_target',
obj_type='registry',
copy_owner=False) |
def publish_event(event_t, data=None, extra_channels=None, wait=None):
    """Publish an event to any subscribers.

    :param event_t: event type
    :param data: event data payload
    :param extra_channels: additional channel names to publish the event to
    :param wait: channel name to subscribe to and block on after publishing
    :return:
    """
    event = Event(event_t, data)
    channels = ["shoebot"]
    channels.extend(extra_channels or [])
    for channel_name in channels:
        pubsub.publish(channel_name, event)
    if wait is not None:
        pubsub.subscribe(wait).listen(wait)
:param event_t: event type
:param data: event data
:param extra_channels:
:param wait:
:return: |
def equirectangular_distance(self, other):
    """Approximate distance, in meters, to another point.

    Uses the equirectangular projection (Pythagoras on a flattened
    sphere)::

        x = Δλ ⋅ cos φm
        y = Δφ
        d = R ⋅ √(x² + y²)

    Compared to the exact Haversine great-circle formula this needs only
    one trigonometric call and one square root, so it is roughly twice as
    fast, at the cost of a slight over-estimate: no more than about
    0.054% extra as long as the two points are within ~4 decimal degrees
    of each other in latitude and longitude.  Prefer
    ``great_circle_distance`` when absolute precision matters or the
    points are far apart.

    @param other: a ``GeoPoint`` instance.

    @return: the approximate distance, in meters, from this point to
        ``other``.
    """
    mean_lat = math.radians(other.latitude + self.latitude) / 2
    dx = math.radians(other.longitude - self.longitude) * math.cos(mean_lat)
    dy = math.radians(other.latitude - self.latitude)
    return math.sqrt(dx * dx + dy * dy) * GeoPoint.EARTH_RADIUS_METERS
the center of the cluster.
For small distances, Pythagoras’ theorem can be used on an
equirectangular projection.
Equirectangular formula::
x = Δλ ⋅ cos φm
y = Δφ
d = R ⋅ √(x² + y)²
It will always over-estimate compared to the real Haversine distance.
For example it will add no more than 0.05382 % to the real distance if
the delta latitude or longitude between your two points does not
exceed 4 decimal degrees.
The standard formula (Haversine) is the exact one (that is, it works
for any couple of longitude/latitude on earth) but is much slower as
it needs 7 trigonometric and 2 square roots. If your couple of points
are not too far apart, and absolute precision is not paramount, you
can use this approximate version (Equirectangular), which is much
faster as it uses only one trigonometric and one square root::
Python 2.7.6rc1 (v2.7.6rc1:4913d0e9be30+, Oct 27 2013, 20:52:11)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from majormode.perseus.model.geolocation import GeoPoint
>>> import time
>>>
>>> source = GeoPoint(106.739036, 10.797977)
>>> destination = GeoPoint(106.743325, 10.800195)
>>>
>>> start_time = time.time()
>>> for i in xrange(1000000):
... d = source.great_circle_distance(destination)
...
>>> print time.time() - start_time
5.62202811241
>>> print d
529.424701041
>>>
>>> start_time = time.time()
>>> for i in xrange(1000000):
... d = source.equirectangular_distance(destination)
...
>>> print time.time() - start_time
2.78262710571
>>> print d
529.424701073
>>>
@param other: a ``GeoPoint`` instance.
@return: the great-circle distance, in meters, between this geographic
coordinates to the specified other point. |
def summarize(self, n_timescales_to_report=5):
    """Return a human-readable summary string for the fitted model."""
    # per-component count of nonzero eigenvector entries (active DOF)
    active_counts = np.sum(np.abs(self.eigenvectors_) > 0, axis=0)
    dof = ['%d/%d' % (count, self.n_features)
           for count in active_counts[:n_timescales_to_report]]
    active = '[%s]' % ', '.join(dof)
    template = """K-sparse time-structure Independent Components Analysis (tICA)
------------------------------------------------------------------
n_components : {n_components}
shrinkage : {shrinkage}
lag_time : {lag_time}
kinetic_mapping : {kinetic_mapping}
n_features : {n_features}
Top {n_timescales_to_report} timescales :
{timescales}
Top {n_timescales_to_report} eigenvalues :
{eigenvalues}
Number of active degrees of freedom:
{active}
"""
    fields = dict(
        n_components=self.n_components,
        shrinkage=self.shrinkage_,
        lag_time=self.lag_time,
        kinetic_mapping=self.kinetic_mapping,
        n_features=self.n_features,
        timescales=self.timescales_[:n_timescales_to_report],
        eigenvalues=self.eigenvalues_[:n_timescales_to_report],
        active=active,
        n_timescales_to_report=n_timescales_to_report,
    )
    return template.format(**fields)
def meaning(phrase, source_lang="en", dest_lang="en", format="json"):
    """Query the Glosbe API for the meanings of a word.

    :param phrase: word for which meaning is to be found
    :param source_lang: source language code, defaults to "en"
    :param dest_lang: destination language code, e.g. "fr" for French
    :param format: response structure type, defaults to "json"
    :returns: a json object as str, False if the phrase is invalid
    """
    base_url = Vocabulary.__get_api_link("glosbe")
    url = base_url.format(word=phrase, source_lang=source_lang, dest_lang=dest_lang)
    json_obj = Vocabulary.__return_json(url)
    if not json_obj:
        return False
    try:
        tuc_content = json_obj["tuc"]  # "tuc" is a list of translation entries
    except KeyError:
        return False
    # extract the meanings and wrap them in the requested response format
    meanings_list = Vocabulary.__parse_content(tuc_content, "meanings")
    return Response().respond(meanings_list, format)
:param phrase: word for which meaning is to be found
:param source_lang: Defaults to : "en"
:param dest_lang: Defaults to : "en" For eg: "fr" for french
:param format: response structure type. Defaults to: "json"
:returns: returns a json object as str, False if invalid phrase |
def clean_data(data):
    """Lower-case each sample's text, map invalid characters to 'UNK',
    and return the result as a list of character lists.

    Each element of *data* is expected to be a (label, text) pair; only
    the text (index 1) is kept, the label is discarded.
    """
    # frozenset gives O(1) membership tests vs O(len) scans of the string.
    # NOTE(review): '0' is absent from the digit run ('123456789') -- kept
    # as-is to preserve behavior, but confirm whether that is intentional.
    valid_chars = frozenset('abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; ')
    return [[char if char in valid_chars else 'UNK'
             for char in sample[1].lower()]
            for sample in data]
def get_context(self, arr, expr, context):
    """Build the evaluation context for *expr* over *arr*.

    :param arr: the input array, bound to the expression's single free variable.
    :param expr: the input expression.
    :param context: existing evaluation context; its names (plus ``'i'``)
        are excluded from the expression's free variables.
    :raises ValueError: if the expression does not have exactly one variable.
    """
    reserved = set(context.keys()).union(['i'])
    free_names = [name for name in self.get_expression_names(expr)
                  if name not in reserved]
    if len(free_names) != 1:
        raise ValueError('The expression must have exactly one variable.')
    return {free_names[0]: arr}
:param arr: The input array.
:param expr: The input expression.
:param context: Evaluation context. |
def _add_namespace(self, namespace):
"""Add an included and possibly renamed Namespace."""
src_name = namespace.source_name
if "*" in src_name:
self._regex_map.append((namespace_to_regex(src_name), namespace))
else:
self._add_plain_namespace(namespace) | Add an included and possibly renamed Namespace. |
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone
    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(unicode('US/Eastern')) is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'
    Raises UnknownTimeZoneError if passed an unknown zone.
    >>> try:
    ...     timezone('Asia/Shangri-La')
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown
    >>> try:
    ...     timezone(unicode('\N{TRADE MARK SIGN}'))
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown
    '''
    # UTC is special-cased to a shared singleton instance.
    if zone.upper() == 'UTC':
        return utc
    try:
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    zone = _unmunge_zone(zone)
    # Each zone's tzinfo is built at most once and then served from cache.
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            # Load the compiled zoneinfo data file for this zone.
            fp = open_resource(zone)
            try:
                _tzinfo_cache[zone] = build_tzinfo(zone, fp)
            finally:
                fp.close()
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone] | r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(unicode('US/Eastern')) is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> try:
... timezone('Asia/Shangri-La')
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
>>> try:
... timezone(unicode('\N{TRADE MARK SIGN}'))
... except UnknownTimeZoneError:
... print('Unknown')
Unknown |
def import_complex_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
    """
    Adds to graph the new element that represents BPMN complex gateway.
    In addition to attributes inherited from Gateway type, complex gateway
    has additional attribute default flow (default value - none).

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param process_id: string object, representing an ID of process element,
    :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
        imported flow node,
    :param element: object representing a BPMN XML 'complexGateway' element.
    """
    element_id = element.getAttribute(consts.Consts.id)
    # Import shared gateway attributes first, then layer on the
    # complex-gateway-specific 'default' flow reference (None if absent).
    BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
    diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \
        if element.hasAttribute(consts.Consts.default) else None | Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element. |
def apply_all(self, force=False, quiet=False):
    """ Apply all patches in series file

    :param force: forwarded to ``_apply_patch``
    :param quiet: forwarded to ``_apply_patch``
    :raises AllPatchesApplied: if no unapplied patches remain
    """
    self._check()
    top = self.db.top_patch()
    if top:
        # Something is already applied: only the patches after the
        # current top are candidates.
        patches = self.series.patches_after(top)
    else:
        patches = self.series.patches()
    if not patches:
        raise AllPatchesApplied(self.series, top)
    try:
        for patch in patches:
            self.applying(patch)
            self._apply_patch(patch, force, quiet)
    finally:
        # Persist the applied-patch database even if a patch failed.
        self.db.save()
    self.applied(self.db.top_patch()) | Apply all patches in series file
def delete_resource(self, session, data, api_type, obj_id):
    """
    Delete a resource.

    :param session: SQLAlchemy session
    :param data: JSON data provided with the request
    :param api_type: Type of the resource
    :param obj_id: ID of the resource
    :return: JSONAPIResponse with status code 204 (No Content)
    """
    # NOTE(review): the fetch checks VIEW (not a delete-specific)
    # permission — confirm this is the intended access control.
    resource = self._fetch_resource(session, api_type, obj_id,
                                    Permissions.VIEW)
    # Refuse the delete if it would leave relationships dangling.
    self._check_instance_relationships_for_delete(resource)
    session.delete(resource)
    session.commit()
    response = JSONAPIResponse()
    response.status_code = 204
    return response | Delete a resource.
:param session: SQLAlchemy session
:param data: JSON data provided with the request
:param api_type: Type of the resource
:param obj_id: ID of the resource |
def unpack_rsp(cls, rsp_pb):
    """Convert from PLS response to user response"""
    # Success carries no payload; failure forwards the server's message.
    if rsp_pb.retType == RET_OK:
        return RET_OK, "", None
    return RET_ERROR, rsp_pb.retMsg, None
def process_block(self, current_block, previous_block, text):
    """
    Processes a block and setup its folding info.

    This method call ``detect_fold_level`` and handles most of the tricky
    corner cases so that all you have to do is focus on getting the proper
    fold level foreach meaningful block, skipping the blank ones.

    :param current_block: current block to process
    :param previous_block: previous block
    :param text: current block text
    """
    prev_fold_level = TextBlockHelper.get_fold_lvl(previous_block)
    if text.strip() == '':
        # blank line always have the same level as the previous line
        fold_level = prev_fold_level
    else:
        fold_level = self.detect_fold_level(
            previous_block, current_block)
        # clamp the detected level to the configured maximum depth
        if fold_level > self.limit:
            fold_level = self.limit
    prev_fold_level = TextBlockHelper.get_fold_lvl(previous_block)
    if fold_level > prev_fold_level:
        # apply on previous blank lines
        block = current_block.previous()
        while block.isValid() and block.text().strip() == '':
            TextBlockHelper.set_fold_lvl(block, fold_level)
            block = block.previous()
        # the first non-blank block above becomes the fold trigger
        TextBlockHelper.set_fold_trigger(
            block, True)
    # update block fold level
    if text.strip():
        TextBlockHelper.set_fold_trigger(
            previous_block, fold_level > prev_fold_level)
    TextBlockHelper.set_fold_lvl(current_block, fold_level)
    # user pressed enter at the beginning of a fold trigger line
    # the previous blank line will keep the trigger state and the new line
    # (which actually contains the trigger) must use the prev state (
    # and prev state must then be reset).
    prev = current_block.previous()  # real prev block (may be blank)
    if (prev and prev.isValid() and prev.text().strip() == '' and
            TextBlockHelper.is_fold_trigger(prev)):
        # prev line has the correct trigger fold state
        TextBlockHelper.set_collapsed(
            current_block, TextBlockHelper.is_collapsed(
                prev))
        # make empty line not a trigger
        TextBlockHelper.set_fold_trigger(prev, False)
        TextBlockHelper.set_collapsed(prev, False) | Processes a block and setup its folding info.
This method call ``detect_fold_level`` and handles most of the tricky
corner cases so that all you have to do is focus on getting the proper
fold level foreach meaningful block, skipping the blank ones.
:param current_block: current block to process
:param previous_block: previous block
:param text: current block text |
def make_fileitem_filepath(filepath, condition='contains', negate=False, preserve_case=False):
    """
    Create a node for FileItem/FilePath

    :return: A IndicatorItem represented as an Element node
    """
    # Every FilePath indicator shares the same document/search/type
    # triple; only the content (the path itself) varies per call.
    return ioc_api.make_indicatoritem_node(condition,
                                           'FileItem',
                                           'FileItem/FilePath',
                                           'string',
                                           filepath,
                                           negate=negate,
                                           preserve_case=preserve_case)
:return: A IndicatorItem represented as an Element node |
def main():
    """
    Entry point for GNS3 server
    """
    if not sys.platform.startswith("win"):
        # Daemon mode is only offered on non-Windows platforms.
        if "--daemon" in sys.argv:
            daemonize()
    # NOTE(review): import is deferred until after daemonizing —
    # presumably deliberate so the fork happens before server modules
    # initialize; confirm before reordering.
    from gns3server.run import run
    run() | Entry point for GNS3 server
def sendFuture(self, future):
    """Send a Future to be executed remotely.

    The future is shallow-copied and stripped of its greenlet and
    children before pickling. If pickling fails, the callable is
    replaced by its hash and the send is retried once.
    """
    future = copy.copy(future)
    # Strip fields that cannot (or should not) travel over the wire.
    future.greenlet = None
    future.children = {}
    try:
        if shared.getConst(hash(future.callable), timeout=0):
            # Enforce name reference passing if already shared
            future.callable = SharedElementEncapsulation(hash(future.callable))
        self.socket.send_multipart([
            TASK,
            pickle.dumps(future.id, pickle.HIGHEST_PROTOCOL),
            pickle.dumps(future, pickle.HIGHEST_PROTOCOL),
        ])
    except (pickle.PicklingError, TypeError) as e:
        # If element not picklable, pickle its name
        # TODO: use its fully qualified name
        scoop.logger.warn("Pickling Error: {0}".format(e))
        future.callable = hash(future.callable)
        self.socket.send_multipart([
            TASK,
            pickle.dumps(future.id, pickle.HIGHEST_PROTOCOL),
            pickle.dumps(future, pickle.HIGHEST_PROTOCOL),
        ]) | Send a Future to be executed remotely.
def append_response(self, response):
    """Append the response to the stack of responses.

    Any ``Warning`` header carried by the response is logged.

    :param tornado.httpclient.HTTPResponse response: The HTTP response
    """
    self._responses.append(response)
    if 'Warning' in response.headers:
        # The attempt number is the current depth of the response stack.
        LOGGER.warning(
            'HTTP %s %s Warning (%s): %s (attempt %s)',
            response.request.method, response.request.url,
            response.code, response.headers['Warning'],
            len(self._responses)) | Append the response to the stack of responses.
:param tornado.httpclient.HTTPResponse response: The HTTP response |
def split_size(size):
    '''Split the file size into several chunks.

    Returns a list of (offset, length) pairs; every chunk is CHUNK_SIZE
    bytes except possibly the last one.
    '''
    # Ceiling division: number of CHUNK_SIZE-sized pieces needed.
    cnt = (size + CHUNK_SIZE - 1) // CHUNK_SIZE
    chunks = []
    for idx in range(cnt):
        offset = idx * CHUNK_SIZE
        length = size - offset if idx == cnt - 1 else CHUNK_SIZE
        chunks.append((offset, length))
    return chunks
def writeUTFBytes(self, value):
    """
    Writes a UTF-8 string. Similar to L{writeUTF}, but does
    not prefix the string with a 16-bit length word.

    @type value: C{str}
    @param value: The string value to be written.
    """
    # Byte strings are decoded as UTF-8; unicode passes through as-is.
    val = value if isinstance(value, unicode) else unicode(value, 'utf8')
    self.stream.write_utf8_string(val)
not prefix the string with a 16-bit length word.
@type value: C{str}
@param value: The string value to be written. |
def iter_doc_objs(self, **kwargs):
    """Returns a pair: (doc_id, nexson_blob)
    for each document in this repository.
    Order is arbitrary.

    Aliases are excluded, and documents whose contents fail to parse
    as JSON are silently skipped (best-effort iteration).
    """
    _LOG = get_logger('TypeAwareGitShard')
    try:
        for doc_id, fp in self.iter_doc_filepaths(**kwargs):
            if not self._is_alias(doc_id):
                # TODO:hook for type-specific parser?
                with codecs.open(fp, 'r', 'utf-8') as fo:
                    try:
                        nex_obj = anyjson.loads(fo.read())
                        yield (doc_id, nex_obj)
                    except Exception:
                        # deliberately best-effort: skip unparseable docs
                        pass
    except Exception as x:
        f = 'iter_doc_filepaths FAILED with this error:\n{}'
        f = f.format(str(x))
        _LOG.warn(f) | Returns a pair: (doc_id, nexson_blob)
for each document in this repository.
Order is arbitrary. |
def sample_bitstrings(self, n_samples):
    """
    Sample bitstrings from the distribution defined by the wavefunction.

    :param n_samples: The number of bitstrings to sample
    :return: An array of shape (n_samples, n_qubits)
    """
    n_qubits = len(self)
    # Enumerate every basis state as a row of bits, e.g. for 2 qubits:
    # [[0,0],[0,1],[1,0],[1,1]].
    basis_states = np.array(list(itertools.product((0, 1), repeat=n_qubits)))
    # Draw basis-state indices according to the measurement probabilities.
    sampled = np.random.choice(2 ** n_qubits, n_samples, p=self.probabilities())
    return basis_states[sampled, :]
:param n_samples: The number of bitstrings to sample
:return: An array of shape (n_samples, n_qubits) |
def call(self, event, *event_args):
    """Call the single registered listener for ``event``.

    The listener will be called with any extra arguments passed to
    :meth:`call` first, and then the extra arguments passed to :meth:`on`.
    Raises :exc:`AssertionError` if there is none or multiple listeners
    for ``event``. Returns the listener's return value on success.
    """
    # XXX It would be a lot better for debugging if this error was raised
    # when registering the second listener instead of when the event is
    # emitted.
    count = self.num_listeners(event)
    assert count == 1, (
        'Expected exactly 1 event listener, found %d listeners' % count)
    (listener,) = self._listeners[event]
    combined = tuple(event_args) + tuple(listener.user_args)
    return listener.callback(*combined)
The listener will be called with any extra arguments passed to
:meth:`call` first, and then the extra arguments passed to :meth:`on`
Raises :exc:`AssertionError` if there is none or multiple listeners for
``event``. Returns the listener's return value on success. |
def parse_config_files_and_bindings(config_files,
                                    bindings,
                                    finalize_config=True,
                                    skip_unknown=False):
    """Parse a list of config files followed by extra Gin bindings.

    Equivalent to calling ``gin.parse_config_file`` on each entry of
    ``config_files``, then ``gin.parse_config(bindings)``, and finally
    ``gin.finalize()`` when ``finalize_config`` is True.

    Args:
      config_files: A list of paths to the Gin config files (or None).
      bindings: A list of individual parameter binding strings (or None).
      finalize_config: Whether to finalize the config after parsing and
        binding (defaults to True).
      skip_unknown: A boolean indicating whether unknown configurables and
        imports should be skipped instead of causing errors (alternatively
        a list of configurable names to skip if unknown). See
        `parse_config` for additional details.
    """
    # Treat missing inputs as "nothing to parse".
    if config_files is None:
        config_files = []
    if bindings is None:
        bindings = ''
    for path in config_files:
        parse_config_file(path, skip_unknown)
    parse_config(bindings, skip_unknown)
    if finalize_config:
        finalize()
This function is equivalent to:
for config_file in config_files:
gin.parse_config_file(config_file, skip_configurables)
gin.parse_config(bindings, skip_configurables)
if finalize_config:
gin.finalize()
Args:
config_files: A list of paths to the Gin config files.
bindings: A list of individual parameter binding strings.
finalize_config: Whether to finalize the config after parsing and binding
(defaults to True).
skip_unknown: A boolean indicating whether unknown configurables and imports
should be skipped instead of causing errors (alternatively a list of
configurable names to skip if unknown). See `parse_config` for additional
details. |
def z2r(z):
    """
    Function that calculates the inverse Fisher z-transformation

    Parameters
    ----------
    z : int or ndarray
        Fishers z transformed correlation value

    Returns
    ----------
    result : int or ndarray
        Correlation value
    """
    # tanh written out explicitly: (e^{2z} - 1) / (e^{2z} + 1).
    # Warnings from overflow/0-div on extreme inputs are suppressed.
    with np.errstate(invalid='ignore', divide='ignore'):
        e2z = np.exp(2 * z)
        return (e2z - 1) / (e2z + 1)
Parameters
----------
z : int or ndarray
Fishers z transformed correlation value
Returns
----------
result : int or ndarray
Correlation value |
def send_dm_sos(self, message: str) -> None:
    """
    Send DM to owner if something happens.

    Silently does nothing (beyond an error log) when no owner handle is
    configured; Twitter API errors are caught and logged, not raised.

    :param message: message to send to owner.
    :returns: None.
    """
    if self.owner_handle:
        try:
            # twitter changed the DM API and tweepy (as of 2019-03-08)
            # has not adapted.
            # fixing with
            # https://github.com/tweepy/tweepy/issues/1081#issuecomment-423486837
            owner_id = self.api.get_user(screen_name=self.owner_handle).id
            # Payload follows Twitter's message_create event schema.
            event = {
                "event": {
                    "type": "message_create",
                    "message_create": {
                        "target": {
                            "recipient_id": f"{owner_id}",
                        },
                        "message_data": {
                            "text": message
                        }
                    }
                }
            }
            self._send_direct_message_new(event)
        except tweepy.TweepError as de:
            self.lerror(f"Error trying to send DM about error!: {de}")
    else:
        self.lerror("Can't send DM SOS, no owner handle.") | Send DM to owner if something happens.
:param message: message to send to owner.
:returns: None. |
def value(self):
    """
    Take last known value as the value
    """
    # Fall back to the string "NaN" when no value has been recorded yet
    # (IndexError) or the stored value is malformed (ValueError).
    try:
        return self.lastValue
    except (IndexError, ValueError):
        return "NaN"
def module2md(self, module):
    """Takes an imported module object and create a Markdown string containing functions and classes.

    :param module: an imported module object.
    :return: Markdown string built from MODULE_TEMPLATE with the module's
        classes, functions and global variables, each ordered by source
        line number.
    """
    modname = module.__name__
    path = self.get_src_path(module, append_base=False)
    path = "[{}]({})".format(path, os.path.join(self.github_link, path))
    found = set()  # names already documented as classes/functions
    # --- Classes defined in this module -------------------------------
    classes = []
    line_nos = []
    for name, obj in getmembers(module, inspect.isclass):
        # handle classes
        found.add(name)
        if not name.startswith("_") and hasattr(obj, "__module__") and obj.__module__ == modname:
            classes.append(self.class2md(obj))
            line_nos.append(self.get_line_no(obj) or 0)
    classes = order_by_line_nos(classes, line_nos)
    # --- Functions (grouped so aliases share a single entry) ----------
    # Since functions can have multiple aliases.
    func2names = defaultdict(list)
    for name, obj in getmembers(module, inspect.isfunction):
        func2names[obj].append(name)
    functions = []
    line_nos = []
    for obj in func2names:
        names = func2names[obj]
        found.update(names)
        # Include if within module or included modules within __init__.py and exclude from global variables
        is_module_within_init = '__init__.py' in path and obj.__module__.startswith(modname)
        if is_module_within_init:
            found.add(obj.__module__.replace(modname + '.', ''))
        if hasattr(obj, "__module__") and (obj.__module__ == modname or is_module_within_init):
            names = list(filter(lambda name: not name.startswith("_"), names))
            if len(names) > 0:
                functions.append(self.func2md(obj, names=names))
                line_nos.append(self.get_line_no(obj) or 0)
    functions = order_by_line_nos(functions, line_nos)
    # --- Remaining public names become "Global Variables" -------------
    variables = []
    line_nos = []
    for name, obj in module.__dict__.items():
        if not name.startswith("_") and name not in found:
            if hasattr(obj, "__module__") and obj.__module__ != modname:
                continue
            if hasattr(obj, "__name__") and not obj.__name__.startswith(modname):
                continue
            comments = inspect.getcomments(obj)
            comments = ": %s" % comments if comments else ""
            variables.append("- **%s**%s" % (name, comments))
            line_nos.append(self.get_line_no(obj) or 0)
    variables = order_by_line_nos(variables, line_nos)
    if variables:
        new_list = ["**Global Variables**", "---------------"]
        new_list.extend(variables)
        variables = new_list
    string = MODULE_TEMPLATE.format(path=path,
                                    global_vars="\n".join(variables) if variables else "",
                                    functions="\n".join(functions) if functions else "",
                                    classes="".join(classes) if classes else "")
    return string | Takes an imported module object and create a Markdown string containing functions and classes.
def load(self):
    """Load each path in order, merging later files over earlier ones.

    Paths already loaded are remembered in ``self.paths_loaded`` and
    skipped on subsequent calls. Missing files are ignored —
    ``*.local.yml`` silently, anything else with a console message.
    """
    data = self.dict_class()
    for path in self.paths:
        if path in self.paths_loaded:
            continue
        try:
            with open(path, 'r') as fh:
                # safe_load: config files should not be able to construct
                # arbitrary Python objects. (yaml.load with no explicit
                # Loader is also an error in PyYAML >= 6.)
                path_data = yaml.safe_load(fh)
            data = dict_merge(data, path_data)
            self.paths_loaded.add(path)
        except IOError:
            # TODO: Log this correctly once logging is implemented
            if not path.endswith('.local.yml'):
                print('CONFIG NOT FOUND: %s' % path)
    self.data = data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.