content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def create_observed_stats_file(observed_df, param_num, path_run_ABC):
    """Write 'observed' stats files from a single simulation's dataframe.

    Writes two tab-separated files into ``path_run_ABC``: one with only the
    summary statistics, and one with both parameters and statistics.

    :param observed_df: dataframe of parameters and summary stats from one simulation.
    :param param_num: number of leading parameter columns in ``observed_df``.
    :param path_run_ABC: full or relative path to the directory to run ABC in.
    :return: observed_stats_df: dataframe of the simulation results without
        the parameter columns.
    """
    # Stats-only view: drop the leading parameter columns.
    observed_stats_df = observed_df.iloc[:, param_num:]
    observed_stats_file_name = '{}/results_observed.txt'.format(path_run_ABC)
    observed_param_stats_file_name = '{}/results_param_observed.txt'.format(path_run_ABC)
    observed_stats_df.to_csv(observed_stats_file_name, sep='\t', index=False)
    observed_df.to_csv(observed_param_stats_file_name, sep='\t', index=False)
    # Bug fix: previously ended with a bare `return`, so the documented
    # observed_stats_df was never actually returned.
    return observed_stats_df
def setup_source_space(subject, fname=True, spacing='oct6', surface='white',
                       overwrite=False, subjects_dir=None, add_dist=None,
                       verbose=None):
    """Setup a source space with subsampling

    Parameters
    ----------
    subject : str
        Subject to process.
    fname : str | None | bool
        Filename to use. If True, a default name will be used. If None,
        the source space will not be saved (only returned).
    spacing : str
        The spacing to use. Can be ``'ico#'`` for a recursively subdivided
        icosahedron, ``'oct#'`` for a recursively subdivided octahedron,
        or ``'all'`` for all points.
    surface : str
        The surface to use.
    overwrite: bool
        If True, overwrite output file (if it exists).
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    add_dist : bool
        Add distance and patch information to the source space. This takes some
        time so precomputing it is recommended. The default is currently False
        but will change to True in release 0.9.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    src : list
        The source space for each hemisphere.
    """
    if add_dist is None:
        msg = ("The add_dist parameter to mne.setup_source_space currently "
               "defaults to False, but the default will change to True in "
               "release 0.9. Specify the parameter explicitly to avoid this "
               "warning.")
        logger.warning(msg)
    # Record the call (stored in the output file for reproducibility).
    cmd = ('setup_source_space(%s, fname=%s, spacing=%s, surface=%s, '
           'overwrite=%s, subjects_dir=%s, add_dist=%s, verbose=%s)'
           % (subject, fname, spacing, surface, overwrite,
              subjects_dir, add_dist, verbose))
    # check to make sure our parameters are good, parse 'spacing'
    space_err = ('"spacing" must be a string with values '
                 '"ico#", "oct#", or "all", and "ico" and "oct"'
                 'numbers must be integers')
    if not isinstance(spacing, string_types) or len(spacing) < 3:
        raise ValueError(space_err)
    if spacing == 'all':
        stype = 'all'
        sval = ''
    elif spacing[:3] == 'ico':
        stype = 'ico'
        sval = spacing[3:]
    elif spacing[:3] == 'oct':
        stype = 'oct'
        sval = spacing[3:]
    else:
        raise ValueError(space_err)
    if stype in ['ico', 'oct']:
        # Bug fixes: the original used a bare `except:` (which would also
        # swallow KeyboardInterrupt etc.) and contained an unreachable
        # `stype == 'spacing'` float branch -- stype can only be
        # 'all'/'ico'/'oct' at this point.
        try:
            sval = int(sval)
        except ValueError:
            raise ValueError(space_err)
    subjects_dir = get_subjects_dir(subjects_dir)
    surfs = [op.join(subjects_dir, subject, 'surf', hemi + surface)
             for hemi in ['lh.', 'rh.']]
    bem_dir = op.join(subjects_dir, subject, 'bem')
    # Both hemisphere surfaces must exist before we do any work.
    for surf, hemi in zip(surfs, ['LH', 'RH']):
        if surf is not None and not op.isfile(surf):
            raise IOError('Could not find the %s surface %s'
                          % (hemi, surf))
    if not (fname is True or fname is None or isinstance(fname, string_types)):
        raise ValueError('"fname" must be a string, True, or None')
    if fname is True:
        extra = '%s-%s' % (stype, sval) if sval != '' else stype
        fname = op.join(bem_dir, '%s-%s-src.fif' % (subject, extra))
    if fname is not None and op.isfile(fname) and overwrite is False:
        raise IOError('file "%s" exists, use overwrite=True if you want '
                      'to overwrite the file' % fname)
    logger.info('Setting up the source space with the following parameters:\n')
    logger.info('SUBJECTS_DIR = %s' % subjects_dir)
    logger.info('Subject = %s' % subject)
    logger.info('Surface = %s' % surface)
    if stype == 'ico':
        src_type_str = 'ico = %s' % sval
        logger.info('Icosahedron subdivision grade %s\n' % sval)
    elif stype == 'oct':
        src_type_str = 'oct = %s' % sval
        logger.info('Octahedron subdivision grade %s\n' % sval)
    else:
        src_type_str = 'all'
        logger.info('Include all vertices\n')
    # Create the fif file
    if fname is not None:
        logger.info('>>> 1. Creating the source space file %s...' % fname)
    else:
        logger.info('>>> 1. Creating the source space...\n')
    # mne_make_source_space ... actually make the source spaces
    src = []
    # pre-load ico/oct surf (once) for speed, if necessary
    if stype in ['ico', 'oct']:
        # from mne_ico_downsample.c
        if stype == 'ico':
            logger.info('Doing the icosahedral vertex picking...')
            ico_surf = _get_ico_surface(sval)
        else:
            logger.info('Doing the octahedral vertex picking...')
            ico_surf = _tessellate_sphere_surf(sval)
    else:
        ico_surf = None
    for hemi, surf in zip(['lh', 'rh'], surfs):
        logger.info('Loading %s...' % surf)
        s = _create_surf_spacing(surf, hemi, subject, stype, sval, ico_surf,
                                 subjects_dir)
        logger.info('loaded %s %d/%d selected to source space (%s)'
                    % (op.split(surf)[1], s['nuse'], s['np'], src_type_str))
        src.append(s)
        logger.info('')  # newline after each hemisphere's summary
    # Fill in source space info
    hemi_ids = [FIFF.FIFFV_MNE_SURF_LEFT_HEMI, FIFF.FIFFV_MNE_SURF_RIGHT_HEMI]
    for s, s_id in zip(src, hemi_ids):
        # Add missing fields
        s.update(dict(dist=None, dist_limit=None, nearest=None, type='surf',
                      nearest_dist=None, pinfo=None, patch_inds=None, id=s_id,
                      coord_frame=np.array((FIFF.FIFFV_COORD_MRI,), np.int32)))
        # Convert vertex positions from mm to m.
        s['rr'] /= 1000.0
        # Drop per-triangle data not needed in the saved source space.
        del s['tri_area']
        del s['tri_cent']
        del s['tri_nn']
        del s['neighbor_tri']
    # upconvert to object format from lists
    src = SourceSpaces(src, dict(working_dir=os.getcwd(), command_line=cmd))
    if add_dist:
        add_source_space_distances(src, verbose=verbose)
    # write out if requested, then return the data
    if fname is not None:
        write_source_spaces(fname, src)
        logger.info('Wrote %s' % fname)
    logger.info('You are now one step closer to computing the gain matrix')
    return src
def install_volta() -> None:
    """Install volta if missing, and set up node and npm."""
    shaper.download.install_with_remote_script(
        "volta", "https://get.volta.sh", ["--skip-setup"]
    )
    # Expose volta's home and binaries to this process's environment.
    home = pathlib.Path.home()
    volta_home = f"{home}/.volta"
    os.environ["VOLTA_HOME"] = volta_home
    os.environ["PATH"] = f"{volta_home}/bin:{os.environ.get('PATH')}"
    # Install node via volta only if it is not already runnable.
    try:
        subprocess.check_output(["node", "-v"])
    except (subprocess.CalledProcessError, FileNotFoundError):
        subprocess.check_call(["volta", "install", "node"])
def validate_model(model_data, model_schema, enum_info):
    """Ensure each value is valid for its field type, scaling values as needed.

    Mutates ``model_data`` in place: floats are scaled from the label's
    display units to base units, integers are coerced, other values are
    escaped via ``_escape``.

    :param model_data: dict of field name -> raw value; updated in place.
    :param model_schema: dict of field name -> [label, type] or
        [label, type, default].
    :param enum_info: dict of enum type name -> allowed string values.
    :raises Exception: if a field has no value and no schema default.
    :raises AssertionError: if an enum value is not in the enumeration.
    """
    for k in model_schema:
        label = model_schema[k][0]
        field_type = model_schema[k][1]
        if k in model_data:
            value = model_data[k]
        elif len(model_schema[k]) > 2:
            value = model_schema[k][2]
        else:
            raise Exception('no value for field "{}" and no default value in schema'.format(k))
        if field_type in enum_info:
            if str(value) not in enum_info[field_type]:
                # Check a comma-delimited string against the enumeration
                for item in re.split(r'\s*,\s*', str(value)):
                    if item not in enum_info[field_type]:
                        # Bug fix: was an always-failing `assert` nested inside
                        # the same (negated) condition; raise directly so the
                        # check survives `python -O` as well.
                        raise AssertionError(
                            '{}: invalid enum "{}" value for field "{}"'.format(item, field_type, k))
        elif field_type == 'Float':
            if not value:
                value = 0
            v = float(value)
            # Scale from the label's display units down to base units.
            if re.search(r'\[m(m|rad)]', label):
                v /= 1000
            elif re.search(r'\[n(m|rad)]', label) or re.search(r'\[nm/pixel\]', label):
                v /= 1e09
            elif re.search(r'\[ps]', label):
                v /= 1e12
            #TODO(pjm): need to handle unicode in label better (mu)
            elif re.search('\\[\xb5(m|rad)]', label) or re.search(r'\[mm-mrad]', label):
                v /= 1e6
            model_data[k] = float(v)
        elif field_type == 'Integer':
            if not value:
                value = 0
            model_data[k] = int(value)
        elif value is None:
            # value is already None, do not convert
            pass
        else:
            model_data[k] = _escape(value)
def get_my_ip() -> str:
    """Return the current public IP address as reported by checkip.dyndns.com.

    Performs a network request. Raises AttributeError if the response body
    does not contain an address (regex search returns None).
    """
    # Bug fixes: the return annotation said None although a str is returned,
    # and the regex dots were unescaped (matching any character).
    url = "http://checkip.dyndns.com/"
    return re.compile(r"Address: (\d+\.\d+\.\d+\.\d+)").search(get(url).text).group(1)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples; computed from the run if omitted.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be calculated.
    param_ind: int, optional
        Index of the column of ns_run['theta'] holding the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight before exponentiating to avoid overflow,
    # then normalise so the weights sum to one.
    weights = np.exp(logw - logw.max())
    weights = weights / weights.sum()
    squares = ns_run['theta'][:, param_ind] ** 2
    return (weights * squares).sum()
def get_historical_data(Code=None):
    """
    Fetch six years of daily historical data for a given stock code.

    Args:
        Code (str): A stock code corresponding to the historical data.
    Returns:
        A pandas DataFrame of the historical data for the stock code.
    Raises:
        ValueError: If no stock code is supplied.
    """
    if Code is None:
        raise ValueError("""Specify a stock code. """)
    raw = share.Share(Code).get_historical(
        share.PERIOD_TYPE_YEAR,
        6,
        share.FREQUENCY_TYPE_DAY,
        1
    )
    # Rename the first column to "Date" and the close column.
    headers = list(raw.keys())
    headers[0] = "Date"
    headers[-2] = "Adj. Close"  # TODO: it's not actually Adjusted Close, but Close.
    frame = pd.DataFrame(list(zip(*raw.values())), columns=headers)
    # Timestamps arrive in milliseconds; keep only the calendar date.
    frame["Date"] = pd.to_datetime(frame["Date"], unit="ms").dt.date
    return frame
def match_login_url_with(username, default="https://foo.bar/login"):
    """
    Match a given username with the corresponding login URL.

    :param username: username of user. type str
    :returns URL: login URL for user (or ``default`` if unknown). type str
    """
    known_urls = {
        "yelluw": "https://yelluw.com/login",
        "Pablo": "https://pablojuan.com/login",
    }
    return matches(known_urls, username, default=default)
def odict_1to1(from_sp, to_sp):
    """
    Filtered flat odict keeping only one-to-one orthologs.

    A pair is kept only when the forward mapping has exactly one target
    and the reverse mapping of that target has exactly one source.
    """
    forward = odict(from_sp, to_sp)
    backward = odict(to_sp, from_sp)
    result = {}
    for key, targets in forward.items():
        if len(targets) != 1:
            continue
        target = list(targets)[0]
        if len(backward[target]) == 1:
            result[key] = target
    return result
def find_devices(vendor=None, product=None, serial_number=None, custom_match=None, **kwargs):
    """Find connected USB devices matching certain keywords.

    Wildcards can be used for vendor, product and serial_number.

    :param vendor: name or id of the vendor (manufacturer)
    :param product: name or id of the product
    :param serial_number: serial number.
    :param custom_match: callable returning True or False that takes a device as only input.
    :param kwargs: other properties to match. See usb.core.find
    :return:
    """
    kwargs = kwargs or {}
    # String identifiers are matched against device info by wildcard pattern;
    # numeric ids are passed straight through to usb.core.find.
    patterns = {}
    if isinstance(vendor, str):
        patterns['manufacturer'] = vendor
    elif vendor is not None:
        kwargs['idVendor'] = vendor
    if isinstance(product, str):
        patterns['product'] = product
    elif product is not None:
        kwargs['idProduct'] = product
    if serial_number:
        patterns['serial_number'] = str(serial_number)
    if not patterns:
        return usb.core.find(find_all=True, custom_match=custom_match, **kwargs)

    def matcher(dev):
        # The user-supplied predicate must pass first, then every pattern.
        if custom_match is not None and not custom_match(dev):
            return False
        info = DeviceInfo.from_device(dev)
        return all(fnmatch(getattr(info, attr).lower(), pattern.lower())
                   for attr, pattern in patterns.items())

    return usb.core.find(find_all=True, custom_match=matcher, **kwargs)
def get_static_graph(app_name=None, app_dict=None, *args, **kwargs):
    """Build the graph while explicitly avoiding request and user context."""
    return get_graph(app_name=app_name, app_dict=app_dict, request=None)
def primitive_triplets(limit):
    """Yield primitive Pythagorean triples (a, b, c) with a <= b.

    See Euclid's formula
    (https://en.wikipedia.org/wiki/Pythagorean_triple#Generating_a_triple)
    for more information.
    """
    for m, n in euclidian_coprimes(limit):
        odd_leg = m * m - n * n
        even_leg = 2 * m * n
        hypotenuse = m * m + n * n
        # Order the legs so the smaller always comes first.
        small, large = sorted((odd_leg, even_leg))
        yield small, large, hypotenuse
def tune(data_path, labels, tempdir, batch_size, weights_out):
    """ [NOT IMPLEMENTED] Update network with new data.
    Finetune the network for a different task by keeping the
    trained weights, replacing the top layer with one that outputs
    the new classes, and re-training for a few epochs to have the
    model output the new classes instead.
    """
    # Placeholder: report that this subcommand is not implemented yet.
    message = "Finetuning an algorithm to new data is not yet implemented."
    click.echo(message)
def run_post_install(post_install: List[Text], source: Location):
    """
    Run each post-install script against the given source location.
    """
    for script_name in post_install:
        source.run_script(script_name)
def test_initial_position_zero():
    """Tests that the inverted path initial position is the right one."""
    batch_size, stream_len, channels = 10, 10, 3
    path = torch.rand((batch_size, stream_len, channels))
    for depth in (2, 4, 6):
        sig = signatory.signature(path, depth)
        # Without an explicit start, the inverted path begins at the origin.
        recovered = signatory.invert_signature(sig, depth, channels)
        assert torch.equal(recovered[:, 0, :], torch.zeros(batch_size, channels))
        # An explicit initial position must be reproduced exactly.
        start = torch.rand((batch_size, channels))
        recovered = signatory.invert_signature(sig, depth, channels, initial_position=start)
        assert torch.equal(recovered[:, 0, :], start)
def downsample_spectrum(ar_wavelength, ar_flux, ar_ivar, scale):
    """Downsample a spectrum by binning ``scale`` adjacent pixels.

    Wavelengths are averaged per bin; fluxes are combined with an
    inverse-variance weighted mean; inverse variances are summed.
    Trailing pixels that do not fill a whole bin are discarded.

    :type ar_wavelength: np.ndarray
    :type ar_flux: np.ndarray
    :type ar_ivar: np.ndarray
    :type scale: int
    :return: (np.ndarray, np.ndarray, np.ndarray)
    """
    num_bins = ar_wavelength.size // scale
    usable = num_bins * scale
    wl_bins = ar_wavelength[:usable].reshape((num_bins, scale))
    flux_bins = ar_flux[:usable].reshape((num_bins, scale))
    ivar_bins = ar_ivar[:usable].reshape((num_bins, scale))
    wl_small = np.nanmean(wl_bins, axis=1)
    ivar_small = np.nansum(ivar_bins, axis=1)
    # A bin whose total ivar is zero yields NaN flux; suppress the warning.
    with np.errstate(invalid='ignore'):
        flux_small = np.nansum(flux_bins * ivar_bins, axis=1) / ivar_small
    return wl_small, flux_small, ivar_small
def format_date(date):
    """
    Format date for creation of Twitter URL and Facebook API.

    Formats a datetime object as '%Y-%m-%d', e.g. '2018-01-21'.

    Parameters
    ----------
    date : datetime
        date to be formatted

    Returns
    -------
    str
        date in string representation
    """
    # Equivalent to date.strftime('%Y-%m-%d') via datetime.__format__.
    return '{:%Y-%m-%d}'.format(date)
def basic_token(username, password):
    """Generate the Authorization token for Resource Orchestrator (SO-ub container).

    Args:
        username (str): the SO-ub username
        password (str): the SO-ub password

    Returns:
        str: the Basic token

    Raises:
        TypeError: if username or password is not a str.
    """
    for name, value in (("username", username), ("password", password)):
        if not isinstance(value, str):
            raise TypeError("The given type of {} is `{}`. Expected str.".format(name, type(value)))
    # Base64-encode "username:password" and return it as text.
    raw = "{}:{}".format(username, password).encode()
    return base64.b64encode(raw).decode()
def read_alias(alias_csv_path):
    """Read alias.csv at the specified path.

    Returns a dict mapping from alias to monster id.
    """
    mapping = {}
    with open(alias_csv_path) as handle:
        for alias, monster_id in csv.reader(handle):
            mapping[alias] = int(monster_id)
    return mapping
def makedirpath(fpath: str):
    """Create the directory portion of *fpath* if it does not already exist."""
    parent = os.path.dirname(fpath)
    # A bare filename has no directory component; nothing to create.
    if not parent:
        return
    os.makedirs(parent, exist_ok=True)
async def get_pipeline_run_node_steps(request: web.Request, organization, pipeline, run, node) -> web.Response:
    """get_pipeline_run_node_steps

    Retrieve run node steps details for an organization pipeline.

    :param organization: Name of the organization
    :type organization: str
    :param pipeline: Name of the pipeline
    :type pipeline: str
    :param run: Name of the run
    :type run: str
    :param node: Name of the node
    :type node: str
    """
    # Stub implementation: always acknowledge with an empty 200 response.
    response = web.Response(status=200)
    return response
def query_available_collections(opts: Options) -> pd.DataFrame:
    """Search for the available collections."""
    # Ask the server for its collections via a GraphQL query, then
    # convert the JSON reply into a dataframe.
    reply = query_server(opts.web, create_collections_query())
    collections = json_properties_to_dataframe(reply["collections"])
    print("Available collections:\n", collections)
    return collections
def _create_agent_object_list(
    trial_list: List[List[Dict[str, Any]]],
    agent_object_config_list: List[ObjectConfigWithMaterial],
    unit_size: Tuple[float, float]
) -> List[Dict[str, Any]]:
    """Create and return the MCS scene's agent object list using the given
    trial list from the JSON file data.

    :param trial_list: per-trial lists of frame dicts; each frame's 'agent'
        entry is read as [coords, radius, ...] (see indexing below).
    :param agent_object_config_list: shape/material/scale configs; only the
        first entry is used, since a single unchanging agent is assumed.
    :param unit_size: grid-to-world scale factors passed through to helpers.
    :return: a single-element list holding the MCS agent object dict.
    """
    agent_object_list = []
    # Retrieve the agent data from the first frame of the first trial.
    # Assume only one agent and the agent will never change shape/color.
    json_agent = trial_list[0][0]['agent']
    json_coords = json_agent[0]
    json_radius = json_agent[1]
    # Width/height in grid units are twice the radius.
    json_size = [json_radius * 2, json_radius * 2]
    # Create the MCS agent object.
    config_with_material = agent_object_config_list[0]
    agent_object = _create_object(
        'agent_',
        config_with_material.object_type,
        config_with_material.material,
        [config_with_material.center_y, config_with_material.scale_y],
        [config_with_material.scale_x, config_with_material.scale_z],
        json_coords,
        json_size,
        unit_size
    )
    agent_object[tags.SCENE.UNTRAINED_SHAPE] = config_with_material.untrained
    agent_object_list.append(agent_object)
    # Remove the agent's first appearance (we will override it later).
    agent_object['shows'] = []
    agent_object['boundsAtStep'] = []
    # Add data for the agent's movement across the frames to each step.
    step = 0
    for trial in trial_list:
        for frame in trial:
            json_agent = frame['agent']
            json_coords = json_agent[0]
            json_radius = json_agent[1]
            json_size = [json_radius * 2, json_radius * 2]
            # Move the agent to its new position for the step.
            agent_object['shows'].append(_create_show(
                step,
                agent_object['configHeight'],
                agent_object['configSize'],
                json_coords,
                json_size,
                unit_size
            ))
            step += 1
            agent_object['boundsAtStep'].append(
                agent_object['shows'][-1]['boundingBox']
            )
        # Add 1 for the EndHabituation action step at the end of the trial.
        # The agent keeps its last position during that extra step.
        step += 1
        agent_object['boundsAtStep'].append(
            agent_object['shows'][-1]['boundingBox']
        )
    # Remove the scale from each element in 'shows' except for the first, or
    # it will really mess up the simulation.
    for show in agent_object['shows'][1:]:
        del show['scale']
    return agent_object_list
async def send_data_controller_details_message_handler(request: web.BaseRequest):
    """Send data controller details message to remote agent hosted by Data Controller."""
    context = request.app["request_context"]
    connection_id = request.match_info["connection_id"]
    # Initialise MyData DID Manager.
    manager: ADAManager = ADAManager(context=context)
    try:
        await manager.send_data_controller_details_message(connection_id)
    except (ConnectionManagerError, BaseModelError, ADAManagerError) as err:
        # Known domain errors map to a 400 with their rolled-up reason.
        raise web.HTTPBadRequest(reason=err.roll_up) from err
    except Exception as err:
        # Anything unexpected becomes a 500.
        raise web.HTTPInternalServerError(reason=str(err)) from err
    return web.json_response({}, status=200)
def test_llhelper(monkeypatch):
    """Show how to get function pointers used in type slots"""
    FT = lltype.FuncType([], lltype.Signed)
    FTPTR = lltype.Ptr(FT)
    # Replace the cpyext wrapper factory with a trivial closure so the slot
    # call goes straight to the Python-level callable.
    def make_wrapper(self, space):
        def wrapper():
            return self.callable(space)
        return wrapper
    monkeypatch.setattr(pypy.module.cpyext.api.ApiFunction, '_make_wrapper', make_wrapper)
    # Memoized per (space, typedef): each typedef gets its own slot function
    # returning its constant value.
    @specialize.memo()
    def get_tp_function(space, typedef):
        @slot_function([], lltype.Signed, error=-1)
        def slot_tp_function(space):
            return typedef.value
        api_func = slot_tp_function.api_func
        return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
    # Minimal stand-ins for the interpreter's space/typedef/type objects.
    class Space:
        _cache = {}
        @specialize.memo()
        def fromcache(self, key):
            try:
                return self._cache[key]
            except KeyError:
                result = self._cache[key] = self.build(key)
                return result
        def _freeze_(self):
            # Treated as a prebuilt constant by the RPython translator.
            return True
    class TypeDef:
        def __init__(self, value):
            self.value = value
        def _freeze_(self):
            return True
    class W_Type:
        def __init__(self, typedef):
            self.instancetypedef = typedef
        def _freeze(self):
            # Drop any cached funcptr so the instance is not frozen with it.
            try:
                del self.funcptr
            except AttributeError:
                pass
            return False
    w_type1 = W_Type(TypeDef(123))
    w_type2 = W_Type(TypeDef(456))
    space = Space()
    def run(x):
        if x:
            w_type = w_type1
        else:
            w_type = w_type2
        typedef = w_type.instancetypedef
        w_type.funcptr = get_tp_function(space, typedef)()
        return w_type.funcptr()
    # Translate `run` to C and check each type's slot returns its own value.
    fn = compile(run, [bool])
    assert fn(True) == 123
    assert fn(False) == 456
def get_chronicle_http_client(account_info):
    """
    Return an http client that is authorized with the given credentials
    using oauth2client or google-auth.
    """
    try:
        creds = service_account.Credentials.from_service_account_info(
            account_info, scopes=current_app.config['AUTH_SCOPES']
        )
    except ValueError as error:
        # Malformed service-account info becomes an application-level error.
        raise AuthorizationError(str(error))
    return _auth.authorized_http(creds)
def add_singlesample_parser(subparsers):
    """Add function 'singlesample' argument parsers.

    Registers the ``ssgsea`` subcommand on the given argparse subparsers
    object, with input/output/advanced option groups. Returns None.
    """
    argparser_gsea = subparsers.add_parser("ssgsea", help="Run Single Sample GSEA.")
    # group for input files
    group_input = argparser_gsea.add_argument_group("Input files arguments")
    group_input.add_argument("-d", "--data", dest="data", action="store", type=str, required=True,
                             help="Input gene expression dataset file in txt format. Same with GSEA.")
    group_input.add_argument("-g", "--gmt", dest="gmt", action="store", type=str, required=True,
                             help="Gene set database in GMT format. Same with GSEA.")
    # group for output files
    group_output = argparser_gsea.add_argument_group("Output arguments")
    # Shared output options (defined elsewhere in this module).
    add_output_option(group_output)
    # group for General options.
    group_opt = argparser_gsea.add_argument_group("Single Sample GSEA advanced arguments")
    group_opt.add_argument("--sn", "--sample-norm", dest = "norm", action="store", type=str,
                           default='rank', metavar='normalize',
                           choices=("rank", "log", "log_rank", "custom"),
                           help="Sample normalization method. Choose from {'rank', 'log', 'log_rank','custom'}. Default: rank")
    group_opt.add_argument("--ns", "--no-scale", action='store_false', dest='scale', default=True,
                           help="If the flag was set, don't normalize the enrichment scores by number of genes.")
    group_opt.add_argument("-n", "--permu-num", dest = "n", action="store", type=int, default=0, metavar='nperm',
                           help="Number of random permutations. For calculating esnulls. Default: 0")
    group_opt.add_argument("--min-size", dest="mins", action="store", type=int, default=15, metavar='int',
                           help="Min size of input genes presented in Gene Sets. Default: 15")
    group_opt.add_argument("--max-size", dest = "maxs", action="store", type=int, default=2000,metavar='int',
                           help="Max size of input genes presented in Gene Sets. Default: 2000")
    group_opt.add_argument("-w", "--weight", action='store', dest='weight', default=0.25, type=float, metavar='weight',
                           help='Weighted_score of rank_metrics. For weighting input genes. Default: 0.25',)
    group_opt.add_argument("-a", "--ascending", action='store_true', dest='ascending', default=False,
                           help='Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.')
    group_opt.add_argument("-s", "--seed", dest = "seed", action="store", type=int, default=None, metavar='',
                           help="Number of random seed. Default: None")
    group_opt.add_argument("-p", "--threads", dest = "threads", action="store", type=int, default=1, metavar='procs',
                           help="Number of Processes you are going to use. Default: 1")
    return
def svn_auth_save_credentials(*args):
    """svn_auth_save_credentials(svn_auth_iterstate_t state, apr_pool_t pool) -> svn_error_t"""
    # Bug fix: the `apply` builtin was removed in Python 3; argument
    # unpacking is the direct equivalent (apply(f, args) == f(*args)).
    return _core.svn_auth_save_credentials(*args)
def setProgressParent(parent):
    """
    Enable the FTP progress GUI, and set the parent frame.
    Usage: setProgressParent(parent)

    :param parent: frame that will own future progress dialogs; stored in
        the module-global `_progressParent`.
    """
    global _progressParent
    _progressParent = parent
    # Notify cache module that window dialogs should be used.
    cache.useWindow()
def write_directory_status(directory_status):
    """
    Writes a status to the status file:
    Overwrites anything that is in the file
    Writes a timestamp to the time of last written

    :param directory_status: DirectoryStatus object containing status to write to directory
    :return: None
    """
    # Respect read-only mode: write nothing unless the option is exactly False.
    if config.read_config_option("readonly", bool, False) is not False:
        return
    if not os.access(directory_status.directory, os.W_OK):  # Cannot access upload directory
        raise exceptions.DirectoryError("Cannot access directory", directory_status.directory)
    json_data = directory_status.to_json_dict()
    status_path = os.path.join(directory_status.directory, STATUS_FILE_NAME)
    with open(status_path, "w") as handle:
        json.dump(json_data, handle, indent=4, sort_keys=True)
        handle.write("\n")
def toggleDisplayOrigShape(inMesh, inColor =(.8, 0.2, 0.2), both = False, progressBar=None):
    """
    Toggle the displayed mesh between the output and the input shape of the
    skinCluster. The input shape receives a default lambert + vertex colors
    to make a clear visual distinction between the two.

    :todo: maybe instead of lambert shader we can use the original shader + red vertex color overlay to make sure the textures can still be viewed
    :todo: add an option that shows both shapes? so we can display 1 in movement and one in default pose
    :param inMesh: the object that has a skinCluster attached which we want to toggle
    :type inMesh: string
    :param inColor: the color in RGB values from 0 to 1 used as color value
    :type inColor: tuple/list
    :param both: if True, shapes driven by a skinCluster are skipped (left visible)
    :type both: bool
    :param progressBar: progress bar instance to be used for progress display, if `None` it will print the progress instead
    :type progressBar: QProgressBar
    :return: None -- NOTE(review): the original docstring claimed `True`,
        but no value is returned.
    """
    for shape in cmds.listRelatives(inMesh, s=1, ni=0, fullPath=1):
        # With `both`, leave any shape that feeds a skinCluster untouched.
        if both and cmds.listConnections(shape, s=1, d=1, type= "skinCluster"):
            continue
        # Flip the intermediate-object flag to swap which shape is displayed.
        cmds.setAttr("%s.intermediateObject" % shape, not cmds.getAttr("%s.intermediateObject" % shape))
        # Shapes with no shader assigned get the distinct review color.
        if not cmds.listConnections(shape, s=0, d=1, type="shadingEngine"):
            setOrigShapeColor(shape, inColor)
def get_nodes_str(name, nodes):
    """
    Helper to dump nodes as a labelled, comma-separated list of names.
    """
    header = " %s nodes = %d\n" % (name, len(nodes))
    names = ", ".join(node._name for node in nodes)
    return header + " " + names + "\n"
def test_corr():
    """
    Test top-level functions in pyccl.correlation module.
    """
    # Same yield order as before: check_corr over standard then neutrino
    # cosmologies, followed by check_corr_3d over both.
    for check in (check_corr, check_corr_3d):
        for model_factory in (reference_models, reference_models_nu):
            for cosmo in model_factory():
                yield check, cosmo
def compute_X_axis(dataset, frame, dss, ref_freq=None, vspline=None,
                   time=None):
    """
    Computes the appropriate X-axis for the averaged difference spectrum.

    'vobj' is the velocity of the object in the rest velocity of the frame
    specified. Acceptable frames are defined in the SAOhdf5.rel_freq_units()
    docstring. In addition we allow here RADI-OBJ which defines the rest
    frame of the object.

    @param dataset : data set whose header describes the target source
    @param frame : the rest frame and the X-axis type (freq or vel)
    @type  frame : str
    @param dss : DSS station
    @type  dss : int
    @param ref_freq : frequency in MHz for computing relative frequencies
    @type  ref_freq : float
    @param vspline : radial velocity of a moving body as a function of time
    @type  vspline : function
    @param time : the time at which the spline is to be evaluated
    @type  time : UNIX seconds

    @return: (x, frame, vobj), or None if the frame is not recognised
    """
    # (Removed an unused `n_chans` computation from the original.)
    if ref_freq:
        f_ref = ref_freq
    else:
        f_ref = dataset.header['linefreq']/1e6  # MHz
    v_ref = dataset.header['VELOCITY']
    logger.debug(" compute_X-axis: requested frame is %s", frame)
    logger.debug(" compute_X_axis: reference frequency is %10.3f", f_ref)
    if frame == "CHAN-OBS" or frame == "FREQ-OBS" or frame == "RELA-OBS":
        x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref)
        vobj = None
    elif frame == "RADI-OBS":
        vobj = V_LSR(dataset.header, time, dss)
        x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref)
    elif frame == "RADI-LSR":
        x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref,
                                   v_frame=V_LSR(dataset.header, time, dss))
        vobj = v_ref
        logger.debug("compute_X_axis: vobj = %.2f", vobj)
    elif frame == "RADI-OBJ":
        # This is the object's rest frame
        if vspline and time:
            vobj = vspline(time)
            x = -(c/1000)*dataset.rel_freq_units(frame="DELF-OBS")/f_ref - vobj
        else:
            # NOTE(review): indexes header[0] here while the rest of this
            # function uses dataset.header directly -- confirm which is right.
            vobj = dataset.header[0]['VELOCITY']
            x = dataset.rel_freq_units(frame=frame, ref_freq=f_ref,
                                       v_frame=V_LSR(dataset.header, time, dss) + vobj)
    else:
        # Bug fix: was `self.logger.warning(...)`, a NameError in this
        # module-level function; use the module logger instead.
        logger.warning(" frame %s is not valid", frame)
        return
    return x, frame, vobj
def _reduce_child(node: Node, child: Node, root: RootNode):
    """
    Sets node's results to the child's result, keeping node's tag_name.
    """
    node._set_result(child._result)
    # Merge the child's attributes into the node (one direction only).
    update_attr(node, (child,), root)
    # NOTE(review): the reverse merge and XML-attribute copy below were left
    # disabled by the original author; kept for reference.
    # update_attr(child, (node,), root)
    # if child.has_attr():
    #     node._xml_attr = child._xml_attr
def clean_df(df, selected_columns=default_columns):
    """Take a dataframe with GDELT2.0 data and only retain the useful columns
    for us, and also add the country where the news was written.

    Keyword arguments:
    df -- The dataframe complying to GDELT2.0 columns format
    selected_columns (optional) -- The set of columns we want to keep
    """
    df = df[selected_columns]
    # Drop rows with any missing value in the selected columns.
    df = df.dropna(axis=0, how='any')
    mapping = get_mapping(df).set_index('ActionGeo_CountryCode')
    # NOTE(review): the membership test reads the index of the
    # 'Country_Code' series (same as the mapping index after set_index);
    # confirm this is the intended lookup.
    df['Country_Code'] = df['ActionGeo_CountryCode'].apply(
        lambda x: mapping.loc[x]['Country_Code'] if x in mapping['Country_Code'].index.values else 'None')
    df['Country_Source'] = get_countries_for_dataframe(df, 'SOURCEURL', get_all_newspapers_to_country_dict(),
                                                       get_tld_to_country_dict())
    # Fetch the cca3 -> common-name country mapping (network call every run).
    r = requests.get('https://raw.githubusercontent.com/mledoze/countries/master/countries.json')
    d = {}
    for c in r.json():
        d[c['cca3']] = c['name']['common']
    df['Country_Name'] = df['Country_Code'].apply(lambda x: d[x] if x in d else 'None')
    return df[cleaned_columns]
def mse(y_true, y_pred, reduce_mode="mean"):
    """Mean squared error, reduced according to ``reduce_mode``."""
    squared_error = tf.math.square(y_pred - y_true)
    return reduce(squared_error, reduce_mode)
def calculate_assignment_probabilites(assignments, num_clusters):
    """
    Empirical pdf estimate of cluster assignments: counts each assignment's
    occurrences and normalises by the total.
    """
    centers = np.arange(num_clusters)
    # Bin edges fall halfway between integer cluster labels, with open
    # outer edges so every assignment lands in some bin.
    edges = np.hstack([-np.inf, (centers[:-1] + centers[1:]) / 2, np.inf])
    counts, _ = np.histogram(assignments, edges)
    return counts / np.sum(counts)
def unix_getpass(prompt='Password: ', stream=None):
    """Prompt for a password, with echo turned off.
    Args:
      prompt: Written on stream to ask for the input. Default: 'Password: '
      stream: A writable file object to display the prompt. Defaults to
        the tty. If no tty is available defaults to sys.stderr.
    Returns:
      The seKr3t input.
    Raises:
      EOFError: If our input tty or stdin was closed.
      GetPassWarning: When we were unable to turn echo off on the input.
    Always restores terminal settings before returning.
    """
    passwd = None
    # ExitStack guarantees the tty file objects are closed on every path.
    with contextlib.ExitStack() as stack:
        try:
            # Always try reading and writing directly on the tty first.
            fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
            tty = io.FileIO(fd, 'w+')
            stack.enter_context(tty)
            input = io.TextIOWrapper(tty)
            stack.enter_context(input)
            if not stream:
                stream = input
        except OSError as e:
            # If that fails, see if stdin can be controlled.
            stack.close()
            try:
                fd = sys.stdin.fileno()
            except (AttributeError, ValueError):
                # stdin has no usable fd; fall back to echoing input.
                fd = None
                passwd = fallback_getpass(prompt, stream)
            input = sys.stdin
            if not stream:
                stream = sys.stderr
        if fd is not None:
            try:
                old = termios.tcgetattr(fd)     # a copy to save
                new = old[:]
                new[3] &= ~termios.ECHO  # 3 == 'lflags'
                tcsetattr_flags = termios.TCSAFLUSH
                if hasattr(termios, 'TCSASOFT'):
                    tcsetattr_flags |= termios.TCSASOFT
                try:
                    termios.tcsetattr(fd, tcsetattr_flags, new)
                    passwd = raw_input(prompt, stream, input=input)
                finally:
                    # Always restore the saved terminal attributes.
                    termios.tcsetattr(fd, tcsetattr_flags, old)
                    stream.flush()  # issue7208
            except termios.error:
                if passwd is not None:
                    # _raw_input succeeded.  The final tcsetattr failed.  Reraise
                    # instead of leaving the terminal in an unknown state.
                    raise
                # We can't control the tty or stdin.  Give up and use normal IO.
                # fallback_getpass() raises an appropriate warning.
                if stream is not input:
                    # clean up unused file objects before blocking
                    stack.close()
                passwd = fallback_getpass(prompt, stream)
        stream.write('\n')
        return passwd
def getStudiesOptions(request, id):
    """Return a JSON response listing the studies for an investigation.
    Input:
        id, investigation id.
    """
    db = SeekDB(None, None, None)
    # Resolve the SEEK login for this request (side effect; result unused).
    db.getSeekLogin(request, False)
    study_options = convertDicToOptions(db.getStudiesFromID(id))
    payload = {'msg': 'okay', 'status': 1, 'study_options': study_options}
    return HttpResponse(simplejson.dumps(payload))
def fibonacci_thrid(n):
    """Return the n-th Fibonacci number (iterative version).

    Replaces the original naive double recursion, which takes O(2^n)
    time, with an O(n) loop.  Results are identical for every input:
    for n < 2 the input itself is returned, exactly as before.

    :param n: index into the Fibonacci sequence
    :return: the n-th Fibonacci number (or n itself when n < 2)
    """
    if n < 2:
        return n
    prev, cur = 0, 1
    for _ in range(n - 1):
        prev, cur = cur, prev + cur
    return cur
def make_frame_image(xs, ys, Cs):
    """ Create a figure displaying the frame data.

    Draws a 256x256 pixel frame: each (x, y) pixel is rendered as a 1x1
    rectangle coloured by its count C via the "hot" colour map, over a
    blue background, with a colour bar scaled to the maximum count.

    Args:
        xs: pixel x coordinates.
        ys: pixel y coordinates (parallel to xs).
        Cs: per-pixel count values (parallel to xs).

    Returns:
        None -- the `return fig` line is commented out, so callers only
        get the figure via pyplot's current-figure state.
        NOTE(review): confirm that is intentional.
    """
    ## The minimum x value.
    x_min = 0
    ## The maximum x value.
    x_max = 256
    ## The minimum y value.
    y_min = 0
    ## The maximum y value.
    y_max = 256
    ## The width of the frame.
    w = 256
    ## The height of the frame.
    h = 256
    ## The maximum count value.
    C_max = 1
    # We add this just in case there are no pixels supplied.
    if len(Cs) > 0:
        C_max = max(Cs)
    # Create the figure.
    plt.close('all')
    ## The default size of the figure [inches].
    fig_size = 5.0
    ## The figure for the frame.
    fig = plt.figure(1, figsize=(fig_size*1.27, fig_size), dpi=150, facecolor='w', edgecolor='w')
    ## The frame axes.
    # NOTE(review): `axisbg` was removed in matplotlib 2.0 (replaced by
    # `facecolor`); this call presumably targets an older matplotlib.
    figax = fig.add_subplot(111, axisbg='#222222')
    # Add the frame background (blue).
    figax.add_patch(plt.Rectangle((0,0),256,256,facecolor='#82bcff'))
    # Add a grid.
    plt.grid(1)
    # Select the "hot" colour map for the pixel counts and add a
    # colour bar to indicate the count value for each pixel.
    #
    cmap = plt.cm.hot
    #
    colax, _ = colorbar.make_axes(plt.gca())
    # Round the colour-bar ceiling up to the next multiple of 10.
    col_max = 10*(np.floor(C_max/10.)+1)
    #
    colorbar.ColorbarBase(colax,cmap=cmap,norm=colors.Normalize(vmin=0,vmax=col_max))
    # Loop over the pixels and add them to the figure.
    for i, x in enumerate(xs):
        ## The scaled count value (for the colour map).
        scaled_C = float(Cs[i])/float(col_max)
        # Rather than use, say, a 2D histogram for the pixels, we add
        # a coloured square to the plot for each pixel.
        figax.add_patch(plt.Rectangle((xs[i],ys[i]),1,1,edgecolor=cmap(scaled_C),facecolor=cmap(scaled_C)))
    ## The frame border width [pixels].
    b = 3
    # Set the axis limits based on the cluster radius.
    figax.set_xlim([0 - b, 256 + b])
    figax.set_ylim([0 - b, 256 + b])
    #return fig
def background_thread():
    """Example of how to send server generated events to clients.

    Runs forever: every `seconds` (module-level interval) it polls the
    module-level update flags and, when set, pushes the corresponding
    parameter dict to all clients on the '/test' namespace, then clears
    both flags.
    """
    global viewerParams, updateViewerParams, MLParams, updateMLParams
    while True:
        # Cooperative sleep so other socketio handlers can run.
        socketio.sleep(seconds)
        if (updateViewerParams):
            print("========= viewerParams:",viewerParams.keys())
            socketio.emit('update_viewerParams', viewerParams, namespace='/test')
        if (updateMLParams):
            print("========= MLParams:",MLParams.keys())
            socketio.emit('update_MLParams', MLParams, namespace='/test')
        # Clear both flags after each poll cycle.
        updateViewerParams = False
        updateMLParams = False
def main(pargs):
    """Dispatch a build step selected by ``pargs.item``.

    Recognized items: 'frbs', 'hosts' (requires --frb), 'specdb', 'fg'.
    Raises IOError for anything else.
    """
    import warnings
    from frb.builds import build_specdb
    from frb.builds import build_frbs
    from frb.builds import build_hosts
    from frb.builds import build_fg

    item = pargs.item.lower()
    if item == 'frbs':
        build_frbs.main(inflg=pargs.flag)
    elif item == 'hosts':
        # Host building needs an explicit FRB list.
        if pargs.frb is None:
            print("You must specify --frb")
            return
        frb_names = [name.strip() for name in pargs.frb.split(',')]
        build_hosts.main(frb_names, options=pargs.galaxy_options,
                         hosts_file=pargs.hosts_file,
                         lit_refs=pargs.lit_refs,
                         override=pargs.override)
    elif item == 'specdb':
        build_specdb.main(inflg=pargs.flag)
    elif item == 'fg':
        build_fg.main(inflg=pargs.flag, options=pargs.galaxy_options)
    else:
        raise IOError("Bad build item {:s}".format(item))
def site_url(url):
    """
    Determine the server URL.

    Builds a base URL from the local hostname, appending the port unless
    the server listens on the default HTTP port 80, then joins *url*
    onto it.

    :param url: path (possibly relative) to resolve against the server root
    :return: absolute URL string
    """
    base_url = 'http://%s' % socket.gethostname()
    # Bug fix: the original used `server.port is not 80`, an identity
    # comparison on an int.  CPython only caches small integers, so this
    # is unreliable (and a SyntaxWarning on Python 3.8+); use `!=`.
    if server.port != 80:
        base_url += ':%d' % server.port
    return urlparse.urljoin(base_url, url)
def get_coordinates(
    mask: np.ndarray, ths: int = 5, kernel_len: int = 10
) -> Tuple[List, np.ndarray, np.ndarray]:
    """Extract the coordinates of the table, horizontal and vertical lines.

    Args:
        mask (np.ndarray): A binary table image
        ths (int, optional): Threshold value to ignore lines that do not
            share the same y coordinate (horizontal lines) or x
            coordinate (vertical lines). Defaults to 5.
        kernel_len (int, optional): The size of kernel that is applied.

    Raises:
        ValueError: raised if the number of detected lines is not enough
            to rebuild the table

    Returns:
        Tuple[List, np.ndarray, np.ndarray]: the table coordinates
            [x1, y1, x2, y2], the vertical lines and the horizontal lines.
    """
    # get horizontal lines mask image
    horizontal_lines_mask = get_hor_lines_mask(mask, kernel_len)
    # get vertical lines mask image
    vertical_lines_mask = get_ver_lines_mask(mask, kernel_len)
    # get coordinate of horizontal and vertical lines
    hor_lines = get_lines_coordinate(horizontal_lines_mask, axis=0, ths=ths)
    ver_lines = get_lines_coordinate(vertical_lines_mask, axis=1, ths=ths)
    # Either direction failed to come back as an (N, 4) array.
    if len(hor_lines.shape) != 2 or len(ver_lines.shape) != 2:
        raise ValueError("Empty line coords array")
    # remove noise edge
    hor_lines, ver_lines = remove_noise(hor_lines, ver_lines, ths)
    # get coordinate of table
    tab_x1, tab_y1, tab_x2, tab_y2 = get_table_coordinate(hor_lines, ver_lines)
    # make sure the table ends up with all 4 borders
    new_ver_lines = []
    new_hor_lines = []
    for e in ver_lines:
        x1, y1, x2, y2 = e
        # don't keep left/right border here; re-added explicitly below
        if abs(x1 - tab_x1) >= ths and abs(x2 - tab_x2) >= ths:
            new_ver_lines.append([x1, y1, x2, y2])
    for e in hor_lines:
        x1, y1, x2, y2 = e
        # don't keep top/bottom border here; re-added explicitly below
        if abs(y1 - tab_y1) >= ths and abs(y2 - tab_y2) >= ths:
            new_hor_lines.append([x1, y1, x2, y2])
    # add top, bottom, left, right border from the table bounding box
    new_ver_lines.append([tab_x1, tab_y1, tab_x1, tab_y2])
    new_ver_lines.append([tab_x2, tab_y1, tab_x2, tab_y2])
    new_hor_lines.append([tab_x1, tab_y1, tab_x2, tab_y1])
    new_hor_lines.append([tab_x1, tab_y2, tab_x2, tab_y2])
    # normalize -- presumably merges/aligns near-duplicate lines
    # (see normalize_v1 / normalize_v2); confirm against those helpers
    final_hor_lines = normalize_v1(new_hor_lines, axis=0, ths=ths)
    final_ver_lines = normalize_v1(new_ver_lines, axis=1, ths=ths)
    final_hor_lines, final_ver_lines = normalize_v2(final_ver_lines, final_hor_lines)
    return [tab_x1, tab_y1, tab_x2, tab_y2], final_ver_lines, final_hor_lines
def save_users(user):
    """Persist a newly created user by delegating to its save_user() method."""
    user.save_user()
def get_pixels(extrinsic, intrinsic, X):
    """
    Project the 3-D position vector X into image pixel coordinates.
    :param extrinsic: extrinsic (4*4) matrix obtained from the headset
    :param intrinsic: intrinsic (3*3) matrix obtained from the headset
    :param X: the position vector
    :return: [u, v] image pixels for the vector
    """
    # Homogeneous world point -> camera frame.
    camera_point = extrinsic.dot(np.append(X, 1))
    # Perspective divide by depth, keep the first three components.
    normalized = (camera_point / camera_point[2])[:3]
    # Apply the camera intrinsics and keep the (u, v) pair.
    pixel = intrinsic.dot(normalized)[:2]
    return [pixel[0], pixel[1]]
def boggle_helper(word, used_position, current_word, x, y, found_list):
    """
    Recursive depth-first search over the 4x4 boggle grid.

    :param word: the nested list of the alphabets of the boggle
    :param used_position: the (x, y) coordinates which have been used on this path
    :param current_word: the current word that the program is processing
    :param x: the x coordinate of the boggle
    :param y: the y coordinate of the boggle
    :param found_list: the list which is made up of vocabularies we found
    :return: None

    Side effects: appends to found_list, removes found words from the
    global dictionary_set, and increments the global words_count.
    """
    global dictionary_set
    global words_count
    if current_word in dictionary_set and current_word not in found_list:
        print(f'Found \"{current_word}\"')
        found_list.append(current_word)
        # Remove the word so a longer word sharing this prefix can still
        # be found on the re-visit below (e.g. "room" then "roomy").
        dictionary_set.remove(current_word)
        if has_prefix(current_word):
            # Re-enter from the same cell to try extending the found word.
            boggle_helper(word, used_position, current_word, x, y, found_list)
        words_count += 1
    else:
        # Explore the neighbouring cells (offsets -1..1 in each axis);
        # the current cell is excluded via used_position.
        for i in range(-1, 2):
            for j in range(-1, 2):
                if 4 > x + i >= 0 and 4 > y + j >= 0 and (x + i, y + j) not in used_position:
                    # Only descend when the extended prefix can still
                    # lead to a dictionary word.
                    if has_prefix(current_word + word[x + i][y + j]):
                        new_x = x + i
                        new_y = y + j
                        used_position.append((new_x, new_y))
                        boggle_helper(word, used_position, current_word + word[new_x][new_y], new_x, new_y, found_list)
                        # Backtrack: free the cell for other paths.
                        used_position.pop()
def delete_question(media_package, level=0):
    """
    Ask the user whether to delete the distribution artefacts for the next
    media package, for all remaining media packages, or to quit.

    :param media_package: The media package to ask the question for
    :type: str
    :param level: The level to indent the question to
    :type level: int
    :return: The answer.
    :rtype: FixAnswer
    """
    question = "Delete distribution artefacts of media package {}?".format(media_package)
    options = ['n', 'a', 'q']
    short_descriptions = ["next", "all", "quit"]
    long_descriptions = ["deleting the distribution artefacts of the next media package",
                         "deleting all(remaining) distribution artefacts",
                         "quitting the script"]
    return get_configurable_answer(options, short_descriptions, long_descriptions, question, level)
def import_submodules(package, recursive=True):
    """Import all submodules of a package, recursively, including subpackages.

    Arguments:
        1. package = (string) name of the package
                     (module) loader of the package
        2. recursive = (bool) True  = load packages and modules from all sub-packages as well.
                       (bool) False = load only the first level of packages and modules.

    Returns:
        dict mapping fully qualified module names to the imported modules.
    """
    if isinstance(package, str):
        package = importlib.import_module(package)
    modules = {}
    for _loader, mod_name, is_pkg in pkgutil.walk_packages(package.__path__):
        qualified = package.__name__ + '.' + mod_name
        modules[qualified] = importlib.import_module(qualified)
        # Descend into subpackages when recursion is requested.
        if is_pkg and recursive:
            modules.update(import_submodules(qualified))
    return modules
def is_collision_ray_cell(map_obj, cell):
    """
    Return True when *cell* lies outside the map or is marked occupied.

    cell : cell r, c index from left bottom.
    """
    rows = map_obj.mapdim[0]
    cols = map_obj.mapdim[1]
    # Out-of-bounds cells always count as collisions.
    if not (0 <= cell[0] < rows and 0 <= cell[1] < cols):
        return True
    if map_obj.map is None:
        return False
    # Row-major flat index into the linearized occupancy grid.
    flat_index = cell[0] + rows * cell[1]
    return map_obj.map_linear[flat_index] == 1
def test_override_tab_value():
    """Test the override_tab_value() function.

    A leading tab in a cell value should be expanded to spaces while
    untabbed values and the headers pass through unchanged.
    """
    data = [[1, '\tJohn'], [2, 'Jill']]
    headers = ['id', 'name']
    expected = ([[1, '    John'], [2, 'Jill']], ['id', 'name'])
    results = override_tab_value(data, headers)
    # results[0] may be a generator, hence the list() call before comparing.
    assert expected == (list(results[0]), results[1])
def time_of_trip(datum, city):
    """
    Takes as input a dictionary containing info about a single trip (datum) and
    its origin city (city) and returns the month, hour, and day of the week in
    which the trip was made.

    NYC timestamps include seconds, while Washington and Chicago do not;
    Washington also uses a different column name ('Start date').

    :param datum: dict describing one trip record
    :param city: one of "NYC", "Chicago", "Washington"
    :return: tuple (month:int, hour:int, day_of_week:str, e.g. "Monday")
    :raises ValueError: for an unrecognized city (the original code would
        instead fail with AttributeError on a None datetime)
    """
    # (column name, strptime format) per city.
    city_formats = {
        "NYC": ('starttime', "%m/%d/%Y %H:%M:%S"),
        "Chicago": ('starttime', "%m/%d/%Y %H:%M"),
        "Washington": ('Start date', "%m/%d/%Y %H:%M"),
    }
    try:
        key, fmt = city_formats[city]
    except KeyError:
        raise ValueError("Unknown city: {}".format(city))
    dt = datetime.strptime(datum[key], fmt)
    # datetime attributes are already ints -- no strftime/int round-trip.
    return (dt.month, dt.hour, dt.strftime("%A"))
def transform_resource_name(ctx, param, value):
    """Callback to transform resource_name into title case."""
    # None passes through untouched; anything else is title-cased.
    return value if value is None else value.title()
def extrema(im):
    """
    Return the minimum and maximum of an image as a two-element list.

    Arguments:
        im   image whose extrema are to be found
    """
    lo = im.min()
    hi = im.max()
    return [lo, hi]
def _create_statement(name, colnames):
"""Create table if not exists foo (...).
Note:
Every type is numeric.
Table name and column names are all lowercased
"""
# every col is numeric, this may not be so elegant but simple to handle.
# If you want to change this, Think again
schema = ', '.join([col + ' ' + 'numeric' for col in colnames])
return "create table if not exists %s (%s)" % (name, schema) | 5,325,556 |
def createBundle():
    """Create a bundled type of OSC message."""
    bundle = OSC.OSCMessage()
    bundle.address = ""
    # Bundle header: tag followed by two zero timestamp words.
    for item in ("#bundle", 0, 0):
        bundle.append(item)
    return bundle
def binary_or(a: int, b: int):
    """
    Take in 2 integers, convert them to binary, and return a binary number that is the
    result of a binary or operation on the integers provided.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    >>> binary_or(21, 30)
    '0b11111'
    >>> binary_or(58, 73)
    '0b1111011'
    >>> binary_or(0, 255)
    '0b11111111'
    >>> binary_or(0, 256)
    '0b100000000'
    >>> binary_or(0, -1)
    Traceback (most recent call last):
        ...
    ValueError: the value of both input must be positive
    >>> binary_or(0, 1.1)
    Traceback (most recent call last):
        ...
    TypeError: 'float' object cannot be interpreted as an integer
    >>> binary_or("0", "1")
    Traceback (most recent call last):
        ...
    TypeError: '<' not supported between instances of 'str' and 'int'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both input must be positive")
    # bin() rejects non-integers with the same TypeError the original
    # string-merging implementation produced.
    for operand in (a, b):
        bin(operand)
    # Merging the zero-padded bit strings column by column is exactly
    # the bitwise OR of the two non-negative integers.
    return bin(a | b)
def tukey(N, alpha):
    """
    Generate a Tukey (tapered cosine) window of length N.

    The Tukey window can be regarded as a cosine lobe of width \\alpha * N / 2
    convolved with a rectangle window of width (1 - \\alpha / 2).  At
    \\alpha = 0 it reduces to a rectangular window and at \\alpha = 1 it
    becomes a Hann window.  (The original docstring stated these two
    limits the wrong way round; the code has always implemented the
    correct ones, as the special cases below show.)

    :param N: number of samples in the window
    :param alpha: taper fraction; values <= 0 give a rectangular window,
        values >= 1 give a Hann window
    :return: numpy array of N window samples
    """
    # Special cases
    if alpha <= 0:
        return np.ones(N)  # rectangular window
    elif alpha >= 1:
        return np.hanning(N)
    # Normal case
    x = np.linspace(0, 1, N)
    w = np.ones(x.shape)
    # first condition 0 <= x < alpha/2 (rising cosine taper)
    first_condition = x < alpha/2
    w[first_condition] = 0.5 * (1 + np.cos(2*np.pi/alpha * (x[first_condition] - alpha/2)))
    # middle region alpha/2 <= x < 1 - alpha/2 keeps w == 1
    # third condition 1 - alpha/2 <= x <= 1 (falling cosine taper)
    third_condition = x >= (1 - alpha/2)
    w[third_condition] = 0.5 * (1 + np.cos(2*np.pi/alpha * (x[third_condition] - 1 + alpha/2)))
    return w
def get_response(log, url, **kwargs):
    """
    Get data from server at given url.

    Parameters:
        - log: Standard python log instance
        - url: The url to make a post/get request to.
        - kwargs: Keyword arguments.
            - data: dict of post data. If data != None, get_response makes a
              http POST request, otherwise a http GET.
            - timeout: int, timeout in seconds. Defaults to URL_TIMEOUT (120).
    Returns:
        - Text read from url.
    Raises:
        - ServiceError if return code is != 200, httpError or timeout.
    """
    log.debug("Trying url: %s", url)
    # Presence of 'data' selects POST; urlencode() output must be bytes
    # for urlopen.
    data = urlencode(kwargs['data']).encode() if 'data' in kwargs else None
    to = kwargs['timeout'] if 'timeout' in kwargs else URL_TIMEOUT
    if data:
        log.debug("Posting data: " + data.decode('ascii'))
    try:
        with urllib.request.urlopen(url, data, timeout=to) as response:
            code = response.getcode()
            # NOTE(review): assumes an ASCII response body -- confirm the
            # service never returns non-ASCII text.
            html = response.read().decode('ascii')
    except timeoutError:
        # timeoutError is a module-level alias defined elsewhere in this file.
        raise ServiceError("Timeout reading %s" % url)
    except (urllib.error.HTTPError, urllib.error.URLError) as err:
        raise ServiceError("Error reading %s :%s" % (url, err))
    log.debug("Got response (%d) : %s", code, html)
    if code != 200:
        raise ServiceError("Cannot update, response code: %d" % code)
    return html
def check_dir(path):
    """Validate that *path* is an absolute path to an existing directory.

    :param path: filesystem path to check
    :raises ValueError: if path is not absolute or is not a directory
    :return: None -- this function only validates.  (The original
        docstring advertised ``(path:str) -> bool`` but nothing was ever
        returned; the contract is raise-on-failure.)
    """
    if not isabs(path):
        raise ValueError("Path %r is not absolute" % path)
    if not isdir(path):
        raise ValueError("Path %r is not a directory" % path)
def psi(X, Y, c_i, A, config, pkg='numpy'):
    """Computes the value of magnetic flux at point (X, Y)
    according to coefficients ci.

    Args:
        X (float or numpy.array): x coordinate
        Y (float or numpy.array): y coordinate
        c_i (list): list of floats, the ci coefficients
        A (float): plasma parameter
        config (str): shape of the plasma 'non-null', 'single-null',
            'double-null'.
        pkg (str, optional): if set to 'numpy' (resp. 'sympy'), numpy
            (resp. sympy) objects will be used. Defaults to 'numpy'.

    Raises:
        ValueError: If argument pkg is not in ['numpy', 'np', 'sympy', 'sp']

    Returns:
        float or numpy.array or sympy.Add: value(s) of magnetic flux
    """
    # Pick the math backend so the same expressions can be evaluated
    # numerically (numpy) or symbolically (sympy).
    if pkg in ['numpy', 'np']:
        pkg = np
    elif pkg in ['sympy', 'sp']:
        pkg = sp
    else:
        raise ValueError("Unexpected string for argument pkg")
    # Up-down symmetric polynomial basis functions of the homogeneous
    # Grad-Shafranov equation -- NOTE(review): presumably the Solov'ev
    # basis of Cerfon & Freidberg (2010); confirm against the reference.
    psi_1 = 1
    psi_2 = X**2
    psi_3 = Y**2 - X**2*pkg.log(X)
    psi_4 = X**4 - 4*X**2*Y**2
    psi_5 = 2*Y**4 - 9*Y**2*X**2 + 3*X**4*pkg.log(X) - 12*X**2*Y**2*pkg.log(X)
    psi_6 = X**6 - 12*X**4*Y**2 + 8*X**2*Y**4
    psi_7 = 8*Y**6 - 140*Y**4*X**2 + 75*Y**2*X**4 - 15*X**6*pkg.log(X) + \
        180*X**4*Y**2*pkg.log(X) - 120*X**2*Y**4*pkg.log(X)
    psis = [psi_1, psi_2, psi_3, psi_4, psi_5, psi_6, psi_7]
    # Up-down asymmetric terms are only added for a single-null shape.
    if config == 'single-null':
        psi_8 = Y
        psi_9 = Y*X**2
        psi_10 = Y**3 - 3*Y*X**2*pkg.log(X)
        psi_11 = 3*Y*X**4 - 4*Y**3*X**2
        psi_12 = 8*Y**5 - 45*Y*X**4 - 80*Y**3*X**2*pkg.log(X) + \
            60*Y*X**4*pkg.log(X)
        psis += [psi_8, psi_9, psi_10, psi_11, psi_12]
    # Particular solution plus linear combination of the basis terms;
    # only the first len(c_i) basis functions contribute.
    val = X**4/8 + A*(1/2*X**2*pkg.log(X) - X**4/8) + \
        sum([c_i[i]*psis[i] for i in range(len(c_i))])
    return val
def get(baseurl, params=None, headers=None, private_keys_to_ignore=["key", "secret"], permanent_cache_file=PERMANENT_CACHE_FNAME, temp_cache_file=TEMP_CACHE_FNAME):
    """
    Return a Response object (defined in this file) for the given URL.

    Look in temp_cache first, then permanent_cache.
    If not found, fetch data from the internet; successful (HTTP 200)
    bodies are added to the temporary cache.

    NOTE(review): the mutable default for private_keys_to_ignore is
    shared across calls; it is only read here, so that is safe as long
    as no caller mutates it.
    """
    logger = logging.getLogger('requests_with_caching')
    if params == None:
        params = {}
    # Stringify every parameter value so the cache key is stable.
    for k in params:
        params[k] = str(params[k])
    if headers == None:
        headers = {}
    if "user-agent" not in headers: # avoid captcha
        headers["user-agent"] = "Lynx/2.9.0dev.5 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/3.6.13"
    # The cache key skips the listed private params so secrets never hit disk.
    cache_key = make_cache_key(baseurl, params, private_keys_to_ignore)
    full_url = requests.Request("GET", baseurl, params=params, headers=headers).prepare().url
    logger.info(ctime() + ": fetching " + full_url)
    if not DISABLE_CACHING:
        # Load the permanent and page-specific caches from files
        permanent_cache = _read_from_file(permanent_cache_file)
        temp_cache = _read_from_file(temp_cache_file)
        if cache_key in temp_cache:
            logger.debug("found in temp_cache")
            # make a Response object containing text from the cache, and the full_url that would have been fetched
            return Response(temp_cache[cache_key], full_url)
        elif cache_key in permanent_cache:
            logger.debug("found in permanent_cache")
            # make a Response object containing text from the cache, and the full_url that would have been fetched
            return Response(permanent_cache[cache_key], full_url)
    logger.debug("new; adding to cache")
    # actually request it
    resp = requests.get(baseurl, params=params, headers=headers)
    # save it (only successful responses are cached)
    if resp.status_code == requests.codes.ok:
        add_to_cache(temp_cache_file, cache_key, resp.text)
    else:
        logger.debug(f"not adding due to error code {resp.status_code}")
    return resp
def tempConvert(temp, unit):
    """ Convert Fahrenheit to Celsius """
    # Any unit other than 'F' passes through unchanged.
    if unit != 'F':
        return temp
    return (temp - 32) * 5 / 9
def blueprintCreation(inputs: dict, con: bytes) -> None:
    """Creates blueprints if correct config present.

    Args:
        inputDict (dict): User YAML input
        con (bytes): JCL class instance object
    """
    for entry in inputs:
        resp = con.blueprintCreate(blueprintName=entry['name'], templateName=entry['templateName'])
        # A response body containing 'errors' signals a failed creation.
        if 'errors' in resp.json():
            con.customError(resp)
        else:
            con.customSuccess(resp)
def register_snapshot_listeners():
    """Attaches listeners to various models.

    For every parent model class named in the snapshot rules, connects
    REST-signal handlers so snapshots are created after POST commits and
    upserted after PUT commits; also copies snapshot relationships after
    a Snapshot POST.  NOTE: uses dict.iterkeys(), i.e. Python 2.
    """
    rules = get_rules()
    # Initialize listening on parent objects
    for model_cls in rules.rules.iterkeys():
        model = getattr(models.all_models, model_cls)
        signals.Restful.model_posted_after_commit.connect(
            create_all, model, weak=False)
        signals.Restful.model_put_after_commit.connect(
            upsert_all, model, weak=False)
    signals.Restful.model_posted_after_commit.connect(
        _copy_snapshot_relationships, models.Snapshot)
def scriptJob(allChildren: bool = False,
              attributeAdded: Tuple[str, str] = ("", ""),
              attributeChange: Tuple[str, str] = ("", ""),
              attributeDeleted: Tuple[str, str] = ("", ""),
              compressUndo: bool = False,
              conditionChange: Tuple[str, str] = ("", ""),
              conditionFalse: Tuple[str, str] = ("", ""),
              conditionTrue: Tuple[str, str] = ("", ""),
              connectionChange: Tuple[str, str] = ("", ""),
              disregardIndex: bool = False,
              event: Tuple[str, str] = ("", ""),
              exists: int = 1,
              force: bool = False,
              idleEvent: str = "",
              kill: int = 1,
              killAll: bool = False,
              killWithScene: bool = False,
              listConditions: bool = False,
              listEvents: bool = False,
              listJobs: bool = False,
              nodeDeleted: Tuple[str, str] = ("", ""),
              nodeNameChanged: Tuple[str, str] = ("", ""),
              parent: str = "",
              permanent: bool = False,
              protected: bool = False,
              replacePrevious: bool = False,
              runOnce: bool = False,
              timeChange: str = "",
              uiDeleted: Tuple[str, str] = ("", "")) -> None:
    """Stub of Maya's ``scriptJob`` command (docs translated/condensed from Japanese).

    Creates a "script job": a MEL command or script attached to a given
    condition, event, or attribute.  The script runs whenever the
    condition switches to the requested state (or the trigger fires).

    Key flags (see the Autodesk Maya command reference for full details):
        attributeAdded / attributeChange / attributeDeleted / connectionChange:
            (node.attribute, script) pairs; the job is broken if the
            dependency node is deleted.
        conditionTrue / conditionFalse / conditionChange:
            run the script when the named boolean condition becomes
            true / false / changes state (use listConditions for names).
        event: run the script when the named Maya event occurs
            (use listEvents for names).
        allChildren / disregardIndex: widen attributeChange to compound
            children / any element of a multi attribute.
        idleEvent / timeChange: run on idle or when current time changes.
        exists / kill / killAll / listJobs: query and manage jobs by the
            job number returned at creation.
        parent / uiDeleted / replacePrevious: tie the job lifetime to a
            piece of Maya UI.
        permanent / protected / force: control whether a job may be killed.
        killWithScene: attach the job to the current scene.
        runOnce: run the script a single time instead of on every trigger.
        compressUndo: group the job's action with the triggering action
            for undo purposes.

    Returns:
        None here (stub).  In Maya: an int job number, a string list for
        the list flags, or a boolean for the exists flag.

    NOTE(fix): the original defaults were written as ``tuple("", "")``,
    which raises ``TypeError`` at definition time (``tuple`` accepts at
    most one argument), so this stub could never even be defined.  They
    are replaced with the literal ``("", "")``.
    """
    pass
def _find_best_twitter_key(type, reset, remaining, limit, proxies, auth):
    """
    This function switches to another pair of Twitter API keys, if they are available, to avoid pausing.
    * WANT TO SWAP KEYS HERE B/C PAUSE IS MORE THAN 3 MINUTES
    :param type: Type of API call: "timeline", "friends", "followers", "search_tweets", "search_users", "retweets",
                 "rls", or "users"
    :param reset: The remaining window before the limit resets in UTC epoch seconds
    :param remaining: The number of requests left for the 15 minute window
    :param limit: The rate limit ceiling for that given request type
    :param proxies: Proxy dictionary, ex. {'http': 'http://%s:%s' % (HOST, PORT), 'https': 'http://%s:%s' % (HOST, PORT)}
    :param auth: Twitter application authentication, see the get_authorization method
    :return best_key_auth: Authorization object using the best keys
    :return isNewAuth: Boolean value representing whether a new authorization has been produced
    """
    rls_types = _rls_type_list()
    assert (type in rls_types), "Specify an RLS type as: {}".format("', '".join(rls_types))
    # Count JSON files in key directory
    key_dir = os.path.join(os.path.dirname(pyTweet.__file__), 'twitter_api_keys')
    key_jsons = _get_key_list()
    isNewAuth = False
    # Check if there are enough keys to continue with this function
    assert (len(key_jsons) > 0), "You have no Twitter API key files saved in {}. \nRefer to the documentation to " \
                                 "create key files, or move your key files to that location.".format(key_dir)
    if len(key_jsons) == 1:
        # Only one key file exists: nothing to swap to, so wait out the current window.
        print "\tThere are no other API keys to use...returning current API key."
        pause = abs(int(time.time()) - reset) + 5
        print "\tThere are no alternative keys. Pause for {} seconds.".format(pause)
        time.sleep(pause)
        return (auth, isNewAuth)
    # Define best auth and key; seed the comparison with the caller's current key state.
    best_key_auth = auth
    best_key = {}
    best_key[type] = {'RESET': reset, 'LIMIT': limit, 'REMAINING': remaining}
    for k in key_jsons:
        try:
            key = load_twitter_api_key_set(key_file=k)
        except (ValueError, AttributeError):
            print "\tWarning! The file {} does not contain a valid Twitter API key. Please refer to the " \
                  "documentation on creating an API key".format(k)
            continue
        # Skip files missing any of the four required credential fields.
        if ('API_KEY' not in key.keys()) or ('API_SECRET' not in key.keys()) or ('ACCESS_TOKEN' not in key.keys()) or ('ACCESS_TOKEN_SECRET' not in key.keys()):
            print "\tWarning! The file {} does not contain a valid Twitter API key. Please refer to the documentation " \
                  "on creating an API key".format(k)
            continue
        # Be sure that this is not the same key we started the function with
        if auth['KEY_FILE'] == k:
            continue
        if (auth['API_KEY'] == key['API_KEY']) and (auth['API_SECRET'] == key['API_SECRET']) and (auth['ACCESS_TOKEN'] == key['ACCESS_TOKEN']) and (auth['ACCESS_TOKEN_SECRET'] == key['ACCESS_TOKEN_SECRET']):
            continue
        # Check the RLS of RLS for key
        key_auth = get_authorization(key)
        _, _, _ = _get_rate_limit_status(type=type, proxies=proxies, auth=key_auth)
        # Re-load the key file: presumably _get_rate_limit_status persists the
        # RESET/REMAINING/LIMIT fields into it — TODO confirm.
        key = load_twitter_api_key_set(key_file=k)
        # Skip key if it doesn't have appropriate fields
        if ('RESET' not in key[type].keys()) or ('REMAINING' not in key[type].keys()) or ('LIMIT' not in key[type].keys()):
            continue
        # Check keys! A completely fresh window is as good as it gets: stop searching.
        if key[type]['REMAINING'] == key[type]['LIMIT']:
            best_key = key
            best_key_auth = key_auth
            isNewAuth = True
            break
        if key[type]['REMAINING'] < 1:
            continue
        # NOTE(review): this accepts the FIRST key that merely beats the current one
        # and breaks, rather than scanning all keys for the true maximum — confirm intended.
        if key[type]['REMAINING'] > best_key[type]['REMAINING']:
            best_key = key
            best_key_auth = key_auth
            isNewAuth = True
            break
    if isNewAuth:
        print "\nSwitch to Twitter key {} after using {}".format(best_key_auth['KEY_FILE'], auth['KEY_FILE'])
    else:
        # No usable alternative found: sleep until the best key's window resets.
        pause = abs(int(time.time()) - best_key[type]['RESET']) + 5
        print "\nUnable to find a better Twitter key, they all appear to be exahusted for the {} call. \nPause for {} " \
              "minutes".format(type, np.ceil(pause/60))
        time.sleep(pause)
    return (best_key_auth, isNewAuth)
def allowed_file(filename, allowed_exts):
    """
    The validator for blueimp that limits which file extensions are allowed.
    Args:
        filename (str): a filepath
        allowed_exts (str): set of allowed file extensions
    Returns:
        bool: True if extension is an allowed file type, False otherwise
    """
    lowered = filename.lower()
    # A file is allowed when it matches the "*.<ext>" glob of any allowed extension.
    return any(fnmatch.fnmatch(lowered, "*." + str(ext)) for ext in allowed_exts)
def tokenize(document):
    """
    Given a document (represented as a string), return a list of all of the
    words in that document, in order.
    Process document by coverting all words to lowercase, and removing any
    punctuation or English stopwords.
    """
    strip_punct = str.maketrans("", "", string.punctuation)
    stopwords = nltk.corpus.stopwords.words("english")
    # Lowercase, tokenize, then strip every punctuation character from each token.
    cleaned = [token.translate(strip_punct)
               for token in nltk.word_tokenize(document.lower())]
    # Drop tokens that became empty and any English stopwords, then sort descending.
    kept = [token for token in cleaned
            if token and token not in stopwords]
    return sorted(kept, reverse=True)
def _test_divide_uniform():
    """
    Currently fails because division poorly defined for identical Normal Messages
    """
    numerator = af.UniformPrior(
        lower_limit=0.2,
        upper_limit=0.8,
    )
    quotient = numerator / af.UniformPrior()
    # The resulting prior should still have a well-defined lower limit.
    assert not np.isnan(quotient.lower_limit)
def return_file_size(file_path):
    """Return the size in bytes of the pre-processed file at *file_path*.

    This number is used later in the rendering process.
    """
    return os.stat(file_path).st_size
def test_get_intervals_valid_duration():
    """ Tests that InterestInterval entries returned by 'get_interest_intervals' function last no longer than LendingInterval entries they originated from """
    lending_intervals = generate_sample_lending_intervals(30, 10, 1450000000, 1510000000)
    for lending_interval in lending_intervals:
        filtered_intervals, filteredout_intervals = test_tgt.get_interest_intervals([lending_interval])
        # Every derived interval must be fully contained in its lending interval.
        for filtered_interval in filtered_intervals:
            assert filtered_interval.start_date >= lending_interval.start_date
            # Bug fix: the original compared the interval OBJECT to end_date;
            # compare the interval's own end_date instead.
            assert filtered_interval.end_date <= lending_interval.end_date
        for filteredout_interval in filteredout_intervals:
            assert filteredout_interval.start_date >= lending_interval.start_date
            assert filteredout_interval.end_date <= lending_interval.end_date
def check_auth(username, password):
    """This function is called to check if a username /
    password combination is valid.

    :param username: api_user name to look up in the users table.
    :param password: password to compare against the stored one.
    :return: True if the user exists, the password matches and the
        account is active (active == 1); False otherwise.
    """
    database = db_connect()
    try:
        cursor = database.cursor()
        try:
            cursor.execute(
                "SELECT password, active FROM users WHERE api_user=%s;",
                (username,))
            results = cursor.fetchone()
        finally:
            cursor.close()
    finally:
        # Close the connection even when the query raises, so a failing
        # lookup does not leak database connections.
        database.close()
    if results is None:
        return False
    # NOTE(review): this compares the stored password directly; if passwords are
    # stored in plaintext or with fast hashes, consider proper password hashing
    # and hmac.compare_digest to avoid timing side channels — confirm the schema.
    return results[0] == password and results[1] == 1
def train(
    data="StephenColbert/medium_no_vtx1",
    labels="labels.json",
    sentence_dataset=False,
    occlussion_threshold=0.8,
    train_split=0.8,
    num_workers=1,
    refresh=False,
    patience=10,
    batch_size=4,
    learning_rate=1e-4,
    annealings=2,
    enable_ctc=False,
    grad_norm=50,
    tr_epochs=50,
    max_tfr=0.9,
    min_tfr=0.0,
    num_layers=1,
    frame_dim=68*3,
    hidden_size=700,
    char_dim=300,
    rnn_type='LSTM',
    attention_type='1_layer_nn',
    attn_hidden_size=-1,
    bidirectional=False,
    rnn_dropout=0.0,
    seed=123456,
    cuda=False,
):
  """ Runs the primary training loop.
  :param data:
  :param labels:
  :param sentence_dataset:
  :param occlussion_threshold:
  :param train_split:
  :param num_workers:
  :param patience: Number of epochs without val-CER improvement before annealing.
  :param batch_size:
  :param learning_rate:
  :param annealings: Number of times to anneal learning rate before training is finished.
  :param enable_ctc:
  :param max_tfr: Starting teacher-forcing ratio (decayed linearly to min_tfr).
  :param grad_norm:
  :param num_layers:
  :param frame_dim:
  :param hidden_size:
  :param char_dim:
  :param rnn_type:
  :param attention_type:
  :param attn_hidden_size:
  :param bidirectional:
  :param rnn_dropout:
  :param seed:
  :param cuda:
  """
  # Setup seed.
  torch.manual_seed(seed)
  torch.cuda.manual_seed_all(seed)
  rand = np.random.RandomState(seed=seed)
  # Setup device.
  # REVIEW josephz: Is there a clean way to use multiple or different GPUs?
  device = torch.device('cuda') if cuda else torch.device('cpu')
  print("Device: ", device)
  # Init Data.
  print("Initializing dataset '{}'".format(data))
  train_dataset, val_dataset, test_dataset = _get_datasets(data, train_split, sentence_dataset,
    threshold=occlussion_threshold, labels=labels, rand=rand, refresh=refresh, include_test=True)
  train_loader = _data.DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_data_loader._collate_fn)
  val_loader = _data.DataLoader(val_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_data_loader._collate_fn)
  test_loader = _data.DataLoader(test_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=_data_loader._collate_fn)
  # Init Models.
  print("Initializing model")
  encoder, decoding_step = _init_models(train_dataset.char2idx, num_layers, frame_dim, hidden_size, char_dim,
    enable_ctc, rnn_type, attention_type, attn_hidden_size, bidirectional, rnn_dropout, device)
  # Initialize Logging.
  weights_dir = _util.getRelWeightsPath(data, use_existing=False)
  tensorboard_writer = tensorboardX.SummaryWriter(weights_dir)
  _getSharedLogger().info("Writing Tensorboard logs to '%s'", weights_dir)
  print()
  print("Try visualizing by running the following:")
  print(f"\ttensorboard --logdir='{weights_dir}'")
  print("Then open the following URL in your local browser. "
        "\n\tIf you're running on a remote machine see `README_TENSORBOARD.md` for help...")
  # REVIEW josephz: Multi-input support doesn't seem ready yet: https://github.com/lanpa/tensorboardX/issues/256
  # tensorboard_writer.add_graph(encoder,
  #   torch.autograd.Variable(
  #     torch.tensor([torch.zeros(batch_size, 100, 68, 3), torch.zeros(batch_size,))))
  # tensorboard_writer.add_graph(decoding_step,
  #   torch.autograd.Variable(
  #     torch.tensor(torch.zeros(batch_size,), torch.zeros(num_layers, batch_size, hidden_size), torch.zeros(batch_size,), torch.zeros(batch_size, 100,
  #       hidden_size))))
  # Train.
  val_cers = []
  train_decoder_losses = []
  train_ctc_losses = []
  best_val_cer = 1.0
  best_val_cer_idx = -1
  # Initial evaluation
  print("Initial evaluation...")
  decoder_loss, val_correct, val_count = _train.eval(encoder, decoding_step, val_loader, device, train_dataset.char2idx)
  val_cer = (val_count - val_correct).float() / val_count
  print("\tCER: ", str(val_cer))
  encoder_path = os.path.join(weights_dir, "best_encoder.pth")
  decoder_path = os.path.join(weights_dir, "best_decoder.pth")
  num_epochs = 0
  num_annealings = 0
  print("Beginning training loop")
  ts = time.time()
  while val_cer < best_val_cer or num_annealings < annealings:
    print("Epoch {}:".format(num_epochs + 1))
    if num_epochs - best_val_cer_idx > patience:
      # If the model does not improve after our set 'patience' number of epochs, we will reduce the learning rate.
      num_annealings += 1
      learning_rate /= 5
      print(f'\tAnnealing to {learning_rate}')
      restore(encoder, encoder_path)
      restore(decoding_step, decoder_path)
      # Must set best val CER to here, or else this will also trigger next loop
      # if val CER does not go down.
      best_val_cer_idx = num_epochs
    # Apply linear teacher-forcing ratio decay.
    curr_tfr = max(min_tfr, max_tfr - num_epochs / tr_epochs)
    assert 0.0 <= curr_tfr <= 1.0
    print(f'\tCurrent Teacher Forcing Ratio: {curr_tfr}')
    avg_decoder_loss, avg_ctc_loss = _train.train(encoder, decoding_step, train_loader,
      opt=torch.optim.Adam(list(encoder.parameters()) + list(decoding_step.parameters()), lr=learning_rate),
      device=device,
      char2idx=train_dataset.char2idx,
      teacher_forcing_ratio=curr_tfr,
      grad_norm=grad_norm)
    print(f'\tAVG Decoder Loss: {avg_decoder_loss}')
    print(f'\tAVG CTC Loss: {avg_ctc_loss}')
    tensorboard_writer.add_scalar(os.path.join(data, 'avg decoder loss'), avg_decoder_loss, global_step=num_epochs)
    tensorboard_writer.add_scalar(os.path.join(data, 'avg CTC loss'), avg_ctc_loss, global_step=num_epochs)
    decoder_loss, val_correct, val_count = _train.eval(encoder, decoding_step, val_loader, device, train_dataset.char2idx)
    _, train_correct, train_count = _train.eval(encoder, decoding_step, train_loader, device, train_dataset.char2idx)
    val_cer = (val_count - val_correct).float() / val_count
    train_cer = (train_count - train_correct).float() / train_count
    encoder.save_best_model(val_cer, encoder_path)
    decoding_step.save_best_model(val_cer, decoder_path)
    print(f'\tTrain CER: {train_cer}')
    print(f'\tVal CER: {val_cer}')
    # ANALYSIS
    encoder.eval()
    decoding_step.eval()
    with torch.no_grad():
      # CER
      _, test_correct, test_count = _train.eval(encoder, decoding_step, test_loader, device, train_dataset.char2idx)
      test_cer = (test_count - test_correct).float() / test_count
      # Bug fix: previously printed train_cer here even though test_cer was computed.
      print(f'\tTest CER: {test_cer}')
      # Sample teacher forcing output
      print('Some teacher-forcing outputs:')
      _analysis.print_samples(encoder, decoding_step, test_loader, device, train_dataset.char2idx, max_=10)
      # confusion matrix
      print('drawing confusion matrix:')
      try:
        _analysis.get_confusion_matrix(encoder, decoding_step, test_loader, device, test_dataset.char2idx, num_epochs)
      # Bug fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
      except Exception:
        print('oops something wrong happened in drawing confusion matrix')
      # inference
      print('Some student-forcing outputs with beam search:')
      for frames, frame_lens, chars, char_lens in test_loader:
        frames, frame_lens, chars, char_lens = frames[:2], frame_lens[:2], chars[:2], char_lens[:2]
        frames, frame_lens, chars, char_lens = frames.to(device), frame_lens.to(device), chars.to(device), char_lens.to(device)
        pred, gt = _analysis.inference(encoder, decoding_step, frames, frame_lens, chars, char_lens, device,
          test_dataset.char2idx, beam_width=10, max_label_len=100)
        for gt_, pred_ in zip(gt, pred):
          print(f'GTL\t: {gt_}')
          print(f'Pred\t: {pred_}')
        break
    tensorboard_writer.add_scalars(os.path.join(data, 'CER'), {"Train": train_cer, "Val": val_cer}, global_step=num_epochs)
    tensorboard_writer.add_scalar(os.path.join(data, 'learning rate'), learning_rate, global_step=num_epochs)
    val_cers.append(val_cer)
    train_decoder_losses.append(avg_decoder_loss)
    train_ctc_losses.append(avg_ctc_loss)
    if val_cer < best_val_cer:
      best_val_cer = val_cer
      best_val_cer_idx = num_epochs
    num_epochs += 1
  te = time.time()
  total_time = te - ts
  print()
  print("Training complete: Took '{}' seconds, or '{}' per epoch".format(total_time, total_time / num_epochs))
  print("Training Statistics")
  print("\tBest Val CER: '{}'".format(np.min(val_cers)))
  print("\tBest Decoder Loss: '{}'".format(np.min(train_decoder_losses)))
  print("\tBest CTC Loss: '{}'".format(np.min(train_ctc_losses)))
  print()
def _handle_axis(axis: Union[str, int]) -> int:
"""Handles axis arguments including "columns" and "index" strings."""
if axis not in {0, 1, 'columns', 'index'}:
raise ValueError(
"axis value error: not in {0, 1, 'columns', 'index'}"
)
# Map to int if str
if isinstance(axis, str):
axis_mapper = {'index': 0, 'columns': 1}
axis = axis_mapper.get(axis)
return axis | 5,325,576 |
def test_odds_type_raise_value_error():
    """Test the raise of value error for check of odds type."""
    loader = DummySoccerDataLoader()
    expected_msg = (
        "Parameter `odds_type` should be a prefix of available odds columns. "
        "Got `pinnacle` instead."
    )
    with pytest.raises(ValueError, match=expected_msg):
        loader.extract_train_data(odds_type='pinnacle')
async def create_app() -> web.Application:
    """Create an web application."""
    # Application-wide middlewares: permissive CORS plus a default error handler.
    app = web.Application(
        middlewares=[
            cors_middleware(allow_all=True),
            error_middleware(),  # default error handler for whole application
        ]
    )
    # Set up logging
    logging.basicConfig(level=LOGGING_LEVEL)
    logging.getLogger("chardet.charsetprober").setLevel(LOGGING_LEVEL)
    # Set up routes: health checks, competition formats, events and their
    # nested raceclass/contestant resources.
    app.add_routes(
        [
            web.view("/ping", Ping),
            web.view("/ready", Ready),
            web.view("/competition-formats", CompetitionFormatsView),
            web.view("/competition-formats/{id}", CompetitionFormatView),
            web.view("/events", EventsView),
            web.view("/events/{eventId}", EventView),
            web.view(
                "/events/{eventId}/generate-raceclasses", EventGenerateRaceclassesView
            ),
            web.view("/events/{eventId}/format", EventFormatView),
            web.view("/events/{eventId}/raceclasses", RaceclassesView),
            web.view("/events/{eventId}/raceclasses/{raceclassId}", RaceclassView),
            web.view("/events/{eventId}/contestants", ContestantsView),
            web.view(
                "/events/{eventId}/contestants/assign-bibs", ContestantsAssignBibsView
            ),
            web.view("/events/{eventId}/contestants/{contestantId}", ContestantView),
        ]
    )
    async def mongo_context(app: Any) -> Any:
        # Cleanup-context generator: connect before the app starts serving,
        # close the client when the app shuts down.
        # Set up database connection:
        logging.debug(f"Connecting to db at {DB_HOST}:{DB_PORT}")
        mongo = motor.motor_asyncio.AsyncIOMotorClient(
            host=DB_HOST, port=DB_PORT, username=DB_USER, password=DB_PASSWORD
        )
        # NOTE(review): attribute access selects a database literally named
        # "DB_NAME"; if the configured name was intended, this should likely
        # be mongo[DB_NAME] — confirm.
        db = mongo.DB_NAME
        app["db"] = db
        yield
        mongo.close()
    app.cleanup_ctx.append(mongo_context)
    return app
def section_setup(section, zip_directory, corpus_directory):
    """Make folders for individual SE site (section)'s unzipped files and
    processed data, and generate expected path to 7z file on disk"""
    # Folder that will hold this site's unzipped files.
    unzip_dir = os.path.join(zip_directory, section)
    make_directory(unzip_dir)
    # The release archive itself lives at the root of the zip directory.
    archive_path = os.path.join(zip_directory, section + ".7z")
    # Folder for this site's processed corpus data.
    corpus_dir = os.path.join(corpus_directory, section)
    make_directory(corpus_dir)
    return archive_path, unzip_dir, corpus_dir
def get_file_size(file):
    """Get file size.
    Args:
        file (str): Input file.
    Returns:
        int: Return size of the file in bytes.
    """
    return os.path.getsize(file)
def is_icmp_dest_unreach(icmp_data):
    """Return True when the ICMP packet's TYPE field is ICMP_DEST_UNREACH."""
    packet_type = icmp_data["TYPE"]
    return packet_type == ICMP_DEST_UNREACH
def printFeature(f, default=0.0, t=60):
    """Prints feature f by traversing it and printing each (index, value) pair."""
    def _show(index, value):
        print(str((index, value)))
    traverse(f, _show, default, t)
def save_tweets(tweets: List[Status], overwrite=False, sentiment_analyzer=None):
    """ Saves a list of tweets to postgres

    :param tweets: tweepy Status objects to persist.
    :param overwrite: when True, an existing row with the same status_id has
        its data column replaced; otherwise duplicates are silently skipped.
    :param sentiment_analyzer: optional analyzer; when given, sentiment is
        computed and attached to each record before insert.
    """
    # Persist the tweet authors first so foreign references resolve.
    save_users([t.user for t in tweets])
    # De-duplicate by status id before converting to DB records.
    unique_tweets = [*toolz.unique(tweets, key=lambda t: t.id)]
    conn = db_conn()
    crs = conn.cursor()
    records = [*map(tweet_to_record, unique_tweets)]
    if sentiment_analyzer is not None and len(records) > 0:
        logging.info(f"Calculating sentiment for {len(records)} records...")
        records = add_sentiment_to_records(sentiment_analyzer, records)
    # Upsert vs. insert-if-absent, selected by the `overwrite` flag.
    if overwrite:
        conflict_clause = "(status_id) DO UPDATE SET data = EXCLUDED.data"
    else:
        conflict_clause = "DO NOTHING"
    execute_values(crs, f"""INSERT INTO tweets (status_id, created_at, data)
                            VALUES %s ON CONFLICT {conflict_clause};""",
                   records)
    conn.commit()
    # NOTE(review): neither cursor nor connection is closed here; confirm
    # whether db_conn() returns a pooled/shared connection or this leaks.
def exists_user_notifications(session, user_id):
    """Helper method to check if notifications for user exists.

    :param session: SQLAlchemy session used to run the query.
    :param user_id: id of the user whose notifications are checked.
    :return: True if at least one notification row exists for the user.
    """
    # Security fix: use a bound parameter instead of str.format interpolation,
    # which was vulnerable to SQL injection and quoting bugs.
    res = session.execute(
        text("""SELECT EXISTS(
            SELECT 1 FROM public.notification WHERE user_id=:user_id) AS user"""),
        {"user_id": user_id},
    ).fetchone()
    return res.user
def export_onnx(model, config, device, onnx_model_path, verbose):
    """ Export GPT-2 model with past state to ONNX model

    :param model: the GPT-2 PyTorch model to export.
    :param config: model config providing n_layer, heads, hidden/vocab sizes.
    :param device: torch device the dummy inputs are created on.
    :param onnx_model_path: destination path for the exported .onnx file.
    :param verbose: forwarded to torch.onnx.export.
    :return: onnx_model_path (the path that was written).
    """
    num_layer = config.n_layer
    # Minimal dummy batch (batch=1, seq=1, past=1) just to trace the graph;
    # dynamic axes below make the real shapes flexible.
    dummy_inputs = get_dummy_inputs(batch_size=1,
                                    past_sequence_length=1,
                                    sequence_length=1,
                                    num_attention_heads=config.num_attention_heads,
                                    hidden_size=config.hidden_size,
                                    num_layer=num_layer,
                                    vocab_size=config.vocab_size,
                                    device=device,
                                    float16=False)
    dummy_input_ids, dummy_position_ids, dummy_attention_mask, dummy_past = dummy_inputs
    input_list = [dummy_input_ids, dummy_position_ids, dummy_attention_mask] + dummy_past
    # Run one forward pass to obtain example outputs for the exporter.
    with torch.no_grad():
        outputs = model(*input_list)
    past_names = [f'past_{i}' for i in range(num_layer)]
    present_names = [f'present_{i}' for i in range(num_layer)]
    # GPT2Model outputs last_state; GPT2LMHeadModel outputs logits (prediction_scores)
    assert outputs[0].shape[2] == config.vocab_size or outputs[0].shape[2] == config.hidden_size
    output_names = ["logits" if outputs[0].shape[2] == config.vocab_size else "last_state"] + present_names
    # Shape of input tensors:
    #    input_ids: (batch_size, seq_len)
    #    past_{i}: (2, batch_size, num_heads, past_seq_len, hidden_size/num_heads)
    #    attention_mask: (batch_size, past_seq_len + seq_len)
    # Shape of output tensors:
    #    last_state: (batch_size, seq_len, hidden_size)
    #      or logits: (batch_size, seq_len, vocab_size)
    #    present_{i}: (2, batch_size, num_heads, past_seq_len + seq_len, hidden_size/num_heads)
    dynamic_axes = {'input_ids': {0: 'batch_size', 1: 'seq_len'}, output_names[0]: {0: 'batch_size', 1: 'seq_len'}}
    for name in past_names:
        dynamic_axes[name] = {1: 'batch_size', 3: 'past_seq_len'}
    for name in present_names:
        dynamic_axes[name] = {1: 'batch_size', 3: 'total_seq_len'}
    dynamic_axes['attention_mask'] = {0: 'batch_size', 1: 'total_seq_len'}
    dynamic_axes['position_ids'] = {0: 'batch_size', 1: 'seq_len'}
    logger.info(
        f"Shapes: input_ids={dummy_input_ids.shape} past={dummy_past[0].shape} output={outputs[0].shape} present={outputs[1][0].shape}"
    )
    torch.onnx.export(model,
                      args=tuple(input_list),
                      f=onnx_model_path,
                      input_names=['input_ids', 'position_ids', 'attention_mask'] + past_names,
                      output_names=output_names,
                      example_outputs=outputs,
                      dynamic_axes=dynamic_axes,
                      opset_version=11,
                      do_constant_folding=True,
                      verbose=verbose)
    return onnx_model_path
def merge_ref(mask, found_id, position):
    """
    Merges the ids that are congruent to each other in the mask
    Note: side effect of merge_ref leaves pixels of the mask skipping IDs if
    merged
    TODO: optimize by splitting into a equivalency list and passing merge_ref
    at end
    Parameters
    ----------
    mask : int[][]
        Binary mask - mask to edit
    found_id : int[]
        list of objects to merge
    position : int[2] = [row, col]
        y, x integer coordinates of the pixel. Also, the pixel to halt merging
        at.
    """
    # NOTE(review): np.shape returns (n_rows, n_cols); the names here call the
    # first dimension "width" and use it as the row stride below, which is only
    # consistent for square masks — confirm behavior for non-square input.
    width, height = np.shape(mask)
    # Collapse every id in found_id down to the smallest of them.
    min_id = min(found_id)
    # Walk pixels in row-major order, stopping just before `position`.
    for i in range(position[0] * width + position[1]):
        if mask[int(i / width)][i % width] in found_id:
            mask[int(i / width)][i % width] = min_id
    # Finally relabel the halting pixel itself.
    mask[position[0]][position[1]] = min_id
def set_peak_elo(df: DataFrame, playersElo) -> DataFrame:
    """Add 2 columns PeakElo and PeakEloSince to a dataframe containing Date, P1Id and P2Id fields

    Args:
        df (DataFrame): the dataframe from where we read row by row (match by match)
        playersElo ([type]): dict <id>:[eloratings_history]
    Returns:
        DataFrame: the input dataframe with 2 additionnal columns PeakElo, PeakEloSince
    """
    # Row-wise: set_peak_elo_match returns a 4-tuple per match, unzipped into
    # the four peak-Elo columns (one pair per player).
    (
        df.loc[:, ["PeakElo1"]],
        df.loc[:, ["PeakEloSince1"]],
        df.loc[:, ["PeakElo2"]],
        df.loc[:, ["PeakEloSince2"]],
    ) = zip(
        *df.apply(
            lambda row: set_peak_elo_match(row, playersElo),
            axis=1,
        )
    )
    # save a dataframe with all matches and Elo rating of each player for the matches
    # NOTE(review): hard-coded output path; side effect on every call.
    df.to_csv("./results/dfWithElos9m_peak.csv")
    return df
def _async_validate_auto_generated_cost_entity(
    hass: HomeAssistant, energy_entity_id: str, result: list[ValidationIssue]
) -> None:
    """Validate that the auto generated cost entity is correct."""
    cost_sensors = hass.data[DOMAIN]["cost_sensors"]
    if energy_entity_id not in cost_sensors:
        # The cost entity has not been setup
        return
    cost_entity_id = cost_sensors[energy_entity_id]
    # Flag cost sensors the recorder is not tracking.
    if not recorder.is_entity_recorded(hass, cost_entity_id):
        result.append(ValidationIssue("recorder_untracked", cost_entity_id))
def calc_plane_vector(atom_pos):
    """
    Method to calculate best-fitted (unit) plane vector given a set of points using SVD
    ARGS:
        atom_pos (ndarray) :: ndarray storing atomic positions
    returns:
        ndarray :: unit normal of the best-fit plane, oriented with z >= 0
    """
    # Center the point cloud on its centroid before the SVD.
    coords = atom_pos.T
    centered = coords - coords.mean(axis=1, keepdims=True)
    left_vectors, _singular, _right = svd(centered, full_matrices=True)
    # The left singular vector of least variance is the plane normal.
    normal = left_vectors[:, -1] / norm(left_vectors[:, -1])
    # Flip if needed so the normal points upwards (positive z).
    return normal * np.sign(normal[-1])
def search_reddit(search, subreddit='', t='week', limit='100',
                  sort='new', restrict_sr='1'):
    """
    search - string object, representing your search query
    subreddit - string object, representing the subreddit
    t - string object, one of (hour, day, week, month, year, all)
    limit - string object, limits the number of posts returned
    sort - string object, one of 'hot', 'old', 'top' or 'new'
    restrict_sr - string object, '0' or '1', specifies if restriction
                  to the subreddit is applied
    """
    print(f"Retrieving reddit posts for {search=} and {subreddit=}")
    # OAuth bearer headers for the reddit API.
    headers = get_reddit_token('nie_irek_ubuntu')
    reddit_url = "https://oauth.reddit.com/r/" + subreddit + "/search"
    query_params = {
        'q': search,
        'sort': sort,
        'restrict_sr': restrict_sr,
        'limit': limit,
        't': t,
    }
    return requests.get(reddit_url, headers=headers, params=query_params)
def _expected_type_expression(typedef: Typedef) -> str:
    """
    Determine the type expression supplied to ``from_obj`` function corresponding to the type definition.
    :param typedef: type definition in Python representation
    :return: Python code representing the type definition
    """
    # pylint: disable=too-many-return-statements
    # Primitive typedefs map straight to their builtin type names;
    # the order matches the original isinstance chain.
    primitives = [
        (Booldef, 'bool'),
        (Intdef, 'int'),
        (Floatdef, 'float'),
        (Strdef, 'str'),
        (Bytesdef, 'bytes'),
    ]
    for typedef_cls, builtin_name in primitives:
        if isinstance(typedef, typedef_cls):
            return builtin_name
    if isinstance(typedef, Listdef):
        if typedef.items is None:
            raise ValueError('Unexpected None items in typedef: {!r}'.format(typedef.identifier))
        return 'list, {}'.format(_expected_type_expression(typedef=typedef.items))
    if isinstance(typedef, Dictdef):
        if typedef.values is None:
            raise ValueError('Unexpected None values in typedef: {!r}'.format(typedef.identifier))
        return 'dict, {}'.format(_expected_type_expression(typedef=typedef.values))
    if isinstance(typedef, Classdef):
        return _class_name(typedef.identifier)
    raise NotImplementedError('Translating the typedef to an expected type is not supported: {}'.format(typedef))
def adjoint(m):
    """Compute the Hermitian adjoint (conjugate transpose) of *m*."""
    return np.conj(m).T
def get_graphic_template_variables(path, graphic_number):
    """
    Generates the template variables for each graphic

    :param path: path to the graphic project directory.
    :param graphic_number: zero-based index; displayed 1-based in the result.
    :return: dict with spreadsheet id, app id, display number and copytext
        sheet, or None when the config or doc key is missing.
    """
    slug, abspath = utils.parse_path(path)
    graphic_path = '%s/%s' % (abspath, slug)
    ## Get Spreadsheet Path
    try:
        graphic_config = load_graphic_config(graphic_path)
    except IOError:
        # No per-graphic config: nothing to render for this slug.
        print '%s/graphic_config.py does not exist.' % slug
        return
    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug
        return
    ## Generate Links From Slug
    spreadsheet_id = graphic_config.COPY_GOOGLE_DOC_KEY
    app_id = slug
    ## Update Spreadsheet: download the latest copy of the Google doc locally.
    copy_path = os.path.join(graphic_path, '%s.xlsx' % slug)
    get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path)
    ## Get Sheet Data from the 'labels' tab of the downloaded workbook.
    copy = copytext.Copy(filename=copy_path)
    sheet = copy['labels']
    note = {
        "spreadsheet_id": spreadsheet_id,
        "app_id": app_id,
        "graphic_number": graphic_number + 1,
        "sheet": sheet,
    }
    return note
def svc(self, model):
    """Obtain the model and the search space of the SVC classifier.

    :param model: "linear" selects a LinearSVC; anything else a kernel SVC.
    :return: (classifier, search_space) tuple.
    """
    if model == "linear":
        svc = LinearSVC(dual=False, class_weight='balanced')
    else:
        svc = SVC(cache_size=1000, class_weight='balanced')
    # Hyper-parameter grid explored for the classifier.
    svc_sp = {
        'kernel': ['linear', 'poly', 'rbf'],
        'degree': [2, 3, 4],
        'gamma': ["auto", "scale"],
        'C': [0.001, 0.01, 0.1, 1.0],
    }
    return svc, svc_sp
def plot_difference( field, coord, iteration, F_cpu, F_gpu, info ):
    """
    Plots the simulation results on CPU and GPU, plus their difference,
    as three stacked panels.
    """
    import matplotlib.pyplot as plt
    extent = 1.e6 * info.imshow_extent
    panels = [
        (311, F_gpu, 'GPU'),
        (312, F_cpu, 'CPU'),
        (313, F_cpu - F_gpu, 'Difference'),
    ]
    plt.figure()
    plt.suptitle( field + coord )
    for subplot_id, panel_data, panel_title in panels:
        plt.subplot(subplot_id)
        plt.imshow( panel_data, aspect='auto',
                    origin='lower', extent=extent )
        plt.colorbar()
        plt.title(panel_title)
    plt.tight_layout()
    plt.show()
def _FormatDataTransferIdentifiers(client, transfer_identifier):
  """Formats a transfer config or run identifier.

  Transfer configuration/run commands should be able to support different
  formats of how the user could input the project information. This function
  will take the user input and create a uniform transfer config or
  transfer run reference that can be used for various commands.
  This function will also set the client's project id to the specified
  project id.
  Returns:
    The formatted transfer config or run.
  """
  match = re.search(r'projects/([^/]+)', transfer_identifier)
  if match:
    # Identifier already carries a project: adopt it on the client as-is.
    client.project_id = match.group(1)
    return transfer_identifier
  # Otherwise prefix the client's default project.
  return ('projects/' + client.GetProjectReference().projectId + '/' +
          transfer_identifier)
def getReactionUrl(reaction, family=None, estimator=None, resonance=True):
    """
    Get the URL (for kinetics data) of a reaction.

    Returns '' if the reaction contains functional Groups or LogicNodes instead
    of real Species or Molecules.
    """
    kwargs = dict()
    # Encode each reactant as an adjacency list keyword (reactant1, reactant2, ...).
    for index, reactant in enumerate(reaction.reactants):
        if isinstance(reactant, Entry):
            reactant = reactant.item
        if isinstance(reactant, Group) or isinstance(reactant, LogicNode):
            return ''
        mol = reactant if isinstance(reactant, Molecule) else reactant.molecule[0]
        kwargs['reactant{0:d}'.format(index+1)] = moleculeToAdjlist(mol)
    # NOTE(review): unlike reactants, products are not unwrapped from Entry nor
    # checked for Group/LogicNode here — confirm whether that asymmetry is intended.
    for index, product in enumerate(reaction.products):
        mol = product if isinstance(product, Molecule) else product.molecule[0]
        kwargs['product{0:d}'.format(index+1)] = moleculeToAdjlist(mol)
    kwargs['resonance'] = resonance
    # With a family and estimator we link to the group-estimate view; a family
    # without estimator yields no URL; otherwise link to raw kinetics data.
    if family:
        if estimator:
            kwargs['family'] = family
            kwargs['estimator'] = estimator.replace(' ', '_')
            reaction_url = reverse('database:kinetics-group', kwargs=kwargs)
        else:
            reaction_url = ''
    else:
        reaction_url = reverse('database:kinetics-data', kwargs=kwargs)
    return reaction_url
def test_mcs_nonmatch(graph, subgraph, attrs):
    """
    Test against networkx reference implementation using graphs that are
    probably not subgraphs without considering symmetry.
    """
    # No attributes means every node pair matches.
    if attrs is None:
        node_match = lambda n1, n2: True
        attrs = []
    else:
        node_match = nx.isomorphism.categorical_node_match(attrs, [None]*len(attrs))
    note(("Graph nodes", graph.nodes(data=True)))
    note(("Graph edges", graph.edges(data=True)))
    note(("Subgraph nodes", subgraph.nodes(data=True)))
    note(("Subgraph edges", subgraph.edges(data=True)))
    # Reference implementation result (timed; negative timers below hold -elapsed).
    ref_time = perf_counter()
    expected = make_into_set(MCS(graph, subgraph, attributes=attrs))
    ref_time -= perf_counter()
    # ISMAGS without symmetry reduction.
    a_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match)
    asymmetric = make_into_set(ismags.largest_common_subgraph(False))
    a_ism_time -= perf_counter()
    # ISMAGS with symmetry reduction.
    s_ism_time = perf_counter()
    ismags = vermouth.ismags.ISMAGS(graph, subgraph, node_match=node_match)
    symmetric = make_into_set(ismags.largest_common_subgraph(True))
    s_ism_time -= perf_counter()
    note(("Symmetric", symmetric))
    note(("Asymmetric", asymmetric))
    note(("Expected", expected))
    # Record relative-speed statistics for hypothesis reporting only.
    if a_ism_time < ref_time:
        event('Asymmetric ISMAGS faster than reference')
    if s_ism_time < a_ism_time:
        event('Symmetric ISMAGS faster than asymmetric')
    if s_ism_time < ref_time:
        event('Symmetric ISMAGS faster than reference')
    # Asymmetric result must reproduce the reference (when non-empty), and the
    # symmetry-reduced result must be a subset of the asymmetric one.
    assert asymmetric == expected or not expected
    assert symmetric <= asymmetric
def hexLat2W(nrows=5, ncols=5):
    """
    Create a W object for a hexagonal lattice.

    Parameters
    ----------
    nrows   : int
              number of rows
    ncols   : int
              number of columns

    Returns
    -------
    w : W
        instance of spatial weights class W

    Notes
    -----
    Observations are row ordered: first k observations are in row 0, next k in row 1, and so on.
    Construction is based on shifting every other column of a regular lattice
    down 1/2 of a cell.

    Examples
    --------
    >>> import pysal as ps
    >>> w = ps.lat2W()
    >>> w.neighbors[1]
    [0, 6, 2]
    >>> w.neighbors[21]
    [16, 20, 22]
    >>> wh = ps.hexLat2W()
    >>> wh.neighbors[1]
    [0, 6, 2, 5, 7]
    >>> wh.neighbors[21]
    [16, 20, 22]
    >>>
    """
    if nrows == 1 or ncols == 1:
        print "Hexagon lattice requires at least 2 rows and columns"
        print "Returning a linear contiguity structure"
        return lat2W(nrows, ncols)
    n = nrows * ncols
    # Row and column index of every cell in row-major order.
    rid = [i // ncols for i in xrange(n)]
    cid = [i % ncols for i in xrange(n)]
    r1 = nrows - 1
    c1 = ncols - 1
    # Start from the rook-contiguity neighbors of a regular lattice and add
    # the diagonal neighbors implied by the hexagonal (shifted-column) layout.
    w = lat2W(nrows, ncols).neighbors
    for i in xrange(n):
        odd = cid[i] % 2
        if odd:
            if rid[i] < r1:  # odd col index above last row
                # new sw neighbor
                if cid[i] > 0:
                    j = i + ncols - 1
                    w[i] = w.get(i, []) + [j]
                # new se neighbor
                if cid[i] < c1:
                    j = i + ncols + 1
                    w[i] = w.get(i, []) + [j]
        else:  # even col
            # nw
            jnw = [i - ncols - 1]
            # ne
            jne = [i - ncols + 1]
            if rid[i] > 0:
                # NOTE(review): this bare `w[i]` expression is a no-op; the
                # rid>0 guard therefore guards nothing, and the neighbor
                # additions below run even for row 0 — confirm intent (the
                # guard was presumably meant to wrap the block below).
                w[i]
            if cid[i] == 0:
                w[i] = w.get(i, []) + jne
            elif cid[i] == c1:
                w[i] = w.get(i, []) + jnw
            else:
                w[i] = w.get(i, []) + jne
                w[i] = w.get(i, []) + jnw
    return pysal.weights.W(w)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.