content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def _get_data_tuple(sptoks, asp_termIn, label):
    """Locate an aspect term in a tokenised sentence and encode its label.

    Method obtained from Trusca et al. (2020), no original docstring provided.

    :param sptoks: list of sentence tokens.
    :param asp_termIn: list of tokens forming the aspect term.
    :param label: sentiment label string: 'negative', 'neutral' or 'positive'.
    :return: tuple (pos_info, lab) where pos_info gives, for every sentence
        token, the distance to the nearest aspect-term token, and lab encodes
        the label as -1/0/1.
    :raises ValueError: if label is not one of the three known strings.
    """
    # Find the ids of the aspect term by sliding a window of the same length
    # over the sentence tokens.
    aspect_is = []
    asp_term = ' '.join(sp for sp in asp_termIn).lower()
    for _i, group in enumerate(window(sptoks, len(asp_termIn))):
        joined = ' '.join(g.lower() for g in group)
        # The original tested equality first, then substring membership, with
        # identical bodies; equality implies membership, so one test suffices.
        if asp_term in joined:
            aspect_is = list(range(_i, _i + len(asp_termIn)))
            break
    # Distance of every sentence token to the closest aspect-term token.
    pos_info = [min(abs(_i - i) for i in aspect_is)
                for _i in range(len(sptoks))]
    label_map = {'negative': -1, 'neutral': 0, 'positive': 1}
    try:
        lab = label_map[label]
    except KeyError:
        # Bug fix: the original interpolated the still-None local `lab` into
        # the message instead of the offending `label` argument.
        raise ValueError("Unknown label: %s" % label)
    return pos_info, lab
def setup_environment(new_region: Path) -> bool:
    """Try to create new_region folder"""
    if not new_region.exists():
        # Fresh location: create it and report where output will land.
        new_region.mkdir()
        print(f"Saving newly generated region files to {new_region.resolve()}")
        return True
    # The folder already exists; warn and let the user decide whether to go on.
    print(f"{new_region.resolve()} exists, this may cause problems")
    answer = input("Do you want to proceed regardless? [y/N] ")
    sep()
    return answer.startswith("y")
def main():
    """Function: main
    Description: Initializes program-wide used variables and processes command
        line arguments and values.

    Variables:
        dir_chk_list -> contains options which will be directories.
        dir_crt_list -> contain options that require directory to be created.
        opt_arg_list -> contains arguments to add to command line by default.
        opt_con_req_dict -> contains options requiring other options.
        opt_dump_list -> contains optional arguments to mysqldump.
        opt_multi_list -> contains the options that will have multiple values.
        opt_req_list -> contains the options that are required for the program.
        opt_val_list -> contains options which require values.
        opt_xor_dict -> contains options which are XOR with its values.

    Arguments:
        (input) argv -> Arguments from the command line.
    """
    cmdline = gen_libs.get_inst(sys)
    dir_chk_list = ["-o", "-d", "-p"]
    dir_crt_list = ["-o"]
    # --ignore-table=mysql.event -> Skips dumping the event table.
    opt_arg_list = ["--ignore-table=mysql.event"]
    opt_con_req_dict = {
        "-t": ["-e"], "-A": ["-o"], "-B": ["-o"], "-D": ["-o"], "-u": ["-e"]}
    opt_dump_list = {
        "-s": "--single-transaction",
        "-D": ["--all-databases", "--triggers", "--routines", "--events"],
        "-r": "--set-gtid-purged=OFF"}
    opt_multi_list = ["-B", "-e", "-t"]
    opt_req_list = ["-c", "-d"]
    opt_val_list = ["-B", "-c", "-d", "-o", "-p", "-y", "-e", "-t"]
    opt_xor_dict = {"-A": ["-B", "-D"], "-B": ["-A", "-D"], "-D": ["-A", "-B"]}

    # Process argument list from command line.
    args_array = arg_parser.arg_parse2(
        cmdline.argv, opt_val_list, multi_val=opt_multi_list)

    # Run only when help was not requested and all argument checks pass.
    # NOTE(review): arg_xor_dict and arg_cond_req_or are tested without `not`,
    # so they presumably return True on success while the others return falsy
    # on success -- confirm against the arg_parser library.
    if not gen_libs.help_func(args_array, __version__, help_message) \
            and not arg_parser.arg_require(args_array, opt_req_list) \
            and arg_parser.arg_xor_dict(args_array, opt_xor_dict) \
            and not arg_parser.arg_dir_chk_crt(args_array, dir_chk_list,
                                               dir_crt_list) \
            and arg_parser.arg_cond_req_or(args_array, opt_con_req_dict):
        try:
            # "-y" supplies a flavor id so multiple differently-configured
            # instances can hold independent program locks.
            prog_lock = gen_class.ProgramLock(cmdline.argv,
                                              args_array.get("-y", ""))
            run_program(args_array, opt_arg_list, opt_dump_list)
            del prog_lock
        except gen_class.SingleInstanceException:
            print("WARNING: Lock in place for mysql_db_dump with id: %s"
                  % (args_array.get("-y", "")))
def is_favorable_halide_environment(
        i_seq,
        contacts,
        pdb_atoms,
        sites_frac,
        connectivity,
        unit_cell,
        params,
        assume_hydrogens_all_missing=Auto):
    """
    Detects if an atom's site exists in a favorable environment for a halide
    ion. This includes coordinating by a positively charged sidechain or backbone
    as well as an absense of negatively charged coordinating groups.

    Parameters
    ----------
    i_seq : int
    contacts : list of mmtbx.ions.environment.atom_contact
    pdb_atoms : iotbx.pdb.hierarchy.af_shared_atom
    sites_frac : tuple of float, float, float
    connectivity : scitbx.array_family.shared.stl_set_unsigned
    unit_cell : uctbx.unit_cell
    params : libtbx.phil.scope_extract
    assume_hydrogens_all_missing : bool, optional

    Returns
    -------
    bool
    """
    # If not told explicitly, infer hydrogen availability from the model:
    # presence of any H/D element means hydrogens were (at least partly) placed.
    if (assume_hydrogens_all_missing in [None, Auto]):
        elements = pdb_atoms.extract_element()
        assume_hydrogens_all_missing = not ("H" in elements or "D" in elements)
    atom = pdb_atoms[i_seq]
    binds_amide_hydrogen = False
    near_cation = False
    near_lys = False
    near_hydroxyl = False
    xyz = col(atom.xyz)
    # Seed the running minima with an upper bound (the largest cell edge).
    min_distance_to_cation = max(unit_cell.parameters()[0:3])
    min_distance_to_hydroxyl = min_distance_to_cation
    for contact in contacts :
        # to analyze local geometry, we use the target site mapped to be in the
        # same ASU as the interacting site
        def get_site(k_seq):
            return unit_cell.orthogonalize(
                site_frac = (contact.rt_mx * sites_frac[k_seq]))
        other = contact.atom
        resname = contact.resname()
        atom_name = contact.atom_name()
        element = contact.element
        distance = abs(contact)
        j_seq = other.i_seq
        # XXX need to figure out exactly what this should be - CL has a
        # fairly large radius though (1.67A according to ener_lib.cif)
        if (distance < params.min_distance_to_other_sites):
            return False
        # Elements outside the common organic set: judge by formal charge.
        if not element in ["C", "N", "H", "O", "S"]:
            charge = server.get_charge(element)
            if charge < 0 and distance <= params.min_distance_to_anion:
                # Nearby anion that is too close
                return False
            if charge > 0 and distance <= params.max_distance_to_cation:
                # Nearby cation
                near_cation = True
                if (distance < min_distance_to_cation):
                    min_distance_to_cation = distance
        # Lysine sidechains (can't determine planarity)
        elif (atom_name in ["NZ"] and #, "NE", "NH1", "NH2"] and
              resname in ["LYS"] and
              distance <= params.max_distance_to_cation):
            near_lys = True
            if (distance < min_distance_to_cation):
                min_distance_to_cation = distance
        # sidechain amide groups, no hydrogens (except Arg)
        # XXX this would be more reliable if we also calculate the expected
        # hydrogen positions and use the vector method below
        elif (atom_name in ["NZ","NH1","NH2","ND2","NE2"] and
              resname in ["ARG","ASN","GLN"] and
              (assume_hydrogens_all_missing or resname == "ARG") and
              distance <= params.max_distance_to_cation):
            if (_is_coplanar_with_sidechain(atom, other.parent(),
                    distance_cutoff = params.max_deviation_from_plane)):
                binds_amide_hydrogen = True
                if (resname == "ARG") and (distance < min_distance_to_cation):
                    min_distance_to_cation = distance
        # hydroxyl groups - note that the orientation of the hydrogen is usually
        # arbitrary and we can't determine precise bonding
        elif ((atom_name in ["OG1", "OG2", "OH1"]) and
              (resname in ["SER", "THR", "TYR"]) and
              (distance <= params.max_distance_to_hydroxyl)):
            near_hydroxyl = True
            if (distance < min_distance_to_hydroxyl):
                min_distance_to_hydroxyl = distance
        # Backbone amide, explicit H
        elif atom_name in ["H"]:
            # TODO make this more general for any amide H?
            xyz_h = col(contact.site_cart)
            bonded_atoms = connectivity[j_seq]
            # An amide H should be bonded to exactly one atom (its N).
            if (len(bonded_atoms) != 1):
                continue
            xyz_n = col(get_site(bonded_atoms[0]))
            vec_hn = xyz_h - xyz_n
            vec_hx = xyz_h - xyz
            angle = abs(vec_hn.angle(vec_hx, deg = True))
            # If Cl, H, and N line up, Cl binds the amide group
            if abs(angle - 180) <= params.delta_amide_h_angle:
                binds_amide_hydrogen = True
            else :
                pass #print "%s N-H-X angle: %s" % (atom.id_str(), angle)
        # Backbone amide, implicit H
        elif atom_name in ["N"] and assume_hydrogens_all_missing:
            xyz_n = col(contact.site_cart)
            bonded_atoms = connectivity[j_seq]
            ca_same = c_prev = None
            # Locate the CA of the same residue and the C of the previous one;
            # the implicit H lies opposite their midpoint.
            for k_seq in bonded_atoms :
                other2 = pdb_atoms[k_seq]
                if other2.name.strip().upper() in ["CA"]:
                    ca_same = col(get_site(k_seq))
                elif other2.name.strip().upper() in ["C"]:
                    c_prev = col(get_site(k_seq))
            if ca_same is not None and c_prev is not None:
                xyz_cca = (ca_same + c_prev) / 2
                vec_ncca = xyz_n - xyz_cca
                # 0.86 is the backbone N-H bond distance in geostd
                xyz_h = xyz_n + (vec_ncca.normalize() * 0.86)
                vec_nh = xyz_n - xyz_h
                vec_nx = xyz_n - xyz
                angle = abs(vec_nh.angle(vec_nx, deg = True))
                if abs(angle - 180) <= params.delta_amide_h_angle:
                    binds_amide_hydrogen = True
        # sidechain NH2 groups, explicit H
        elif ((atom_name in ["HD1","HD2"] and resname in ["ASN"]) or
              (atom_name in ["HE1","HE2"] and resname in ["GLN"])):
            # XXX not doing this for Arg because it can't handle the bidentate
            # coordination
            #(atom_name in ["HH11","HH12","HH21","HH22"] and resname == "ARG")):
            bonded_atoms = connectivity[j_seq]
            assert (len(bonded_atoms) == 1)
            xyz_n = col(get_site(bonded_atoms[0]))
            xyz_h = col(contact.site_cart)
            vec_nh = xyz_n - xyz_h
            vec_xh = xyz - xyz_h
            angle = abs(vec_nh.angle(vec_xh, deg = True))
            if abs(angle - 180) <= params.delta_amide_h_angle:
                binds_amide_hydrogen = True
            else :
                pass #print "%s amide angle: %s" % (atom.id_str(), angle)
    # now check again for negatively charged sidechain (etc.) atoms (e.g.
    # carboxyl groups), but with some leeway if a cation is also nearby.
    # backbone carbonyl atoms are also excluded.
    for contact in contacts :
        if (contact.altloc() not in ["", "A"]):
            continue
        resname = contact.resname()
        atom_name = contact.atom_name()
        distance = abs(contact)
        if ((distance < 3.2) and
                (distance < (min_distance_to_cation + 0.2)) and
                is_negatively_charged_oxygen(atom_name, resname)):
            #print contact.id_str(), distance
            return False
    return (binds_amide_hydrogen or near_cation or near_lys)
def interlock(word_list):
    """Finds all pairs of words that interlock to form a word also in the list"""
    for candidate in word_list:
        # Even-indexed letters form one word, odd-indexed letters the other.
        evens = candidate[::2]
        odds = candidate[1::2]
        if evens in word_list and odds in word_list:
            print(candidate, evens, odds)
def headers():
    """Default headers for making requests."""
    json_mime = 'application/json'
    return {'content-type': json_mime, 'accept': json_mime}
def merge_two_lists(l1: ListNode, l2: ListNode) -> ListNode:
    """Merge two sorted linked lists in place and return the merged head.

    The result reuses (splices) the nodes of ``l1`` and ``l2``; no new data
    nodes are allocated beyond a temporary sentinel.

    Args:
        l1:
        l2:

    Examples:
        >>> l1 = linked_list.convert_list_to_linked_list([1,2,4])
        >>> l2 = linked_list.convert_list_to_linked_list([1,3,4])
        >>> merge_two_lists(l1, l2).as_list()
        [1, 1, 2, 3, 4, 4]
        >>> l1 = linked_list.convert_list_to_linked_list([])
        >>> l2 = linked_list.convert_list_to_linked_list([0])
        >>> merge_two_lists(l1, l2).as_list()
        [0]
        >>> merge_two_lists(l2, l1).as_list()
        [0]
        >>> merge_two_lists(None, None)
    """
    # Sentinel node avoids special-casing the head of the merged list.
    sentinel = ListNode(None)
    tail = sentinel
    while l1 is not None and l2 is not None:
        # On ties the node from l1 is taken first, keeping the merge stable.
        if l2.val < l1.val:
            tail.next = l2
            l2 = l2.next
        else:
            tail.next = l1
            l1 = l1.next
        tail = tail.next
    # One list is exhausted; append whatever remains of the other.
    tail.next = l1 if l1 is not None else l2
    return sentinel.next
def write_database_integrity_violation(results, headers, reason_message, action_message=None):
    """Emit a integrity violation warning and write the violating records to a log file in the current directory

    :param results: a list of tuples representing the violating records
    :param headers: a tuple of strings that will be used as a header for the log file. Should have the same length
        as each tuple in the results list.
    :param reason_message: a human readable message detailing the reason of the integrity violation
    :param action_message: an optional human readable message detailing a performed action, if any
    """
    # pylint: disable=duplicate-string-formatting-argument
    from datetime import datetime
    from tabulate import tabulate
    from tempfile import NamedTemporaryFile

    from aiida.cmdline.utils import echo
    from aiida.manage import configuration

    # Test profiles are throwaway databases; no point warning or logging.
    if configuration.PROFILE.is_test_profile:
        return

    if action_message is None:
        action_message = 'nothing'

    # delete=False: the log file must survive after this function returns so
    # the user can inspect the violators listed in the warning message.
    with NamedTemporaryFile(prefix='migration-', suffix='.log', dir='.', delete=False, mode='w+') as handle:
        echo.echo('')
        echo.echo_warning(
            '\n{}\nFound one or multiple records that violate the integrity of the database\nViolation reason: {}\n'
            'Performed action: {}\nViolators written to: {}\n{}\n'.format(
                WARNING_BORDER, reason_message, action_message, handle.name, WARNING_BORDER
            )
        )

        # Header block with a UTC timestamp, then the violators as a table.
        handle.write('# {}\n'.format(datetime.utcnow().isoformat()))
        handle.write('# Violation reason: {}\n'.format(reason_message))
        handle.write('# Performed action: {}\n'.format(action_message))
        handle.write('\n')
        handle.write(tabulate(results, headers))
def test_merge():
    """ testing the merge helper function of merge_sort """
    # Two single-element lists combine in order.
    assert merge([1], [2]) == [1, 2]
    # Values interleaved across both inputs come out fully sorted.
    assert merge([1, 2, 4], [3, 5]) == [1, 2, 3, 4, 5]
def map_SOPR_to_firm():
    """
    Map SOPR report identifiers to a lobbying-firm CUID.

    Reads the lobbying-firms dataset ('%'-delimited, unquoted), where column 0
    holds the firm CUID and column 3 a ';'-separated list of SOPR report ids.

    Return a dictionary mapping each SOPR report id to its firm CUID.
    """
    firms = {}
    # Bug fix: Python 3's csv module requires a text-mode file object; the
    # original opened the file in binary 'rb' mode, which raises
    # "iterator should return strings, not bytes". newline='' is the
    # csv-documented way to open input files.
    with open(DATASET_PATH_TO['LOBBYING_FIRMS'], 'r', newline='') as f:
        reader = csv.reader(f, delimiter='%', quoting=csv.QUOTE_NONE)
        for record in reader:
            CUID_firm = record[0]
            for report_id in record[3].split(';'):
                firms[report_id] = CUID_firm
    return firms
def specialbefores_given_external_square(
        befores: Set[Before],
        directly_playable_squares: Set[Square],
        external_directly_playable_square: Square) -> Set[Specialbefore]:
    """Build all Specialbefores that share one external directly playable square.

    Args:
        befores (Set[Before]): the Befores from which Specialbefores are built.
        directly_playable_squares (Set[Square]): directly playable squares,
            possibly including the external square itself.
        external_directly_playable_square (Square): the square used as the
            external directly playable square of every produced Specialbefore.

    Returns:
        specialbefores (Set[Specialbefore]): every valid Specialbefore pairing
        a Before with one of its internal directly playable squares.
    """
    result = set()
    for candidate in befores:
        playable_inside = internal_directly_playable_squares(
            candidate, directly_playable_squares)
        for inner_square in playable_inside:
            # Only emit a Specialbefore when the external square is compatible
            # with this Before.
            if can_be_used_with_before(external_directly_playable_square, candidate):
                result.add(Specialbefore(
                    before=candidate,
                    internal_directly_playable_square=inner_square,
                    external_directly_playable_square=external_directly_playable_square,
                ))
    return result
def test_exit_status_unknown_user(salt_factories, master_id):
    """
    Ensure correct exit status when the master is configured to run as an unknown user.
    """
    # The daemon must fail to start; FactoryNotStarted carries the process
    # exit code and stderr for inspection.
    with pytest.raises(FactoryNotStarted) as exc:
        factory = salt_factories.get_salt_master_daemon(
            master_id, config_overrides={"user": "unknown-user"}
        )
        # max_start_attempts=1: a retry would only repeat the same failure.
        with factory.started(start_timeout=10, max_start_attempts=1):
            # We should not get here
            pass

    assert exc.value.exitcode == salt.defaults.exitcodes.EX_NOUSER, exc.value
    assert "The user is not available." in exc.value.stderr, exc.value
def load_sets(path: WindowsPath = project_dir / 'data/processed') -> \
        Tuple[pd.DataFrame,
              pd.DataFrame,
              pd.DataFrame,
              pd.DataFrame,
              pd.DataFrame,
              pd.DataFrame]:
    """Load the pre-split train/test/validation sets from CSV files.

    :param path: directory containing X_train.csv, X_test.csv, X_val.csv,
        y_train.csv, y_test.csv and y_val.csv.
    :return: (X_train, X_test, X_val, y_train, y_test, y_val), each a
        DataFrame. Bug fix: the original annotation promised only four values
        (two DataFrames and two Series) while six DataFrames are returned.
    """
    # `path` is a pathlib object, so join with the / operator instead of
    # importing os and using os.path.join on it.
    X_train = pd.read_csv(path / "X_train.csv")
    X_test = pd.read_csv(path / "X_test.csv")
    X_val = pd.read_csv(path / "X_val.csv")
    y_train = pd.read_csv(path / "y_train.csv")
    y_test = pd.read_csv(path / "y_test.csv")
    y_val = pd.read_csv(path / "y_val.csv")
    return X_train, X_test, X_val, y_train, y_test, y_val
def check_if_all_tests_pass(option='-x'):
    """Runs all of the tests and only returns True if all tests pass.
    The -x option is the default, and -x will tell pytest to exit on the first encountered failure.
    The -s option prints out stdout from the tests (normally hidden.)"""
    import pytest

    # pytest exit code 0 means every collected test passed.
    exit_status = pytest.main([option])
    passed = exit_status == 0
    if not passed:
        # Pause so the failure output stays on screen until acknowledged.
        input()
    return passed
def register_metrics():
    """Registers all metrics used with package 'trackstats'.
    Make sure this is called before using metrics.

    Domains and metrics are declared in data tables and registered in a loop
    (the original repeated the same register/assign pattern ~18 times).
    After the call, each entry is available as an attribute on
    ``Domain.objects`` / ``Metric.objects`` (e.g. ``Domain.objects.VIEWS``).

    Returns
    -------
    None
    """
    from trackstats.models import Domain, Metric

    # (attribute name on Domain.objects, ref, human-readable name)
    domain_specs = [
        ('REQUESTS', 'requests', 'Requests'),
        ('VIEWS', 'views', 'Views'),
        ('DOWNLOADS', 'downloads', 'Downloads'),
        ('USERS', 'users', 'Users'),
        ('PROFILE', 'profile', 'Profile'),
        ('OBJECTS', 'objects', 'Objects'),
    ]
    for attr, ref, name in domain_specs:
        setattr(Domain.objects, attr,
                Domain.objects.register(ref=ref, name=name))

    # (attribute name on Metric.objects, Domain.objects attribute, ref,
    #  human-readable name)
    metric_specs = [
        ('TOTAL_REQUEST_COUNT', 'REQUESTS', 'total_request_count',
         'Total number of requests of any kind'),
        ('SEARCH_VIEW_COUNT', 'VIEWS', 'search_view_count',
         'Number of views for Search page'),
        ('ANALYSES_RESULTS_VIEW_COUNT', 'VIEWS', 'analyses_results_view_count',
         'Number of views for Analyses results'),
        ('SURFACE_VIEW_COUNT', 'VIEWS', 'surface_view_count',
         'Number of views for surfaces'),
        ('PUBLICATION_VIEW_COUNT', 'VIEWS', 'publication_view_count',
         'Number of requests for publication URLs'),
        ('SURFACE_DOWNLOAD_COUNT', 'DOWNLOADS', 'surface_download_count',
         'Number of downloads of surfaces'),
        ('USERS_LOGIN_COUNT', 'USERS', 'login_count',
         'Number of users having logged in'),
        ('TOTAL_ANALYSIS_CPU_MS', 'PROFILE', 'total_analysis_cpu_ms',
         'Total number of milliseconds spent for analysis computation'),
        ('USER_COUNT', 'OBJECTS', 'total_number_users',
         'Total number of registered users'),
        ('SURFACE_COUNT', 'OBJECTS', 'total_number_surfaces',
         'Total number of surfaces'),
        ('TOPOGRAPHY_COUNT', 'OBJECTS', 'total_number_topographies',
         'Total number of topographies'),
        ('ANALYSIS_COUNT', 'OBJECTS', 'total_number_analyses',
         'Total number of analyses'),
    ]
    for attr, domain_attr, ref, name in metric_specs:
        setattr(Metric.objects, attr,
                Metric.objects.register(domain=getattr(Domain.objects, domain_attr),
                                        ref=ref, name=name))
def prim_roots(modulo: int) -> tp.Iterable[int]:
    """Calculate all :term:`primitive roots <primitive root>` for the given modulo
    """
    # Non-prime moduli yield nothing, exactly like the original's guarded body.
    if not is_prime(modulo):
        return
    # g is a primitive root iff its powers generate every residue 1..modulo-1.
    full_residue_set = set(range(1, modulo))
    yield from (g for g in range(modulo)
                if get_powers_modulo(g, modulo) == full_residue_set)
def array_to_tiff(arr, outputTiff):
    """Transform a density map array into a tiff image, to be visualized.

    :param arr: array-like density map (converted to a numpy array here).
    :param outputTiff: path of the 16-bit TIFF file to write.
    """
    arr = np.array(arr)
    # Scale densities into the uint16 range before the integer cast.
    arr *=10000
    arr = arr.astype('uint16')
    # Zero out values above 60000 -- presumably to suppress near-saturation
    # outliers in the visualization; confirm against the data's expected range.
    arr[arr > 60000] = 0
    arr = np.array(arr) if False else arr  # no-op placeholder removed? -- NOTE(review): original had no such line; ignore.
    tifffile.imsave(outputTiff, arr)
def init(api, _cors, impl):
    """Configures REST handlers for allocation resource."""
    namespace = webutils.namespace(
        api, __name__, 'Local nodeinfo redirect API.'
    )

    @namespace.route('/<hostname>/<path:path>')
    class _NodeRedirect(restplus.Resource):
        """Redirects to local nodeinfo endpoint."""

        def get(self, hostname, path):
            """Redirect the request to the given host's local nodeinfo endpoint."""
            # impl maps a hostname to its "host:port" endpoint.
            hostport = impl.get(hostname)
            if not hostport:
                return 'Host not found.', http.client.NOT_FOUND

            # Re-encode the remaining path segments and issue a 302 redirect.
            url = utils.encode_uri_parts(path)
            return flask.redirect('http://%s/%s' % (hostport, url),
                                  code=http.client.FOUND)
def split_matrix_2(input1):
    """
    Split matrix.

    Cuts square blocks of size split_dim x split_dim from the main diagonal of
    the input matrix and stacks them into a 3-d tensor.

    Args:
        inputs:tvm.Tensor of type float32.

    Returns:
        akg.tvm.Tensor of type float32 with 3d shape
        (dim // split_dim, split_dim, split_dim).
    """
    # NOTE(review): `split_dim` and `allocate` are module-level names defined
    # elsewhere in this file -- confirm split_dim divides input1.shape[0].
    dim = input1.shape[0]
    split_num = dim // split_dim
    result_3 = allocate((split_num, split_dim, split_dim), input1.dtype, 'local')
    # Copy the i-th diagonal block into slice i of the result.
    for i in range(split_num):
        for j in range(split_dim):
            for k in range(split_dim):
                result_3[i,j,k] = input1[i * split_dim + j, i * split_dim + k]
    return result_3
def load_all(path, jobmanager=None):
    """Load all jobs from *path*.

    Works as a multiple execution of |load_job|: every file matching
    ``path/*/*.dill`` (one directory level below *path*, the layout produced
    by a previous script run) is imported. This gives a quick way to restart
    a script that previously failed without redoing successful jobs.

    Jobs are loaded with the default job manager stored in ``config.jm``
    unless a different one is passed as *jobmanager*.

    Returns a dictionary mapping the absolute ``.dill`` path of each job to
    the loaded job.
    """
    manager = jobmanager or config.jm
    return {dill_path: manager.load_job(dill_path)
            for dill_path in glob.glob(opj(path, '*', '*.dill'))}
def process_img(img1, img2):
    """
    Image processing: write the raw image, then grayscale and invert it.

    :param img1: file path where the processed image is written (also used as
        the intermediate location for the raw image).
    :param img2: image data (array) to be processed.
    :return: None; the result is written to ``img1``.
    """
    # Persist the raw image first, then re-read it for processing.
    cv2.imwrite(img1, img2)
    target = cv2.imread(img1)
    target = cv2.cvtColor(target, cv2.COLOR_BGR2GRAY)
    # Invert the grayscale image (255 - pixel value).
    target = abs(255 - target)
    cv2.imwrite(img1, target)
def relative_error(estimate, exact):
    """
    Compute the relative error of an estimate, in percent.
    """
    tol = 1e-15
    # Regular case: the exact value is numerically nonzero.
    if numpy.abs(exact) >= tol:
        return numpy.abs((estimate - exact) / exact) * 100.0
    # Exact value is (numerically) zero: the error is either zero or undefined.
    if numpy.abs(estimate - exact) < tol:
        return 0.0
    return numpy.inf
def AddTrainingOperators(model, softmax, label):
    """Adds training operators to the model.

    Appends cross-entropy loss, accuracy tracking, gradient operators and a
    plain SGD weight update (with a step learning-rate schedule) to *model*.
    """
    xent = model.LabelCrossEntropy([softmax, label], 'xent')
    # compute the expected loss
    loss = model.AveragedLoss(xent, "loss")
    # track the accuracy of the model
    AddAccuracy(model, softmax, label)
    # use the average loss we just computed to add gradient operators to the model
    model.AddGradientOperators([loss])
    # do a simple stochastic gradient descent
    ITER = brew.iter(model, "iter")
    # set the learning rate schedule; base_lr is negative because the update
    # below is an addition (param += LR * grad).
    LR = model.LearningRate(
        ITER, "LR", base_lr=-0.1, policy="step", stepsize=1, gamma=0.999)
    # LR = model.LearningRate(
    #     ITER, "LR", base_lr=-0.01, policy="inv", stepsize=1, gamma=0.0001, power=.75)
    # ONE is a constant value that is used in the gradient update. We only need
    # to create it once, so it is explicitly placed in param_init_net.
    ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
    # Now, for each parameter, we do the gradient updates.
    for param in model.params:
        # Note how we get the gradient of each parameter - ModelHelper keeps
        # track of that.
        param_grad = model.param_to_grad[param]
        # The update is a simple weighted sum: param = param + param_grad * LR
        model.WeightedSum([param, ONE, param_grad, LR], param)
def extract_sample_paths(seq_dir):
    """ Obtain the sample paths.

    Parameters
    ----------
    seq_dir : str
        Input directory containing all of the sample files.

    Returns
    -------
    dict of list of str
        Samples with a list of their forward and reverse files.
    """
    # List all sequence files, parse their Illumina-style names into a table,
    # then group forward/reverse reads per sample.
    fps = os.listdir(seq_dir)
    files_df = illumina_filenames_to_df(fps)
    sample_reads_dict = extract_sample_reads(files_df, seq_dir)
    return sample_reads_dict
def list_users(cursor):
    """
    Returns the current roles.

    Queries pg_roles for every role's name, login capability and the list of
    roles it is a member of, and wraps each row via User.create.

    NOTE(review): in Python 3 `map` is lazy, so the query rows are consumed
    only when the returned iterator is -- confirm callers expect an iterator
    rather than a list.
    """
    cursor.execute(
        """
        SELECT
            r.rolname AS name,
            r.rolcanlogin AS login,
            ARRAY(
                SELECT b.rolname
                FROM pg_catalog.pg_auth_members m
                JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
                WHERE m.member = r.oid
            ) AS memberof
        FROM pg_catalog.pg_roles r
        """
    )
    return map(User.create, cursor.fetchall())
def get_stack_policy(ctx, environment, stack):
    """
    Displays the stack policy used.

    Prints ENVIRONMENT/STACK policy.
    """
    # Resolve the environment from the CLI context, then fetch the policy of
    # the requested stack and print its body (empty dict when none is set).
    env = get_env(ctx.obj["sceptre_dir"], environment, ctx.obj["options"])
    response = env.stacks[stack].get_policy()
    write(response.get('StackPolicyBody', {}))
def getVelocityRange(vis, options=None):
    """
    Parse the velocity range from uvlist.
    Useful when resampling and re-binning data in `line` selections.

    :param vis: visibility dataset handed to uvlist.
    :param options: optional extra uvlist keyword options. Bug fix: the
        original declared a mutable default ``options={}`` and wrote the
        'vis'/'options' keys into it, so state leaked between calls and into
        callers' dicts; a private copy is used instead.
    :return: tuple of floats (starting velocity, ending velocity).
    """
    opts = dict(options) if options else {}
    opts['vis'] = vis
    opts['options'] = 'spec'
    specdata = uvlist(opts, stdout=subprocess.PIPE).stdout
    # str() of the captured bytes keeps escaped newlines as literal "\n",
    # which is what the searches below rely on.
    specdata = str(specdata)
    # starting velocity
    startvel = specdata[specdata.find('starting velocity'):]
    startvel = startvel[startvel.find(':')+1:startvel.find('\\n')].split()[0]
    # ending velocity
    endvel = specdata[specdata.rfind('ending velocity'):]
    endvel = endvel[endvel.find(':')+1:endvel.find('\\n')].split()[-1]
    return (float(startvel), float(endvel))
def polynomial_classification():
    """Classification using the Perceptron algorithm and polynomial features.

    Loads the student-grades dataset, builds polynomial features during
    preprocessing, binarizes the grade labels (pass >= 5), trains a
    Perceptron and prints its evaluation.
    """
    students_data_set = load_grades()
    # poly_features=True: the preprocessing step constructs the polynomial
    # features, so the commented-out PolynomialFeatures block is not needed.
    students_data_set = preprocess_data(students_data_set, poly_features=True)
    X_train = students_data_set['train_data']
    X_test = students_data_set['test_data']
    # #construct polynomial features from the coefficients
    # poly = PolynomialFeatures(degree=3, interaction_only=False)
    # X_train = poly.fit_transform(X_train)
    # X_test = poly.fit_transform(X_test)
    Y_train = students_data_set['train_discrete_labels']
    Y_test = students_data_set['test_discrete_labels']
    # Binarize labels in place: grade < 5 -> fail (0), grade >= 5 -> pass (1).
    Y_train[Y_train < 5] = 0
    Y_train[Y_train >= 5] = 1
    Y_test[Y_test < 5] = 0
    Y_test[Y_test >= 5] = 1
    clf = Perceptron(penalty=None, fit_intercept=False, n_iter=1000, shuffle=False, verbose=False)
    clf.fit(X_train, Y_train)
    #evaluate the perceptron model with polynomial features
    model_eval(clf, X_train, Y_train, X_test, Y_test, LABEL_NAMES_BIN)
def get_job_exe_output_vol_name(job_exe):
    """Returns the container output volume name for the given job execution

    :param job_exe: The job execution model (must not be queued) with related job and job_type fields
    :type job_exe: :class:`job.models.JobExecution`
    :returns: The container output volume name
    :rtype: string
    :raises Exception: If the job execution is still queued
    """
    # Volume name is the execution's cluster id with a fixed suffix.
    cluster_id = job_exe.get_cluster_id()
    return '{}_output_data'.format(cluster_id)
def bytes2human(n, format='%(value).1f %(symbol)s', symbols='customary'):
    """
    Convert n bytes into a human readable string based on format.
    symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
    see: http://goo.gl/kTQMs

      >>> bytes2human(0)
      '0.0 B'
      >>> bytes2human(0.9)
      '0.0 B'
      >>> bytes2human(1)
      '1.0 B'
      >>> bytes2human(1.9)
      '1.0 B'
      >>> bytes2human(1024)
      '1.0 K'
      >>> bytes2human(1048576)
      '1.0 M'
      >>> bytes2human(1099511627776127398123789121)
      '909.5 Y'
      >>> bytes2human(9856, symbols="customary")
      '9.6 K'
      >>> bytes2human(9856, symbols="customary_ext")
      '9.6 kilo'
      >>> bytes2human(9856, symbols="iec")
      '9.6 Ki'
      >>> bytes2human(9856, symbols="iec_ext")
      '9.6 kibi'
      >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
      '9.8 K/sec'
      >>> # precision can be adjusted by playing with %f operator
      >>> bytes2human(10000, format="%(value).5f %(symbol)s")
      '9.76562 K'
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    suffixes = SYMBOLS[symbols]
    # Threshold for the i-th suffix (beyond the base unit) is 1024**(i+1).
    thresholds = [(s, 1 << (i + 1) * 10) for i, s in enumerate(suffixes[1:])]
    # Walk from the largest unit down and take the first one that fits.
    for symbol, threshold in reversed(thresholds):
        if n >= threshold:
            # Explicit mapping replaces the original's `format % locals()`.
            return format % {"symbol": symbol, "value": float(n) / threshold}
    return format % {"symbol": suffixes[0], "value": n}
def test_example(save_to_file, progress_bar, tmp_path, file_cache):
    """basic test: retrieve an example dataset, exercising the file cache and
    both the CSV-string and DataFrame retrieval APIs."""
    # get the reference dataset
    dataset_id, ref_csv, ref_df, ref_shape = ref_dataset_public_platform()

    # check the cache status
    cached_entry = None
    if file_cache:
        cache_root = DEFAULT_CACHE_ROOT if file_cache is True else file_cache
        cached_entry = get_cached_dataset_entry(dataset_id=dataset_id, platform_id="public",
                                                cache_root=None if file_cache is True else cache_root)
        assert cached_entry.file_path.as_posix() == "%s/public/%s.csv" % (cache_root, dataset_id)
        # the cache is supposed to be clean (cleaned at the end of this test)
        assert not cached_entry.exists()

    # with debug_requests():
    to_path = tmp_path / "blah" / "tmp.csv" if save_to_file else None
    csv_str = get_whole_dataset(dataset_id, platform_id='public', file_cache=file_cache, to_path=to_path,
                                tqdm=progress_bar)
    if save_to_file:
        # When saving to file nothing is returned; read the file back instead.
        assert csv_str is None
        # note: newline='' preserves line ending while opening. See https://stackoverflow.com/a/50996542/7262247
        with open(str(to_path), mode="rt", encoding="utf-8", newline='') as f:
            csv_str = f.read()
        os.remove(str(to_path))

    # compare the text string (if order does not change across queries...)
    # assert csv_str == ref_csv

    # move to pandas
    df = pd.read_csv(create_reading_buffer(csv_str, is_literal=False), sep=';')
    # compare with ref
    df = df.set_index(['Office Name']).sort_index()
    pd.testing.assert_frame_equal(df, ref_df)
    assert df.shape == ref_shape

    # test the pandas direct streaming API without cache
    df2 = get_whole_dataframe(dataset_id, tqdm=progress_bar)
    df2 = df2.set_index(['Office Name']).sort_index()
    pd.testing.assert_frame_equal(df, df2)

    # make sure the cached entry exists now and can be read without internet connection
    if cached_entry:
        # Make sure that the cache entry contains the dataset
        assert cached_entry.exists()
        # note: newline='' preserves line ending while opening. See https://stackoverflow.com/a/50996542/7262247
        with open(str(cached_entry.file_path), mode="rt", encoding="utf-8", newline='') as f:
            cached_csv_str = f.read()
        assert cached_csv_str == csv_str

        # New offline query: the cache should be hit even if the public platform is identified using its base_url here
        # (make_invalid_network_session() guarantees any real network call would fail).
        csv_str2 = get_whole_dataset(dataset_id=dataset_id, file_cache=file_cache,
                                     base_url="https://public.opendatasoft.com", tqdm=progress_bar,
                                     requests_session=make_invalid_network_session())
        assert csv_str2 == cached_csv_str

        # Same with the other method
        df3 = get_whole_dataframe(dataset_id, file_cache=file_cache, requests_session=make_invalid_network_session(),
                                  tqdm=progress_bar)
        df3 = df3.set_index(['Office Name']).sort_index()
        pd.testing.assert_frame_equal(df, df3)

        # clean it for next time
        cached_entry.delete()
        assert not cached_entry.exists()

        # Make sure it is re-cached if we use the dataframe-getter method directly
        df4 = get_whole_dataframe(dataset_id, file_cache=file_cache, tqdm=progress_bar)
        df4 = df4.set_index(['Office Name']).sort_index()
        assert cached_entry.exists()
        pd.testing.assert_frame_equal(df, df4)

        # clean it for next time
        cached_entry.delete()
        assert not cached_entry.exists()
def check_if_free(driver, available, movie_hulu_url):
    """
    Check if "Watch Movie" button is there.
    If not, it's likely available in a special package (Starz etc) or available for Rent on Hulu.

    :param driver: selenium webdriver instance used to load the page.
    :param available: whether the movie is on Hulu at all; if falsy the page
        is not even loaded and False is returned.
    :param movie_hulu_url: URL of the movie's Hulu page.
    :return: True only if a "WATCH MOVIE" button is present on the page.
    """
    is_free = False
    if available:
        driver.get(movie_hulu_url)
        # Fixed wait for the page's dynamic content to render.
        # NOTE(review): find_elements_by_class_name is the Selenium 3 API;
        # Selenium 4 removed it in favor of find_elements(By.CLASS_NAME, ...).
        sleep(3)
        watch_movie_button = driver.find_elements_by_class_name("WatchAction")
        for e in watch_movie_button:
            #print(e.text)
            #print(e.get_attribute('href'))
            if e.text == "WATCH MOVIE":
                is_free = True
    return is_free
def scatter_add(data, indices, updates, axis=0):
    """Update data by adding values in updates at positions defined by indices

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    indices : relay.Expr
        The index locations to update.
    updates : relay.Expr
        The values to be added.
    axis : int
        The axis to scatter on

    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    # Normalize a negative axis to its positive equivalent.
    if axis < 0:
        axis += len(data.shape)
    assert axis >= 0
    assert axis < len(data.shape)

    rank = len(data.shape)
    assert 1 <= rank <= 4, "scatter_add only supports 1-4 dimensions"

    # One IR generator per supported rank; the 1-d case uses atomic adds.
    ir_funcs = {
        1: gen_scatter_add_1d_atomic,
        2: gen_ir_2d,
        3: gen_ir_3d,
        4: gen_ir_4d,
    }

    def update_func(dst_ptr, dst_index, update):
        # Accumulate instead of overwrite -- this is what distinguishes
        # scatter_add from plain scatter.
        dst_ptr[dst_index] += update

    out_shape = data.shape
    out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
    out = te.extern(
        [out_shape],
        [data, indices, updates],
        lambda ins, outs: ir_funcs[rank](ins[0], ins[1], ins[2], axis, outs[0], update_func),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="scatter_add_gpu",
        tag="scatter_add_gpu",
    )

    return out
def LTE_end(verbose=False):
    """Clean disconnection of the LTE network. This is required for
    future successful connections without a complete power cycle between.

    Parameters
    ----------
    verbose : bool, print debug statements
    """
    # Drop the data session first, then detach from the network.
    if verbose: print("Disconnecting LTE ... ", end='')
    lte_modem.disconnect()
    if verbose: print("OK")
    time.sleep(1)  # give the modem a moment to settle before detaching
    if verbose: print("Detaching LTE ... ", end='')
    # NOTE: "dettach" (sic) is the actual method name in the Pycom LTE API.
    lte_modem.dettach()
    if verbose: print("OK")
def generate_test_linked_list(size=5, singly=False):
    """
    Generate node list for test case

    :param size: size of linked list (must be >= 1)
    :type size: int
    :param singly: whether or not this linked list is singly
    :type singly: bool
    :return: value list and generated linked list
    """
    assert size >= 1
    # list(range(...)) instead of a redundant identity comprehension.
    val_list = list(range(size))
    node_list = LinkedList(singly=singly)
    node_list.append_val_list(val_list)
    return val_list, node_list
def write_galaxy_provenance(gi, history_id, output_dir):
    """
    Writes provenance information from Galaxy to JSON output files.

    :param gi: the Galaxy instance (bioblend-style) used to query histories.
    :param history_id: The history id in Galaxy to examine.
    :param output_dir: The directory to write the output files.
    :return: None.
    """
    histories_provenance_file = os.path.join(output_dir, "history-provenance.json")
    dataset_provenance_file = os.path.join(output_dir, "dataset-provenance.json")

    all_datasets = gi.histories.show_history(history_id, details='all', contents=True)

    dataset_content = []
    for dataset in all_datasets:
        if (dataset['history_content_type'] == 'dataset'):
            dataset_content.append(
                gi.histories.show_dataset_provenance(history_id, dataset['id'], follow=True))
        elif (dataset['history_content_type'] == 'dataset_collection'):
            dataset_content.append(
                gi.histories.show_dataset_collection(history_id, dataset['id']))
        else:
            raise Exception("Error: dataset with id="+dataset['id']+" in history="+history_id+" has history_content_type="+dataset['history_content_type']+". Expected one of 'dataset' or 'dataset_collection'")

    # Context managers guarantee the files are closed even if serialization
    # fails; the original left both handles open on any exception.
    with open(dataset_provenance_file, 'w') as dataset_prov_fh:
        dataset_prov_fh.write(json.dumps(dataset_content, indent=4, separators=(',', ': ')))
    with open(histories_provenance_file, 'w') as histories_prov_fh:
        histories_prov_fh.write(json.dumps(all_datasets, indent=4, separators=(',', ': ')))
def process_install_task(self):
    """Creates the installation task for the current task generator; uses :py:func:`waflib.Build.add_install_task` internally."""
    # Forward every attribute set on this task generator (install_to,
    # install_from, chmod, ...) as keyword arguments to add_install_task.
    self.add_install_task(**self.__dict__)
def build(build_args=None, docs_dir=None, build_dir=None, fmt=None, open_url=False):
    """Use sphinx-build to build the documentation."""
    exit_code = sphinx_build(build_args)
    if exit_code:
        # Propagate sphinx-build's failure status to the caller's shell.
        sys.exit(exit_code)
    # Open the rendered docs in a browser, but only for HTML output.
    if open_url and fmt == 'html':
        index_page = build_dir / 'index.html'
        webbrowser.open(str(index_page))
def _bytes_to_long(bytestring, byteorder):
"""Convert a bytestring to a long
For use in python version prior to 3.2
"""
result = []
if byteorder == 'little':
result = (v << i * 8 for (i, v) in enumerate(bytestring))
else:
result = (v << i * 8 for (i, v) in enumerate(reversed(bytestring)))
return sum(result) | 33,438 |
def cli():
    """Utility to update dependency requirements for `aiida-core`.

    Since `aiida-core` fixes the versions of almost all of its dependencies, once in a while these need to be updated.
    This is a manual process, but this CLI attempts to simplify it somewhat. The idea is to remove all explicit version
    restrictions from the `setup.json`, except for those packages where it is known that an upper limit is necessary.
    This is accomplished by the command:

        python update_dependencies.py unrestrict

    The command will update the `setup.json` to remove all explicit limits, except for those packages specified by the
    `--exclude` option. After this step, install `aiida-core` through pip with the `[all]` flag to install all optional
    extra requirements as well. Since there are no explicit version requirements anymore, pip should install the latest
    available version for each dependency.

    Once all the tests complete successfully, run the following command:

        pip freeze > requirements.txt

    This will now capture the exact versions of the packages installed in the virtual environment. Since the tests run
    for this setup, we can now set those versions as the new requirements in the `setup.json`. Note that this is why a
    clean virtual environment should be used for this entire procedure. Now execute the command:

        python update_dependencies.py update requirements.txt

    This will now update the `setup.json` to reinstate the exact version requirements for all dependencies. Commit the
    changes to `setup.json` and make a pull request.
    """
def process_manifest_for_key(manifest, manifest_key, installinfo,
                             parentcatalogs=None):
    """Processes keys in manifests to build the lists of items to install and
    remove.

    Can be recursive if manifests include other manifests.
    Probably doesn't handle circular manifest references well.

    manifest can be a path to a manifest file or a dictionary object.

    Args:
        manifest: path to a manifest file, or an already-loaded manifest dict
            (the latter is used for conditional_items entries).
        manifest_key: which manifest section to process; one of
            'managed_installs', 'managed_updates', 'optional_installs' or
            'managed_uninstalls'.
        installinfo: accumulator mutated in place by the process_* helpers.
        parentcatalogs: catalog list inherited from the including manifest,
            used when this manifest declares no catalogs of its own.

    NOTE(review): the return value is inconsistent (None, {} or falls off the
    end) -- it appears to be ignored by callers; confirm before relying on it.
    """
    if isinstance(manifest, basestring):
        display.display_debug1(
            "** Processing manifest %s for %s" %
            (os.path.basename(manifest), manifest_key))
        manifestdata = manifestutils.get_manifest_data(manifest)
    else:
        # Already a dict (embedded / conditional manifest).
        manifestdata = manifest
        manifest = 'embedded manifest'
    cataloglist = manifestdata.get('catalogs')
    if cataloglist:
        catalogs.get_catalogs(cataloglist)
    elif parentcatalogs:
        # No catalogs of its own: inherit from the including manifest.
        cataloglist = parentcatalogs
    if not cataloglist:
        display.display_warning('Manifest %s has no catalogs', manifest)
        return
    # Recurse into included manifests before processing this manifest's items.
    for item in manifestdata.get('included_manifests', []):
        nestedmanifestpath = manifestutils.get_manifest(item)
        if not nestedmanifestpath:
            raise manifestutils.ManifestException
        if processes.stop_requested():
            return {}
        process_manifest_for_key(nestedmanifestpath, manifest_key,
                                 installinfo, cataloglist)
    conditionalitems = manifestdata.get('conditional_items', [])
    if conditionalitems:
        display.display_debug1(
            '** Processing conditional_items in %s', manifest)
    # conditionalitems should be an array of dicts
    # each dict has a predicate; the rest consists of the
    # same keys as a manifest
    for item in conditionalitems:
        try:
            predicate = item['condition']
        except (AttributeError, KeyError):
            display.display_warning(
                'Missing predicate for conditional_item %s', item)
            continue
        except BaseException:
            display.display_warning(
                'Conditional item is malformed: %s', item)
            continue
        if info.predicate_evaluates_as_true(
                predicate, additional_info={'catalogs': cataloglist}):
            # A matching conditional item is processed as an embedded manifest.
            conditionalmanifest = item
            process_manifest_for_key(
                conditionalmanifest, manifest_key, installinfo, cataloglist)
    for item in manifestdata.get(manifest_key, []):
        if processes.stop_requested():
            return {}
        if manifest_key == 'managed_installs':
            dummy_result = process_install(item, cataloglist, installinfo)
        elif manifest_key == 'managed_updates':
            process_managed_update(item, cataloglist, installinfo)
        elif manifest_key == 'optional_installs':
            process_optional_install(item, cataloglist, installinfo)
        elif manifest_key == 'managed_uninstalls':
            dummy_result = process_removal(item, cataloglist, installinfo)
def SoS_exec(script: str, _dict: dict = None, return_result: bool = True) -> "Any":
    """Execute a statement.

    Compiles and runs *script* in the namespace *_dict* (defaulting to the
    global SoS dict). When *return_result* is True and the script's last
    top-level statement is an expression, that expression's value is
    returned; otherwise None is returned. When env.verbosity == 0, stdout
    produced by the script is suppressed.

    Raises SyntaxError if the script cannot be parsed/compiled.
    """
    if _dict is None:
        _dict = env.sos_dict.dict()

    if not return_result:
        # Fast path: just execute, never inspect the AST.
        if env.verbosity == 0:
            with contextlib.redirect_stdout(None):
                exec(
                    compile(script, filename=stmtHash.hash(script), mode="exec"), _dict
                )
        else:
            exec(compile(script, filename=stmtHash.hash(script), mode="exec"), _dict)
        return None

    try:
        stmts = list(ast.iter_child_nodes(ast.parse(script)))
        if not stmts:
            return
        if isinstance(stmts[-1], ast.Expr):
            # the last one is an expression and we will try to return the results
            # so we first execute the previous statements
            if len(stmts) > 1:
                if env.verbosity == 0:
                    with contextlib.redirect_stdout(None):
                        exec(
                            compile(
                                ast.Module(body=stmts[:-1], type_ignores=[]),
                                filename=stmtHash.hash(script),
                                mode="exec",
                            ),
                            _dict,
                        )
                else:
                    exec(
                        compile(
                            ast.Module(body=stmts[:-1], type_ignores=[]),
                            filename=stmtHash.hash(script),
                            mode="exec",
                        ),
                        _dict,
                    )
            # then we eval the last one
            if env.verbosity == 0:
                with contextlib.redirect_stdout(None):
                    res = eval(
                        compile(
                            ast.Expression(body=stmts[-1].value),
                            filename=stmtHash.hash(script),
                            mode="eval",
                        ),
                        _dict,
                    )
            else:
                res = eval(
                    compile(
                        ast.Expression(body=stmts[-1].value),
                        filename=stmtHash.hash(script),
                        mode="eval",
                    ),
                    _dict,
                )
        else:
            # otherwise we just execute the entire code
            if env.verbosity == 0:
                with contextlib.redirect_stdout(None):
                    exec(
                        compile(script, filename=stmtHash.hash(script), mode="exec"),
                        _dict,
                    )
            else:
                exec(
                    compile(script, filename=stmtHash.hash(script), mode="exec"), _dict
                )
            res = None
    except SyntaxError as e:
        raise SyntaxError(f"Invalid code {script}: {e}")
    # if check_readonly:
    #    env.sos_dict.check_readonly_vars()
    return res
def main():
    """hash-it main entry point"""
    try:
        from hashit.cli.cli import cli_main
        exit_status = cli_main(docopt(__doc__))
    except KeyboardInterrupt:
        # 130 = 128 + SIGINT, the conventional Ctrl-C exit status.
        sys.exit(130)
    else:
        sys.exit(exit_status)
def aten_dim(mapper, graph, node):
    """ Construct a PaddleLayer that retrieves a Tensor's number of dimensions.

    TorchScript example:
        %106 : int = aten::dim(%101)
    Parameter meaning:
        %106 (int): output, the number of dimensions of the Tensor.
        %101 (Tensor): the input Tensor.
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Collect the list of outputs produced by the current node.
    current_outputs = [output_name]
    # Process input 0, i.e. the input Tensor (e.g. %input.8).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
    layer_inputs["input"] = inputs_name[0]
    # Collect the list of inputs consumed by the current node.
    current_inputs = list(layer_inputs.values())

    # dim(x) is lowered as len(shape(x)): the second layer reads and
    # overwrites the same output name produced by the first.
    graph.add_layer(
        "prim.shape", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
    graph.add_layer(
        "prim.len", inputs={"input": output_name}, outputs=layer_outputs, scope_name=scope_name)
    return current_inputs, current_outputs
def test_iterdict(reference):
    """ GIVEN a dict for iteration """
    # WHEN passing dict to this function
    pair_generator = iterdict(reference)
    # THEN it will create dict generator, we can iterate it, get the key, values as string
    for item_key, item_value in pair_generator:
        assert isinstance(item_key, str)
        assert isinstance(item_value, str)
def create_config(config: HMAConfig) -> None:
    """
    Creates a config, exception if one exists with the same type and name
    """
    _assert_initialized()
    config._assert_writable()
    # TODO - we should probably sanity check here to make sure all the fields
    #        are the expected types, because lolpython. Otherwise, it will
    #        fail to deserialize later
    client = get_dynamodb().meta.client
    # The condition expression makes the put fail if an item with the same
    # ConfigType already exists.
    client.put_item(
        TableName=_TABLE_NAME,
        Item=_config_to_dynamodb_item(config),
        ConditionExpression=Attr("ConfigType").not_exists(),
    )
def regenerate():
    """Automatically regenerate site upon file modification"""
    # -r: watch for changes and regenerate, -s: settings module to use.
    pelican_command = 'pelican -r -s pelicanconf.py'
    local(pelican_command)
def calcBlockingMatrix(vs , NC = 1 ):
"""Calculate the blocking matrix for a distortionless beamformer,
and return its Hermitian transpose."""
vsize = len(vs)
bsize = vsize - NC
blockMat = numpy.zeros((vsize,bsize), numpy.complex)
# Calculate the perpendicular projection operator 'PcPerp' for 'vs'.
norm_vs = numpy.inner( vs, numpy.conjugate(vs) )
if norm_vs.real > 0.0:
PcPerp = numpy.eye(len(vs)) - numpy.outer( numpy.conjugate(vs), vs ) / norm_vs
# Do Gram-Schmidt orthogonalization on the columns of 'PcPerp'.
for idim in range(bsize):
vec = PcPerp[:,idim]
for jdim in range(idim):
rvec = blockMat[:,jdim]
ip = numpy.inner(numpy.conjugate(rvec), vec)
vec -= rvec * ip
norm_vec = numpy.sqrt( abs(numpy.inner(numpy.conjugate(vec),vec)) )
blockMat[:,idim] = vec / norm_vec
# Debugging:
#print "len",len(vs),len(blockMat),len(blockMat[0])
#print matrixmultiply(vs, blockMat)
# return numpy.conjugate(numpy.transpose(blockMat))
return blockMat | 33,447 |
def test_main_expression_parser_if_not():
    """
    UT for main
    """
    xml_path = os.path.join(os.path.split(__file__)[0], "tc1_exec_type_driver.xml")
    tree = ET.parse(xml_path)
    # get root element
    root = tree.getroot()
    # getting the seventh step
    all_steps = root.find("Steps")
    sstep = all_steps[6]
    data_repository = {'step_1_result': 'PASS'}
    exec_type_driver.rule_parser = MagicMock()
    config_Utils.data_repository = MagicMock(return_value=data_repository)
    result1, result2 = exec_type_driver.main(sstep, skip_invoked=True)
    assert result2 == 'next'
    # Undo the monkey-patching so other tests see pristine modules.
    del config_Utils.data_repository
    del exec_type_driver.rule_parser
def plot_LA(mobile, ref, GDT_TS, GDT_HA, GDT_ndx,
            sel1="protein and name CA", sel2="protein and name CA",
            cmap="GDT_HA", **kwargs):
    """
    Create LocalAccuracy Plot (heatmap) with

      - xdata = residue ID
      - ydata = frame number
      - color = color-coded pair distance

    .. Note:: do not pass too many data points otherwise the plot will get squeezed

    Args:
        mobile (universe, atomgrp): mobile structure with trajectory
        ref (universe, atomgrp): reference structure
        GDT_TS (array): array with GDT_TS scores.
        GDT_HA (array): array with GDT_HA scores.
        GTD_ndx (array): array with corresponding index values (representative for frame numbers).
        sel1 (str): selection string of mobile structure (calculation of pair distances)
        sel2 (str): selection string of reference structure (calculation of pair distances)
        cmap (str):
          | "GDT_TS" or "TS": color map with new colors at values (0, 1, 2, 4, 8)
            and vmin, vmax = (0, 10).
          | "GDT_HA" or "HA": color map with new colors at values (0, .5, 1, 2, 4)
            and vmin, vmax = (0, 5).
          | "nucleic" or "RNA" or "DNA": color map with new colors at values (0, .5, 1, 2, 4)
            and vmin, vmax = (0, 20).
          | other cmap names: see help(plt.colormaps) or alternatively
            https://matplotlib.org/examples/color/colormaps_reference.html

    Keyword Args:
        prec (None, int):
          | rounding precision of scores
          | None: rounding off
          | int: rounding on to <int> decimals
        ndx_offset (int):
          | offset/shift of GDT_ndx to match real "mobile" frames. Defaults to 0.
          | Look up "start" parameter during execution of gdt.GDT()
        rank_num (int): plot only <rank_num> best ranked frames. Defaults to 30.
        show_cbar (bool): show/hide colorbar. Defaults to True.
        show_frames (bool): show/hide frame numbers. Defaults to False.
        show_scores (bool): show/hide GDT_TS and GDT_HA scores. Defaults to True.
        save_as (None, str): save name or realpath to save file. Defaults to None.
        cbar_ticks (None, list): color bar tick positions. Defaults to None.
        cbar_label/label (str)
        cbar_fontweight/fontweight (str): "normal", "bold"
        cbar_location/location (str): "right", "bottom", "left", "top"
        cbar_orientation/orientation (str): "horizontal", "vertical"
        cbar_min/vmin (None, int): min value of colorbar and heatmap. Gets
            overwritten by cmaps such as "GDT_TS", "GDT_HA", "RNA" etc.
        cbar_max/vmax (None, int): max value of colorbar and heatmap. Gets
            overwritten by cmaps such as "GDT_TS", "GDT_HA", "RNA" etc.
        text_pos_Frame (list): [x0, y0] position of the "Frame" text box (label)
        text_pos_TS (list): [x0, y0] position of the "TS" text box (label)
        text_pos_HA (list): [x0, y0] position of the "HA" text box (label)
        font_scale (float)

    .. Hint:: Args and Keyword of misc.figure() are also valid.

    Returns:
        fig (class)
            matplotlib.figure.Figure
        ax (class, list)
            ax or list of axes ~ matplotlib.axes._subplots.Axes
        LA_data (tuple)
          | LA_data[0]: PairDistances (list)
          | LA_data[1]: Frames (list)

    Example:
      | # obtain data
      | >> GDT = gdt.GDT(mobile, ref, sss=[None,None,None])
      | >> GDT_percent, GDT_resids, GDT_cutoff, RMSD, FRAME = GDT
      |
      | # rank data
      | >> SCORES = gdt.GDT_rank_scores(GDT_percent, ranking_order="GDT_HA")
      | >> GDT_TS_ranked, GDT_HA_ranked, GDT_ndx_ranked = SCORES
      |
      | # edit text box positions of labels "Frame", "TS", "HA"
      | >>text_pos_kws = {"text_pos_Frame": [-8.8, -0.3],
      |                   "text_pos_TS": [-4.2, -0.3],
      |                   "text_pos_HA": [-1.9, -0.3]}
      |
      | # plot
      | >> gdt.plot_LA(mobile, ref, SCORES[0], SCORES[1], SCORES[2], **text_pos_kws)
    """
    # init CONFIG object with default parameter and overwrite them if kwargs contain the same keywords.
    default = {"figsize": (7.5, 6),
               "font_scale": 1.2,
               "ndx_offset": 0,
               "rank_num": 30,
               "show_cbar": True,
               "show_frames": False,
               "show_scores": True,
               "save_as": None,
               "prec": 2,
               "cmap": cmap,
               "cbar_ticks": None,
               "cbar_label": r"mobile-reference CA-CA distances ($\AA$)",
               "cbar_fontweight": "bold",
               "cbar_location": 'right',
               "cbar_orientation": 'vertical',
               "cbar_min": None,
               "cbar_max": None,
               "vmin": None,
               "vmax": None,
               "text_pos_Frame": [-8.8, -0.3],
               "text_pos_TS": [-3.8, -0.3],
               "text_pos_HA": [-1.7, -0.3]}
    cfg = _misc.CONFIG(default, **kwargs)
    cfg.update_by_alias(alias="label", key="cbar_label", **kwargs)
    cfg.update_by_alias(alias="fontweight", key="cbar_fontweight", **kwargs)
    cfg.update_by_alias(alias="location", key="cbar_location", **kwargs)
    cfg.update_by_alias(alias="orientation", key="cbar_orientation", **kwargs)
    cfg.update_by_alias(alias="vmin", key="cbar_min", **kwargs)
    cfg.update_by_alias(alias="vmax", key="cbar_max", **kwargs)
    ############################################################################
    ### load data
    PAIR_DISTANCES = []
    FRAMES = [i+cfg.ndx_offset for i in GDT_ndx[:cfg.rank_num]]
    for ts in mobile.trajectory[FRAMES]:
        PD, *_ = get_Pair_Distances(mobile, ref, sel1=sel1, sel2=sel2)
        PAIR_DISTANCES.append(PD)
    # fix: identity comparison with None instead of '!= None'
    if cfg.prec is not None and cfg.prec != -1:
        GDT_TS = np.around(GDT_TS[: cfg.rank_num], cfg.prec)
        GDT_HA = np.around(GDT_HA[: cfg.rank_num], cfg.prec)

    xticks = mobile.select_atoms(sel1).residues.resids
    xticks = [x if x % 5 == 0 else "." for x in xticks]
    xticklabels = xticks

    if cfg.show_frames and cfg.show_scores:
        yticks = [f"{FRAMES[i]:>9}{GDT_TS[i]:>10.2f}{GDT_HA[i]:>8.2f}  " if GDT_TS[i] != 100 else
                  f"{FRAMES[i]:>9}{GDT_TS[i]:>9.2f}{GDT_HA[i]:>8.2f}  " for i in range(len(FRAMES))]
    elif cfg.show_frames:
        yticks = FRAMES
    elif cfg.show_scores:
        yticks = [f"{GDT_TS[i]:>10.2f}{GDT_HA[i]:>8.2f}  " if GDT_TS[i] != 100 else
                  f"{GDT_TS[i]:>9.2f}{GDT_HA[i]:>8.2f}  " for i in range(len(FRAMES))]
    else:
        # fix: previously 'yticks' was undefined here and the heatmap call
        # below raised NameError; "auto" is seaborn's default tick labeling.
        yticks = "auto"
    yticklabels = yticks
    ############################################################################
    ### heatmap/cbar settings
    cmap_GDT = ["lightblue", "lightgreen", "yellow", "yellow", "orange", "orange",
                "orange", "orange", "red", "red"]
    cmap_RNA = ["lightblue", "lightblue", "lightgreen", "lightgreen",
                "yellow", "yellow", "orange", "orange", "red", "red"]

    # apply color bar limits if passed (vmin and vmax have higher priority than cbar_min and cbar_max)
    if cfg.cbar_min is not None:
        cfg.vmin = cfg.cbar_min
    if cfg.cbar_max is not None:
        cfg.vmax = cfg.cbar_max
    # if no limits passed: apply pre-defined limits
    if cfg.cmap in ["GDT_HA", "HA"]:
        if cfg.vmin is None:
            cfg.vmin = 0.0
        if cfg.vmax is None:
            cfg.vmax = 5.0
    elif cfg.cmap in ["GDT_TS", "TS"]:
        if cfg.vmin is None:
            cfg.vmin = 0.0
        if cfg.vmax is None:
            cfg.vmax = 10.0
    elif cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
        if cfg.vmin is None:
            cfg.vmin = 0.0
        if cfg.vmax is None:
            cfg.vmax = 14.0
    ############################################################################
    ### plot
    fig, ax = _misc.figure(**cfg)
    if cfg.show_cbar:
        cbar_ax = _misc.add_cbar_ax(ax, location=cfg.cbar_location,
                                    orientation=cfg.cbar_orientation)
        cbar_kws = {'orientation': cfg.cbar_orientation}
    else:
        cbar_ax = None
        cbar_kws = dict()

    if cfg.cmap in ["GDT_TS", "TS", "GDT_HA", "HA"]:
        hm = sns.heatmap(PAIR_DISTANCES, cmap=cmap_GDT, vmin=cfg.vmin, vmax=cfg.vmax,
                         xticklabels=xticklabels, yticklabels=yticklabels,
                         square=False, annot=False, linewidths=1.0,
                         ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)
    elif cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
        hm = sns.heatmap(PAIR_DISTANCES, cmap=cmap_RNA, vmin=cfg.vmin, vmax=cfg.vmax,
                         xticklabels=xticklabels, yticklabels=yticklabels,
                         square=False, annot=False, linewidths=1.0,
                         ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)
    else:
        hm = sns.heatmap(PAIR_DISTANCES, cmap=cfg.cmap, vmin=cfg.vmin, vmax=cfg.vmax,
                         xticklabels=xticklabels, yticklabels=yticklabels,
                         square=False, annot=False, linewidths=1.0,
                         ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)
    if cfg.show_cbar:
        cbar = hm.collections[0].colorbar
        cbar.set_label(label=cfg.cbar_label, fontweight=cfg.cbar_fontweight)
        _misc.cbar_set_ticks_position(cbar, cfg.cbar_location)
        if cfg.cbar_ticks is None and cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
            cbar.set_ticks(np.arange(0, 22, 2))
        if cfg.cbar_ticks is not None:
            cbar.set_ticks(cfg.cbar_ticks)

    ax.tick_params(left=False, bottom=False)  # hide ticks of heatmap
    plt.title("Local Accuracy", fontweight='bold')
    plt.xlabel("Residue ID", fontweight='bold')

    # table labels
    if cfg.show_frames:
        ax.text(cfg.text_pos_Frame[0], cfg.text_pos_Frame[1], 'Frame', fontweight='bold')
    if cfg.show_scores:
        ax.text(cfg.text_pos_TS[0], cfg.text_pos_TS[1], 'TS', fontweight='bold')
        ax.text(cfg.text_pos_HA[0], cfg.text_pos_HA[1], 'HA', fontweight='bold')

    # fix: tight_layout was called twice in a row; once is sufficient.
    plt.tight_layout()

    if cfg.save_as is not None:
        _misc.savefig(cfg.save_as)
    if len(FRAMES) > 50:
        print("Displaying data for more than 50 frames...")
        print("Consider reducing the input data (e.g. rank scores and use top 40 frames).")

    LA_data = (PAIR_DISTANCES, FRAMES)
    return (fig, ax, LA_data)
def calculate_psfs(output_prefix):
    """Tune a family of comparable line-STED vs. point-STED psfs.

    Loads cached comparisons from <output_prefix>/psf_comparisons.pkl when
    present; otherwise rebuilds them from the hand-tuned parameters below
    and caches the result. Returns (psfs, comparisons): 'psfs' maps
    descriptive names (resolution improvement + scan type, e.g. '1p5x_ld'
    = 1.5x line-descanned) to the PSF arrays extracted from each
    comparison; 'comparisons' holds the full psf_comparison_pair outputs.
    """
    comparison_filename = os.path.join(output_prefix, 'psf_comparisons.pkl')
    if os.path.exists(comparison_filename):
        print("Loading saved PSF comparisons...")
        # NOTE(review): file handle is never closed; consider a with-block.
        comparisons = pickle.load(open(comparison_filename, 'rb'))
    else:
        comparisons = {}
        # Yes, I really did tune all the parameters below by hand so the
        # comparisons came out perfectly. Ugh.
        comparisons['1p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=0.99, #Juuust under 1, ensures no STED
            line_resolution_improvement=0.99,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=4,
            line_scan_type='descanned',
            line_num_orientations=1,
            max_excitation_brightness=0.01) # Without STED, no reason to saturate
        comparisons['1p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=0.99, #Juuust under 1, ensures no STED
            line_resolution_improvement=1.38282445,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=4,
            line_scan_type='rescanned',
            line_num_orientations=2,
            max_excitation_brightness=0.01) # Without STED, no reason to saturate
        comparisons['1p5x_ld'] = psf_comparison_pair(
            point_resolution_improvement=1.5,
            line_resolution_improvement=2.68125,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=2.825,
            line_scan_type='descanned',
            line_num_orientations=3)
        comparisons['1p5x_lr'] = psf_comparison_pair(
            point_resolution_improvement=1.5,
            line_resolution_improvement=2.95425,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=2.618,
            line_scan_type='rescanned',
            line_num_orientations=3)
        comparisons['2p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=2,
            line_resolution_improvement=4.04057,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.007,
            line_scan_type='descanned',
            line_num_orientations=4)
        comparisons['2p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=2,
            line_resolution_improvement=4.07614,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.0227,
            line_scan_type='rescanned',
            line_num_orientations=4)
        comparisons['2p5x_ld'] = psf_comparison_pair(
            point_resolution_improvement=2.5,
            line_resolution_improvement=5.13325,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.792,
            line_scan_type='descanned',
            line_num_orientations=6)
        comparisons['2p5x_lr'] = psf_comparison_pair(
            point_resolution_improvement=2.5,
            line_resolution_improvement=5.15129,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.8,
            line_scan_type='rescanned',
            line_num_orientations=6)
        comparisons['3p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=3,
            line_resolution_improvement=5.94563,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=5.034,
            line_scan_type='descanned',
            line_num_orientations=8)
        comparisons['3p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=3,
            line_resolution_improvement=5.95587,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=5.0385,
            line_scan_type='rescanned',
            line_num_orientations=8)
        comparisons['4p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=4,
            line_resolution_improvement=7.8386627,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=7.371,
            line_scan_type='descanned',
            line_num_orientations=10)
        comparisons['4p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=4,
            line_resolution_improvement=7.840982,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=7.37195,
            line_scan_type='rescanned',
            line_num_orientations=10)
        print("Done calculating PSFs.\n")
    # Cache (or re-cache) the comparisons for the next run.
    if not os.path.isdir(output_prefix): os.makedirs(output_prefix)
    pickle.dump(comparisons, open(comparison_filename, 'wb'))
    # Summarize the excitation/depletion light dose for each comparison.
    print("Light dose (saturation units):")
    for c in sorted(comparisons.keys()):
        print("%s point-STED:%6s (excitation),%9s (depletion)"%(
            c,
            "%0.2f"%(comparisons[c]['point']['excitation_dose']),
            "%0.2f"%(comparisons[c]['point']['depletion_dose'])))
        print("%7s-line-STED:%6s (excitation),%9s (depletion)"%(
            c + '%3s'%('%i'%len(comparisons[c]['line_sted_psfs'])),
            "%0.2f"%(comparisons[c]['line']['excitation_dose']),
            "%0.2f"%(comparisons[c]['line']['depletion_dose'])))
    # Flatten into a name -> PSF mapping for the caller.
    psfs = {}
    for c in comparisons.keys():
        psfs[c + '_point_sted'] = comparisons[c]['point_sted_psf']
        psfs[c + '_line_%i_angles_sted'%len(comparisons[c]['line_sted_psfs'])
             ] = comparisons[c]['line_sted_psfs']
    return psfs, comparisons
def update(uuid: UUID, obj: BaseModel, db_table: DeclarativeMeta, db: Session):
    """Updates the object with the given UUID in the database.

    Designed to be called only by the API since it raises an HTTPException."""
    # Try to perform the update
    try:
        # exclude_unset is needed for update routes so that any values in the Pydantic model
        # that are not being updated are not set to None. Instead they will be removed from the dict.
        update_values = obj.dict(exclude_unset=True)
        statement = (
            sql_update(db_table)
            .where(db_table.uuid == uuid)
            .values(**update_values)
        )
        result = db.execute(statement)

        # Verify a row was actually updated
        if result.rowcount != 1:
            raise HTTPException(status_code=404, detail=f"UUID {uuid} does not exist.")

        commit(db)

    # An IntegrityError will happen if value already exists or was set to None
    except IntegrityError:
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Got an IntegrityError while updating UUID {uuid}.",
        )
def simulateGVecs(pd, detector_params, grain_params,
                  ome_range=[(-np.pi, np.pi), ],
                  ome_period=(-np.pi, np.pi),
                  eta_range=[(-np.pi, np.pi), ],
                  panel_dims=[(-204.8, -204.8), (204.8, 204.8)],
                  pixel_pitch=(0.2, 0.2),
                  distortion=None):
    """
    returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps

    panel_dims are [(xmin, ymin), (xmax, ymax)] in mm
    pixel_pitch is [row_size, column_size] in mm

    simulate the monochormatic scattering for a specified

        - space group
        - wavelength
        - orientation
        - strain
        - position
        - detector parameters
        - oscillation axis tilt (chi)

    subject to

        - omega (oscillation) ranges (list of (min, max) tuples)
        - eta (azimuth) ranges

    pd................a hexrd.crystallography.PlaneData instance
    detector_params...a (10,) ndarray containing the tilt angles (3),
                      translation (3), chi (1), and sample frame translation
                      (3) parameters
    grain_params......a (12,) ndarray containing the exponential map (3),
                      translation (3), and inverse stretch tensor compnents
                      in Mandel-Voigt notation (6).

    * currently only one panel is supported, but this will likely change soon

    NOTE: the mutable default arguments are only read, never modified here.
    """
    bMat = pd.latVecOps['B']
    wlen = pd.wavelength
    full_hkls = _fetch_hkls_from_planedata(pd)

    # extract variables for convenience
    rMat_d = xfcapi.makeDetectorRotMat(detector_params[:3])
    tVec_d = np.ascontiguousarray(detector_params[3:6])
    chi = detector_params[6]
    tVec_s = np.ascontiguousarray(detector_params[7:10])
    rMat_c = xfcapi.makeRotMatOfExpMap(grain_params[:3])
    tVec_c = np.ascontiguousarray(grain_params[3:6])
    vInv_s = np.ascontiguousarray(grain_params[6:12])

    # first find valid G-vectors
    angList = np.vstack(
        xfcapi.oscillAnglesOfHKLs(
            full_hkls[:, 1:], chi, rMat_c, bMat, wlen, vInv=vInv_s
        )
    )
    allAngs, allHKLs = _filter_hkls_eta_ome(
        full_hkls, angList, eta_range, ome_range
    )

    if len(allAngs) == 0:
        # nothing diffracts within the requested angular ranges
        valid_ids = []
        valid_hkl = []
        valid_ang = []
        valid_xy = []
        ang_ps = []
    else:
        # ??? preallocate for speed?
        det_xy, rMat_s, on_plane = _project_on_detector_plane(
            allAngs,
            rMat_d, rMat_c, chi,
            tVec_d, tVec_c, tVec_s,
            distortion
        )
        # keep only reflections landing inside the physical panel bounds
        on_panel_x = np.logical_and(
            det_xy[:, 0] >= panel_dims[0][0],
            det_xy[:, 0] <= panel_dims[1][0]
        )
        on_panel_y = np.logical_and(
            det_xy[:, 1] >= panel_dims[0][1],
            det_xy[:, 1] <= panel_dims[1][1]
        )
        on_panel = np.logical_and(on_panel_x, on_panel_y)
        #
        op_idx = np.where(on_panel)[0]
        # map omega into the requested period before returning
        valid_ang = allAngs[op_idx, :]
        valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period)
        valid_ids = allHKLs[op_idx, 0]
        valid_hkl = allHKLs[op_idx, 1:]
        valid_xy = det_xy[op_idx, :]
        ang_ps = angularPixelSize(valid_xy, pixel_pitch,
                                  rMat_d, rMat_s,
                                  tVec_d, tVec_s, tVec_c,
                                  distortion=distortion)

    return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps
def get_logger(name: str):
    """Get logger call.

    Args:
        name (str): Module name

    Returns:
        Logger: Return Logger object
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # Loggers are process-wide singletons: without this guard, every call
    # with the same name would attach another pair of handlers and each
    # log record would be emitted multiple times.
    if not logger.handlers:
        logger.addHandler(get_file_handler())
        logger.addHandler(get_stream_handler())
    return logger
def intel(csifile, index, ntxnum=2):
    """csitool

    Benchmark and demo the csiread.Intel API: times read() and the
    scaled-CSI getters (with and without in-place operation), then
    displays the packet at `index`.

    :param csifile: path to an Intel 5300 CSI .dat capture
    :param index: packet index passed to csidata.display()
    :param ntxnum: number of transmit antennas to parse (default 2)
    """
    print("="*40+"[intel]")
    members = [s for s in dir(csiread.Intel) if not s.startswith("__") and callable(getattr(csiread.Intel, s))]
    print("Methods: \n    ", members)
    print('Time:')
    last = default_timer()
    csidata = csiread.Intel(csifile, ntxnum=ntxnum, pl_size=10, if_report=False)
    csidata.read()
    print("  read                ", default_timer() - last, "s")
    last = default_timer()
    total_rss = csidata.get_total_rss()
    print("  get_total_rss       ", default_timer() - last, "s")
    last = default_timer()
    scaled_csi = csidata.get_scaled_csi()
    print("  get_scaled_csi      ", default_timer() - last, "s")
    last = default_timer()
    scaled_csi_sm = csidata.get_scaled_csi_sm()
    # scaled_csi_sm = csidata.apply_sm(scaled_csi)
    print("  get_scaled_csi_sm   ", default_timer() - last, "s")

    # Setting inplace to True may be dangerous but more efficient.
    # Fresh instances are used so the in-place variants start from raw CSI.
    temp = csiread.Intel(csifile, ntxnum=ntxnum, if_report=False)
    temp.read()
    last = default_timer()
    _ = temp.get_scaled_csi(inplace=True)       # _ is temp.csi == True
    print("  get_scaled_csi(T)   ", default_timer() - last, "s")
    temp = csiread.Intel(csifile, ntxnum=ntxnum, if_report=False)
    temp.read()
    last = default_timer()
    _ = temp.get_scaled_csi_sm(inplace=True)    # _ is temp.csi == True
    print("  get_scaled_csi_sm(T)", default_timer() - last, "s")
    print('-'*40)
    csidata.display(index)
    # print("a limitation: csidata.rate.size %d, csidata.rate.base.size %d" % (csidata.rate.size, csidata.rate.base.size))
    # print("help: \n", csidata.__doc__)
def create_label_colormap(dataset=_PASCAL):
    """Creates a label colormap for the specified dataset.

    Args:
      dataset: The colormap used in the dataset.

    Returns:
      A numpy array of the dataset colormap.

    Raises:
      ValueError: If the dataset is not supported.
    """
    # Dispatch table keyed by the supported dataset constants.
    colormap_builders = {
        _PASCAL: create_pascal_label_colormap,
        _CITYSCAPES: create_cityscapes_label_colormap,
        _ADE: create_ade_label_colormap,
    }
    if dataset not in colormap_builders:
        raise ValueError('Unsupported dataset.')
    return colormap_builders[dataset]()
async def delete_messages_by(context, target: int, name=None):
    """Delete all messages from a user.

    :param context: command invocation context (provides guild and channel)
    :param target: ID of the user whose messages should be purged
    :param name: optional display name; overwritten below when the member
        can be resolved from the guild cache
    """
    guild = context.guild
    async with context.typing():
        delete_gen = _delete_messages(guild, delete_messages_by_target(target))
        member = guild.get_member(target)
        target_user = bot.get_user(target)
        if member is not None:
            # BUG FIX: the original expression
            #   name = member.nick is not None if member.nick else member.name
            # assigned the *boolean* True whenever a nick existed. We want
            # the nick itself, falling back to the account name.
            name = member.nick if member.nick is not None else member.name
            title_text = f"Deleting messages for {name}"
        elif target_user is not None:
            title_text = f"Deleting messages for {target_user.name}"
        else:
            title_text = f"Deleting messages for {target}"
        embed_args = {
            "title": title_text,
            "done_message": "Purge complete"
        }
        await counter_messages(delete_gen, context, embed_args)
    await context.message.delete()
def verify_access_profile(access_profile):
    """
    Verify IdentityNow Access profile.

    Asserts that all required fields are present/non-None, that the boolean
    flags really are bools, and that the record type is 'accessprofile'.
    """
    for field in ('id', 'name', 'source', 'entitlements', 'entitlementCount',
                  'created', 'modified', 'synced'):
        assert access_profile[field] is not None
    for flag in ('enabled', 'requestable', 'requestCommentsRequired'):
        assert isinstance(access_profile[flag], bool)
    for field in ('owner', 'pod', 'org'):
        assert access_profile[field] is not None
    assert access_profile['type'] == 'accessprofile'
def test_continuousopacityflags():
    """Code coverage tests for ContinuousOpacityFlags() class and methods.
    """
    cof = ContinuousOpacityFlags()
    # 'H-' occupies index 2 of the SME library flag vector and defaults to on,
    # as demonstrated by the assertions below.
    assert cof.smelib[2] == 1
    cof['H-'] = False
    assert cof.smelib[2] == 0
    # Unknown species names and non-bool values must both raise ValueError.
    with raises(ValueError):
        cof['H++'] = True
    with raises(ValueError):
        cof['H-'] = 1
    # The string representation should mention the flag name.
    assert 'H-' in cof.__str__()
def SDM_lune(params, dvals, title=None, label_prefix='ham='):
    """Exact calculation for SDM circle intersection. For some reason mine is a slight upper bound on the results found in the book. Uses a proof from Appendix B of the SDM book (Kanerva, 1988). Difference is neglible when norm=True."""
    intersections = expected_intersection_lune(
        params.n, dvals, params.hamm_radius, params.r)
    if params.plot_lines:
        plot_line(dvals, intersections, label_prefix,
                  params.hamm_radius, params.norm)
    if params.fit_beta_and_plot_attention:
        # Fit a beta curve to the exact result and plot it alongside.
        beta_curve, _beta = fit_beta_regression(params.n, dvals, intersections)
        plot_line(dvals, beta_curve, 'fit_beta | ' + label_prefix,
                  params.hamm_radius, params.norm)
    if title:  # else can call "label plot separately"
        label_plot(title, params.norm)
    return intersections
def font_match(obj):
    """
    Matches the given input againts the available
    font type matchers.

    Args:
        obj: path to file, bytes or bytearray.

    Returns:
        Type instance if matches. Otherwise None.

    Raises:
        TypeError: if obj is not a supported type.
    """
    # Delegate to the generic matcher with the font-specific matcher set.
    candidates = font_matchers
    return match(obj, candidates)
async def insert_new_idol(*args):
    """
    Insert a new idol.

    ``*args`` must supply one value per column in ``IDOL_COLUMNS``, in order;
    they are bound to the $1..$N placeholders of the INSERT statement.
    NOTE(review): this function references ``self.conn`` although ``self`` is
    not a parameter -- presumably it is defined inside a method/closure that
    provides ``self``; confirm the enclosing scope.
    """
    await self.conn.execute(f"INSERT INTO groupmembers.member({', '.join(IDOL_COLUMNS)}) VALUES "
                            f"({', '.join([f'${value}' for value in range(1, len(IDOL_COLUMNS) + 1)])})", *args)
def test_pde_vector():
    """ test PDE with a single vector field """
    equation = PDE({"u": "vector_laplace(u) + exp(-t)"})
    assert equation.explicit_time_dependence
    assert not equation.complex_valued
    grid = grids.UnitGrid([8, 8])
    initial_state = VectorField.random_normal(grid)
    # Solve with both backends and require matching results.
    numpy_result = equation.solve(
        initial_state, t_range=1, dt=0.01, backend="numpy", tracker=None)
    numba_result = equation.solve(
        initial_state, t_range=1, dt=0.01, backend="numba", tracker=None)
    numpy_result.assert_field_compatible(numba_result)
    np.testing.assert_allclose(numpy_result.data, numba_result.data)
def process_image(debug=False):
    """Processes an image by:
    -> sending the file to Vision Azure api
    -> returns a dictionary containing the caption
    and confidence level associated with that image
    TODO implement
    """
    # NOTE(review): neither ``response`` nor ``caption_response`` is defined in
    # this function; per the TODO above this handler looks unfinished --
    # confirm before relying on it. ``debug`` is currently unused.
    # get the json data
    json_data = response.json()
    # NOTE(review): ``isinstance(json_data, list)`` would be the idiomatic
    # check; HTTP 402 (Payment Required) is an unusual code for bad input.
    if json_data is None or type(json_data) is not list:
        return jsonify({'error': 'Not a JSON'}), 402
    return jsonify(caption_response.to_dict())
def time_monotonically_increases(func_or_granularity):
    """
    Decorate a unittest method with this function to cause the value
    of :func:`time.time` and :func:`time.gmtime` to monotonically
    increase by one each time it is called. This ensures things like
    last modified dates always increase.
    We make three guarantees about the value of :func:`time.time`
    returned while the decorated function is running:
    1. It is always *at least* the value of the *real*
       :func:`time.time`;
    2. Each call returns a value greater than the previous call;
    3. Those two constraints hold across different invocations of
       functions decorated. This decorator can be applied to a
       method in a test case::
        class TestThing(unittest.TestCase)
            @time_monotonically_increases
            def test_method(self):
                t = time.time()
                ...
    It can also be applied to a bare function taking any number of
    arguments::
        @time_monotonically_increases
        def utility_function(a, b, c=1):
            t = time.time()
            ...
    By default, the time will be incremented in 1.0 second intervals.
    You can specify a particular granularity as an argument; this is
    useful to keep from running too far ahead of the real clock::
        @time_monotonically_increases(0.1)
        def smaller_increment():
            t1 = time.time()
            t2 = time.time()
            assert t2 == t1 + 0.1
    """
    # Support both usages: the factory form ``@time_monotonically_increases(0.1)``
    # (argument is a number) and the bare form ``@time_monotonically_increases``
    # (argument is the function being decorated).
    if isinstance(func_or_granularity, (six.integer_types, float)):
        # We're being used as a factory.
        wrapper_factory = _TimeWrapper(func_or_granularity)
        return wrapper_factory
    # We're being used bare
    wrapper_factory = _TimeWrapper()
    return wrapper_factory(func_or_granularity)
def vis_step(writer, step, dicts):
    """
    Log one scalar curve per entry of ``dicts`` to ``writer`` at ``step``.
    """
    for tag, value in dicts.items():
        writer.add_scalar(tag, value, step)
def rotate_coordinates(coords: np.ndarray, axis_coords: np.ndarray) -> np.ndarray:
    """
    Rotate ``coords`` into the principal axis frame.

    The eigenvectors of the principal moments of inertia tensor are
    interpreted as a rotation matrix via scipy's ``Rotation`` class and
    applied to the coordinates.

    Parameters
    ----------
    coords : np.ndarray
        NumPy 1D array containing xyz coordinates
    axis_coords : np.ndarray
        NumPy 2D array (shape 3x3) containing the principal axis vectors

    Returns
    -------
    np.ndarray
        NumPy 1D array containing the rotated coordinates.
    """
    rotation = R.from_matrix(axis_coords)
    return rotation.apply(coords)
def get_maggy_ddp_wrapper(module: Type[TorchModule]):
    """Factory function for MaggyDDPModuleWrapper.

    :param module: PyTorch module class passed by the user.
    :returns: A wrapper class that instantiates ``module``, moves it to the
        GPU and wraps it in DistributedDataParallel.
    """
    class MaggyDDPModuleWrapper(TorchDistributedDataParallel):
        """Wrapper around PyTorch's DDP Module.
        The wrapper replaces the user's module. Since the module's signature needs to be preserved,
        we cannot add the module as an additional parameter during initialization. Instead, it is
        configured by its factory function.
        """
        # Name-mangled to ``_MaggyDDPModuleWrapper__module`` so it cannot
        # collide with attributes the DDP base class defines.
        __module = module  # Avoid overwriting torch module
        def __init__(self, *args: Any, **kwargs: Any):
            """Initializes the previously set module, moves it to the GPU and initializes a DDP
            module with it.
            :param args: Arguments passed by the user for module initialization.
            :param kwargs: Keyword arguments passed by the user for module initialization.
            """
            # Avoid self because bound method adds to args which makes the function call fail
            model = MaggyDDPModuleWrapper.__module(*args, **kwargs).cuda()
            super().__init__(model)
    return MaggyDDPModuleWrapper
def random_jitter(cv_img, saturation_range, brightness_range, contrast_range):
    """Randomly jitter the saturation, brightness and contrast of an image.

    A random adjustment ratio is drawn uniformly from each range, and the
    three effects are then stacked on top of each other in a random order.

    Args:
        cv_img(numpy.ndarray): input image
        saturation_range(float): saturation jitter range, 0-1
        brightness_range(float): brightness jitter range, 0-1
        contrast_range(float): contrast jitter range, 0-1

    Returns:
        The image with brightness, saturation and contrast adjusted.
    """
    saturation_ratio = np.random.uniform(-saturation_range, saturation_range)
    brightness_ratio = np.random.uniform(-brightness_range, brightness_range)
    contrast_ratio = np.random.uniform(-contrast_range, contrast_range)
    # Bug fix: the original code shuffled [1, 2, 3] but dispatched on values
    # 0/1/2, so saturation_jitter was never applied and one shuffle slot was
    # a no-op. Shuffling [0, 1, 2] makes all three effects reachable.
    order = [0, 1, 2]
    np.random.shuffle(order)
    for effect in order:
        if effect == 0:
            cv_img = saturation_jitter(cv_img, saturation_ratio)
        elif effect == 1:
            cv_img = brightness_jitter(cv_img, brightness_ratio)
        elif effect == 2:
            cv_img = contrast_jitter(cv_img, contrast_ratio)
    return cv_img
def render_orchestrator_inputs() -> Union[Driver, None]:
    """ Renders input form for collecting orchestrator-related connection
        metadata, and assembles a Synergos Driver object for subsequent use.

    Returns:
        Connected Synergos Driver (Driver)
    """
    with st.sidebar.beta_container():
        st.header("NETWORK")
        with st.beta_expander("Orchestrator Parameters", expanded=True):
            orch_host = st.text_input(
                label="Orchestrator IP:",
                help="Declare the server IP of your selected orchestrator."
            )
            orch_port = st.number_input(
                label="Orchestrator Port:",
                value=5000,
                help="Declare the access port of your selected orchestrator."
            )
        # A None driver ensures rendering of unpopulated widgets downstream.
        connected = is_connection_valid(host=orch_host, port=orch_port)
        driver = Driver(host=orch_host, port=orch_port) if connected else None
    return driver
def json_cache_wrapper(func, intf, cache_file_ident):
    """
    Wrapper for saving/restoring rpc-call results inside cache files.

    :param func: the RPC-call function whose JSON-serializable result is cached
    :param intf: interface object providing ``config.cache_dir``
    :param cache_file_ident: identifier used in the cache file name
    :returns: a wrapper function with the same call signature as ``func``
    """
    def json_call_wrapper(*args, **kwargs):
        cache_file = intf.config.cache_dir + '/insight_dash_' + cache_file_ident + '.json'
        try:  # looking into cache first
            # ``with`` closes the handle even on parse errors (the original
            # leaked the descriptor), and ``except Exception`` keeps the
            # best-effort semantics without swallowing KeyboardInterrupt.
            with open(cache_file) as handle:
                cached = simplejson.load(handle)
            logging.debug('Loaded data from existing cache file: ' + cache_file)
            return cached
        except Exception:
            # Cache miss or unreadable/corrupt cache: fall through to the RPC.
            pass
        # if not found in cache, call the original function
        result = func(*args, **kwargs)
        try:
            with open(cache_file, 'w') as handle:
                simplejson.dump(result, handle)
        except Exception:
            # Saving is best-effort; log and return the fresh result anyway.
            logging.exception('Cannot save data to a cache file')
        return result
    return json_call_wrapper
def recode_graph(dot, new_dot, pretty_names, rules_to_drop, color=None, use_pretty_names=True):
    """Change `dot` label info to pretty_names and alter styling.

    :param dot: path of the input Graphviz .dot file
    :param new_dot: path the rewritten .dot file is written to
    :param pretty_names: mapping of rule name -> human-readable description
    :param rules_to_drop: substrings; matching node lines (and their edges)
        are removed from the output graph
    :param color: node fill color, defaults to "#50D0FF"
    :param use_pretty_names: if False, labels/colors are left untouched
    """
    if color is None:
        color = "#50D0FF"
    # Edge patterns for dropped nodes; filled while scanning node lines and
    # consulted when later edge lines are encountered (relies on Graphviz
    # emitting node definitions before the edges that use them).
    node_patterns_to_drop = []
    with open(dot, mode='r') as dot:
        with open(new_dot, mode='w') as new_dot:
            for line in dot:
                if '[label = "' in line:
                    # Node-definition line: optionally rewrite label/color.
                    data = digest_node_line(line=line)
                    rule_name = data['label']
                    if use_pretty_names:
                        pretty_name = textwrap.fill(pretty_names[rule_name], width=40).replace('\n', '\\n')
                        full_name = "[{rule_name}]\\n{pretty_name}".format(rule_name=rule_name,
                                                                           pretty_name=pretty_name)
                        data['label'] = full_name
                        data['color'] = color
                    else:
                        pass
                    # Re-serialize every field except the node number ([1:]).
                    fields = ', '.join(['{k} = "{v}"'.format(k=k, v=v) for k, v in data.items()][1:])
                    if should_ignore_line(line, strings_to_ignore=rules_to_drop):
                        # Remember both edge directions for this node id so
                        # its edge lines are skipped below.
                        node_patterns_to_drop.append("\t{num} ->".format(num=data['num']))
                        node_patterns_to_drop.append("-> {num}\n".format(num=data['num']))
                        continue
                    new_line = """\t{num}[{fields}];\n""".format(num=data['num'], fields=fields)
                    new_dot.write(new_line)
                else:
                    # Non-node line: drop edges of removed nodes, retheme fonts.
                    if should_ignore_line(line, strings_to_ignore=node_patterns_to_drop):
                        continue
                    elif "fontname=sans" in line:
                        line = line.replace("fontname=sans", "fontname=Cantarell")
                        line = line.replace("fontsize=10", "fontsize=11")
                        new_dot.write(line)
                    else:
                        new_dot.write(line)
def type_assert_dict(
    d,
    kcls=None,
    vcls=None,
    allow_none: bool=False,
    cast_from=None,
    cast_to=None,
    dynamic=None,
    objcls=None,
    ctor=None,
    desc: str=None,
    false_to_none: bool=False,
    check=None,
):
    """ Checks that every key/value in @d is an instance of @kcls: @vcls
        Will also unmarshal JSON objects to Python objects if
        the value is an instance of dict and @vcls is a class type
        Args:
            d:             The dict to type assert
            kcls:          The class to type assert for keys.
                           NOTE: JSON only allows str keys
            vcls:          The class to type assert for values
            allow_none:    Allow a None value for the values.
                           This would not make sense for the keys.
            cast_from:     type-or-tuple-of-types, If @obj is an instance
                           of this type(s), cast it to @cast_to
            cast_to:       type, The type to cast @obj to if it's an instance
                           of @cast_from, or None to cast to @cls.
                           If you need more than type(x), use a lambda or
                           factory function.
            dynamic:       @cls, A dynamic default value if @d is None,
                           and @dynamic is not None.
            objcls:        None-or-type, a type to assert @d is,
                           ie: dict, etc...
                           Note that isinstance considers
                           collections.OrderedDict to be of type dict
            ctor:          None-or-static-method: Use this method as the
                           constructor instead of __init__
            desc:          None-or-string, an optional description for this field,
                           for using this function to fully replace docstrings
            false_to_none: bool, True to cast falsey values such as "", 0, [],
                           to None
            check:         None-lambda-function, Single argument function to check
                           a value, return False if not valid, for example:
                           lambda x: x >= 0 and x < 256
        Returns:
            @d, note that @d will be recreated, which
            may be a performance concern if @d has many items
        Raises:
            TypeError: If a key is not an instance of @kcls or
                       a value is not an instance of @vcls
            ValueError: If @check is not None and a value fails @check
    """
    # Optional container-type assertion (e.g. that @d really is a dict).
    _check_dstruct(d, objcls)
    if (
        d is None
        and
        dynamic is not None
    ):
        d = dynamic
    # Rebuild with the *original* mapping type so dict subclasses
    # (e.g. OrderedDict) are preserved in the returned value.
    t = type(d)
    return t(
        (
            _check(k, kcls) if kcls else k,
            _check(
                v,
                vcls,
                allow_none,
                cast_from,
                cast_to,
                ctor=ctor,
                false_to_none=false_to_none,
                check=check,
            ) if vcls else v,
        )
        for k, v in d.items()
    )
def get_missing_ids(raw, results):
    """
    Compare cached results with overall expected IDs, return missing ones.
    Returns a set.
    """
    expected = set(raw)
    cached = set(results)
    missing = expected - cached
    print("There are {0} IDs in the dataset, we already have {1}. {2} are missing.".format(
        len(expected), len(cached), len(expected) - len(cached)))
    return missing
def _spaghettinet_edgetpu_s():
  """Architecture definition for SpaghettiNet-EdgeTPU-S.

  Returns:
    SpaghettiNodeSpecs describing the stem, backbone nodes (n0-n5) and the
    cross-scale nodes (c0n0-c0n5) whose outputs feed the detection heads.
  """
  nodes = collections.OrderedDict()
  # Feature maps exported to the detector (levels 4-8).
  outputs = ['c0n1', 'c0n2', 'c0n3', 'c0n4', 'c0n5']
  # Stem: initial convolution producing 24 filters.
  nodes['s0'] = SpaghettiStemNode(kernel_size=5, num_filters=24)
  # Backbone: each node halves the resolution (stride-2 first layer) and
  # feeds the next via a passthrough edge.
  nodes['n0'] = SpaghettiNode(
      num_filters=48,
      level=2,
      layers=[
          IbnFusedGrouped(3, 8, 2, 3, False),
      ],
      edges=[SpaghettiPassthroughEdge(input='s0')])
  nodes['n1'] = SpaghettiNode(
      num_filters=64,
      level=3,
      layers=[
          IbnFusedGrouped(3, 4, 2, 4, False),
          IbnFusedGrouped(3, 4, 1, 4, True),
          IbnFusedGrouped(3, 4, 1, 4, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n0')])
  nodes['n2'] = SpaghettiNode(
      num_filters=72,
      level=4,
      layers=[
          IbnOp(3, 8, 2, False),
          IbnFusedGrouped(3, 8, 1, 4, True),
          IbnOp(3, 8, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n1')])
  nodes['n3'] = SpaghettiNode(
      num_filters=88,
      level=5,
      layers=[
          IbnOp(3, 8, 2, False),
          IbnOp(3, 8, 1, True),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n2')])
  nodes['n4'] = SpaghettiNode(
      num_filters=88,
      level=6,
      layers=[
          IbnOp(3, 8, 2, False),
          SepConvOp(5, 1, True),
          SepConvOp(5, 1, True),
          SepConvOp(5, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n3')])
  nodes['n5'] = SpaghettiNode(
      num_filters=88,
      level=7,
      layers=[
          SepConvOp(5, 2, False),
          SepConvOp(3, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n4')])
  # Cross-scale nodes: merge backbone features of different levels via
  # resampling edges before they are exported as outputs.
  nodes['c0n0'] = SpaghettiNode(
      num_filters=144,
      level=5,
      layers=[
          IbnOp(3, 4, 1, False),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[
          SpaghettiResampleEdge(input='n3'),
          SpaghettiResampleEdge(input='n4')
      ])
  nodes['c0n1'] = SpaghettiNode(
      num_filters=120,
      level=4,
      layers=[
          IbnOp(3, 8, 1, False),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[
          SpaghettiResampleEdge(input='n2'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n2'] = SpaghettiNode(
      num_filters=168,
      level=5,
      layers=[
          IbnOp(3, 4, 1, False),
      ],
      edges=[
          SpaghettiResampleEdge(input='c0n1'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n3'] = SpaghettiNode(
      num_filters=136,
      level=6,
      layers=[
          IbnOp(3, 4, 1, False),
          SepConvOp(3, 1, True),
      ],
      edges=[
          SpaghettiResampleEdge(input='n5'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n4'] = SpaghettiNode(
      num_filters=136,
      level=7,
      layers=[
          IbnOp(3, 4, 1, False),
      ],
      edges=[
          SpaghettiResampleEdge(input='n5'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n5'] = SpaghettiNode(
      num_filters=64,
      level=8,
      layers=[
          SepConvOp(3, 1, False),
          SepConvOp(3, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='c0n4')])
  node_specs = SpaghettiNodeSpecs(nodes=nodes, outputs=outputs)
  return node_specs
def test_memoryview_supports_bytes(valid_bytes_128):
    """
    Assert that the `bytes` representation of a :class:`~ulid.ulid.MemoryView` is equal to the
    result of the :meth:`~ulid.ulid.MemoryView.bytes` method.
    """
    mv = ulid.MemoryView(valid_bytes_128)
    # bytes(mv) goes through __bytes__, which must agree with the property.
    assert bytes(mv) == mv.bytes
def edges(nodes, score) -> Iterable[Tuple[int, int]]:
    """
    Yield the edges of a tree over ``nodes`` vertices whose path-length score
    equals ``score``, or the sentinel ``(-1, -1)`` when impossible.

    1. Check if the score is possible.
    2. Greedily attach each new node as deep as the remaining score allows.
    :time: O(n)
    :space: O(n)
    """
    min_score = nodes - 1
    max_score = (nodes * nodes - nodes) // 2
    if not (min_score <= score <= max_score):
        yield -1, -1
        return
    remaining = score
    parent = 1
    for child in range(2, nodes + 1):
        still_needed = nodes - child
        if remaining - parent >= still_needed:
            # Extend the chain: attach to the deepest node so far.
            remaining -= parent
            parent += 1
            yield child - 1, child
        else:
            # Attach shallower so the remaining nodes can still be placed.
            attach_to = remaining - still_needed
            remaining -= attach_to
            yield attach_to, child
def instance_of(type):
    """
    A validator that raises a :exc:`TypeError` if the initializer is called
    with a wrong type for this particular attribute (checks are perfomed using
    :func:`isinstance` therefore it's also valid to pass a tuple of types).
    :param type: The type to check for.
    :type type: type or tuple of types
    The :exc:`TypeError` is raised with a human readable error message, the
    attribute (of type :class:`attr.Attribute`), the expected type, and the
    value it got.
    """
    # Delegates to the internal validator class; the parameter deliberately
    # shadows the ``type`` builtin to keep the public API name.
    return _InstanceOfValidator(type)
def execute_command(command):
    """
    execute command

    Runs ``command`` via subprocess (no shell). Commands containing "nohup"
    or the literal two characters backslash-ampersand are treated as
    background jobs whose output goes to result.log.

    :returns: tuple ``(status, result)`` -- the child's return code and its
        combined output.
    """
    output = subprocess.PIPE
    flag = 0
    # for background execution
    # NOTE(review): "\&" is a literal backslash followed by '&' (not an
    # escaped ampersand), so a plain "&" never matches -- confirm intent.
    if("nohup" in command) or ("\&" in command):
        output = open('result.log', 'w')
        flag = 1
    child = subprocess.Popen(command.split(), shell=False, stdout=output, stderr=output, preexec_fn=os.setpgrp)
    ret = child.communicate()
    err = ""
    if (flag == 0):
        # NOTE(review): communicate() yields bytes here, but the background
        # branch below produces str (via the recursive "cat"); callers get a
        # mixed result type -- confirm downstream handling.
        result = ret[0]
        err = ret[1]
    else:
        # Background job: read back the log the child wrote.
        status, result = execute_command("cat result.log")
    status = child.returncode
    if(status != 0):
        LOG_ERROR("=== execute " + command + " failed, information = " + result + " error information:" + err.decode('utf-8'))
    if err is not None:
        result += err.decode('utf-8')
    return (status, result)
def ass(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#ass

    Apply the ffmpeg "ass" (Advanced SubStation Alpha subtitles) filter.
    """
    # ``filter`` is this package's filter-building helper (it shadows the
    # builtin); ``ass.__name__`` supplies the ffmpeg filter name "ass".
    return filter(stream, ass.__name__, *args, **kwargs)
def record_generator(rec):
    """
    Creates BioRec from SeqRec

    Yields one BioRec per feature when the record has features, otherwise a
    single SOURCE-typed BioRec for the whole sequence. BioRec inputs are
    passed through unchanged.
    """
    # Already in the correct format
    if isinstance(rec, BioRec):
        yield rec
    else:
        # Handling single or nested records.
        if not rec.features:
            # Attempts to read annotations from description.
            ann, desc = parse_desc(rec)
            brec = BioRec(id=rec.id, ann=ann, parent=ann, type=SOURCE, seq=rec.seq, desc=ann)
            yield brec
        else:
            # Reads annotations from the sequence record
            pairs = [(k, json_ready(v)) for (k, v) in rec.annotations.items()]
            # These are the parent annotations that apply globally to all features.
            parent = dict(pairs)
            # Yield a record for each feature.
            for feat in rec.features:
                # Extract feature qualifiers.
                pairs = [(k, json_ready(v)) for (k, v) in feat.qualifiers.items()]
                ann = dict(pairs)
                # Remap types into standard naming.
                ftype = SEQUENCE_ONTOLOGY.get(feat.type, feat.type)
                # Extract the sequence for the feature.
                seq = feat.extract(rec.seq)
                # The source annotations are global
                if feat.type == SOURCE:
                    parent.update(ann)
                # Generate uid and description
                uid, desc = generate_uid(ftype=ftype, ann=ann, description=rec.description)
                # Inherit from source
                uid = uid or rec.id
                # Create the BioRecord for the feature.
                brec = BioRec(id=uid, ann=ann, parent=parent, seq=seq, type=ftype, desc=desc, source=rec.id)
                # Correct the feature coordinates: convert from BioPython's
                # 0-based half-open locations to 1-based inclusive start/end.
                brec.strand = feat.strand
                brec.start, brec.end = int(feat.location.start) + 1, int(feat.location.end)
                brec.locs = [(loc.start + 1, loc.end, loc.strand) for loc in
                             feat.location.parts]
                yield brec
def resultcallback(group):
    """Compatibility layer for Click 7 and 8."""
    callback = getattr(group, "result_callback", None)
    if callback is not None:
        # Click >= 8.0 exposes ``result_callback``.
        return callback()
    # Click < 8.0
    return group.resultcallback()
def test_non_existent_weight(base_clumper):
    """
    Make sure that providing non existent weight key throws error
    """
    # Sampling with a weights key that the items do not contain must fail
    # fast with KeyError rather than silently ignoring the weights.
    with pytest.raises(KeyError):
        base_clumper.sample(n=len(base_clumper) - 1, replace=False, weights="my-key")
def get_license_matches(location=None, query_string=None):
    """
    Return a sequence of LicenseMatch objects.
    """
    if query_string:
        # Import lazily: building the license index is expensive and only
        # needed when there is actually something to match.
        from licensedcode import cache
        index = cache.get_index()
        return index.match(location=location, query_string=query_string)
    return []
def deploy():
    """Run deployment tasks."""
    # Currently the only deployment step is (re)initialising the app.
    setup_app()
def bond_topology_summaries_from_csv(filename):
    """Beam DoFn for generating bare BondTopologySummary.

    Args:
        filename: csv file of bond topologies to read

    Yields:
        (bond_topology_id, dataset_pb2.BondTopologySummary) pairs
    """
    for topology in smu_utils_lib.generate_bond_topologies_from_csv(filename):
        summary = dataset_pb2.BondTopologySummary()
        summary.bond_topology.CopyFrom(topology)
        # All count fields deliberately stay at their proto default of 0.
        yield topology.bond_topology_id, summary
def test_read_additional_molecules_invalid() -> None:
    """Raises ValueError for profiles without additional molecules."""
    # DAY_200KM is a profile without additional-molecule data, so the reader
    # must reject it rather than return partial results.
    with pytest.raises(ValueError):
        read_additional_molecules(Identifier.DAY_200KM)
def compute_corrector_prf(results, logger, on_detected=True):
    """
    Compute detection and correction precision/recall/F1 for a spelling
    corrector.

    :param results: iterable of (src, tgt, predict, d_predict) tuples, where
        src is the noisy input, tgt the gold text and predict the corrected
        output (d_predict is unused here)
    :param logger: logger passed through to report_prf
    :param on_detected: if True, correction F1 is computed only over
        correctly-detected positions; otherwise the common (stricter) metric
        that also counts missed detections is used
    :returns: (detection_f1, correction_f1, details-dict)

    References:
        https://github.com/sunnyqiny/
        Confusionset-guided-Pointer-Networks-for-Chinese-Spelling-Check/blob/master/utils/evaluation_metrics.py
    """
    TP = 0
    FP = 0
    FN = 0
    all_predict_true_index = []
    all_gold_index = []
    for item in results:
        src, tgt, predict, d_predict = item
        # Gold error positions: indices where the input differs from the gold.
        gold_index = []
        for i in range(len(list(src))):
            if src[i] == tgt[i]:
                continue
            else:
                gold_index.append(i)
        all_gold_index.append(gold_index)
        # Predicted error positions: indices where the model changed the input.
        predict_index = []
        for i in range(len(list(src))):
            if src[i] == predict[i]:
                continue
            else:
                predict_index.append(i)
        each_true_index = []
        for i in predict_index:
            if i in gold_index:
                TP += 1
                each_true_index.append(i)
            else:
                FP += 1
        for i in gold_index:
            if i in predict_index:
                continue
            else:
                FN += 1
        all_predict_true_index.append(each_true_index)
    # For the detection Precision, Recall and F1
    dp, dr, detection_f1 = report_prf(TP, FP, FN,
                                      'detection', logger=logger)
    # store FN counts
    n_misreported = int(FN)
    TP = 0
    FP = 0
    FN = 0
    # we only detect those correctly detected location, which is a different from the common metrics since
    # we wanna to see the precision improve by using the confusion set
    for i in range(len(all_predict_true_index)):
        if len(all_predict_true_index[i]) > 0:
            predict_words = []
            for j in all_predict_true_index[i]:
                predict_words.append(results[i][2][j])
                # Correct character proposed at a correctly detected position?
                if results[i][1][j] == results[i][2][j]:
                    TP += 1
                else:
                    FP += 1
            for j in all_gold_index[i]:
                # NOTE(review): membership test is by character value, not by
                # position -- a matching character elsewhere also counts.
                if results[i][1][j] in predict_words:
                    continue
                else:
                    FN += 1
    # For the correction Precision, Recall and F1
    cp, cr, correction_f1 = report_prf(TP, FP, FN,
                                       'correction', logger=logger)
    # common metrics to compare with other baseline methods.
    ccp, ccr, correction_cf1 = report_prf(TP, FP, FN + n_misreported,
                                          'correction_common', logger=logger)
    if not on_detected:
        correction_f1 = correction_cf1
    details = {
        'det_p': dp,
        'det_r': dr,
        'det_f1': detection_f1,
        'cor_p': cp,
        'cor_r': cr,
        'cor_f1': correction_f1,
        'common_cor_p': ccp,
        'common_cor_r': ccr,
        'common_cor_f1': correction_cf1,
    }
    return detection_f1, correction_f1, details
def task_ut():
    """run unit-tests"""
    for test in TEST_FILES:
        # One doit sub-task per test file, re-run whenever sources change.
        task = {
            'name': test,
            'actions': [(run_test, (test,))],
            'file_dep': PY_FILES,
            'verbosity': 0,
        }
        yield task
def nonzero_sign(x, name=None):
    """Returns the sign of x with sign(0) defined as 1 instead of 0.

    Args:
        x: A tensor (or value convertible to one).
        name: Optional name for the TF op scope.

    Returns:
        A tensor shaped like ``x`` holding +1.0 where ``x >= 0`` and -1.0
        elsewhere.
    """
    with tf.compat.v1.name_scope(name, 'nonzero_sign', [x]):
        x = tf.convert_to_tensor(value=x)
        one = tf.ones_like(x)
        # where() selects ``one`` where the predicate holds, ``-one`` otherwise.
        return tf.compat.v1.where(tf.greater_equal(x, 0.0), one, -one)
def compile_modules(current_dir: str, use_cgo: bool = False) -> None:
    """
    Compile modules via GNU Make.

    Walks ``<current_dir>/modules`` looking for directories containing a
    ``Makefile`` and runs ``make`` in each. Exits the process with status 1
    when any module fails to build; restores the working directory at the end.

    :param current_dir: project root containing the ``modules`` directory
    :param use_cgo: build the ``all`` target (CGO) instead of plain ``gcc``
    """
    for root, _dirs, files in os.walk(os.path.join(current_dir, "modules"), topdown=False):
        if "Makefile" not in files:
            continue
        os.chdir(root)
        print("-" * 80)
        # Bug fix: the original passed ``root`` as a second print() argument
        # ("Compiling module at %s", root), so the literal "%s" was printed.
        print("Compiling module at {}".format(root))
        target = "all" if use_cgo else "gcc"
        out = os.system("make {}".format(target))
        if out:
            print("Error: module {} compilation failed. Check the output above.".format(root))
            sys.exit(1)
    os.chdir(current_dir)
def tls_control_system_tdcops(tls_control_system):
    """Control system with time-dependent collapse operators.

    Builds on the ``tls_control_system`` fixture: both objectives reuse its
    Hamiltonians but share a single collapse operator 0.1*sigma+ modulated
    by the first control.
    """
    objectives, controls, _ = tls_control_system
    # Collapse operator in nested-list (QuTiP time-dependent) format.
    c_op = [[0.1 * sigmap(), controls[0]]]
    c_ops = [c_op]
    H1 = objectives[0].H
    H2 = objectives[1].H
    objectives = [
        krotov.Objective(
            initial_state=ket('0'), target=ket('1'), H=H1, c_ops=c_ops
        ),
        krotov.Objective(
            initial_state=ket('0'), target=ket('1'), H=H2, c_ops=c_ops
        ),
    ]
    controls_mapping = krotov.conversions.extract_controls_mapping(
        objectives, controls
    )
    return objectives, controls, controls_mapping
def test_verify_test_data_sanity():
    """Checks all test data is consistent.
    Keys within each test category must be consistent, but keys can vary
    category to category. E.g date-based episodes do not have a season number
    """
    from helpers import assertEquals
    # ``files`` is a module-level mapping of category -> list of test cases.
    for test_category, testcases in files.items():
        keys = [ctest.keys() for ctest in testcases]
        # Pairwise comparison: every case within a category must expose the
        # same key set (sorted to ignore key ordering).
        for k1 in keys:
            for k2 in keys:
                assertEquals(sorted(k1), sorted(k2))
def create_new_credential(site_name, account_name, account_password):
    """Create and return a new Credentials object for an account."""
    return Credentials(site_name, account_name, account_password)
def test_enum_strings_changed(qtbot, signals, values, selected_index, expected):
    """
    Test the widget's handling of enum strings, which are choices presented to the user, and the widget's ability to
    update the selected enum string when the user provides a choice index.
    This test will also cover value_changed() testing.
    Expectations:
    The widget displays the correct enum string whose index from the enum string tuple is selected by the user.
    Parameters
    ----------
    qtbot : fixture
        pytest-qt window for widget testing
    signals : fixture
        The signals fixture, which provides access signals to be bound to the appropriate slots
    values : tuple
        A set of enum strings for the user to choose from
    selected_index : int
        The index from the enum string tuple chosen by the user
    expected : str
        The expected enum string displayed by the widget after receiving the user's choice index
    """
    pydm_enumcombobox = PyDMEnumComboBox()
    qtbot.addWidget(pydm_enumcombobox)
    # Deliver the enum strings to the widget through the test signal.
    signals.enum_strings_signal.connect(pydm_enumcombobox.enumStringsChanged)
    signals.enum_strings_signal.emit(values)
    # Deliver the user's selected index as a typed channel value change.
    signals.new_value_signal[type(selected_index)].connect(pydm_enumcombobox.channelValueChanged)
    signals.new_value_signal[type(selected_index)].emit(selected_index)
    assert pydm_enumcombobox.value == selected_index
    assert pydm_enumcombobox.currentText() == expected
def create_inception_graph():
    """Creates a graph from saved GraphDef file and returns a Graph object.
    Returns:
        Graph holding the trained Inception network, and various tensors we'll be
        manipulating.
    """
    with tf.Session() as sess:
        # Pre-trained Inception GraphDef shipped alongside this script.
        model_filename = os.path.join(
            'imagenet', 'classify_image_graph_def.pb')
        with gfile.FastGFile(model_filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            # Import the frozen graph and grab the tensors we feed/read.
            bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
                tf.import_graph_def(graph_def, name='', return_elements=[
                    BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
                    RESIZED_INPUT_TENSOR_NAME]))
    return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def prepare_request_params(
    request_params: Dict, model_id: Text, model_data: Dict
) -> Dict:
    """ reverse hash names and correct types of input params """
    typed_params = correct_types(request_params, model_data["columns_data"])
    if not model_data["hashed_indexes"]:
        return typed_params
    return reverse_hash_names(model_id, typed_params)
def get_new_data_NC():
    """Updates the global variable 'data' with new data"""
    # NOTE(review): this actually rebinds the global ``EN_NC_df`` (the
    # docstring's 'data' looks stale) by re-downloading the dashboard CSV.
    global EN_NC_df
    EN_NC_df = pd.read_csv('https://en2020.s3.amazonaws.com/ncar_dash.csv')
    # Mirror CountyName into a 'County' column, presumably the name expected
    # by downstream consumers -- confirm against callers.
    EN_NC_df['County'] = EN_NC_df.CountyName
def fib_demo():
    """Generate a Fibonacci sequence using a generator."""
    # ``generator`` is a module-level helper; get_sequence(20) presumably
    # yields the first 20 terms -- confirm with its definition.
    ret = generator.get_sequence(20)
    print(ret)
def help_full(path):
"""Health Evaluation and Linkage to Primary Care
The HELP study was a clinical trial for adult inpatients recruited from
a detoxification unit. Patients with no primary care physician were
randomized to receive a multidisciplinary assessment and a brief
motivational intervention or usual care, with the goal of linking them
to primary medical care.
A data frame with 1472 observations on the following variables.
- `ID` Subject ID
- `TIME` Interview time point
- `NUM_INTERVALS` Number of 6-month intervals from previous to
current interview
- `INT_TIME1` # of months from baseline to current interview
- `DAYS_SINCE_BL` # of days from baseline to current interview
- `INT_TIME2` # of months from previous to current interview
- `DAYS_SINCE_PREV` # of days from previous to current interview
- `PREV_TIME` Previous interview time
- `DEAD` a numeric vector
- `A1` Gender (1=Male, 2=Female)
- `A9` Years of education completed
- `A10` Marital Status (1=Married, 2=Remarried, 3=Widowed, 4=
Separated, 5=Divorced, 6=Never Married
- `A11A` Do you currently have a living mother? (0=No, 1= Yes
- `A11B` Do you currently have a living father? (0=No, 1=Yes
- `A11C` Do you currently have siblings? (0=No, 1=Yes
- `A11D` Do you currently have a partner (0=No, 1=Yes)
- `A11E` Do you currently have children? (0=No, 1=Yes)
- `A12B` Hollingshead categories (1=Major profess, 2= Lesser profess,
3=Minor profess, 4=Clerical/sales, 5=Skilled manual, 6=Semi-skilled,
7=Unskilled, 8= Homemaker, 9=No occupation)
- `A13` Usual employment pattern in last 6 months (1=Full time, 2=
Part time, 3=Student, 4=Unemployed, 5=Control envir)
- `A14A` Loved alone-last 6 mos (0=No, 1=Yes)
- `A14B` Lived w/a partner-last 6 mos (0=No, 1=Yes
- `A14C` Lived with parent(s)-last 6 mos (0=No, 1=Yes)
- `A14D` Lived w/children-last 6 mos (0=No, 1=Yes)
- `A14E` Lived w/other family-last 6 mos (0=No, 1=Yes
- `A14F` Lived w/friend(s)-last 6 mos (0=No, 1=Yes)
- `A14G` Lived w/other-last 6 mos (0=No, 1=Yes)
- `A14G_T` a factor with levels `1/2 WAY HOUSE` `3/4 HOUSE`
`ANCHOR INN` `ARMY` `ASSOCIATES` `BOARDERS`
`BOYFRIENDS MOM` `CORRECTIONAL FACILIT` `CRACK HOUSE`
`DEALER` `ENTRE FAMILIA` `FENWOOD` `GAVIN HSE`
`GIRLFRIENDS DAUGHTE` `GIRLFRIENDS SON` `GIRLFRIENDS CHILDREN`
`GIRLFRIENDS DAUGHTER` `GROUP HOME` `HALF-WAY HOUSE`
`HALFWAY HOUSE` `HALFWAY HOUSES` `HALFWAY HSE` `HOLDING UNIT`
`HOME BORDER` `HOMELESS` `HOMELESS SHELTER` `IN JAIL`
`IN PROGRAMS` `INCARCERATED` `JAIL` `JAIL HALFWAY HOUSE`
`JAIL, SHELTER` `JAIL, STREET` `JAIL/PROGRAM` `JAIL/SHELTER`
`JAILS` `LANDLADY` `LANDLORD` `LODGING HOUSE`
`MERIDIAN HOUSE` `NURSING HOME` `ON THE STREET`
`PARTNERS MOTHER` `PARTNERS CHILD` `PARTNERS CHILDREN`
`PRDGRAMS` `PRISON` `PROGRAM` `PROGRAM MTHP`
`PROGRAM ROOMMATES` `PROGRAM SOBER HOUSE` `PROGRAM-RESIDENTIAL`
`PROGRAM/HALFWAY HOUS` `PROGRAM/JAIL` `PROGRAM/SHELTER`
`PROGRAM/SHELTERS` `PROGRAMS` `PROGRAMS SUBSTANCE`
`PROGRAMS/SHELTER` `PROGRAMS/SHELTERS` `PROGRAMS/SHELTERS/DE`
`PROJECT SOAR` `RESIDENTIAL FACILITY` `RESIDENTIAL PROGRAM`
`ROOMING HOUSE` `ROOMING HOUSE (RELIG` `ROOMMATE` `ROOMMATES`
`ROOMMATES AT TRANSIT` `RYAN HOUSE` `SALVATION ARMY`
`SHELTER` `SHELTER/HALFWAY HSE` `SHELTER/HOTEL`
`SHELTER/PROGRAM` `SHELTERS` `SHELTERS/HOSPITALS`
`SHELTERS/JAIL` `SHELTERS/PROGRAMS` `SHELTERS/STREETS`
`SOBER HOUSE` `SOBER HOUSING` `SOUTH BAY JAIL` `STEPSON`
`STREET` `STREETS` `SUBSTANCE ABUSE TREA`
`TRANSITIONAL HOUSE` `VA SHELTER`
- `A15A` #nights in ovrnight shelter-last 6 mos
- `A15B` # nights on street-last 6 mos
- `A15C` #months in jail-last 6 mos
- `A16A` # months in ovrnight shelter-last 5 yrs
- `A16B` #moths on street-last 5 yrs
- `A16C` #months in jail-last 5 yrs
- `A17A` Received SSI-past 6 mos (0=No, 1=Yes)
- `A17B` Received SSDI-past 6 mos (0=No, 1=Yes)
- `A17C` Received AFDC-past 6 mos (0=No, 1=Yes)
- `A17D` Received EAEDC-past 6 mos (0=No, 1=Yes)
- `A17E` Received WIC-past 6 mos (0=No, 1=Yes)
- `A17F` Received unemployment benefits-past 6 mos (0=No, 1=Yes)
- `A17G` Received Workman's Comp-past 6 mos (0=No, 1=Yes)
- `A17H` Received Child Support-past 6 mos (0=No, 1=Yes)
- `A17I` Received other income-past 6 mos (0=No, 1=Yes)
- `A17I_T` a factor with levels `DISABLED VETERAN`
`EBT (FOOD STAMPS)` `EMERGENCY FOOD STAMP` `FOOD STAMP`
`FOOD STAMPS` `FOOD STAMPS/VETERAN` `FOOD STAMPS/VETERANS`
`INSURANCE SETTLEMENT` `PENSION CHECK` `SECTION 8`
`SERVICE CONNECTED DI` `SOCIAL SECURITY` `SSDI FOR SON`
`SURVIVORS BENEFITS` `TEMPORARY DISABILITY`
`VA BENEFITS-DISABILI` `VA COMPENSATION` `VA DISABILITY PENSIO`
`VETERAN BENEFITS` `VETERANS SERVICES` `VETERANS AFFAIRS`
- `A18` Most money made in any 1 year-last 5 yrs (1=<5000,
2=5000-10000, 3=11000-19000, 4=20000-29000, 5=30000-39000,
6=40000-49000, 7=50000+
- `B1` In general, how is your health (1=Excellent, 2=Very Good,
3=Good, 4=Fair, 5=Poor)
- `B2` Comp to 1 yr ago, how is your health now (1=Much better,
2=Somewhat better, 3=About the same, 4=Somewhat worse, 5=Much worse)
- `B3A` Does health limit you in vigorous activity (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3B` Does your health limit you in moderate activity (1=Limited a
lot, 2=Limited a little, 3=Not limited)
- `B3C` Does health limit you in lift/carry groceries (1=Limited a
lot, 2=Limited a little, 3=Not limited)
- `B3D` Hlth limit you in climb sev stair flights (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3E` Health limit you in climb 1 stair flight (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3F` Health limit you in bend/kneel/stoop (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3G` Does health limit you in walking >1 mile (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3H` Hlth limit you in walking sevrl blocks (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3I` Does health limit you in walking 1 block (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3J` Hlth limit you in bathing/dressing self (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B4A` Cut down wrk/act due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B4B` Accomplish less due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B4C` Lim wrk/act type due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B4D` Diff perf work due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B5A` Cut wrk/act time due to emot prbs-lst 4 wks (0=No, 1=Yes)
- `B5B` Accomplish ess due to emot probs-lst 4 wks (0=No, 1=Yes)
- `B5C` <carefl w/wrk/act due to em prb-lst 4 wks (0=No, 1=Yes)
- `B6` Ext phys/em intf w/norm soc act-lst 4 wk (1-Not al all,
2=Slightly, 3=Moderately, 4=Quite a bit, 5=Extremely)
- `B7` Amount of bodily pain-past 4 wks (1=None, 2=Very mild, 3=
Mild, 4=Moderate, 5= Severe, 6= Very severe)
- `B8` Amt pain interf with norm work-last 4 wks (1=Not at all, 2=A
little bit, 3=Moderately, 4=Quite a bit, 5=Extremely
- `B9A` Did you feel full of pep-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9B` Have you been nervous-past 4 wks (1=All of the time, 2=Most
of the time, 3 = Good bit of the time, 4=Some of the time, 5=A little
of time, 6=None of the time)
- `B9C` Felt nothing could cheer you-lst 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9D` Have you felt calm/peaceful-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9E` Did you have a lot of energy-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9F` Did you feel downhearted-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9G` Did you feel worn out-past 4 wks (1=All of the time, 2=Most
of the time, 3 = Good bit of the time, 4=Some of the time, 5=A little
of time, 6=None of the time)
- `B9H` Have you been a happy pers-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9I` Did you feel tired-past 4 wks (1=All of the time, 2=Most of
the time, 3 = Good bit of the time, 4=Some of the time, 5=A little of
time, 6=None of the time)
- `B10` Amyphys/em prb intf w/soc act-lst 4 wks (1All of the time,
2=Most of the time, 3=Some of the time, 4= A lttle of time, 5= Non of
the time)
- `B11A` I seem to get sick easier than oth peop (1=Definitely true,
2=Mostly True, 3=Don't know, 4=Mostly false, 5=Definitely false)
- `B11B` I am as healthy as anybody I know (1=Definitely true,
2=Mostly true, 3=Don't know, 4=Mostly false, 5=Definitely False)
- `B11C` I expect my health to get worse (1=Definitely true, 2=Mostly
true, 3=Don't know, 3=Mostly false, 5=Definitely false)
- `B11D` My health is excellent (1=Definitely true, 2=Mostly true,
3=Don't know, 4=Mostly false, 5=Definitely false)
- `C1A` Tolf by MD had seix, epil, convuls (0=No, 1=Yes)
- `C1B` Told by MD had asth, emphys, chr lung dis (0=No, 1=Yes)
- `C1C` Told by MD had MI (0=No, 1=Yes)
- `C1D` Told by MD had CHF (0=No, 1=Yes)
- `C1E` Told by MD had other heart dis (req med) (0=No, 1=Yes)
- `C1F` Told by MD had HBP (0=No, 1=Yes)
- `C1G` Told by MD had chronic liver disease (0=No, 1=Yes)
- `C1H` Told by MD had kidney failure (0=No, 1=Yes)
- `C1I` Told by MD had chronic art, osteoarth (0=No, 1=Yes)
- `C1J` Told by MD had peripheral neuropathy (0=No, 1=Yes)
- `C1K` Ever told by MD had cancer (0=No, 1=Yes)
- `C1L` Ever told by MD had diabetes (0=No, 1=Yes)
- `C1M` Ever told by MD had stroke (0=No, 1=Yes)
- `C2A1` Have you ever had skin infections (0=No, 1=Yes)
- `C2A2` Have you had skin infections-past 6 mos (0=No, 1=Yes)
- `C2B1` Have you ever had pneumonia (0=No, 1=Yes)
- `C2B2` Have you had pneumonia-past 6 mos (0=No, 1=Yes)
- `C2C1` Have you ever had septic arthritis (0=No, 1=Yes)
- `C2C2` Have you had septic arthritis-past 6 mos (0=No, 1=Yes)
- `C2D1` Have you ever had TB (0=No, 1=Yes)
- `C2D2` Have you had TB-last 6 mos (0=No, 1=Yes)
- `C2E1` Have you ever had endocarditis (0=No, 1=Yes)
- `C2E2` Have you had endocarditis-past 6 mos (0=No, 1=Yes)
- `C2F1` Have you ever had an ulcer (0=No, 1=Yes)
- `C2F2` Have you had an ulcer-past 6 mos (0=No, 1=Yes)
- `C2G1` Have you ever had pancreatitis (0=No, 1=Yes)
- `C2G2` Have you had pancreatitis-past 6 mos (0=No, 1=Yes)
- `C2H1` Ever had abdom pain req overnt hosp stay (0=No, 1=Yes)
- `C2H2` Abdom pain req ovrnt hosp stay-lst 6 mos (0=No, 1=Yes)
- `C2I1` Have you ever vomited blood (0=No, 1=Yes)
- `C2I2` Have you vomited blood-past 6 mos (0=No, 1=Yes)
- `C2J1` Have you ever had hepatitis (0=No, 1=Yes)
- `C2J2` Have you had hepatitis-past 6 mos (0=No, 1=Yes)
- `C2K1` Ever had blood clots in legs/lungs (0=No, 1=Yes)
- `C2K2` Blood clots in legs/lungs-past 6 mos (0=No, 1=Yes)
- `C2L1` Have you ever had osteomyelitis (0=No, 1=Yes)
- `C2L2` Have you had osteomyelitis-past 6 mos (0=No, 1=Yes)
- `C2M1` Chst pain using cocaine req ER/hosp (0=No, 1=Yes)
- `C2M2` Chst pain using coc req ER/hosp-lst 6 mos (0=No, 1=Yes)
- `C2N1` Have you ever had jaundice (0=No, 1=Yes)
- `C2N2` Have you had jaundice-past 6 mos (0=No, 1=Yes)
- `C2O1` Lower back pain > 3mos req med attn (0=No, 1=Yes)
- `C2O2` Lwr bck pain >3mos req med attn-last 6 mos (0=No, 1=Yes)
- `C2P1` Ever had seizures or convulsions (0=No, 1=Yes)
- `C2P2` Had seizures or convulsions-past 6 mos (0=No, 1=Yes)
- `C2Q1` Ever had drug/alc overdose req ER attn (0=No, 1=Yes)
- `C2Q2` Drug/alc overdose req ER attn (0=No, 1=Yes)
- `C2R1` Have you ever had a gunshot wound (0=No, 1=Yes)
- `C2R2` Had a gunshot wound-past 6 mos (0=No, 1=Yes)
- `C2S1` Have you ever had a stab wound (0=No, 1=Yes)
- `C2S2` Have you had a stab wound-past 6 mos (0=No, 1=Yes)
- `C2T1` Ever had accid/falls req med attn (0=No, 1=Yes)
- `C2T2` Had accid/falls req med attn-past 6 mos (0=No, 1=Yes)
- `C2U1` Ever had fract/disloc to bones/joints (0=No, 1=Yes)
- `C2U2` Fract/disloc to bones/joints-past 6 mos (0=No, 1=Yes)
- `C2V1` Ever had injury from traffic accident (0=No, 1=Yes)
- `C2V2` Had injury from traffic accid-past 6 mos (0=No, 1=Yes)
- `C2W1` Have you ever had a head injury (0=No, 1=Yes)
- `C2W2` Have you had a head injury-past 6 mos (0=No, 1=Yes)
- `C3A1` Have you ever had syphilis (0=No, 1=Yes)
- `C3A2` # times had syphilis
- `C3A3` Have you had syphilis in last 6 mos (0=No, 1=Yes)
- `C3B1` Have you ever had gonorrhea (0=No, 1=Yes)
- `C3B2` # times had gonorrhea
- `C3B3` Have you had gonorrhea in last 6 mos (0=No, 1=Yes)
- `C3C1` Have you ever had chlamydia (0=No, 1=Yes)
- `C3C2` # of times had Chlamydia
- `C3C3` Have you had chlamydia in last 6 mos (0=No, 1=Yes)
- `C3D` Have you ever had genital warts (0=No, 1=Yes)
- `C3E` Have you ever had genital herpes (0=No, 1=Yes)
- `C3F1` Have you ever had other STD's (not HIV) (0=No, 1=Yes)
- `C3F2` # of times had other STD's (not HIV)
- `C3F3` Had other STD's (not HIV)-last 6 mos (0=No, 1=Yes)
- `C3F_T` a factor with levels `7` `CRABS`
`CRABS - TRICHONOMIS` `CRABS, HEP B` `DOESNT KNOW NAME`
`HAS HAD ALL 3 ABC` `HEP B` `HEP B, TRICAMONAS` `HEP. B`
`HEPATITIS B` `HEPATITS B` `TRICHAMONAS VAGINALA`
`TRICHAMONIS` `TRICHOMONAS` `TRICHOMONIASIS` `TRICHOMONIS`
`TRICHOMONIS VAGINITI` `TRICHOMORAS` `TRICHONOMIS`
- `C3G1` Have you ever been tested for HIV/AIDS (0=No, 1=Yes)
- `C3G2` # times tested for HIV/AIDS
- `C3G3` Have you been tested for HIV/AIDS-lst 6 mos (0=No, 1=Yes)
- `C3G4` What was the result of last test (1=Positive, 2=Negative,
3=Refued, 4=Never got result, 5=Inconclusive
- `C3H1` Have you ever had PID (0=No, 1=Yes)
- `C3H2` # of times had PID
- `C3H3` Have you had PID in last 6 mos (0=No, 1=Yes)
- `C3I` Have you ever had a Pap smear (0=No, 1=Yes)
- `C3J` Have you had a Pap smear in last 3 years (0=No, 1=Yes)
- `C3K` Are you pregnant (0=No, 1=Yes)
- `C3K_M` How many mos pregnant
- `D1` $ of times hospitalized for med probs
- `D2` Take prescr med regularly for phys prob (0=No, 1=Yes)
- `D3` # days had med probs-30 days bef detox
- `D4` How bother by med prob-30days bef detox (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `D5` How import is trtmnt for these med probs (0=Not at all,
1=Slightly, 2= Moderately, 3= Considerably, 4= Extremely
- `E2A` Detox prog for alc or drug prob-lst 6 mos (0=No, 1=Yes)
- `E2B` # times entered a detox prog-lst 6 mos
- `E2C` # nights ovrnight in detox prg-lst 6 mos
- `E3A` Holding unit for drug/alc prob-lst 6 mos (0=No, 1=Yes)
- `E3B` # times in holding unity=lst 6 mos
- `E3C` # total nights in holding unit-lst 6 mos
- `E4A` In halfway hse/resid facil-lst 6 mos (0=No, 1=Yes)
- `E4B` # times in hlfwy hse/res facil-lst 6 mos
- `E4C` Ttl nites in hlfwy hse/res fac-last 6 mos
- `E5A` In day trtmt prg for alcohol/drug-lst 6 mos (0=No, 1=Yes)
- `E5B` Total # days in day trtmt prg-lst 6 mos
- `E6` In methadone maintenance prg-lst 6 mos (0=No, 1=Yes)
- `E7A` Visit outpt prg subst ab couns-lst 6 mos (0=No, 1=Yes)
- `E7B` # visits outpt prg subst ab couns-lst 6 mos
- `E8A1` Saw MD/H care wkr re alcohol/drugs-lst 6 mos (0=No, 1=Yes)
- `E8A2` Saw Prst/Min/Rabbi re alcohol/drugs-lst 6 mos (0=No, 1=Yes)
- `E8A3` Employ Asst Prg for alcohol/drug prb-lst 6 mos (0=No, 1=Yes)
- `E8A4` Oth source cnsl for alcohol/drug prb-lst 6 mos (0=No, 1=Yes)
- `E9A` AA/NA/slf-hlp for drug/alcohol/emot-lst 6 mos (0=No, 1=Yes)
- `E9B` How often attend AA/NA/slf-hlp-lst 6 mos (1=Daily, 2=2-3
Times/week, 3=Weekly, 4=Every 2 weeks, 5=Once/month
- `E10A` have you been to med clinic-lst 6 mos (0=No, 1=Yes)
- `E10B1` # x visit ment hlth clin/prof-lst 6 mos
- `E10B2` # x visited med clin/priv MD-lst 6 mos
- `E10C19` Visited private MD-last 6 mos (0=No, 1=Yes)
- `E11A` Did you stay ovrnite/+ in hosp-lst 6 mos (0=No, 1=Yes)
- `E11B` # times ovrnight/+ in hosp-last 6 mos
- `E11C` Total # nights in hosp-last 6 mos
- `E12A` Visited Hosp ER for med care-past 6 mos (0=No, 1=Yes)
- `E12B` # times visited hosp ER-last 6 mos
- `E13` Tlt # visits to MDs-lst 2 wks bef detox
- `E14A` Recd trtmt from acupuncturist-last 6 mos (0=No, 1=Yes)
- `E14B` Recd trtmt from chiropractor-last 6 mos (0=No, 1=Yes)
- `E14C` Trtd by hol/herb/hom med prac-lst 6 mos (0=No, 1=Yes)
- `E14D` Recd trtmt from spirit healer-lst 6 mos (0=No, 1=Yes)
- `E14E` Have you had biofeedback-last 6 mos (0=No, 1=Yes)
- `E14F` Have you underwent hypnosis-lst 6 mos (0=No, 1=Yes)
- `E14G` Received other treatment-last 6 mos (0=No, 1=Yes)
- `E15A` Tried to get subst ab services-lst 6 mos (0=No, 1=Yes)
- `E15B` Always able to get subst ab servies (0=No, 1=Yes)
- `E15C1` I could not pay for services (0=No, 1=Yes)
- `E15C2` I did not know where to go for help (0=No, 1=Yes)
- `E15C3` Couldn't get to services due to transp prob (0=No, 1=Yes)
- `E15C4` The offie/clinic hrs were inconvenient (0=No, 1=Yes)
- `E15C5` Didn't speak/understnd Englsh well enough (0=No, 1=Yes)
- `E15C6` Afraid other might find out about prob (0=No, 1=Yes)
- `E15C7` My substance abuse interfered (0=No, 1=Yes)
- `E15C8` Didn't have someone to watch my children (0=No, 1=Yes)
- `E15C9` I did not want to lose my job (0=No, 1=Yes)
- `E15C10` My insurance didn't cover services (0=No, 1=Yes)
- `E15C11` There were no beds available at the prog (0=No, 1=Yes)
- `E15C12` Other reason not get sub ab services (0=No, 1=Yes)
- `E16A1` I cannot pay for services (0=No, 1=Yes)
- `E16A2` I am not eligible for free care (0=No, 1=Yes)
- `E16A3` I do not know where to go (0=No, 1=Yes)
- `E16A4` Can't get to services due to trans prob (0=No, 1=Yes)
- `E16A5` a numeric vectorOffice/clinic hours are inconvenient (0=No,
1=Yes)
- `E16A6` I don't speak/understnd enough English (0=No, 1=Yes)
- `E16A7` Afraid othrs find out about my hlth prob (0=No, 1=Yes)
- `E16A8` My substance abuse interferes (0=No, 1=Yes)
- `E16A9` I don't have someone to watch my childrn (0=No, 1=Yes)
- `E16A10` I do not want to lose my job (0=No, 1=Yes)
- `E16A11` My insurance doesn't cover charges (0=No, 1=Yes)
- `E16A12` I do not feel I need a regular MD (0=No, 1=Yes)
- `E16A13` Other reasons don't have regular MD (0=No, 1=Yes)
- `E18A` I could not pay for services (0=No, 1=Yes)
- `E18B` I did not know where to go for help (0=No, 1=Yes)
- `E18C` Couldn't get to services due to transp prob (0=No, 1=Yes)
- `E18D` The office/clinic hrs were inconvenient (0=No, 1=Yes)
- `E18F` Afraid others might find out about prob (0=No, 1=Yes)
- `E18G` My substance abuse interfered (0=No, 1=Yes)
- `E18H` Didn't have someone to watch my children (0=No, 1=Yes)
- `E18I` I did not want to lose my job (0=No, 1=Yes)
- `E18J` My insurance didn't cover services (0=No, 1=Yes)
- `E18K` There were no beds available at the prog (0=No, 1=Yes)
- `E18L` I do not need substance abuse services (0=No, 1=Yes)
- `E18M` Other reason not get sub ab services (0=No, 1=Yes)
- `F1A` Bothered by thngs not gen boethered by (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1B` My appretite was poor (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1C` Couldn't shake blues evn w/fam+frnds hlp (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1D` Felt I was just as good as other people (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1E` Had trouble keeping mind on what doing (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1F` I felt depressed (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1G` I felt everthing I did was an effort (0=Rarely/never, 1=Some
of the time, 2=Occas/moderately, 3=Most of the time)
- `F1H` I felt hopeful about the future (0=Rarely/never, 1=Some of
the time, 2=Occas/moderately, 3=Most of the time)
- `F1I` I thought my life had been a failure (0=Rarely/never, 1=Some
of the time, 2=Occas/moderately, 3=Most of the time)
- `F1J` I felt fearful (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1K` My sleep was restless (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1L` I was happy (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1M` I talked less than usual (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1N` I felt lonely (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1O` People were unfriendly (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1P` I enoyed life (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1Q` I had crying spells (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1R` I felt sad (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1S` I felt that people dislike me (0=Rarely/never, 1=Some of the
time, 2=Occas/moderately, 3=Most of the time)
- `F1T` I could not get going (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `G1A` Diff contr viol beh for sig time per evr (0=No, 1=Yes)
- `G1A_30` Diff contr viol beh-sig per lst 30 days (0=No, 1=Yes)
- `G1B` Ever had thoughts of suicide (0=No, 1=Yes)
- `G1B_30` Had thoughts of suicide-lst 30 days (0=No, 1=Yes)
- `G1C` Attempted suicide ever (0=No, 1=Yes)
- `G1C_30` Attempted suicide-lst 30 days (0=No, 1=Yes)
- `G1D` Prescr med for pst/emot prob ever (0=No, 1=Yes)
- `G1D_30` Prescr med for psy/emot prob-lst 30 days (0=No, 1=Yes)
- `H1_30` # days in past 30 bef detox used alcohol
- `H1_LT` # yrs regularly used alcohol
- `H1_RT` Route of administration use alcohol (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H2_30` #days in 3- bef detox use alc to intox
- `H2_LT` # yrs regularly used alcohol to intox
- `H2_RT` Route of admin use alcohol to intox (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H3_30` # days in past 30 bef detox used heroin
- `H3_LT` # yrs regularly used heroin
- `H3_RT` Route of administration of heroin (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H4_30` # days used methadone-lst 30 bef detox
- `H4_LT` # yrs regularly used methadone
- `H4_RT` Route of administration of methadone (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H5_30` # days used opi/analg-lst 30 bef detox
- `H5_LT` # yrs regularly used oth opiates/analg
- `H5_RT` Route of admin of oth opiates/analg (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H6_30` # days in past 30 bef detox used barbit
- `H6_LT` # yrs regularly used barbiturates
- `H6_RT` Route of admin of barbiturates (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H7_30` # days used sed/hyp/trnq-lst 30 bef det
- `H7_LT` # yrs regularly used sed/hyp/trnq
- `H7_RT` Route of admin of sed/hyp/trnq (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H8_30` # days in lst 30 bef detox used cocaine
- `H8_LT` # yrs regularly used cocaine
- `H8_RT` Route of admin of cocaine (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H9_30` # days in lst 30 bef detox used amphet
- `H9_LT` # yrs regularly used amphetamines
- `H9_RT` Route of admin of amphetamines (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H10_30` # days in lst 30 bef detox used cannabis
- `H10_LT` # yrs regularly used cannabis
- `H10_RT` Route of admin of cannabis (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H11_30` # days in lst 30 bef detox used halluc
- `H11_LT` # yrs regularly used hallucinogens
- `H11_RT` Route of admin of hallucinogens (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H12_30` # days in lst 30 bef detox used inhalant
- `H12_LT` # yrs regularly used inhalants
- `H12_RT` Route of admin of inhalants (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H13_30` # days used >1 sub/day-lst 30 bef detox
- `H13_LT` # yrs regularly used >1 subst/day
- `H13_RT` Route of admin of >1 subst/day (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H14` Accord to interview w/c subst is main prob (0=No problem,
1=Alcohol, 2=Alcool to intox, 3=Heroin 4=Methadone, 5=Oth
opiate/analg, 6=Barbituates, 7=Sed/hyp/tranq, 8=Cocaine,
9=Amphetamines, 10=Marij/cannabis
- `H15A` # times had alchol DTs
- `H15B` # times overdosed on drugs
- `H16A` $ spent on alc-lst 30 days bef detox
- `H16B` $ spent on drugs-lst 30 days bef detox
- `H17A` # days had alc prob-lst 30 days bef det
- `H17B` # days had drug prob-lst 30 days bef det
- `H18A` How troubled by alc probs-lst 30 days (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `H18B` How troubled by drug probs-lst 30 days (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `H19A` How import is trtmnt for alc probs now (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `H19B` How importy is trtmnt for drug probs now (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `I1` Avg # drinks in lst 30 days bef detox
- `I2` Most drank any 1 day in lst 30 bef detox
- `I3` On days used heroin, avg # bags used
- `I4` Most bgs heroin use any 1 day-30 bef det
- `I5` Avg $ amt of heorin used per day
- `I6A` On days used cocaine, avg # bags used
- `I6B` On days used cocaine, avg # rocks used
- `I7A` Mst bgs cocaine use any 1 day-30 bef det
- `I7B` Mst rcks cocaine use any 1 day-30 bef det
- `I8` Avg $ amt of cocaine used per day
- `J1` Evr don't stop using cocaine when should (0=No, 1=Yes)
- `J2` Ever tried to cut down on cocaine (0=No, 1=Yes)
- `J3` Does cocaine take up a lot of your time (0=No, 1=Yes)
- `J4` Need use > cocaine to get some feeling (0=No, 1=Yes)
- `J5A` Get phys sick when stop using cocaine (0=No, 1=Yes)
- `J5B` Ever use cocaine to prevent getting sick (0=No, 1=Yes)
- `J6` Ever don't stop using heroin when should (0=No, 1=Yes)
- `J7` Ever tried to cut down on heroin (0=No, 1=Yes)
- `J8` Does heroin take up a lot of your time (0=No, 1=Yes)
- `J9` Need use > heroin to get some feeling (0=No, 1=Yes)
- `J10A` Get phys sick when stop using heroin (0=No, 1=Yes)
- `J10B` Ever use heroin to prevent getting sick (0=No, 1=Yes)
- `K1` Do you currently smoke cigarettes (1=Yes-every day, 2=Yes-some
days, 3=No-former smoker, 4=No-never>100 cigs
- `K2` Avg # cigarettes smoked per day
- `K3` Considering quitting cigs w/in next 6 mo (0=No, 1=Yes)
- `L1` How often drink last time drank (1=To get high/less, 2=To get
drunk, 3=To pass out)
- `L2` Often have hangovrs Sun or Mon mornings (0=No, 1=Yes)
- `L3` Have you had the shakes when sobering (0=No, 1=Sometimes,
2=Alm evry time drink)
- `L4` Do you get phys sick as reslt of drinking (0=No, 1=Sometimes,
2=Alm evry time drink)
- `L5` have you had the DTs (0=No, 1=Once, 2=Several times
- `L6` When drink do you stumble/stagger/weave (0=No, 1=Sometimes,
2=Often)
- `L7` D/t drinkng felt overly hot/sweaty (0=No, 1=Once, 2=Several
times)
- `L8` As result of drinkng saw thngs not there (0=No, 1=Once,
2=Several times)
- `L9` Panic because fear not have drink if need it (0=No, 1=Yes)
- `L10` Have had blkouts as result of drinkng (0=No, never,
1=Sometimes, 2=Often, 3=Alm evry time drink)
- `L11` Do you carry bottle or keep close by (0=No, 1=Some of the
time, 2=Most of the time)
- `L12` After abstin end up drink heavily again (0=No, 1=Sometimes,
2=Almost evry time)
- `L13` Passed out due to drinking-lst 12 mos (0=No, 1=Once, 2=More
than once)
- `L14` Had convuls following period of drinkng (0=No, 1=Once,
2=Several times)
- `L15` Do you drink throughout the day (0=No, 1=Yes)
- `L16` Aftr drinkng heavily was thinkng unclear (0=No, 1=Yes, few
hrs, 2=Yes,1-2 days, 3=Yes, many days)
- `L17` D/t drinkng felt heart beat rapidly (0=No, 1=Once, 2=Several
times)
- `L18` Do you constntly think about drinkng/alc (0=No, 1=Yes)
- `L19` D/t drinkng heard things not there (0=No, 1=Once, 2= Several
times)
- `L20` Had weird/fright sensations when drinkng (0=No, 1=Once or
twice, 2=Often)
- `L21` When drinkng felt things rawl not there (0=No, 1=Once,
2=Several times)
- `L22` With respect to blackouts (0=Never had one, 1=Had for <1hr,
2=Had several hrs, 3=Had for day/+)
- `L23` Ever tried to cut down on drinking & failed (0=No, 1=Once,
2=Several times)
- `L24` Do you gulp drinks (0=No, 1=Yes)
- `L25` After taking 1 or 2 drinks can you stop (0=No, 1=Yes)
- `M1` Had hangover/felt bad aftr using alcohol/drugs (0=No, 1=Yes)
- `M2` Felt bad about self because of alcohol/drug use (0=No, 1=Yes)
- `M3` Missed days wrk/sch because of alcohol/drug use (0=No, 1=Yes)
- `M4` Fam/frinds worry/compl about alcohol/drug use (0=No, 1=Yes)
- `M5` I have enjoyed drinking/using drugs (0=No, 1=Yes)
- `M6` Qual of work suffered because of alcohol/drug use (0=No,
1=Yes)
- `M7` Parenting ability harmed by alcohol/drug use (0=No, 1=Yes)
- `M8` Trouble sleeping/nightmares aftr alcohol/drugs (0=No, 1=Yes)
- `M9` Driven motor veh while undr inf alcohol/drugs (0=No, 1=Yes)
- `M10` Using alcohol/1 drug caused > use othr drugs (0=No, 1=Yes)
- `M11` I have been sick/vomited aft alcohol/drug use (0=No, 1=Yes)
- `M12` I have been unhappy because of alcohol/drug use (0=No, 1=Yes)
- `M13` Lost weight/eaten poorly due to alcohol/drug use (0=No,
1=Yes)
- `M14` Fail to do what expected due to alcohol/drug use (0=No,
1=Yes)
- `M15` Using alcohol/drugs has helped me to relax (0=No, 1=Yes)
- `M16` Felt guilt/ashamed because of my alc drug use (0=No, 1=Yes)
- `M17` Said/done emarras thngs when on alcohol/drug (0=No, 1=Yes)
- `M18` Personality changed for worse on alcohol/drug (0=No, 1=Yes)
- `M19` Taken foolish risk when using alcohol/drugs (0=No, 1=Yes)
- `M20` Gotten into trouble because of alcohol/drug use (0=No, 1=Yes)
- `M21` Said cruel things while using alcohol/drugs (0=No, 1=Yes)
- `M22` Done impuls thngs regret due to alcohol/drug use (0=No,
1=Yes)
- `M23` Gotten in phys fights when use alcohol/drugs (0=No, 1=Yes)
- `M24` My phys health was harmed by alcohol/drug use (0=No, 1=Yes)
- `M25` Using alcohol/drug helped me have more + outlook (0=No,
1=Yes)
- `M26` I have had money probs because of my alcohol/drug use (0=No,
1=Yes)
- `M27` My love relat harmed due to my alcohol/drug use (0=No, 1=Yes)
- `M28` Smoked tobacco more when using alcohol/drugs (0=No, 1=Yes)
- `M29` <y phys appearance harmed by alcohol/drug use (0=No, 1=Yes)
- `M30` My family hurt because of my alc drug use (0=No, 1=Yes)
- `M31` Close relationsp damaged due to alcohol/drug use (0=No,
1=Yes)
- `M32` Spent time in jail because of my alcohol/drug use (0=No,
1=Yes)
- `M33` My sex life suffered due to my alcohol/drug use (0=No, 1=Yes)
- `M34` Lost interst in activity due to my alcohol/drug use (0=No,
1=Yes)
- `M35` Soc life> enjoyable when using alcohol/drug (0=No, 1=Yes)
- `M36` Spirit/moral life harmed by alcohol/drug use (0=No, 1=Yes)
- `M37` Not had kind life want due to alcohol/drug use (0=No, 1=Yes)
- `M38` My alcohol/drug use in way of personal growth (0=No, 1=Yes)
- `M39` My alcohol/drug use damaged soc life/reputat (0=No, 1=Yes)
- `M40` Spent/lost too much $ because alcohol/drug use (0=No, 1=Yes)
- `M41` Arrested for DUI of alc or oth drugs (0=No, 1=Yes)
- `M42` Arrested for offenses rel to alcohol/drug use (0=No, 1=Yes)
- `M43` Lost marriage/love relat due to alcohol/drug use (0=No,
1=Yes)
- `M44` Susp/fired/left job/sch due to alcohol/drug use (0=No, 1=Yes)
- `M45` I used drugs moderately w/o having probs (0=No, 1=Yes)
- `M46` I have lost a friend due to my alcohol/drug use (0=No, 1=Yes)
- `M47` Had an accident while using alcohol/drugs (0=No, 1=Yes)
- `M48` Phys hurt/inj/burned when using alcohol/drugs (0=No, 1=Yes)
- `M49` I injured someone while using alcohol/drugs (0=No, 1=Yes)
- `M50` Damaged things/prop when using alcohol/drugs (0=No, 1=Yes)
- `N1A` My friends give me the moral support I need (0=No, 1=Yes)
- `N1B` Most people closer to friends than I am (0=No, 1=Yes)
- `N1C` My friends enjoy hearing what I think (0=No, 1=Yes)
- `N1D` I rely on my friends for emot support (0=No, 1=Yes)
- `N1E` Friend go to when down w/o feel funny later (0=No, 1=Yes)
- `N1F` Frnds and I open re what thnk about things (0=No, 1=Yes)
- `N1G` My friends sensitive to my pers needs (0=No, 1=Yes)
- `N1H` My friends good at helping me solve probs (0=No, 1=Yes)
- `N1I` have deep sharing relat w/ a # of frnds (0=No, 1=Yes)
- `N1J` When confide in frnds makes me uncomfort (0=No, 1=Yes)
- `N1K` My friends seek me out for companionship (0=No, 1=Yes)
- `N1L` Not have as int relat w/frnds as others (0=No, 1=Yes)
- `N1M` Recent good idea how to do somethng frm frnd (0=No, 1=Yes)
- `N1N` I wish my friends were much different (0=No, 1=Yes)
- `N2A` My family gives me the moral support I need (0=No, 1=Yes)
- `N2B` Good ideas of how do/make thngs from fam (0=No, 1=Yes)
- `N2C` Most peop closer to their fam than I am (0=No, 1=Yes)
- `N2D` When confide make close fam membs uncomf (0=No, 1=Yes)
- `N2E` My fam enjoys hearing about what I think (0=No, 1=Yes)
- `N2F` Membs of my fam share many of my intrsts (0=No, 1=Yes)
- `N2G` I rely on my fam for emot support (0=No, 1=Yes)
- `N2H` Fam memb go to when down w/o feel funny (0=No, 1=Yes)
- `N2I` Fam and I open about what thnk about thngs (0=No, 1=Yes)
- `N2J` My fam is sensitive to my personal needs (0=No, 1=Yes)
- `N2K` Fam memb good at helping me solve probs (0=No, 1=Yes)
- `N2L` Have deep sharing relat w/# of fam membs (0=No, 1=Yes)
- `N2M` Makes me uncomf to confide in fam membs (0=No, 1=Yes)
- `N2N` I wish my family were much different (0=No, 1=Yes)
- `O1A` # people spend tx w/who drink alc (1=None, 2= A few, 3=About
half, 4= Most, 5=All)
- `O1B` # people spend tx w/who are heavy drinkrs (1=None, 2= A few,
3=About half, 4= Most, 5=All)
- `O1C` # people spend tx w/who use drugs (1=None, 2= A few, 3=About
half, 4= Most, 5=All)
- `O1D` # peop spend tx w/who supprt your abstin (1=None, 2= A few,
3=About half, 4= Most, 5=All)
- `O2` Does live-in part/spouse drink/use drugs (0=No, 1=Yes, 2=N/A)
- `P1A` Phys abuse/assaul by fam memb/pers know (0=No, 1=Yes, 7=Not
sure)
- `P1B` Age first phys assaulted by pers know
- `P1C` Phys assaulted by pers know-last 6 mos (0=No, 1=Yes)
- `P2A` Phys abuse/assaul by stranger (0=No, 1=Yes, 7=Not sure)
- `P2B` Age first phys assaulted by stranger
- `P2C` Phys assaulted by stranger-last 6 mos (0=No, 1=Yes)
- `P3` Using drugs/alc when phys assaulted (1=Don't know, 2=Never,
3=Some cases, 4=Most cases, 5=All cases, 9=Never assaulted)
- `P4` Pers who phys assault you using alcohol/drugs (1=Don't know,
2=Never, 3=Some cases, 4=Most cases, 5=All cases, 9=Never assaulted)
- `P5A` Sex abuse/assual by fam memb/pers know (0=No, 1= Yes, 7=Not
sure)
- `P5B` Age first sex assaulted by pers know
- `P5C` Sex assaulted by pers know-last 6 mos (0=No, 1=Yes)
- `P6A` Sex abuse/assaul by stranger (0=No, 1=Yes, 7=Not sure)
- `P6B` Age first sex assaulted by stranger
- `P6C` Sex assaulted by stranger-last 6 mos (0=No, 1=Yes)
- `P7` Using drugs/alc when sex assaulted (1=Don't know, 2=Never,
3=Some cases, 4=Most cases, 5=All cases, 9=Never assaulted)
- `P8` Person who sex assaulted you using alcohol/drugs (1=Don't
know, 2=Never, 3=Some cases, 4=Most cases, 5=All cases, 9=Never
assaulted)
- `Q1A` Have you ever injected drugs (0=No, 1=Yes)
- `Q1B` Have you injected drugs-lst 6 mos (0=No, 1=Yes)
- `Q2` Have you shared needles/works-last 6 mos (0=No/Not shot up,
3=Yes)
- `Q3` # people shared needles w/past 6 mos (0=No/Not shot up, 1=1
other person, 2=2-3 diff people, 3=4/+ diff people)
- `Q4` How often been to shoot gall/hse-lst 6 mos (0=Never, 1=Few
times or less, 2= Few times/month, 3= Once or more/week)
- `Q5` How often been to crack house-last 6 mos (0=Never, 1=Few times
or less, 2=Few times/month, 3=Once or more/week)
- `Q6` How often shared rinse-water-last 6 mos (0=Nevr/Not shot up,
1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q7` How often shared a cooker-last 6 mos (0=Nevr/Not shot up,
1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q8` How often shared a cotton-last 6 mos (0=Nevr/Not shot up,
1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q9` How often use syringe to div drugs-lst 6 mos (0=Nevr/Not shot
up, 1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q10` How would you describe yourself (0=Straight, 1=Gay/bisexual)
- `Q11` # men had sex w/in past 6 months (0=0 men, 1=1 man, 2=2-3
men, 3=4+ men
- `Q12` # women had sex w/in past 6 months (0=0 women, 1=1woman,
2=2-3 women, 3=4+ women
- `Q13` # times had sex In past 6 mos (0=Never, 1=Few times or less,
2=Few times/month, 3=Once or more/week)
- `Q14` How often had sex to get drugs-last 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q15` How often given drugs to have sex-lst 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q16` How often were you paid for sex-lst 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q17` How often you pay pers for sex-lst 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q18` How often use condomes during sex=lst 6 mos (0=No sex/always,
1=Most of the time, 2=Some of the time, 3=None of the time)
- `Q19` Condoms are too much of a hassle to use (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `Q20` Safer sex is always your responsibility (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `R1A` I really want to change my alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1B` Sometimes I wonder if I'm an alcohol/addict (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1C` If I don't chng alcohol/drug probs will worsen (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1D` I started making changes in alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1E` Was using too much but managed to change (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1F` I wonder if my alcohol/drug use hurting othrs (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1G` I am a prob drinker or have drug prob (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `R1H` Already doing thngs to chnge alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1I` have changed use-trying to not slip back (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1J` I have a serious problem w/ alcohol/drugs (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1K` I wonder if I'm in contrl of alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1L` My alcohol/drug use is causing a lot of harm (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1M` Actively cutting down/stopping alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1N` Want help to not go back to alcohol/drugs (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1O` I know that I have an alcohol/drug problem (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1P` I wonder if I use alcohol/drugs too much (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1Q` I am an alcoholic or drug addict (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `R1R` I am working hard to change alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1S` Some changes-want help from going back (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `S1A` At interview pt obviously depressed/withdrawn (0=No, 1=Yes)
- `S1B` at interview pt obviously hostile (0=No, 1=Yes)
- `S1C` At interview pt obviously anx/nervous (0=No, 1=Yes)
- `S1D` Trouble w/real tst/thght dis/par at interview (0=No, 1=Yes)
- `S1E` At interview pt trbl w/ compr/concen/rememb (0=No, 1=Yes)
- `S1F` At interview pt had suicidal thoughts (0=No, 1=Yes)
- `T1` Have used alc since leaving River St. (0=No, 1=Yes)
- `T1B` # days in row continued to drink
- `T1C` Longest period abstain-lst 6 mos (alc)
- `T2` Have used heroin since leaving River St (0=No, 1=Yes)
- `T2B` # days in row continued to use heroin
- `T2C` Longest period abstain-lst 6 mos (heroin)
- `T3` Have used cocaine since leaving River St (0=No, 1=Yes)
- `T3B` # days in row continued to use cocaine
- `T3C` Lngest period abstain-lst 6 mos (cocaine)
- `U1` It is important to have a regular MD (1=Strongly agree,
2=Agree, 3=Uncertain, 4=Disagree, 5=Strongly Disagree)
- `U2A` I cannot pay for services (0=No, 1=Yes)
- `U2B` I am not eligible for free care (0=No, 1=Yes)
- `U2C` I do not know where to go (0=No, 1=Yes)
- `U2D` Can't get services due to transport probs (0=No, 1=Yes)
- `U2E` Office/clinic hours are inconvenient (0=No, 1=Yes)
- `U2F` I do not speak/understand English well (0=No, 1=Yes)
- `U2G` Afraid others discover hlth prb I have (0=No, 1=Yes)
- `U2H` My substance abuse interferes (0=No, 1=Yes)
- `U2I` I do not have a babysitter (0=No, 1=Yes)
- `U2J` I do not want to lose my job (0=No, 1=Yes)
- `U2K` My insurance does not cover services (0=No, 1=Yes)
- `U2L` Medical care is not important to me (0=No, 1=Yes)
- `U2M` I do not have time (0=No, 1=Yes)
- `U2N` Med staff do not treat me with respect (0=No, 1=Yes)
- `U2O` I do not trust my doctors or nurses (0=No, 1=Yes)
- `U2P` Often been unsatisfied w/my med care (0=No, 1=Yes)
- `U2Q` Other reason hard to get regular med care (0=No, 1=Yes)
- `U2Q_T` a factor with many levels
- `U2R` a factor with levels `7` `A` `B` `C` `D` `E`
`F` `G` `H` `I` `J` `K` `L` `M` `N` `O` `P`
`Q`
- `U3A` Has MD evr talked to you about drug use (0=No, 1=Yes)
- `U3B` Has MD evr talked to you about alc use (0=No, 1=Yes)
- `U4` Is there an MD you consider your regular MD (0=No, 1=Yes)
- `U5` Have you seen any MDs in last 6 mos (0=No, 1=Yes)
- `U6A` Would you go to this MD if med prb not emer (0=No, 1=Yes)
- `U6B` Think one of these could be your regular MD (0=No, 1=Yes)
- `PCP_ID` a numeric vector
- `U7A` What type of MD is your regular MD/this MD (1=OB/GYN,
2=Family medicine, 3=Pediatrician, 4=Adolescent medicine, 5=Internal
medicine, 6=AIDS doctor, 7=Asthma doctor, 8=Pulmonary doctor,
9=Cardiologist, 10=Gastroen)
- `U7A_T` a factor with levels `ARTHRITIS DOCTOR` `CHIROPRACTOR`
`COCAINE STUDY` `DETOX DOCTOR` `DO` `EAR DOCTOR`
`EAR SPECIALIST` `EAR, NOSE, & THROAT.` `EAR/NOSE/THROAT`
`ENT` `FAMILY PHYSICIAN` `GENERAL MEDICINE`
`GENERAL PRACTICE` `GENERAL PRACTIONER` `GENERAL PRACTITIONER`
`HEAD & NECK SPECIALIST` `HERBAL/HOMEOPATHIC/ACUPUNCTURE`
`ID DOCTOR` `MAYBE GENERAL PRACTITIONER` `MEDICAL STUDENT`
`NEUROLOGIST` `NURSE` `NURSE PRACTICIONER`
`NURSE PRACTITIONER` `ONCOLOGIST` `PRENATAL` `PRIMARY`
`PRIMARY CAAE` `PRIMARY CARE` `PRIMARY CARE DOCTOR`
`PRIMERY CARE` `THERAPIST` `UROLOGIST` `WOMENS CLINIC BMC`
- `U8A` Only saw this person once (=Only saw once)
- `U8B` Saw this person for <6 mos (1=<6 mos)
- `U8C` Saw tis person for 6 mos-1year (2=Betwn 6 mos & 1 yr)
- `U8D` Saw this person for 1-2 years (3=1-2 years)
- `U8E` Saw this person for 3-5 years (4=3-5 years)
- `U8F` Saw this person for more than 5 years (5=>5 years)
- `U10A` # times been to regular MDs office-pst 6 mos
- `U10B` # times saw regular MD in office-pst 6 mos
- `U10C` # times saw oth prof in office-pst 6 mos
- `U11` Rate convenience of MD office location (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U12` Rate hours MD office open for med appts (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U13` Usual wait for appt when sick (unsched) (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U14` Time wait for appt to start at MD office (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U15A` DO you pay for any/all of MD visits (0=No, 1=Yes)
- `U15B` How rate amt of $ you pay for MD visits (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U16A` Do you pay for any/all of prescript meds (0=No, 1=Yes)
- `U16B` Rate amt $ pay for meds/prescript trtmnts (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U17` Ever skip meds/trtmnts because too expensive (1=Yes, often,
2=Yes, occasionally, 3=No, never)
- `U18A` Ability to reach MC office by phone (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U18B` Ability to speak to MD by phone if need (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U19` How often see regular MD when have regular check-up
(1=Always, 2=Almost always, 3=A lot of the time, 4=Some of the time,
5=Almost never, 6=Never)
- `U20` When sick + go to MD how often see regular MD (1=Always,
2=Almost always, 3=A lot of the time, 4=Some of the time, 5=Almost
never, 6=Never)
- `U21A` How thorough MD exam to check hlth prb (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U21B` How often question if MD diagnosis right (1=Always, 2=Almost
always, 3=A lot of the time, 4=Some of the time, 5=Almost never,
6=Never)
- `U22A` Thoroughness of MD questions re symptoms (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22B` Attn MD gives to what you have to say (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22C` MD explanations of hlth prbs/trtmnts need (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22D` MD instrcts re sympt report/further care (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22E` MD advice in decisions about your care (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U23` How often leave MD office w/unanswd quests (1=Always,
2=Almost always, 3=A lot of the time, 4=Some of the time, 5=Almost
never, 6=Never)
- `U24A` Amount of time your MD spends w/you (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24B` MDs patience w/ your questions/worries (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24C` MDs friendliness and warmth toward you (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24D` MDs caring and concern for you (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24E` MDs respect for you (1=Very poor, 2= Poor, 3=Fair, 4=Good,
5= Very good, 6= Excellent)
- `U25A` Reg MD ever talked to you about smoking (0=No, 1=Yes)
- `U25B` Reg MD ever talked to you about alc use (0=No, 1=Yes)
- `U25C` Reg MD ever talk to you about seat belt use (0=No, 1=Yes)
- `U25D` Reg MD ever talked to you about diet (0=No, 1=Yes)
- `U25E` Reg Mdever talked to you about exercise (0=No, 1=Yes)
- `U25F` Reg MD ever talked to you about stress (0=No, 1=Yes)
- `U25G` Reg MD ever talked to you about safe sex (0=No, 1=Yes)
- `U25H` Reg MD ever talked to you about drug use (0=No, 1=Yes)
- `U25I` Reg MD ever talked to you about HIV testing (0=No, 1=Yes)
- `U26A` Cut/quit smoking because of MDs advice (0=No, 1=Yes)
- `U26B` Tried to drink less alcohol because of MD advice (0=No,
1=Yes)
- `U26C` Wore my seat belt more because of MDs advice (0=No, 1=Yes)
- `U26D` Changed diet because of MDs advice (0=No, 1=Yes)
- `U26E` Done more exercise because MDs advice (0=No, 1=Yes)
- `U26F` Relax/reduce stress because of MDs advice (0=No, 1=Yes)
- `U26G` Practiced safer sex because of MDs advice (0=No, 1=Yes)
- `U26H` Tried to cut down/quit drugs because MD advice (0=No,
1=Yes)
- `U26I` Got HIV tested because of MDs advice (0=No, 1=Yes)
- `U27A` I can tell my MD anything (1=Strongly agree, 2= Agree, 3=
Not sure, 4=Disagree, 5=Strongly disagree)
- `U27B` My MD pretends to know thngs if not sure (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27C` I trust my MDs judgement re my med care (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27D` My MD cares > about < costs than my hlth (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27E` My MD always tell truth about my health (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27F` My MD cares as much as I about my hlth (1=Strongly agree, 2=
Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27G` My MD would try to hide a mistake in trtmt (1=Strongly
agree, 2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U28` How much do you trust this MD (0=Not at all, 1=1, 2=2, 3=3,
4=4, 5=5, 6=6, 7=7, 8=8, 9=9, 10=Completely)
- `U29A` MDs knowledge of your entire med history (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U29B` MD knowldg of your respons-home/work/sch (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U29C` MD knowldg of what worries you most-hlth (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U29D` MDs knowledge of you as a person (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U30` MD would know what want done if unconsc (1=Strongly agree,
2=Agree, 3=Not sure, 4= Disagree, 5=Strongly disagree)
- `U31` Oth MDs/RNs who play role in your care (0=No, 1=Yes)
- `U32A` Their knowledge of you as a person (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32B` The quality of care they provide (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32C` Coordination betw them and your regular MD (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32D` Their expl of your hlth prbs/trtmts need (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32D_T` N/A, only my regular MD does this
- `U33` Amt regular MD knows about care from others (1=Knows
everything, 2=Knows almost everything, 3=Knows some things, 4=Knows
very little, 5=Knows nothing)
- `U34` Has MD ever recommended you see MD specialist (0=No, 1=Yes)
- `U35A` How helpful MD in deciding on specialist (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35B` How helpful MD getting appt w/specialist (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35C` MDs involvmt when you trtd by specialist (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35D` MDs communic w/your specialists/oth MDs (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35E` MD help in explain what specialists said (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35F` Quality of specialists MD sent you to (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U36` How many minutes to get to MDs office (1=<15, 2=16-30.
3=31-60, 4=More than 60)
- `U37` When sick+call how long take to see you (1=Same day, 2=Next
day, 3=In 2-3 days, 4=In 4-5 days, 5=in >5 days)
- `U38` How many minutes late appt usually begin (1=None, 2=<5
minutes, 3=6-10 minutes, 4=11-20 minutes, 5=21-30 minutes, 6=31-45
minutes, 7=>45 minutes)
- `U39` How satisfied are you w/your regular MD (1=Completely
satisfied, 2=Very satisfied, 3=Somewhat satisfied, 4=Neither,
5=Somewhat dissatisfied, 6=Very dissatisfied, 7=Completely
dissatisfied)
- `V1` Evr needed to drink much more to get effect (0=No, 1=Yes)
- `V2` Evr find alc had < effect than once did (0=No, 1=Yes)
- `Z1` Breath Alcohol Concentration:1st test
- `Z2` Breath Alcohol Concentration:2nd test
- `AGE` Age in years
- `REALM` REALM score
- `E16A_RT` Barrier to regular MD: red tape (0=No, 1=Yes)
- `E16A_IB` Barrier to regular MD: internal barriers (0=No, 1=Yes)
- `E16A_TM` Barrier to regular MD: time restrictions (0=No, 1=Yes)
- `E16A_DD` Barrier to regular MD: dislike docs/system (0=No, 1=Yes)
- `GROUP` Randomization Group (0=Control, 1=Clinic)
- `MMSEC` MMSEC
- `PRIM_SUB` First drug of choice (0=None, 1=Alcohol, 3=Cocaine,
3=Heroin, 4=Barbituates, 5=Benzos, 6=Marijuana, 7=Methadone,
8=Opiates)
- `SECD_SUB` Second drug of choice (0=None, 1=Alcohol, 3=Cocaine,
3=Heroin, 4=Barbituates, 5=Benzos, 6=Marijuana, 7=Methadone,
8=Opiates)
- `ALCOHOL` 1st/2nd drug of choice=Alcohol (0=No, 1=Yes)
- `COC_HER` 1st/2nd drug of choice=cocaine or heroin (0=No, 1=Yes)
- `REALM2` REALM score (dichotomous) (1=0-60, 2=61-66)
- `REALM3` REALM score (categorical) (1=0-44), 2=45-60), 3=61-66)
- `RACE` Race (recode) (1=Afr Amer/Black, 2=White, 3=Hispanic,
4=Other)
- `RACE2` Race (recode) (1=White, 2=Minority)
- `BIRTHPLC` Where born (recode) (0=USA, 1=Foreign)
- `PRIMLANG` First language (recode) (0=English, 1=Other lang)
- `MD_LANG` Lang prefer to speak to MD (recode) (0=English, 1=Other
lang)
- `HS_GRAD` High school graduate (0=No, 1=Yes)
- `MAR_STAT` Marital status (recode) (0=Married, 1=Not married)
- `A12B_REC` Hollingshead category (recode) (0=Cat 1,2,3, 1=Cat
4,5,6, 2=Cat 7,8,9)
- `UNEMPLOY` Usually unemployed last 6m (0=No, 1=Yes)
- `ALONE6M` Usually lived alone past 6m y/n (0=No, 1=Yes)
- `HOMELESS` Homeless-shelter/street past 6 m (0=No, 1=Yes)
- `JAIL_MOS` Total months in jail past 5 years
- `JAIL_5YR` Any jail time past 5 years y/n (0=No, 1=Yes)
- `GOV_SUPP` Received government support past 6 m (0=No, 1=Yes)
- `A18_REC1` Most money made in 1 yr (recode) (0=$19,000 or less,
1=$20,000-$49,000, 2=$50,000 or more)
- `A18_REC2` Most money made-continuous recode
- `STD_EVER` Ever had an STD y/n (0=No, 1=Yes)
- `STD_6M` Had an STD past 6m y/n (0=No, 1=Yes)
- `CHR_SUM` Sum chronic medican conds/HIV ever
- `CHR_EVER` Chronic medical conds/HIV-ever y/n (0=No, 1=Yes)
- `EPI_SUM` Sum episodic (C2A-C2O, C2R-C2U, STD)-6m
- `EPI_6M` Episodic (C2A-C2O,C2R-C2U, STD)-6m y/n (0=No, 1=Yes)
- `EPI_6M2B` Episodic(C2A-C2O)-6m y/n (0=No, 1=Yes)
- `SER_INJ` Recent (6m) serious injury y/n (0=No, 1=Yes)
- `D3_REC` Any medical problems past 30d y/n (0=No, 1=Yes)
- `D4_REC` Bothered by medical problems y/n (0=No, 1=Yes)
- `D5_REC` Medical trtmt is important y/n (0=No, 1=Yes)
- `ANY_INS` Did you have health insurance past 6 m (0=No, 1=Yes)
- `FRML_SAT` Formal substance abuse treatment y/n (0=No, 1=Yes)
- `E10B1_R` Mental health treatment past 6m y/n (0=No, 1=Yes)
- `E10B2_R` Med clinic/private MD past 6m y/n (0=No, 1=Yes)
- `ALT_TRT` Alternative treatments y/n (0=No, 1=Yes)
- `ANY_UTIL` Any recent health utilization (0=No, 1=Yes)
- `NUM_BARR` # of perceived barriers to linkage
- `G1B_REC` Suicidal thoughs past 30 days y/n (0=No, 1=Yes)
- `G1D_REC` Prescribed psych meds past 30 daus y/n (0=No, 1=Yes)
- `PRIMSUB2` First drug of choice (no marijuana) (0=None, 1=Alcohol,
2=Cocaine, 3=Heroin, 4=Barbituates, 5=Benzos, 6=Marijuana,
7=Methadone, 8=Opiates)
- `ALCQ_30` Total number drinks past 30 days
- `H2_PRB` Problem sub: alc to intox (0=No, 1=Yes)
- `H3_PRB` Problem sub: heroin (0=No, 1=Yes)
- `H4_PRB` Problem sub: methadone (0=No, 1=Yes)
- `H5_PRB` Problem sub: oth opiates/analg (0=No, 1=Yes)
- `H6_PRB` Problem sub: barbituates (0=No, 1=Yes)
- `H7_PRB` Problem sub: sedat/hyp/tranq (0=No, 1=Yes)
- `H8_PRB` Problem sub: cocaine (0=No, 1=Yes)
- `H9_PRB` Problem sub: amphetamines (0=No, 1=Yes)
- `H10_PRB` Problem sub: marijuana, cannabis (0=No, 1=Yes)
- `H11_PRB` Problem sub: hallucinogens (0=No, 1=Yes)
- `H12_PRB` Problem sub: inhalants (0=No, 1=Yes)
- `POLYSUB` Polysubstance abuser y/n (0=No, 1=Yes)
- `SMOKER` Current smoker (every/some days) y/n (0=No, 1=Yes)
- `O1B_REC` Family/friends heavy drinkers y/n (0=No, 1=Yes)
- `O1C_REC` Family/friends use drugs y/n (0=No, 1=Yes)
- `O1D_REC` Family/fiends support abst. y/n (0=No, 1=Yes)
- `O2_REC` Live-in partner drinks/drugs y/n (0=No, 1=Yes)
- `PHYABUSE` Physical abuse-stranger or family (0=No, 1=Yes)
- `SEXABUSE` Sexual abuse-stranger or family (0=No, 1=Yes)
- `PHSXABUS` Any abuse (0=No, 1=Yes)
- `ABUSE2` Type of abuse (0=No abuse, 1=Physical only, 2=Sexual only,
3=Physical and sexual)
- `ABUSE3` Type of abuse (0=No abuse, 1=Physical only, 2=Sexual +/-
physical (0=No, 1=Yes)
- `CURPHYAB` Current abuse-physical (0=No, 1=Yes)
- `CURSEXAB` Current abuse-sexual (0=No, 1=Yes)
- `CURPHYSEXAB` Curent abuse-physical or sexual (0=No abuse,
1=Physical only, 2=Sexual +/- physical)
- `FAMABUSE` Family abuse-physical or sexual (0=No, 1=Yes)
- `STRABUSE` Stranger abuse-physical or sexual (0=No, 1=Yes)
- `ABUSE` Abuse-physical or sexual (0=No abuse, 1= Family abuse, 2=
Stranger only abuse)
- `RAWPF` Raw SF-36 physical functioning
- `PF` SF-36 physical functioning (0-100)
- `RAWRP` Raw SF-36 role-physical
- `RP` SF-36 role physical (0-100)
- `RAWBP` Raw SF-36 pain index
- `BP` SF-36 pain index (0-100)
- `RAWGH` Raw SF-36 general health perceptions
- `GH` SF-36 general health perceptions (0-100)
- `RAWVT` Raw SF-36 vitality
- `VT` SF-36 vitality 0-100)
- `RAWSF` Raw SF-36 social functioning
- `SF` SF-36 social functioning (0-100)
- `RAWRE` Raw SF-36 role-emotional
- `RE` SF-36 role-emotional (0-100)
- `RAWMH` Raw SF-36 mental health index
- `MH` SF-36 mental health index (0-100)
- `HT` Raw SF-36 health transition item
- `PCS` Standardized physical component scale-00
- `MCS` Standardized mental component scale-00
- `CES_D` CES-D score, measure of depressive symptoms, high scores
are worse
- `CESD_CUT` CES-D score > 21 y/n (0=No, 1=Yes)
- `C_MS` ASI-Composite medical status
- `C_AU` ASI-Composite score for alcohol use
- `C_DU` ASI-Composite score for drug use
- `CUAD_C` CUAD-Cocaine
- `CUAD_H` CUAD-Heroin
- `RAW_RE` SOCRATES-Recognition-Raw
- `DEC_RE` SOCRATES-Recognition-Decile
- `RAW_AM` SOCRATES-Ambivalence-Raw
- `DEC_AM` SOCRATES-Ambivalence-Decile
- `RAW_TS` SOCRATES-Taking steps-Raw
- `DEC_TS` SOCRATES-Taking steps-Decile
- `RAW_ADS` ADS score
- `PHYS` InDUC-2L-Physical-Raw
- `PHYS2` InDUC-2L-Physical 9Raw (w/o M48)
- `INTER` InDUC-2L-Interpersonal-Raw
- `INTRA` InDUC-2L-Intrapersonal-Raw
- `IMPUL` InDUL-2L-Impulse control-Raw
- `IMPUL2` InDUC-2L-Impulse control-Raw (w/0 M23)
- `SR` InDUC-2L-Social responsibility-Raw
- `CNTRL` InDUC-2L-Control score
- `INDTOT` InDUC-2L Total DrInC score-Raw
- `INDTOT2` InDUC-2L-Total DrInC-Raw- w/o M23 and M48
- `PSS_FR` Perceived social support-friends
- `PSS_FA` Perceived social support-family
- `DRUGRISK` RAB-Drug risk total
- `SEXRISK` RAB-Sex risk total
- `TOTALRAB` RAB-Total RAB score
- `RABSCALE` RAB scale score
- `CHR_6M` Chronic medical conds/HIV-past 6m y/n (0=No, 1=Yes)
- `RCT_LINK` Did subject link to primary care (RCT)–This time point
(0=No, 1=Yes)
- `REG_MD` Did subject report having regular doctor–This time point
(0=No, 1=Yes)
- `ANY_VIS` # visits to regular doctor's office–This time point
- `ANY_VIS_CUMUL` Cumulative # visits to regular doctor's office
- `PC_REC` Primary care received: Linked & #visits (0=Not linked,
1=Linked, 1 visit, 2=Linked, 2+ visits)
- `PC_REC7` Primary cared received: linked & # visits (0=Not linked,
1=Linked, 1 visit, 2=Linked, 2 visits, 3=Linked, 3 visits, 4=Linked,
4 visits, 5= Linked, 5 visits, 6=Linked, 6+visits)
- `SATREAT` Any BSAS substance abuse this time point (0=No, 1=Yes)
- `DRINKSTATUS` Drank alcohol since leaving detox-6m
- `DAYSDRINK` Time (days) from baseline to first drink since leaving
detox-6m
- `ANYSUBSTATUS` Used alcohol, heroin, or cocaine since leaving
detox-6m
- `DAYSANYSUB` time (days) from baseline to first alcohol, heroin, or
cocaine since leaving detox-6m
- `LINKSTATUS` Linked to primary care within 12 months (by
administrative record)
- `DAYSLINK` Time (days) to linkage to primary care within 12 months
(by administrative record)
http://www.math.smith.edu/help
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `help_full.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1472 rows and 788 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'help_full.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/mosaicData/HELPfull.csv'
maybe_download_and_extract(path, url,
save_file_name='help_full.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.