content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
import json
def read_config():
    """Return the parsed JSON configuration as a dict.

    The file path is taken from the CONFIG_FILE_PATH environment
    variable; when unset, a ``config.json`` next to this module is used.
    """
    path = os.getenv('CONFIG_FILE_PATH')
    if not path:
        here = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(here, 'config.json')
    with open(path) as handle:
        return json.load(handle)
def sendSingleCommand(server, user, password, command):
    """Wrapper function to open a connection and execute a single command.
    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        command (str): The command to be executed.
    Returns:
        String: String containing the command output ("" if sendCommand
        raised before producing output).
    """
    # Open SSH connection
    channel = openChannel(server, user, password)
    output = ""
    try:
        output = sendCommand(channel, command)
    finally:
        # Close ssh connection -- always runs, even when sendCommand raises
        closeChannel(channel)
    return output | 78a339b2bcb320ad81e79b8656867b894be22ecd | 22,801 |
def test_piecewise_fermidirac(precision):
    """Creates a Chebyshev approximation of the Fermi-Dirac distribution within
    the interval (-3, 3), and tests its accuracy for scalars, matrices, and
    distributed matrices.
    """
    # Fermi-Dirac parameters: chemical potential mu, inverse temperature beta.
    mu = 0.0
    beta = 10.0
    def f(x):
        return 1 / (np.exp(beta * (x - mu)) + 1)
    is_vectorized = True
    interval = (-3, 3)
    n_cheb = 200  # number of Chebyshev coefficients
    # The first one comes from Chebyshev error, the latter from numerical.
    rtol = max(5e-6, 10 * testutils.eps(precision))
    atol = max(5e-6, 10 * testutils.eps(precision))
    test_samples = 1000
    test_margin = 0
    p_sz = 16
    D = 128
    # NOTE(review): dtype is hard-coded to float32 even though tolerances
    # are derived from `precision` -- confirm this is intentional.
    dtype = np.float32
    M = random_self_adjoint(D, dtype)
    # Make sure the spectrum of M is within the interval.
    interval_range = max(abs(i) for i in interval)
    M = M / (jnp.linalg.norm(M) / interval_range)
    v = np.random.randn(D, 1).astype(dtype)
    chebyshev_test(
        f,
        interval,
        M,
        v,
        n_cheb,
        is_vectorized,
        atol,
        rtol,
        test_samples,
        test_margin,
        p_sz,
        precision=precision,
    ) | 587b7acfc5a114677f1bf5ab5a72a9f2019c6063 | 22,802 |
def load_img(flist):
    """Load a batch of images into a list of ndarray frames.

    Args:
        flist: iterable of image file paths.

    Returns:
        tuple: (list of frames read unchanged via ``cv2.imread(path, -1)``,
        the last frame re-read with default flags -- its ``.shape`` gives
        (height, width, channels) -- or None when *flist* is empty).
    """
    rgb_imgs = []
    last_path = None
    for last_path in flist:
        rgb_imgs.append(cv2.imread(last_path, -1))  # flag < 0: return img as-is
    print("\t> Batch import of N frames\t", len(rgb_imgs))
    # Bug fixes vs. original: guard against an empty list (the loop
    # variable was read after the loop, raising NameError), and use the
    # tracked last path explicitly instead of the leaked loop variable.
    size_var = cv2.imread(last_path) if last_path is not None else None
    return rgb_imgs, size_var
import astropy.io.fits as pyfits
def makesimpleheader(headerin,naxis=2,radesys=None,equinox=None,pywcsdirect=False):
    """
    Function to make a new 'simple header' from the WCS information in the input header.
    Parameters
    ----------
    headerin : astropy.io.fits.header
        Header object
    naxis : int
        Specifies how many axes the final header should have. Default=2
    radesys :str
        RA/DEC system to use (valid SkyCoord frame system, e.g. 'icrs')
    equinox : str
        Equinox to use for the output header
    pywcsdirect : bool
        True to create the header directly with astropy.wcs.WCS
    Returns
    -------
    astropy.io.fits.header
        Output header
    """
    # Accept a filename as well as an in-memory Header object.
    if type(headerin)==str:
        headerin=pyfits.getheader(headerin)
    # NOTE(review): `pywcs` and `getcdelts` are assumed to be provided by
    # the enclosing module (pywcs presumably astropy.wcs) -- not imported here.
    if pywcsdirect==True: wcstemp=pywcs.WCS(header=headerin)
    else:
        # Build the WCS manually, card by card, from the input header.
        wcstemp=pywcs.WCS(naxis=naxis);
        if naxis>2:
            wcstemp.wcs.crpix=[float(headerin['CRPIX1']),float(headerin['CRPIX2']),float(headerin['CRPIX3'])]
            wcstemp.wcs.crval=[float(headerin['CRVAL1']),float(headerin['CRVAL2']),float(headerin['CRVAL3'])]
            wcstemp.wcs.ctype=[headerin['CTYPE1'],headerin['CTYPE2'],headerin['CTYPE3']]
            try: wcstemp.wcs.cunit=[headerin['CUNIT1'],headerin['CUNIT2'],headerin['CUNIT3']]
            except: pass
            try: wcstemp.wcs.cdelt=list(getcdelts(headerin))+[headerin['CDELT3']];
            except: raise(Exception('Invalid WCS CDELTS'))
        else:
            wcstemp.wcs.crpix=[float(headerin['CRPIX1']),float(headerin['CRPIX2'])]
            wcstemp.wcs.crval=[float(headerin['CRVAL1']),float(headerin['CRVAL2'])]
            wcstemp.wcs.ctype=[headerin['CTYPE1'],headerin['CTYPE2']]
            try: wcstemp.wcs.cunit=[headerin['CUNIT1'],headerin['CUNIT2']]
            except: pass
            try: wcstemp.wcs.cdelt=list(getcdelts(headerin));
            except: raise(Exception('Invalid WCS CDELTS'))
    try: crota=getcdelts(headerin,getrot=True)[-1] #degrees, from N
    except: raise(Exception('Invalid WCS params for CROTAx'))
    #if crota!=0.: wcstemp.wcs.crota=[crota]*2 #Header will include PC_x cards if crot not 0
    try: wcstemp.wcs.radesys=headerin['RADESYS']
    except: pass
    try: wcstemp.wcs.equinox=headerin['EQUINOX']
    except: pass
    if radesys is not None: wcstemp.wcs.radesys=radesys; #e.g. 'FK5', 'ICRS'. For manually forcing string, not true reprojection.
    if equinox is not None: wcstemp.wcs.equinox=equinox; #e.g. 2000.0
    simpleheader=wcstemp.to_header()
    if pywcsdirect==False:
        if crota!=0.: simpleheader['CROTA2']=crota #Alternative method to just use (deprecated) CROTA2 card
        simpleheader['NAXIS']=naxis;
        try: simpleheader['NAXIS1']=int(headerin['NAXIS1']); simpleheader['NAXIS2']=int(headerin['NAXIS2']);
        except: pass
    if naxis>2:
        # Carry over spectral-axis / velocity reference cards when present.
        for card in ['NAXIS3','CRPIX3','CRVAL3','CDELT3','CTYPE3','CUNIT3', 'SPECSYS','ALTRVAL','ALTRPIX']:
            try: simpleheader[card]=headerin[card]
            except: pass
    # Copy common numeric cards (beam, scaling, rest frequency, ...) if present.
    for card in ['CROTA','CROTA1','CROTA2','BSCALE','BZERO','ZSCALE','BMAJ','BMIN','BPA', 'JANSCALE','FLUXCONV',
                 'WAVELEN','FREQ', 'RESTFRQ', 'LATPOLE','LONPOLE']:
        try: simpleheader[card]=float(headerin[card])
        except: pass
    for card in ['BUNIT','OBJECT','TELESCOP','ZUNITS','SPECSYS']:
        try: simpleheader[card]=headerin[card]
        except: pass
    return simpleheader | ffdc8f755227451e3df2329e8ec804b7444a553d | 22,804 |
def _callcatch(ui, func):
    """like scmutil.callcatch but handles more high-level exceptions about
    config parsing and commands. besides, use handlecommandexception to handle
    uncaught exceptions.
    """
    # -1 means "no detailed exit code assigned"; 10 is used for
    # usage/command errors in the handlers below.
    detailed_exit_code = -1
    try:
        return scmutil.callcatch(ui, func)
    except error.AmbiguousCommand as inst:
        detailed_exit_code = 10
        ui.warn(
            _(b"hg: command '%s' is ambiguous:\n %s\n")
            % (inst.prefix, b" ".join(inst.matches))
        )
    except error.CommandError as inst:
        detailed_exit_code = 10
        if inst.command:
            # A known command was misused: show its own help text.
            ui.pager(b'help')
            msgbytes = pycompat.bytestr(inst.message)
            ui.warn(_(b"hg %s: %s\n") % (inst.command, msgbytes))
            commands.help_(ui, inst.command, full=False, command=True)
        else:
            ui.warn(_(b"hg: %s\n") % inst.message)
            ui.warn(_(b"(use 'hg help -v' for a list of global options)\n"))
    except error.UnknownCommand as inst:
        detailed_exit_code = 10
        nocmdmsg = _(b"hg: unknown command '%s'\n") % inst.command
        try:
            # check if the command is in a disabled extension
            # (but don't check for extensions themselves)
            formatted = help.formattedhelp(
                ui, commands, inst.command, unknowncmd=True
            )
            ui.warn(nocmdmsg)
            ui.write(formatted)
        except (error.UnknownCommand, error.Abort):
            # Fall back to "did you mean ...?" suggestions.
            suggested = False
            if inst.all_commands:
                sim = error.getsimilar(inst.all_commands, inst.command)
                if sim:
                    ui.warn(nocmdmsg)
                    ui.warn(b"(%s)\n" % error.similarity_hint(sim))
                    suggested = True
            if not suggested:
                ui.warn(nocmdmsg)
                ui.warn(_(b"(use 'hg help' for a list of commands)\n"))
    except IOError:
        raise
    except KeyboardInterrupt:
        raise
    except: # probably re-raises
        if not handlecommandexception(ui):
            raise
    # Only surface the detailed code when the user opted in via config.
    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return -1 | 495531b930187f1d3aff329453235f9683bc25bc | 22,805 |
def _determ_estim_update(new_bit, counts):
    """Log-probability update under a model that believes only in a
    constant sequence (all zeros or all ones).

    Returns the log-probability delta for appending *new_bit* to the
    sequence summarized by *counts*, or LOG_ZERO once both symbols have
    been observed.
    """
    updated = list(counts)
    updated[new_bit] += 1
    # A mix of both symbols has probability zero under this model.
    if updated[0] > 0 and updated[1] > 0:
        return LOG_ZERO
    return _determ_log_p(updated) - _determ_log_p(counts)
def get_projects(config):
    """Find all XNAT projects and the scan sites uploading to each one.

    Args:
        config (:obj:`datman.config.config`): The config for a study

    Returns:
        dict: XNAT project name -> set of site names that use it.
    """
    site_map = {}
    for scan_site in config.get_sites():
        project = config.get_key("XnatArchive", site=scan_site)
        if project not in site_map:
            site_map[project] = set()
        site_map[project].add(scan_site)
    return site_map
import json
def load_jsonrpc_method(name):
    """Load a method based on the file naming conventions for the JSON-RPC.

    Reads ``doc/schemas/<name>.request.json`` and
    ``doc/schemas/<name>.schema.json`` relative to the repo root.

    Args:
        name: method name (lowercased to form the file names).

    Returns:
        Method: the parsed request/response pair, with typenames suffixed
        "Request"/"Response" so they no longer collide.
    """
    base_path = (repo_root() / "doc" / "schemas").resolve()
    req_file = base_path / f"{name.lower()}.request.json"
    resp_file = base_path / f"{name.lower()}.schema.json"
    # Use context managers so the schema files are closed promptly; the
    # original leaked the handles from bare open() calls.
    with open(req_file) as f:
        request = CompositeField.from_js(json.load(f), path=name)
    with open(resp_file) as f:
        response = CompositeField.from_js(json.load(f), path=name)
    # Normalize the method request and response typename so they no
    # longer conflict.
    request.typename += "Request"
    response.typename += "Response"
    return Method(
        name=method_name_override.get(name, name),
        request=request,
        response=response,
    )
def download_missing_namespace(network_id: int, namespace: str):
    """Output a namespace built from the missing names in the given namespace.
    ---
    tags:
        - network
    parameters:
      - name: network_id
        in: path
        description: The database network identifier
        required: true
        type: integer
      - name: namespace
        in: path
        description: The keyword of the namespace to extract
        required: true
        type: string
    """
    # NOTE: the YAML block above is consumed by the Swagger integration at
    # runtime -- treat it as code, not free-form documentation.
    # Aborts with 404 when the network is missing or not visible to the
    # current user.
    graph = manager.cu_authenticated_get_graph_by_id_or_404(network_id)
    names = get_incorrect_names_by_namespace(graph, namespace) # TODO put into report data
    return _build_namespace_helper(graph, namespace, names) | daca8a78454aec73bbf1e8a54f1a006aa748681c | 22,809 |
def client_decrypt_hello_reply(ciphertext, iv1, key1):
    """
    Decrypt the server's hello reply with the IV and key the client sent.

    Returns (iv2, key2, salt2, salt1, version2).  The pair iv2/key2 is
    used for all subsequent traffic; salt1 is echoed back so the caller
    can confirm the integrity of the exchange.
    """
    cipher = Cipher(
        algorithms.AES(bytes(key1)),
        modes.CBC(bytes(iv1)),
        backend=default_backend())
    decryptor = cipher.decryptor()
    padded = decryptor.update(ciphertext) + decryptor.finalize()
    unpadder = padding.PKCS7(AES_BLOCK_BITS).unpadder()
    body = unpadder.update(padded) + unpadder.finalize()
    # Fixed layout: iv2 | key2 (2 blocks) | salt2 (8) | salt1 (8) | version (4, LE)
    offset = 0
    iv2 = body[offset:offset + AES_BLOCK_BYTES]
    offset += AES_BLOCK_BYTES
    key2 = body[offset:offset + 2 * AES_BLOCK_BYTES]
    offset += 2 * AES_BLOCK_BYTES
    salt2 = body[offset:offset + 8]
    offset += 8
    salt1 = body[offset:offset + 8]
    offset += 8
    v_bytes = body[offset:offset + 4]
    # Little-endian 32-bit protocol version.
    version2 = (v_bytes[0]
                | (v_bytes[1] << 8)
                | (v_bytes[2] << 16)
                | (v_bytes[3] << 24))
    return iv2, key2, salt2, salt1, version2
import pandas
def load_labeled_data(filename):
    """Load a headerless CSV whose final column is the row label.

    :param filename: name of the file to load
    :return: (data, labels) -- float feature matrix and the label column
    """
    frame = pandas.read_csv(filename, header=None)
    values = frame.values
    features = values[:, 0:-1].astype(float)
    labels = values[:, -1]
    return features, labels
import logging
def get_logger(name, level='debug', log_file='log.txt'):
    """
    Return a logger that writes to *log_file* at the given level, in the
    format "LEVEL: message".

    Args:
        name: logger name (typically the module name).
        level: level name, case-insensitive (e.g. 'debug', 'info').
        log_file: path of the log file (truncated on each run via 'w+').

    Returns:
        logging.Logger: the configured logger.
    """
    # Setup logging
    log_level = level.upper()
    # Bug fix: the record attribute is %(message)s -- %(msg)s (as in the
    # original) emits the raw template without the lazy %-args applied.
    fmt = "%(levelname)s: %(message)s"
    log = logging.getLogger(name)
    # Log to file, no screen output.  Note: basicConfig is a no-op if the
    # root logger was already configured elsewhere in the process.
    logging.basicConfig(filename=log_file, filemode='w+',
                        level=log_level,
                        format=fmt)
    return log
import sh
import os
import errno
import shutil
def filesystem_move(
    source_path,
    source_type,
    destination_path,
    destination_type,
    backup_ending,
):
    """
    Moves a file from the source to the destination.
    Arguments
    ---------
    source_path: path to the source
    source_type: Type of the source (a directory -> 'dir' or a file -> 'file')
    destination_path: path to the destination
    destination_type: Type of the destination (place it in a directory -> 'dir' or replace it -> 'file')
    backup_ending: the file ending for backup files
    Returns
    -------
    The hash of the source
    """
    # Normalise/validate arguments via the project's `sh` helper module.
    # NOTE(review): `sh` here is a local module, not the PyPI `sh` package.
    (
        source_path,
        source_type,
        destination_path,
        destination_type,
        backup_ending,
        _,
    ) = sh.filesystem_type_check(
        source_path,
        source_type,
        destination_path,
        destination_type,
        backup_ending,
    )
    # destination file with name of source exists
    if (source_type == 'dir' and os.path.isdir(destination_path)) or (
        source_type == 'file' and os.path.isfile(destination_path)):
        # Backup file name already exists
        backup_path = destination_path + backup_ending
        if os.path.exists(backup_path):
            raise FileExistsError(
                errno.EEXIST,
                os.strerror(errno.EEXIST),
                backup_path,
            )
        # move old file to backup
        os.rename(destination_path, backup_path)
    elif os.path.exists(destination_path):
        # Path exists but is the wrong kind (file where a dir was expected,
        # or vice versa).
        raise ValueError(
            "Expected a {} at `{}`, but did not found one.".format(
                "file"
                if source_type == "file" else "directory", destination_path))
    if source_type == 'dir':
        # Recursively copy the tree.  NOTE(review): despite the function
        # name, the source is copied, not removed.
        os.mkdir(destination_path)
        for root, dirs, files in os.walk(source_path):
            # set the prefix from source_path to destination_path
            dest_root = os.path.join(destination_path,
                                     root[len(source_path) + 1:])
            for directory in dirs:
                os.mkdir(os.path.join(dest_root, directory))
            for fil in files:
                shutil.copyfile(
                    os.path.join(root, fil),
                    os.path.join(dest_root, fil),
                )
        return sh.hash_directory(destination_path)
    elif source_type == 'file':
        # finally link source to destination
        shutil.copyfile(source_path, destination_path)
        return sh.hash_file(destination_path) | 5d3cb03248f04dc93d086026a01d43cc1c3bdd9f | 22,813 |
def _get_data_tuple(sptoks, asp_termIn, label):
    """Locate the aspect term in a tokenised sentence and encode the label.

    Method obtained from Trusca et al. (2020), no original docstring
    provided.

    :param sptoks: sentence tokens.
    :param asp_termIn: tokens of the aspect term to find.
    :param label: sentiment label: 'negative', 'neutral' or 'positive'.
    :return: (pos_info, lab) -- per-token distance to the nearest aspect
        token, and the label encoded as -1/0/1.
    :raises ValueError: if the label is not one of the three known values.
    """
    # Find the ids of aspect term.
    aspect_is = []
    asp_term = ' '.join(sp for sp in asp_termIn).lower()
    for _i, group in enumerate(window(sptoks, len(asp_termIn))):
        if asp_term == ' '.join([g.lower() for g in group]):
            aspect_is = list(range(_i, _i + len(asp_termIn)))
            break
        elif asp_term in ' '.join([g.lower() for g in group]):
            aspect_is = list(range(_i, _i + len(asp_termIn)))
            break
    # Distance from each sentence token to the closest aspect token.
    pos_info = []
    for _i, sptok in enumerate(sptoks):
        pos_info.append(min([abs(_i - i) for i in aspect_is]))
    lab = None
    if label == 'negative':
        lab = -1
    elif label == 'neutral':
        lab = 0
    elif label == "positive":
        lab = 1
    else:
        # Bug fix: report the offending *label*; the original formatted
        # `lab`, which is always None on this path.
        raise ValueError("Unknown label: %s" % label)
    return pos_info, lab
from pathlib import Path
def setup_environment(new_region: Path) -> bool:
    """Try to create the *new_region* output folder.

    If the folder already exists the user is asked whether to continue;
    any answer starting with 'y' or 'Y' proceeds.

    Returns:
        bool: True when it is safe to continue, False otherwise.
    """
    if new_region.exists():
        print(f"{new_region.resolve()} exists, this may cause problems")
        proceed = input("Do you want to proceed regardless? [y/N] ")
        sep()
        # Bug fix: accept 'Y' as well as 'y'; the original comparison was
        # case-sensitive despite the [y/N] prompt.
        return proceed.strip().lower().startswith("y")
    new_region.mkdir()
    print(f"Saving newly generated region files to {new_region.resolve()}")
    return True
def is_favorable_halide_environment(
    i_seq,
    contacts,
    pdb_atoms,
    sites_frac,
    connectivity,
    unit_cell,
    params,
    assume_hydrogens_all_missing=Auto):
    """
    Detects if an atom's site exists in a favorable environment for a halide
    ion. This includes coordinating by a positively charged sidechain or backbone
    as well as an absense of negatively charged coordinating groups.
    Parameters
    ----------
    i_seq : int
    contacts : list of mmtbx.ions.environment.atom_contact
    pdb_atoms : iotbx.pdb.hierarchy.af_shared_atom
    sites_frac : tuple of float, float, float
    connectivity : scitbx.array_family.shared.stl_set_unsigned
    unit_cell : uctbx.unit_cell
    params : libtbx.phil.scope_extract
    assume_hydrogens_all_missing : bool, optional
    Returns
    -------
    bool
    """
    # Auto-detect hydrogen presence from the model's element list.
    if (assume_hydrogens_all_missing in [None, Auto]):
        elements = pdb_atoms.extract_element()
        assume_hydrogens_all_missing = not ("H" in elements or "D" in elements)
    atom = pdb_atoms[i_seq]
    binds_amide_hydrogen = False
    near_cation = False
    near_lys = False
    near_hydroxyl = False
    xyz = col(atom.xyz)
    # Start the "closest cation" distance at an upper bound (longest cell edge).
    min_distance_to_cation = max(unit_cell.parameters()[0:3])
    min_distance_to_hydroxyl = min_distance_to_cation
    # First pass: look for favorable (positively charged / H-bonding) contacts.
    for contact in contacts :
        # to analyze local geometry, we use the target site mapped to be in the
        # same ASU as the interacting site
        def get_site(k_seq):
            return unit_cell.orthogonalize(
                site_frac = (contact.rt_mx * sites_frac[k_seq]))
        other = contact.atom
        resname = contact.resname()
        atom_name = contact.atom_name()
        element = contact.element
        distance = abs(contact)
        j_seq = other.i_seq
        # XXX need to figure out exactly what this should be - CL has a
        # fairly large radius though (1.67A according to ener_lib.cif)
        if (distance < params.min_distance_to_other_sites):
            return False
        if not element in ["C", "N", "H", "O", "S"]:
            charge = server.get_charge(element)
            if charge < 0 and distance <= params.min_distance_to_anion:
                # Nearby anion that is too close
                return False
            if charge > 0 and distance <= params.max_distance_to_cation:
                # Nearby cation
                near_cation = True
                if (distance < min_distance_to_cation):
                    min_distance_to_cation = distance
        # Lysine sidechains (can't determine planarity)
        elif (atom_name in ["NZ"] and #, "NE", "NH1", "NH2"] and
                    resname in ["LYS"] and
                    distance <= params.max_distance_to_cation):
            near_lys = True
            if (distance < min_distance_to_cation):
                min_distance_to_cation = distance
        # sidechain amide groups, no hydrogens (except Arg)
        # XXX this would be more reliable if we also calculate the expected
        # hydrogen positions and use the vector method below
        elif (atom_name in ["NZ","NH1","NH2","ND2","NE2"] and
                    resname in ["ARG","ASN","GLN"] and
                    (assume_hydrogens_all_missing or resname == "ARG") and
                    distance <= params.max_distance_to_cation):
            if (_is_coplanar_with_sidechain(atom, other.parent(),
                    distance_cutoff = params.max_deviation_from_plane)):
                binds_amide_hydrogen = True
                if (resname == "ARG") and (distance < min_distance_to_cation):
                    min_distance_to_cation = distance
        # hydroxyl groups - note that the orientation of the hydrogen is usually
        # arbitrary and we can't determine precise bonding
        elif ((atom_name in ["OG1", "OG2", "OH1"]) and
                    (resname in ["SER", "THR", "TYR"]) and
                    (distance <= params.max_distance_to_hydroxyl)):
            near_hydroxyl = True
            if (distance < min_distance_to_hydroxyl):
                min_distance_to_hydroxyl = distance
        # Backbone amide, explicit H
        elif atom_name in ["H"]:
            # TODO make this more general for any amide H?
            xyz_h = col(contact.site_cart)
            bonded_atoms = connectivity[j_seq]
            if (len(bonded_atoms) != 1):
                continue
            xyz_n = col(get_site(bonded_atoms[0]))
            vec_hn = xyz_h - xyz_n
            vec_hx = xyz_h - xyz
            angle = abs(vec_hn.angle(vec_hx, deg = True))
            # If Cl, H, and N line up, Cl binds the amide group
            if abs(angle - 180) <= params.delta_amide_h_angle:
                binds_amide_hydrogen = True
            else :
                pass #print "%s N-H-X angle: %s" % (atom.id_str(), angle)
        # Backbone amide, implicit H
        elif atom_name in ["N"] and assume_hydrogens_all_missing:
            # Reconstruct the (missing) amide H position from CA and the
            # previous residue's C, then apply the same collinearity test.
            xyz_n = col(contact.site_cart)
            bonded_atoms = connectivity[j_seq]
            ca_same = c_prev = None
            for k_seq in bonded_atoms :
                other2 = pdb_atoms[k_seq]
                if other2.name.strip().upper() in ["CA"]:
                    ca_same = col(get_site(k_seq))
                elif other2.name.strip().upper() in ["C"]:
                    c_prev = col(get_site(k_seq))
            if ca_same is not None and c_prev is not None:
                xyz_cca = (ca_same + c_prev) / 2
                vec_ncca = xyz_n - xyz_cca
                # 0.86 is the backbone N-H bond distance in geostd
                xyz_h = xyz_n + (vec_ncca.normalize() * 0.86)
                vec_nh = xyz_n - xyz_h
                vec_nx = xyz_n - xyz
                angle = abs(vec_nh.angle(vec_nx, deg = True))
                if abs(angle - 180) <= params.delta_amide_h_angle:
                    binds_amide_hydrogen = True
        # sidechain NH2 groups, explicit H
        elif ((atom_name in ["HD1","HD2"] and resname in ["ASN"]) or
                    (atom_name in ["HE1","HE2"] and resname in ["GLN"])):
            # XXX not doing this for Arg because it can't handle the bidentate
            # coordination
            #(atom_name in ["HH11","HH12","HH21","HH22"] and resname == "ARG")):
            bonded_atoms = connectivity[j_seq]
            assert (len(bonded_atoms) == 1)
            xyz_n = col(get_site(bonded_atoms[0]))
            xyz_h = col(contact.site_cart)
            vec_nh = xyz_n - xyz_h
            vec_xh = xyz - xyz_h
            angle = abs(vec_nh.angle(vec_xh, deg = True))
            if abs(angle - 180) <= params.delta_amide_h_angle:
                binds_amide_hydrogen = True
            else :
                pass #print "%s amide angle: %s" % (atom.id_str(), angle)
    # now check again for negatively charged sidechain (etc.) atoms (e.g.
    # carboxyl groups), but with some leeway if a cation is also nearby.
    # backbone carbonyl atoms are also excluded.
    for contact in contacts :
        if (contact.altloc() not in ["", "A"]):
            continue
        resname = contact.resname()
        atom_name = contact.atom_name()
        distance = abs(contact)
        if ((distance < 3.2) and
                (distance < (min_distance_to_cation + 0.2)) and
                is_negatively_charged_oxygen(atom_name, resname)):
            #print contact.id_str(), distance
            return False
    return (binds_amide_hydrogen or near_cation or near_lys) | dc09952a022c0bf8d946773db0487293da88c4a5 | 22,816 |
def headers():
    """Default JSON content/accept headers for making requests."""
    default = {'content-type': 'application/json'}
    default['accept'] = 'application/json'
    return default
def merge_two_lists(l1: ListNode, l2: ListNode) -> ListNode:
    """Splice two sorted linked lists into one sorted list, in place.

    No new value nodes are allocated; the result reuses the nodes of l1
    and l2.  Ties take the node from l1 first (stable).

    Args:
        l1:
        l2:
    Examples:
        >>> l1 = linked_list.convert_list_to_linked_list([1,2,4])
        >>> l2 = linked_list.convert_list_to_linked_list([1,3,4])
        >>> merge_two_lists(l1, l2).as_list()
        [1, 1, 2, 3, 4, 4]
        >>> l1 = linked_list.convert_list_to_linked_list([])
        >>> l2 = linked_list.convert_list_to_linked_list([0])
        >>> merge_two_lists(l1, l2).as_list()
        [0]
        >>> merge_two_lists(l2, l1).as_list()
        [0]
        >>> merge_two_lists(None, None)
    """
    # A sentinel node lets us treat the first splice like every other one.
    sentinel = ListNode(None)
    tail = sentinel
    while l1 is not None and l2 is not None:
        if l1.val <= l2.val:
            tail.next = l1
            l1 = l1.next
        else:
            tail.next = l2
            l2 = l2.next
        tail = tail.next
    # One input is exhausted here; append whatever remains of the other.
    tail.next = l1 if l1 is not None else l2
    return sentinel.next
import csv
def map_SOPR_to_firm():
    """
    Map SOPR report identifiers to a lobbying-firm CUID.
    Return a dictionary {SOPR report id: firm CUID}.
    """
    firms = {}
    # NOTE(review): opening in 'rb' for csv.reader is a Python 2 idiom;
    # under Python 3 this would need mode 'r' with newline='' -- confirm
    # the target interpreter.
    with open(DATASET_PATH_TO['LOBBYING_FIRMS'], 'rb') as f:
        reader = csv.reader(f, delimiter='%', quoting=csv.QUOTE_NONE)
        for record in reader:
            # Column 3 holds ';'-separated SOPR report ids; column 0 the CUID.
            SOPR_reports = record[3].split(';')
            CUID_firm = record[0]
            for report_id in SOPR_reports:
                firms[report_id] = CUID_firm
    return firms | e0f00d7f720512eef3e32685bb8ba5ed4ed0203c | 22,819 |
from typing import Set
def specialbefores_given_external_square(
        befores: Set[Before],
        directly_playable_squares: Set[Square],
        external_directly_playable_square: Square) -> Set[Specialbefore]:
    """Build all Specialbefores that use the given external square.

    Args:
        befores (Set[Before]): a set of Befores used to create Specialbefores.
        directly_playable_squares (Set[Square]): directly playable squares.
        external_directly_playable_square (Square): the square used as the
            external directly playable square of every Specialbefore.
    Returns:
        Set[Specialbefore]: one Specialbefore per (Before, internal square)
        pair that is compatible with the external square.
    """
    result = set()
    for current_before in befores:
        playable_in_group = internal_directly_playable_squares(
            current_before, directly_playable_squares)
        for internal_square in playable_in_group:
            if can_be_used_with_before(
                    external_directly_playable_square, current_before):
                result.add(Specialbefore(
                    before=current_before,
                    internal_directly_playable_square=internal_square,
                    external_directly_playable_square=external_directly_playable_square,
                ))
    return result
import os
from typing import Tuple
def load_sets(path=None):
    """Load the train/test/validation splits from CSV files.

    :param path: directory containing the six CSV files.  Defaults to
        ``<project_dir>/data/processed``; the default is resolved lazily
        so that merely defining this function does not require
        ``project_dir`` (the original evaluated it at import time).
    :return: tuple of six DataFrames:
        (X_train, X_test, X_val, y_train, y_test, y_val).
        Note: the original annotation advertised a 4-tuple with Series
        targets, but six objects are returned and ``read_csv`` yields
        DataFrames.
    """
    if path is None:
        path = project_dir / 'data/processed'
    names = ("X_train", "X_test", "X_val", "y_train", "y_test", "y_val")
    return tuple(
        pd.read_csv(os.path.join(path, f"{name}.csv")) for name in names
    )
import pytest
def check_if_all_tests_pass(option='-x'):
    """Run the whole test suite and report whether every test passed.

    The default '-x' makes pytest stop at the first failure; '-s' would
    instead show stdout from the tests (normally hidden)."""
    exit_status = pytest.main([option])
    all_passed = (exit_status == 0)
    if not all_passed:
        # Pause so the failure output stays visible on screen.
        input()
    return all_passed
import http
def init(api, _cors, impl):
    """Configures REST handlers for allocation resource."""
    # NOTE(review): the docstring mentions "allocation resource" but the
    # namespace registered below is the local nodeinfo redirect API -- confirm.
    namespace = webutils.namespace(
        api, __name__, 'Local nodeinfo redirect API.'
    )
    @namespace.route('/<hostname>/<path:path>')
    class _NodeRedirect(restplus.Resource):
        """Redirects to local nodeinfo endpoint."""
        def get(self, hostname, path):
            """Returns list of local instances."""
            # Resolve the hostname to its host:port endpoint; 404 if unknown.
            hostport = impl.get(hostname)
            if not hostport:
                return 'Host not found.', http.client.NOT_FOUND
            url = utils.encode_uri_parts(path)
            # 302 FOUND redirect to the node-local endpoint.
            return flask.redirect('http://%s/%s' % (hostport, url),
                                  code=http.client.FOUND) | 77430c891ceac87bec3d8b1cfa46557fbc1fd9f5 | 22,823 |
def split_matrix_2(input1):
    """
    Split a square matrix into its diagonal blocks.
    Args:
        input1: tvm.Tensor of type float32, square, with side divisible
            by `split_dim`.
    Returns:
        akg.tvm.Tensor of type float32 with 3d shape
        (dim // split_dim, split_dim, split_dim) holding the blocks that
        lie on the main diagonal.
    """
    dim = input1.shape[0]
    # NOTE(review): `split_dim` and `allocate` come from the enclosing
    # module / DSL scope -- they are not defined in this snippet.
    split_num = dim // split_dim
    result_3 = allocate((split_num, split_dim, split_dim), input1.dtype, 'local')
    for i in range(split_num):
        for j in range(split_dim):
            for k in range(split_dim):
                # Block i is the square slice [i*split_dim, (i+1)*split_dim).
                result_3[i,j,k] = input1[i * split_dim + j, i * split_dim + k]
    return result_3 | 8ee5b4069c28166ef6181cb8c6ef1e21232239a4 | 22,824 |
import glob
def load_all(path, jobmanager=None):
    """Load all jobs previously saved under *path*.

    Works like multiple |load_job| calls: every file matching
    ``path/*/*.dill`` (one directory level below *path*) is imported, so
    the main working folder of a previous run can be passed directly.
    This makes restarting a failed script cheap -- already-successful
    jobs are reused instead of being recomputed.

    Jobs are loaded with the default job manager from ``config.jm``
    unless *jobmanager* is given.

    Returns a dict mapping each ``.dill`` file path to its loaded job.
    """
    manager = jobmanager or config.jm
    jobs = {}
    pattern = opj(path, '*', '*.dill')
    for dill_path in glob.glob(pattern):
        jobs[dill_path] = manager.load_job(dill_path)
    return jobs
import numpy
def relative_error(estimate, exact):
    """
    Compute the relative error of an estimate, in percent.

    When *exact* is numerically zero the error is 0 if the estimate
    agrees (within tolerance) and infinity otherwise.
    """
    tol = 1e-15
    if numpy.abs(exact) >= tol:
        return numpy.abs((estimate - exact) / exact) * 100.0
    return 0.0 if numpy.abs(estimate - exact) < tol else numpy.inf
import os
def extract_sample_paths(seq_dir):
    """Obtain the sample paths from a sequencing directory.

    Parameters
    ----------
    seq_dir : str
        Input directory containing all of the sample files.

    Returns
    -------
    dict of list of str
        Samples mapped to a list of their forward and reverse files.
    """
    filenames = os.listdir(seq_dir)
    name_frame = illumina_filenames_to_df(filenames)
    return extract_sample_reads(name_frame, seq_dir)
def list_users(cursor):
    """
    Returns the current roles.

    :param cursor: open cursor on a PostgreSQL connection.
    :return: ``map`` of User objects built from pg_roles rows (role name,
        can-login flag, and membership list).
        NOTE(review): under Python 3 this is a lazy one-shot iterator,
        not a list -- confirm callers only iterate it once.
    """
    cursor.execute(
        """
        SELECT
            r.rolname AS name,
            r.rolcanlogin AS login,
            ARRAY(
                SELECT b.rolname
                FROM pg_catalog.pg_auth_members m
                JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
                WHERE m.member = r.oid
            ) AS memberof
        FROM pg_catalog.pg_roles r
        """
    )
    return map(User.create, cursor.fetchall()) | b51efbae5da08089987e3bc2753e1da3c13ee365 | 22,828 |
import subprocess
def getVelocityRange(vis, options=None):
    """
    Parse the velocity range from uvlist output.

    Useful when resampling and re-binning data in `line` selections.

    :param vis: visibility data set handed to uvlist.
    :param options: optional extra uvlist options.  The caller's dict is
        copied, never mutated (the original both used a shared mutable
        default ``{}`` and wrote into whatever dict the caller passed).
    :return: tuple (starting velocity, ending velocity) as floats.
    """
    opts = dict(options) if options else {}
    opts['vis'] = vis
    opts['options'] = 'spec'
    specdata = uvlist(opts, stdout=subprocess.PIPE).stdout
    specdata = str(specdata)
    # starting velocity
    startvel = specdata[specdata.find('starting velocity'):]
    startvel = startvel[startvel.find(':')+1:startvel.find('\\n')].split()[0]
    # ending velocity
    endvel = specdata[specdata.rfind('ending velocity'):]
    endvel = endvel[endvel.find(':')+1:endvel.find('\\n')].split()[-1]
    return (float(startvel), float(endvel))
def get_job_exe_output_vol_name(job_exe):
    """Returns the container output volume name for the given job execution

    :param job_exe: The job execution model (must not be queued) with related job and job_type fields
    :type job_exe: :class:`job.models.JobExecution`
    :returns: The container output volume name
    :rtype: string

    :raises Exception: If the job execution is still queued
    """
    cluster_id = job_exe.get_cluster_id()
    return '{}_output_data'.format(cluster_id)
def bytes2human(n, format='%(value).1f %(symbol)s', symbols='customary'):
    """
    Convert n bytes into a human readable string based on format.
    symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
    see: http://goo.gl/kTQMs
    >>> bytes2human(0)
    '0.0 B'
    >>> bytes2human(0.9)
    '0.0 B'
    >>> bytes2human(1)
    '1.0 B'
    >>> bytes2human(1.9)
    '1.0 B'
    >>> bytes2human(1024)
    '1.0 K'
    >>> bytes2human(1048576)
    '1.0 M'
    >>> bytes2human(1099511627776127398123789121)
    '909.5 Y'
    >>> bytes2human(9856, symbols="customary")
    '9.6 K'
    >>> bytes2human(9856, symbols="customary_ext")
    '9.6 kilo'
    >>> bytes2human(9856, symbols="iec")
    '9.6 Ki'
    >>> bytes2human(9856, symbols="iec_ext")
    '9.6 kibi'
    >>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
    '9.8 K/sec'
    >>> # precision can be adjusted by playing with %f operator
    >>> bytes2human(10000, format="%(value).5f %(symbol)s")
    '9.76562 K'
    """
    n = int(n)
    if n < 0:
        raise ValueError("n < 0")
    symbols = SYMBOLS[symbols]
    # Threshold per prefix symbol: 2**10, 2**20, ... in ascending order.
    thresholds = {}
    for exponent, symbol in enumerate(symbols[1:], start=1):
        thresholds[symbol] = 1 << (exponent * 10)
    # Walk from the largest prefix down and stop at the first that fits.
    for symbol in reversed(symbols[1:]):
        if n >= thresholds[symbol]:
            value = float(n) / thresholds[symbol]
            return format % locals()
    # Below the smallest threshold: report raw bytes.
    return format % dict(symbol=symbols[0], value=n)
def check_if_free(driver, available, movie_hulu_url):
    """
    Return True when the page shows a "WATCH MOVIE" button.

    Without that button the title is likely only available through a
    premium package (Starz etc.) or as a rental on Hulu.
    """
    if not available:
        return False
    driver.get(movie_hulu_url)
    sleep(3)
    for element in driver.find_elements_by_class_name("WatchAction"):
        if element.text == "WATCH MOVIE":
            return True
    return False
def scatter_add(data, indices, updates, axis=0):
    """Update data by adding values in updates at positions defined by indices
    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.
    indices : relay.Expr
        The index locations to update.
    updates : relay.Expr
        The values to be added.
    axis : int
        The axis to scatter on
    Returns
    -------
    ret : relay.Expr
        The computed result.
    """
    # Normalise a negative axis into [0, rank).
    if axis < 0:
        axis += len(data.shape)
    assert axis >= 0
    assert axis < len(data.shape)
    rank = len(data.shape)
    assert 1 <= rank <= 4, "scatter_add only supports 1-4 dimensions"
    # One IR generator per supported rank; the 1-D path uses atomics.
    ir_funcs = {
        1: gen_scatter_add_1d_atomic,
        2: gen_ir_2d,
        3: gen_ir_3d,
        4: gen_ir_4d,
    }
    def update_func(dst_ptr, dst_index, update):
        # Accumulate rather than overwrite -- this is what distinguishes
        # scatter_add from plain scatter.
        dst_ptr[dst_index] += update
    out_shape = data.shape
    out_buf = tvm.tir.decl_buffer(out_shape, data.dtype, "out_buf")
    out = te.extern(
        [out_shape],
        [data, indices, updates],
        lambda ins, outs: ir_funcs[rank](ins[0], ins[1], ins[2], axis, outs[0], update_func),
        dtype=data.dtype,
        out_buffers=[out_buf],
        name="scatter_add_gpu",
        tag="scatter_add_gpu",
    )
    return out | 641d96562700553a2ed4a5c4df323d468bba1bd8 | 22,833 |
def generate_test_linked_list(size=5, singly=False):
    """
    Generate node list for test case

    :param size: size of linked list, must be >= 1
    :type size: int
    :param singly: whether or not this linked list is singly
    :type singly: bool
    :raises ValueError: if size is less than 1
    :return: value list and generated linked list
    """
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently accept invalid sizes.
    if size < 1:
        raise ValueError("size must be >= 1")
    val_list = list(range(size))
    node_list = LinkedList(singly=singly)
    node_list.append_val_list(val_list)
    return val_list, node_list
def _bytes_to_long(bytestring, byteorder):
"""Convert a bytestring to a long
For use in python version prior to 3.2
"""
result = []
if byteorder == 'little':
result = (v << i * 8 for (i, v) in enumerate(bytestring))
else:
result = (v << i * 8 for (i, v) in enumerate(reversed(bytestring)))
return sum(result) | fcaa038b21aef2822ad7a513c28a7a2ed3c08cbc | 22,835 |
import os
def process_manifest_for_key(manifest, manifest_key, installinfo,
                             parentcatalogs=None):
    """Processes keys in manifests to build the lists of items to install and
    remove.

    Can be recursive if manifests include other manifests.
    Probably doesn't handle circular manifest references well.

    manifest can be a path to a manifest file or a dictionary object.

    Args:
        manifest: path to a manifest file, or an already-loaded manifest dict
            (when recursing into conditional_items).
        manifest_key: which manifest section to process -- one of
            'managed_installs', 'managed_updates', 'optional_installs' or
            'managed_uninstalls', as dispatched at the bottom of this
            function.
        installinfo: accumulator passed through to the process_* helpers,
            which mutate it in place.
        parentcatalogs: catalog list inherited from the including manifest;
            used only when this manifest declares no 'catalogs' of its own.

    Returns:
        None on normal completion or when the manifest has no catalogs;
        an empty dict when a stop was requested mid-run.
        NOTE(review): the mixed return types ({} vs None) look accidental --
        callers should not rely on the return value.
    """
    # NOTE(review): `basestring` exists only on Python 2; this module is
    # py2-only as written.
    if isinstance(manifest, basestring):
        display.display_debug1(
            "** Processing manifest %s for %s" %
            (os.path.basename(manifest), manifest_key))
        manifestdata = manifestutils.get_manifest_data(manifest)
    else:
        # Already a dict: rebind `manifest` to a label used only in log
        # messages below.
        manifestdata = manifest
        manifest = 'embedded manifest'
    # Resolve the catalog list: prefer this manifest's own catalogs,
    # otherwise fall back to the parent's.
    cataloglist = manifestdata.get('catalogs')
    if cataloglist:
        catalogs.get_catalogs(cataloglist)
    elif parentcatalogs:
        cataloglist = parentcatalogs
    if not cataloglist:
        display.display_warning('Manifest %s has no catalogs', manifest)
        return
    # Recurse into included manifests first so their items are processed
    # before this manifest's own items.
    for item in manifestdata.get('included_manifests', []):
        nestedmanifestpath = manifestutils.get_manifest(item)
        if not nestedmanifestpath:
            raise manifestutils.ManifestException
        if processes.stop_requested():
            return {}
        process_manifest_for_key(nestedmanifestpath, manifest_key,
                                 installinfo, cataloglist)
    conditionalitems = manifestdata.get('conditional_items', [])
    if conditionalitems:
        display.display_debug1(
            '** Processing conditional_items in %s', manifest)
    # conditionalitems should be an array of dicts
    # each dict has a predicate; the rest consists of the
    # same keys as a manifest
    for item in conditionalitems:
        try:
            predicate = item['condition']
        except (AttributeError, KeyError):
            display.display_warning(
                'Missing predicate for conditional_item %s', item)
            continue
        except BaseException:
            # Catch-all for malformed (non-mapping) conditional items so one
            # bad entry does not abort the whole run.
            display.display_warning(
                'Conditional item is malformed: %s', item)
            continue
        # Only process the conditional sub-manifest when its predicate holds.
        if info.predicate_evaluates_as_true(
                predicate, additional_info={'catalogs': cataloglist}):
            conditionalmanifest = item
            process_manifest_for_key(
                conditionalmanifest, manifest_key, installinfo, cataloglist)
    # Finally process this manifest's own items for the requested key.
    for item in manifestdata.get(manifest_key, []):
        if processes.stop_requested():
            return {}
        if manifest_key == 'managed_installs':
            dummy_result = process_install(item, cataloglist, installinfo)
        elif manifest_key == 'managed_updates':
            process_managed_update(item, cataloglist, installinfo)
        elif manifest_key == 'optional_installs':
            process_optional_install(item, cataloglist, installinfo)
        elif manifest_key == 'managed_uninstalls':
            dummy_result = process_removal(item, cataloglist, installinfo)
import contextlib
import ast
def SoS_exec(script: str, _dict: dict = None, return_result: bool = True):
    """Execute a script in the SoS namespace.

    Args:
        script: Python source code to execute.
        _dict: namespace to execute in; defaults to the global SoS dict.
        return_result: when True and the script ends with an expression,
            evaluate that trailing expression and return its value.

    Returns:
        The value of the trailing expression when ``return_result`` is True
        and the script ends with one; otherwise None.
        (The original ``-> None`` annotation was misleading and is removed.)

    Raises:
        SyntaxError: if the script cannot be parsed or compiled.
    """
    if _dict is None:
        _dict = env.sos_dict.dict()

    def _maybe_quiet():
        # At verbosity 0 suppress all stdout; otherwise do nothing.
        # Factored out -- the original duplicated this branch four times.
        if env.verbosity == 0:
            return contextlib.redirect_stdout(None)
        return contextlib.nullcontext()

    def _compile(source, mode):
        # All compilations share the same hashed filename for tracebacks.
        return compile(source, filename=stmtHash.hash(script), mode=mode)

    if not return_result:
        with _maybe_quiet():
            exec(_compile(script, "exec"), _dict)
        return None
    try:
        stmts = list(ast.iter_child_nodes(ast.parse(script)))
        if not stmts:
            return
        if isinstance(stmts[-1], ast.Expr):
            # The script ends with an expression: run everything before it,
            # then evaluate the trailing expression so its value can be
            # returned.
            if len(stmts) > 1:
                with _maybe_quiet():
                    exec(
                        _compile(
                            ast.Module(body=stmts[:-1], type_ignores=[]),
                            "exec",
                        ),
                        _dict,
                    )
            with _maybe_quiet():
                res = eval(
                    _compile(ast.Expression(body=stmts[-1].value), "eval"),
                    _dict,
                )
        else:
            # No trailing expression: execute the entire script as-is.
            with _maybe_quiet():
                exec(_compile(script, "exec"), _dict)
            res = None
    except SyntaxError as e:
        # Re-raise with the offending source included; chain the cause.
        raise SyntaxError(f"Invalid code {script}: {e}") from e
    # if check_readonly:
    #     env.sos_dict.check_readonly_vars()
    return res
def aten_dim(mapper, graph, node):
    """Build the PaddleLayers that compute the number of dimensions of a Tensor.

    TorchScript example:
        %106 : int = aten::dim(%101)
    Meaning:
        %106 (int): output, the rank (number of dimensions) of the Tensor.
        %101 (Tensor): the input Tensor.
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # List of outputs produced by this node.
    current_outputs = [output_name]
    # Register input 0 (the tensor whose rank is requested).
    mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs,
                        scope_name)
    layer_inputs = {"input": inputs_name[0]}
    # List of inputs consumed by this node.
    current_inputs = list(layer_inputs.values())

    # rank = len(shape(input)): first emit shape, then take its length.
    graph.add_layer(
        "prim.shape", inputs=layer_inputs, outputs=layer_outputs,
        scope_name=scope_name)
    graph.add_layer(
        "prim.len", inputs={"input": output_name}, outputs=layer_outputs,
        scope_name=scope_name)
    return current_inputs, current_outputs
import numpy
def calcBlockingMatrix(vs , NC = 1 ):
"""Calculate the blocking matrix for a distortionless beamformer,
and return its Hermitian transpose."""
vsize = len(vs)
bsize = vsize - NC
blockMat = numpy.zeros((vsize,bsize), numpy.complex)
# Calculate the perpendicular projection operator 'PcPerp' for 'vs'.
norm_vs = numpy.inner( vs, numpy.conjugate(vs) )
if norm_vs.real > 0.0:
PcPerp = numpy.eye(len(vs)) - numpy.outer( numpy.conjugate(vs), vs ) / norm_vs
# Do Gram-Schmidt orthogonalization on the columns of 'PcPerp'.
for idim in range(bsize):
vec = PcPerp[:,idim]
for jdim in range(idim):
rvec = blockMat[:,jdim]
ip = numpy.inner(numpy.conjugate(rvec), vec)
vec -= rvec * ip
norm_vec = numpy.sqrt( abs(numpy.inner(numpy.conjugate(vec),vec)) )
blockMat[:,idim] = vec / norm_vec
# Debugging:
#print "len",len(vs),len(blockMat),len(blockMat[0])
#print matrixmultiply(vs, blockMat)
# return numpy.conjugate(numpy.transpose(blockMat))
return blockMat | a46955771ba729a22fe18959ca6f9ac82af031b3 | 22,839 |
def plot_LA(mobile, ref, GDT_TS, GDT_HA, GDT_ndx,
            sel1="protein and name CA", sel2="protein and name CA",
            cmap="GDT_HA", **kwargs):
    """
    Create LocalAccuracy Plot (heatmap) with

      - xdata = residue ID
      - ydata = frame number
      - color = color-coded pair distance

    .. Note:: do not pass too many data points otherwise the plot will get squeezed

    Args:
        mobile (universe, atomgrp): mobile structure with trajectory
        ref (universe, atomgrp): reference structure
        GDT_TS (array): array with GDT_TS scores.
        GDT_HA (array): array with GDT_HA scores.
        GTD_ndx (array): array with corresponding index values (representative for frame numbers).
        sel1 (str): selection string of mobile structure (calculation of pair distances)
        sel2 (str): selection string of reference structure (calculation of pair distances)
        cmap (str):
          | "GDT_TS" or "TS": color map with new colors at values (0, 1, 2, 4, 8)
            and vmin, vmax = (0, 10).
          | "GDT_HA" or "HA": color map with new colors at values (0, .5, 1, 2, 4)
            and vmin, vmax = (0, 5).
          | "nucleic" or "RNA" or "DNA": color map with new colors at values (0, .5, 1, 2, 4)
            and vmin, vmax = (0, 14).
          | other cmap names: see help(plt.colormaps) or alternatively
            https://matplotlib.org/examples/color/colormaps_reference.html

    Keyword Args:
        prec (None, int):
          | rounding precision of scores
          | None: rounding off
          | int: rounding on to <int> decimals
        ndx_offset (int):
          | offset/shift of GDT_ndx to match real "mobile" frames. Defaults to 0.
          | Look up "start" parameter during execution of gdt.GDT()
        rank_num (int): plot only <rank_num> best ranked frames. Defaults to 30.
        show_cbar (bool): show/hide colorbar. Defaults to True.
        show_frames (bool): show/hide frame numbers. Defaults to False.
        show_scores (bool): show/hide GDT_TS and GDT_HA scores. Defaults to True.
        save_as (None, str): save name or realpath to save file. Defaults to None.
        cbar_ticks (None, list): color bar tick positions. Defaults to None.
        cbar_label/label (str)
        cbar_fontweight/fontweight (str): "normal", "bold"
        cbar_location/location (str): "right", "bottom", "left", "top"
        cbar_orientation/orientation (str): "horizontal", "vertical"
        cbar_min/vmin (None, int): min value of colorbar and heatmap. Gets
            overwritten by cmaps such as "GDT_TS", "GDT_HA", "RNA" etc.
        cbar_max/vmax (None, int): max value of colorbar and heatmap. Gets
            overwritten by cmaps such as "GDT_TS", "GDT_HA", "RNA" etc.
        text_pos_Frame (list): [x0, y0] position of the "Frame" text box (label)
        text_pos_TS (list): [x0, y0] position of the "TS" text box (label)
        text_pos_HA (list): [x0, y0] position of the "HA" text box (label)
        font_scale (float)

    .. Hint:: Args and Keyword of misc.figure() are also valid.

    Returns:
        fig (class)
            matplotlib.figure.Figure
        ax (class, list)
            ax or list of axes ~ matplotlib.axes._subplots.Axes
        LA_data (tuple)
          | LA_data[0]: PairDistances (list)
          | LA_data[1]: Frames (list)

    Example:
      | # obtain data
      | >> GDT = gdt.GDT(mobile, ref, sss=[None,None,None])
      | >> GDT_percent, GDT_resids, GDT_cutoff, RMSD, FRAME = GDT
      |
      | # rank data
      | >> SCORES = gdt.GDT_rank_scores(GDT_percent, ranking_order="GDT_HA")
      | >> GDT_TS_ranked, GDT_HA_ranked, GDT_ndx_ranked = SCORES
      |
      | # edit text box positions of labels "Frame", "TS", "HA"
      | >> text_pos_kws = {"text_pos_Frame": [-8.8, -0.3],
      |                    "text_pos_TS": [-4.2, -0.3],
      |                    "text_pos_HA": [-1.9, -0.3]}
      |
      | # plot
      | >> gdt.plot_LA(mobile, ref, SCORES[0], SCORES[1], SCORES[2], **text_pos_kws)
    """
    # init CONFIG object with default parameter and overwrite them if kwargs contain the same keywords.
    default = {"figsize": (7.5, 6),
               "font_scale": 1.2,
               "ndx_offset": 0,
               "rank_num": 30,
               "show_cbar": True,
               "show_frames": False,
               "show_scores": True,
               "save_as": None,
               "prec": 2,
               "cmap": cmap,
               "cbar_ticks": None,
               "cbar_label": r"mobile-reference CA-CA distances ($\AA$)",
               "cbar_fontweight": "bold",
               "cbar_location": 'right',
               "cbar_orientation": 'vertical',
               "cbar_min": None,
               "cbar_max": None,
               "vmin": None,
               "vmax": None,
               "text_pos_Frame": [-8.8, -0.3],
               "text_pos_TS": [-3.8, -0.3],
               "text_pos_HA": [-1.7, -0.3]}
    cfg = _misc.CONFIG(default, **kwargs)
    cfg.update_by_alias(alias="label", key="cbar_label", **kwargs)
    cfg.update_by_alias(alias="fontweight", key="cbar_fontweight", **kwargs)
    cfg.update_by_alias(alias="location", key="cbar_location", **kwargs)
    cfg.update_by_alias(alias="orientation", key="cbar_orientation", **kwargs)
    cfg.update_by_alias(alias="vmin", key="cbar_min", **kwargs)
    cfg.update_by_alias(alias="vmax", key="cbar_max", **kwargs)
    ############################################################################
    ### load data
    PAIR_DISTANCES = []
    FRAMES = [i+cfg.ndx_offset for i in GDT_ndx[:cfg.rank_num]]
    for ts in mobile.trajectory[FRAMES]:
        PD, *_ = get_Pair_Distances(mobile, ref, sel1=sel1, sel2=sel2)
        PAIR_DISTANCES.append(PD)
    # BUG FIX: use identity comparison for None (PEP 8); behavior unchanged.
    if cfg.prec is not None and cfg.prec != -1:
        GDT_TS = np.around(GDT_TS[: cfg.rank_num], cfg.prec)
        GDT_HA = np.around(GDT_HA[: cfg.rank_num], cfg.prec)

    xticks = mobile.select_atoms(sel1).residues.resids
    xticks = [x if x % 5 == 0 else "." for x in xticks]
    xticklabels = xticks

    if cfg.show_frames and cfg.show_scores:
        yticks = [f"{FRAMES[i]:>9}{GDT_TS[i]:>10.2f}{GDT_HA[i]:>8.2f} " if GDT_TS[i] != 100 else
                  f"{FRAMES[i]:>9}{GDT_TS[i]:>9.2f}{GDT_HA[i]:>8.2f} " for i in range(len(FRAMES))]
    elif cfg.show_frames:
        yticks = FRAMES
    elif cfg.show_scores:
        yticks = [f"{GDT_TS[i]:>10.2f}{GDT_HA[i]:>8.2f} " if GDT_TS[i] != 100 else
                  f"{GDT_TS[i]:>9.2f}{GDT_HA[i]:>8.2f} " for i in range(len(FRAMES))]
    else:
        # BUG FIX: yticks was unbound (NameError) when both show_frames and
        # show_scores are False; hide the labels in that case.
        yticks = False
    yticklabels = yticks
    ############################################################################
    ### heatmap/cbar settings
    cmap_GDT = ["lightblue", "lightgreen", "yellow", "yellow", "orange", "orange",
                "orange", "orange", "red", "red"]
    cmap_RNA = ["lightblue", "lightblue", "lightgreen", "lightgreen",
                "yellow", "yellow", "orange", "orange", "red", "red"]

    # apply color bar limits if passed (vmin and vmax have higher priority than cbar_min and cbar_max)
    if cfg.cbar_min is not None:
        cfg.vmin = cfg.cbar_min
    if cfg.cbar_max is not None:
        cfg.vmax = cfg.cbar_max
    # if no limits passed: apply pre-defined limits
    if cfg.cmap in ["GDT_HA", "HA"]:
        if cfg.vmin is None:
            cfg.vmin = 0.0
        if cfg.vmax is None:
            cfg.vmax = 5.0
    elif cfg.cmap in ["GDT_TS", "TS"]:
        if cfg.vmin is None:
            cfg.vmin = 0.0
        if cfg.vmax is None:
            cfg.vmax = 10.0
    elif cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
        if cfg.vmin is None:
            cfg.vmin = 0.0
        if cfg.vmax is None:
            cfg.vmax = 14.0
    ############################################################################
    ### plot
    fig, ax = _misc.figure(**cfg)
    if cfg.show_cbar:
        cbar_ax = _misc.add_cbar_ax(ax, location=cfg.cbar_location,
                                    orientation=cfg.cbar_orientation)
        cbar_kws = {'orientation': cfg.cbar_orientation}
    else:
        cbar_ax = None
        cbar_kws = dict()

    # Resolve the actual colormap once instead of triplicating the
    # sns.heatmap call (the three branches differed only in `cmap`).
    if cfg.cmap in ["GDT_TS", "TS", "GDT_HA", "HA"]:
        heat_cmap = cmap_GDT
    elif cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
        heat_cmap = cmap_RNA
    else:
        heat_cmap = cfg.cmap
    hm = sns.heatmap(PAIR_DISTANCES, cmap=heat_cmap, vmin=cfg.vmin, vmax=cfg.vmax,
                     xticklabels=xticklabels, yticklabels=yticklabels,
                     square=False, annot=False, linewidths=1.0,
                     ax=ax, cbar_ax=cbar_ax, cbar_kws=cbar_kws, cbar=cfg.show_cbar)

    if cfg.show_cbar:
        cbar = hm.collections[0].colorbar
        cbar.set_label(label=cfg.cbar_label, fontweight=cfg.cbar_fontweight)
        _misc.cbar_set_ticks_position(cbar, cfg.cbar_location)
        if cfg.cbar_ticks is None and cfg.cmap in ["nucleic", "rna", "dna", "RNA", "DNA"]:
            cbar.set_ticks(np.arange(0, 22, 2))
        if cfg.cbar_ticks is not None:
            cbar.set_ticks(cfg.cbar_ticks)

    ax.tick_params(left=False, bottom=False)  # hide ticks of heatmap
    plt.title("Local Accuracy", fontweight='bold')
    plt.xlabel("Residue ID", fontweight='bold')

    # table labels
    if cfg.show_frames:
        ax.text(cfg.text_pos_Frame[0], cfg.text_pos_Frame[1], 'Frame', fontweight='bold')
    if cfg.show_scores:
        ax.text(cfg.text_pos_TS[0], cfg.text_pos_TS[1], 'TS', fontweight='bold')
        ax.text(cfg.text_pos_HA[0], cfg.text_pos_HA[1], 'HA', fontweight='bold')

    # BUG FIX: plt.tight_layout() was called twice in a row; once suffices.
    plt.tight_layout()

    if cfg.save_as is not None:
        _misc.savefig(cfg.save_as)
    if len(FRAMES) > 50:
        print("Displaying data for more than 50 frames...")
        print("Consider reducing the input data (e.g. rank scores and use top 40 frames).")

    LA_data = (PAIR_DISTANCES, FRAMES)
    return fig, ax, LA_data
import os
import pickle
def calculate_psfs(output_prefix):
    """Tune a family of comparable line-STED vs. point-STED psfs.

    Results are cached in <output_prefix>/psf_comparisons.pkl: when the cache
    exists it is loaded instead of recomputing, and the (possibly freshly
    computed) comparisons are always written back.

    Args:
        output_prefix: directory for the cache file (created if missing).

    Returns:
        (psfs, comparisons) where `psfs` maps descriptive names to PSF
        arrays and `comparisons` holds the full comparison-pair records.
    """
    comparison_filename = os.path.join(output_prefix, 'psf_comparisons.pkl')
    if os.path.exists(comparison_filename):
        print("Loading saved PSF comparisons...")
        # BUG FIX: use a context manager so the file handle is closed
        # (the original leaked the handle from pickle.load(open(...))).
        with open(comparison_filename, 'rb') as comparison_file:
            comparisons = pickle.load(comparison_file)
    else:
        comparisons = {}
        # Yes, I really did tune all the parameters below by hand so the
        # comparisons came out perfectly. Ugh.
        comparisons['1p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=0.99, #Juuust under 1, ensures no STED
            line_resolution_improvement=0.99,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=4,
            line_scan_type='descanned',
            line_num_orientations=1,
            max_excitation_brightness=0.01) # Without STED, no reason to saturate
        comparisons['1p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=0.99, #Juuust under 1, ensures no STED
            line_resolution_improvement=1.38282445,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=4,
            line_scan_type='rescanned',
            line_num_orientations=2,
            max_excitation_brightness=0.01) # Without STED, no reason to saturate
        comparisons['1p5x_ld'] = psf_comparison_pair(
            point_resolution_improvement=1.5,
            line_resolution_improvement=2.68125,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=2.825,
            line_scan_type='descanned',
            line_num_orientations=3)
        comparisons['1p5x_lr'] = psf_comparison_pair(
            point_resolution_improvement=1.5,
            line_resolution_improvement=2.95425,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=2.618,
            line_scan_type='rescanned',
            line_num_orientations=3)
        comparisons['2p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=2,
            line_resolution_improvement=4.04057,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.007,
            line_scan_type='descanned',
            line_num_orientations=4)
        comparisons['2p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=2,
            line_resolution_improvement=4.07614,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.0227,
            line_scan_type='rescanned',
            line_num_orientations=4)
        comparisons['2p5x_ld'] = psf_comparison_pair(
            point_resolution_improvement=2.5,
            line_resolution_improvement=5.13325,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.792,
            line_scan_type='descanned',
            line_num_orientations=6)
        comparisons['2p5x_lr'] = psf_comparison_pair(
            point_resolution_improvement=2.5,
            line_resolution_improvement=5.15129,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=3.8,
            line_scan_type='rescanned',
            line_num_orientations=6)
        comparisons['3p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=3,
            line_resolution_improvement=5.94563,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=5.034,
            line_scan_type='descanned',
            line_num_orientations=8)
        comparisons['3p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=3,
            line_resolution_improvement=5.95587,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=5.0385,
            line_scan_type='rescanned',
            line_num_orientations=8)
        comparisons['4p0x_ld'] = psf_comparison_pair(
            point_resolution_improvement=4,
            line_resolution_improvement=7.8386627,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=7.371,
            line_scan_type='descanned',
            line_num_orientations=10)
        comparisons['4p0x_lr'] = psf_comparison_pair(
            point_resolution_improvement=4,
            line_resolution_improvement=7.840982,
            point_emissions_per_molecule=4,
            line_emissions_per_molecule=7.37195,
            line_scan_type='rescanned',
            line_num_orientations=10)
        print("Done calculating PSFs.\n")
    # Always (re)write the cache, as in the original control flow.
    os.makedirs(output_prefix, exist_ok=True)  # race-free directory creation
    with open(comparison_filename, 'wb') as comparison_file:
        pickle.dump(comparisons, comparison_file)
    print("Light dose (saturation units):")
    for c in sorted(comparisons.keys()):
        print("%s point-STED:%6s (excitation),%9s (depletion)"%(
            c,
            "%0.2f"%(comparisons[c]['point']['excitation_dose']),
            "%0.2f"%(comparisons[c]['point']['depletion_dose'])))
        print("%7s-line-STED:%6s (excitation),%9s (depletion)"%(
            c + '%3s'%('%i'%len(comparisons[c]['line_sted_psfs'])),
            "%0.2f"%(comparisons[c]['line']['excitation_dose']),
            "%0.2f"%(comparisons[c]['line']['depletion_dose'])))
    psfs = {}
    for c in comparisons.keys():
        psfs[c + '_point_sted'] = comparisons[c]['point_sted_psf']
        psfs[c + '_line_%i_angles_sted'%len(comparisons[c]['line_sted_psfs'])
             ] = comparisons[c]['line_sted_psfs']
    return psfs, comparisons
def simulateGVecs(pd, detector_params, grain_params,
                  ome_range=[(-np.pi, np.pi), ],
                  ome_period=(-np.pi, np.pi),
                  eta_range=[(-np.pi, np.pi), ],
                  panel_dims=[(-204.8, -204.8), (204.8, 204.8)],
                  pixel_pitch=(0.2, 0.2),
                  distortion=None):
    """
    returns valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps

    Simulate the monochromatic scattering for a specified

      - space group
      - wavelength
      - orientation
      - strain
      - position
      - detector parameters
      - oscillation axis tilt (chi)

    subject to

      - omega (oscillation) ranges (list of (min, max) tuples)
      - eta (azimuth) ranges

    Parameters:
        pd: a hexrd.crystallography.PlaneData instance
        detector_params: a (10,) ndarray containing the tilt angles (3),
            translation (3), chi (1), and sample frame translation
            (3) parameters
        grain_params: a (12,) ndarray containing the exponential map (3),
            translation (3), and inverse stretch tensor compnents
            in Mandel-Voigt notation (6).
        ome_range, eta_range: lists of (min, max) angular windows that a
            reflection must fall inside to be kept
        ome_period: period used to map simulated omega angles
        panel_dims: [(xmin, ymin), (xmax, ymax)] in mm
        pixel_pitch: [row_size, column_size] in mm
        distortion: optional detector distortion applied during projection

    Returns:
        valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps -- all empty
        lists when no reflection survives the angular filters.

    * currently only one panel is supported, but this will likely change soon

    NOTE(review): the mutable default arguments (lists) are shared across
    calls; they appear to be read-only here, but confirm before relying
    on it.
    """
    bMat = pd.latVecOps['B']
    wlen = pd.wavelength
    full_hkls = _fetch_hkls_from_planedata(pd)

    # extract variables for convenience
    rMat_d = xfcapi.makeDetectorRotMat(detector_params[:3])
    tVec_d = np.ascontiguousarray(detector_params[3:6])
    chi = detector_params[6]
    tVec_s = np.ascontiguousarray(detector_params[7:10])
    rMat_c = xfcapi.makeRotMatOfExpMap(grain_params[:3])
    tVec_c = np.ascontiguousarray(grain_params[3:6])
    vInv_s = np.ascontiguousarray(grain_params[6:12])

    # first find valid G-vectors
    angList = np.vstack(
        xfcapi.oscillAnglesOfHKLs(
            full_hkls[:, 1:], chi, rMat_c, bMat, wlen, vInv=vInv_s
        )
    )
    # keep only reflections inside the requested eta/omega windows
    allAngs, allHKLs = _filter_hkls_eta_ome(
        full_hkls, angList, eta_range, ome_range
    )

    if len(allAngs) == 0:
        # nothing survived the angular filters; return empty results
        valid_ids = []
        valid_hkl = []
        valid_ang = []
        valid_xy = []
        ang_ps = []
    else:
        # ??? preallocate for speed?
        det_xy, rMat_s, on_plane = _project_on_detector_plane(
            allAngs,
            rMat_d, rMat_c, chi,
            tVec_d, tVec_c, tVec_s,
            distortion
        )
        # clip predicted spots to the physical panel extents
        on_panel_x = np.logical_and(
            det_xy[:, 0] >= panel_dims[0][0],
            det_xy[:, 0] <= panel_dims[1][0]
        )
        on_panel_y = np.logical_and(
            det_xy[:, 1] >= panel_dims[0][1],
            det_xy[:, 1] <= panel_dims[1][1]
        )
        on_panel = np.logical_and(on_panel_x, on_panel_y)
        #
        op_idx = np.where(on_panel)[0]
        #
        valid_ang = allAngs[op_idx, :]
        # map omega into the requested period
        valid_ang[:, 2] = xfcapi.mapAngle(valid_ang[:, 2], ome_period)
        valid_ids = allHKLs[op_idx, 0]
        valid_hkl = allHKLs[op_idx, 1:]
        valid_xy = det_xy[op_idx, :]
        ang_ps = angularPixelSize(valid_xy, pixel_pitch,
                                  rMat_d, rMat_s,
                                  tVec_d, tVec_s, tVec_c,
                                  distortion=distortion)
    return valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps
import logging
def get_logger(name: str):
    """Get logger call.

    Args:
        name (str): Module name

    Returns:
        Logger: Return Logger object
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # BUG FIX: logging.getLogger returns the same object for the same name,
    # so an unguarded addHandler attached duplicate handlers (and duplicated
    # every record) when this was called more than once per module.
    if not logger.handlers:
        logger.addHandler(get_file_handler())
        logger.addHandler(get_stream_handler())
    return logger
def create_label_colormap(dataset=_PASCAL):
    """Creates a label colormap for the specified dataset.

    Args:
        dataset: The colormap used in the dataset.

    Returns:
        A numpy array of the dataset colormap.

    Raises:
        ValueError: If the dataset is not supported.
    """
    # Dispatch table instead of an if/elif chain.
    colormap_builders = {
        _PASCAL: create_pascal_label_colormap,
        _CITYSCAPES: create_cityscapes_label_colormap,
        _ADE: create_ade_label_colormap,
    }
    builder = colormap_builders.get(dataset)
    if builder is None:
        raise ValueError('Unsupported dataset.')
    return builder()
def SDM_lune(params, dvals, title=None, label_prefix='ham='):
    """Exact calculation for SDM circle intersection. For some reason mine is a slight upper bound on the results found in the book. Uses a proof from Appendix B of the SDM book (Kanerva, 1988). Difference is neglible when norm=True."""
    intersection = expected_intersection_lune(
        params.n, dvals, params.hamm_radius, params.r)
    if params.plot_lines:
        plot_line(dvals, intersection, label_prefix,
                  params.hamm_radius, params.norm)
    if params.fit_beta_and_plot_attention:
        fit_beta_res, beta = fit_beta_regression(params.n, dvals, intersection)
        plot_line(dvals, fit_beta_res, 'fit_beta | ' + label_prefix,
                  params.hamm_radius, params.norm)
    # If no title is given, the caller can label the plot separately.
    if title:
        label_plot(title, params.norm)
    return intersection
def font_match(obj):
    """Match the given input against the available font type matchers.

    Args:
        obj: path to file, bytes or bytearray.

    Returns:
        Type instance if matches. Otherwise None.

    Raises:
        TypeError: if obj is not a supported type.
    """
    return match(obj, font_matchers)
def process_image(debug=False):
    """Processes an image by:
        -> sending the file to Vision Azure api
        -> returns a dictionary containing the caption
        and confidence level associated with that image
        TODO implement

    NOTE(review): as written this raises NameError -- `response` and
    `caption_response` are not defined in this scope; presumably they are
    meant to come from the Azure Vision call the TODO refers to. Also,
    status 402 (Payment Required) looks like a typo for 400 (Bad Request)
    -- confirm before shipping.
    """
    # get the json data
    json_data = response.json()  # NOTE(review): `response` is undefined here
    if json_data is None or type(json_data) is not list:
        return jsonify({'error': 'Not a JSON'}), 402
    return jsonify(caption_response.to_dict())  # NOTE(review): `caption_response` is undefined here
import six
def time_monotonically_increases(func_or_granularity):
    """
    Decorate a unittest method with this function to cause the value
    of :func:`time.time` and :func:`time.gmtime` to monotonically
    increase by one each time it is called. This ensures things like
    last modified dates always increase.

    We make three guarantees about the value of :func:`time.time`
    returned while the decorated function is running:

    1. It is always *at least* the value of the *real*
       :func:`time.time`;
    2. Each call returns a value greater than the previous call;
    3. Those two constraints hold across different invocations of
       functions decorated.

    This decorator can be applied to a method in a test case::

        class TestThing(unittest.TestCase)
            @time_monotonically_increases
            def test_method(self):
                t = time.time()
                ...

    It can also be applied to a bare function taking any number of
    arguments::

        @time_monotonically_increases
        def utility_function(a, b, c=1):
            t = time.time()
            ...

    By default, the time will be incremented in 1.0 second intervals.
    You can specify a particular granularity as an argument; this is
    useful to keep from running too far ahead of the real clock::

        @time_monotonically_increases(0.1)
        def smaller_increment():
            t1 = time.time()
            t2 = time.time()
            assert t2 == t1 + 0.1
    """
    if isinstance(func_or_granularity, (six.integer_types, float)):
        # Used as a decorator factory: @time_monotonically_increases(0.1)
        return _TimeWrapper(func_or_granularity)
    # Used bare: @time_monotonically_increases -- wrap with the default
    # granularity immediately.
    return _TimeWrapper()(func_or_granularity)
def rotate_coordinates(coords: np.ndarray, axis_coords: np.ndarray) -> np.ndarray:
    """Rotate `coords` into the principal axis frame.

    Given a set of coordinates and the eigenvectors of the principal
    moments of inertia tensor, use the scipy `Rotation` class to rotate
    the coordinates into the principal axis frame.

    Parameters
    ----------
    coords : np.ndarray
        NumPy 1D array containing xyz coordinates
    axis_coords : np.ndarray
        NumPy 2D array (shape 3x3) containing the principal axis
        vectors

    Returns
    -------
    np.ndarray
        NumPy 1D array containing the rotated coordinates.
    """
    # Build a Rotation from the eigenvector matrix, then apply it.
    rotation = R.from_matrix(axis_coords)
    return rotation.apply(coords)
from typing import Type
from typing import Any
def get_maggy_ddp_wrapper(module: Type[TorchModule]):
    """Factory function for MaggyDDPModuleWrapper.

    :param module: PyTorch module passed by the user.

    :returns: A wrapper class that, when instantiated with the user's
        constructor arguments, builds ``module``, moves it to the GPU and
        wraps it in DistributedDataParallel.
    """

    class MaggyDDPModuleWrapper(TorchDistributedDataParallel):
        """Wrapper around PyTorch's DDP Module.

        The wrapper replaces the user's module. Since the module's signature needs to be preserved,
        we cannot add the module as an additional parameter during initialization. Instead, it is
        configured by its factory function.
        """

        # Class-level reference to the wrapped module type, captured from the
        # factory closure. The double underscore name-mangles the attribute so
        # it cannot clash with torch.nn attributes.
        __module = module  # Avoid overwriting torch module

        def __init__(self, *args: Any, **kwargs: Any):
            """Initializes the previously set module, moves it to the GPU and initializes a DDP
            module with it.

            :param args: Arguments passed by the user for module initialization.
            :param kwargs: Keyword arguments passed by the user for module initialization.
            """
            # Avoid self because bound method adds to args which makes the function call fail
            model = MaggyDDPModuleWrapper.__module(*args, **kwargs).cuda()
            super().__init__(model)

    return MaggyDDPModuleWrapper
def random_jitter(cv_img, saturation_range, brightness_range, contrast_range):
    """Randomly jitter image saturation, brightness and contrast.

    A ratio is drawn uniformly from each adjustment range and the three
    effects are applied in a random order.

    Args:
        cv_img(numpy.ndarray): input image
        saturation_range(float): saturation adjustment range, 0-1
        brightness_range(float): brightness adjustment range, 0-1
        contrast_range(float): contrast adjustment range, 0-1

    Returns:
        Image with saturation, brightness and contrast adjusted.
    """
    saturation_ratio = np.random.uniform(-saturation_range, saturation_range)
    brightness_ratio = np.random.uniform(-brightness_range, brightness_range)
    contrast_ratio = np.random.uniform(-contrast_range, contrast_range)
    # BUG FIX: the effect codes compared below are 0, 1 and 2, but the list
    # used to be [1, 2, 3] -- so saturation jitter was never applied and one
    # shuffled slot (3) matched nothing.
    order = [0, 1, 2]
    np.random.shuffle(order)
    for effect in order:
        if effect == 0:
            cv_img = saturation_jitter(cv_img, saturation_ratio)
        elif effect == 1:
            cv_img = brightness_jitter(cv_img, brightness_ratio)
        elif effect == 2:
            cv_img = contrast_jitter(cv_img, contrast_ratio)
    return cv_img
from typing import Union
def render_orchestrator_inputs() -> Union[Driver, None]:
    """Render the sidebar form collecting orchestrator connection metadata
    and assemble a Synergos Driver object for subsequent use.

    Returns:
        A connected Synergos Driver (Driver), or None when no valid
        connection can be established (which ensures rendering of
        unpopulated widgets).
    """
    with st.sidebar.beta_container():
        st.header("NETWORK")
        with st.beta_expander("Orchestrator Parameters", expanded=True):
            host = st.text_input(
                label="Orchestrator IP:",
                help="Declare the server IP of your selected orchestrator."
            )
            port = st.number_input(
                label="Orchestrator Port:",
                value=5000,
                help="Declare the access port of your selected orchestrator."
            )
    if not is_connection_valid(host=host, port=port):
        return None
    return Driver(host=host, port=port)
import logging
def json_cache_wrapper(func, intf, cache_file_ident):
    """
    Wrapper for saving/restoring rpc-call results inside cache files.

    Args:
        func: the rpc-call function whose result should be cached.
        intf: interface object providing ``config.cache_dir``.
        cache_file_ident: identifier used to build the cache file name.

    Returns:
        A callable with the same signature as ``func`` that serves results
        from the JSON cache when possible, otherwise calls ``func`` and
        (best-effort) saves the result.
    """
    def json_call_wrapper(*args, **kwargs):
        cache_file = intf.config.cache_dir + '/insight_dash_' + cache_file_ident + '.json'
        # Best effort: serve from cache when a readable, parseable file
        # exists. BUG FIX: use a context manager (the handle was leaked) and
        # catch specific errors instead of a bare `except:` that also
        # swallowed KeyboardInterrupt/SystemExit.
        try:
            with open(cache_file) as cache_fp:
                j = simplejson.load(cache_fp)
            logging.debug('Loaded data from existing cache file: ' + cache_file)
            return j
        except (OSError, ValueError):
            # Missing, unreadable or corrupt cache: fall through to the call.
            pass
        # if not found in cache, call the original function
        j = func(*args, **kwargs)
        try:
            with open(cache_file, 'w') as cache_fp:
                simplejson.dump(j, cache_fp)
        except Exception:
            # Saving is best-effort; the fresh result is still returned.
            logging.exception('Cannot save data to a cache file')
        return j
    return json_call_wrapper
def type_assert_dict(
        d,
        kcls=None,
        vcls=None,
        allow_none: bool=False,
        cast_from=None,
        cast_to=None,
        dynamic=None,
        objcls=None,
        ctor=None,
        desc: str=None,
        false_to_none: bool=False,
        check=None,
):
    """ Type-assert every key/value pair in @d.

    Keys are checked against @kcls (JSON only allows str keys) and values
    against @vcls; dict values are unmarshalled into @vcls instances when
    @vcls is a class type.

    Args:
        d: The dict to type assert
        kcls: Class to assert for keys, or None to skip key checks
        vcls: Class to assert for values, or None to skip value checks
        allow_none: Allow None for values (does not apply to keys)
        cast_from: type or tuple of types; values of this type are cast
            to @cast_to
        cast_to: type to cast matching values to (defaults to @vcls)
        dynamic: dynamic default returned in place of @d when @d is None
        objcls: optional type @d itself must be (e.g. dict); note that
            collections.OrderedDict is an instance of dict
        ctor: optional static method used as constructor instead of
            __init__
        desc: optional human-readable description of this field
        false_to_none: True to coerce falsey values ("", 0, [], ...) to
            None
        check: optional single-argument predicate for values, e.g.
            lambda x: 0 <= x < 256

    Returns:
        A rebuilt mapping of the same type as @d (may be a performance
        concern for very large dicts)

    Raises:
        TypeError: on a key not matching @kcls or a value not matching
            @vcls
        ValueError: when @check rejects a value
    """
    _check_dstruct(d, objcls)
    if d is None and dynamic is not None:
        d = dynamic

    def _assert_key(key):
        # Keys are only validated when a key class was supplied.
        return _check(key, kcls) if kcls else key

    def _assert_val(val):
        # Values are only validated/converted when a value class was supplied.
        if not vcls:
            return val
        return _check(
            val,
            vcls,
            allow_none,
            cast_from,
            cast_to,
            ctor=ctor,
            false_to_none=false_to_none,
            check=check,
        )

    # Rebuild using the concrete mapping type of @d so e.g. OrderedDict
    # round-trips as OrderedDict.
    return type(d)(
        (_assert_key(k), _assert_val(v)) for k, v in d.items()
    )
def get_missing_ids(raw, results):
    """
    Compare cached results with overall expected IDs, return missing ones.
    Returns a set.
    """
    expected = set(raw)
    cached = set(results)
    # NOTE: the printed "missing" figure is the difference of the counts,
    # which matches len(expected - cached) only when cached is a subset.
    print("There are {0} IDs in the dataset, we already have {1}. {2} are missing.".format(len(expected), len(cached), len(expected) - len(cached)))
    return expected - cached
import collections
def _spaghettinet_edgetpu_s():
  """Architecture definition for SpaghettiNet-EdgeTPU-S.

  Builds an ordered graph of named nodes: a stem ('s0'), a backbone chain
  ('n0'..'n5') of increasing level, and a set of cross-connection/fusion
  nodes ('c0n0'..'c0n5') that resample and merge backbone features. The
  'c0n1'..'c0n5' nodes form the multi-level feature outputs.

  NOTE(review): the positional arguments of IbnOp/IbnFusedGrouped/SepConvOp
  (kernel size, expansion/group factors, stride, residual flag) follow their
  definitions elsewhere in this file -- confirm there before editing values.

  Returns:
    SpaghettiNodeSpecs holding the node graph and output node names.
  """
  nodes = collections.OrderedDict()
  # Feature-map outputs consumed downstream (levels 4..8).
  outputs = ['c0n1', 'c0n2', 'c0n3', 'c0n4', 'c0n5']
  # Stem: initial convolutional block.
  nodes['s0'] = SpaghettiStemNode(kernel_size=5, num_filters=24)
  # Backbone: linear chain n0 -> n5, each node fed by its predecessor.
  nodes['n0'] = SpaghettiNode(
      num_filters=48,
      level=2,
      layers=[
          IbnFusedGrouped(3, 8, 2, 3, False),
      ],
      edges=[SpaghettiPassthroughEdge(input='s0')])
  nodes['n1'] = SpaghettiNode(
      num_filters=64,
      level=3,
      layers=[
          IbnFusedGrouped(3, 4, 2, 4, False),
          IbnFusedGrouped(3, 4, 1, 4, True),
          IbnFusedGrouped(3, 4, 1, 4, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n0')])
  nodes['n2'] = SpaghettiNode(
      num_filters=72,
      level=4,
      layers=[
          IbnOp(3, 8, 2, False),
          IbnFusedGrouped(3, 8, 1, 4, True),
          IbnOp(3, 8, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n1')])
  nodes['n3'] = SpaghettiNode(
      num_filters=88,
      level=5,
      layers=[
          IbnOp(3, 8, 2, False),
          IbnOp(3, 8, 1, True),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n2')])
  nodes['n4'] = SpaghettiNode(
      num_filters=88,
      level=6,
      layers=[
          IbnOp(3, 8, 2, False),
          SepConvOp(5, 1, True),
          SepConvOp(5, 1, True),
          SepConvOp(5, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n3')])
  nodes['n5'] = SpaghettiNode(
      num_filters=88,
      level=7,
      layers=[
          SepConvOp(5, 2, False),
          SepConvOp(3, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='n4')])
  # Cross-connections: fuse multiple backbone levels via resampling edges.
  nodes['c0n0'] = SpaghettiNode(
      num_filters=144,
      level=5,
      layers=[
          IbnOp(3, 4, 1, False),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[
          SpaghettiResampleEdge(input='n3'),
          SpaghettiResampleEdge(input='n4')
      ])
  nodes['c0n1'] = SpaghettiNode(
      num_filters=120,
      level=4,
      layers=[
          IbnOp(3, 8, 1, False),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
          IbnOp(3, 4, 1, True),
      ],
      edges=[
          SpaghettiResampleEdge(input='n2'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n2'] = SpaghettiNode(
      num_filters=168,
      level=5,
      layers=[
          IbnOp(3, 4, 1, False),
      ],
      edges=[
          SpaghettiResampleEdge(input='c0n1'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n3'] = SpaghettiNode(
      num_filters=136,
      level=6,
      layers=[
          IbnOp(3, 4, 1, False),
          SepConvOp(3, 1, True),
      ],
      edges=[
          SpaghettiResampleEdge(input='n5'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n4'] = SpaghettiNode(
      num_filters=136,
      level=7,
      layers=[
          IbnOp(3, 4, 1, False),
      ],
      edges=[
          SpaghettiResampleEdge(input='n5'),
          SpaghettiResampleEdge(input='c0n0')
      ])
  nodes['c0n5'] = SpaghettiNode(
      num_filters=64,
      level=8,
      layers=[
          SepConvOp(3, 1, False),
          SepConvOp(3, 1, True),
      ],
      edges=[SpaghettiPassthroughEdge(input='c0n4')])
  node_specs = SpaghettiNodeSpecs(nodes=nodes, outputs=outputs)
  return node_specs
def instance_of(type):
    """
    A validator that raises a :exc:`TypeError` if the initializer is called
    with a wrong type for this particular attribute (checks are performed using
    :func:`isinstance` therefore it's also valid to pass a tuple of types).
    :param type: The type to check for.
    :type type: type or tuple of types
    The :exc:`TypeError` is raised with a human readable error message, the
    attribute (of type :class:`attr.Attribute`), the expected type, and the
    value it got.
    """
    # The parameter deliberately shadows the builtin `type` to keep the public
    # keyword name; the actual isinstance check lives in _InstanceOfValidator
    # (defined elsewhere in this module).
    return _InstanceOfValidator(type)
import subprocess
import os
def execute_command(command):
    """
    Execute a command (split on whitespace, no shell) and collect its output.

    Background commands (containing "nohup" or "\&") have their output
    redirected to result.log, which is read back after the process exits.

    Args:
        command (str): command line to run; split with str.split().

    Returns:
        (status, result): the process exit code and its combined
        stdout/stderr as text.
    """
    # NOTE(review): the "\&" test matches a literal backslash-ampersand;
    # confirm whether a plain "&" was intended.
    background = ("nohup" in command) or ("\&" in command)
    if background:
        # Redirect output to a log file; the `with` block guarantees the
        # file handle is closed (the original leaked it).
        with open('result.log', 'w') as log_file:
            child = subprocess.Popen(command.split(), shell=False,
                                     stdout=log_file, stderr=log_file,
                                     preexec_fn=os.setpgrp,
                                     universal_newlines=True)
            child.communicate()
        status = child.returncode
        _, result = execute_command("cat result.log")
        err = ""
    else:
        # Text mode (universal_newlines) keeps result/err as str, avoiding
        # the bytes+str TypeError of the original implementation.
        child = subprocess.Popen(command.split(), shell=False,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 preexec_fn=os.setpgrp,
                                 universal_newlines=True)
        result, err = child.communicate()
        status = child.returncode
    if status != 0:
        LOG_ERROR("=== execute " + command + " failed, information = " + result + " error information:" + err)
    if err:
        result += err
    return (status, result)
def ass(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#ass"""
    # Delegates to the generic filter helper; the ffmpeg filter name is taken
    # from this function's own __name__ ("ass").
    return filter(stream, ass.__name__, *args, **kwargs)
def resultcallback(group):
    """Compatibility layer for Click 7 and 8."""
    # Click 8 renamed `resultcallback` to `result_callback`; prefer the new
    # attribute when it exists and is populated.
    callback_factory = getattr(group, "result_callback", None)
    if callback_factory is None:
        # Click < 8.0
        callback_factory = group.resultcallback
    return callback_factory()
from licensedcode import cache
def get_license_matches(location=None, query_string=None):
    """
    Return a sequence of LicenseMatch objects.
    """
    # NOTE(review): a falsy query_string short-circuits even when a location
    # is supplied -- confirm location-only matching is intentionally disabled.
    if query_string:
        idx = cache.get_index()
        return idx.match(location=location, query_string=query_string)
    return []
def compute_corrector_prf(results, logger, on_detected=True):
    """
    Compute detection and correction precision/recall/F1 for a corrector.

    Args:
        results: iterable of (src, tgt, predict, d_predict) tuples --
            source text, gold text, corrector output, detector output
            (d_predict is unpacked but unused here). Assumes src, tgt and
            predict are index-aligned (equal length) -- TODO confirm.
        logger: passed through to report_prf for metric reporting.
        on_detected: if True, correction F1 is computed only over
            correctly detected positions; otherwise the common
            (end-to-end) correction F1 is returned as correction_f1.

    Returns:
        (detection_f1, correction_f1, details) where details maps metric
        names to detection/correction precision, recall and F1 values.

    References:
        https://github.com/sunnyqiny/
        Confusionset-guided-Pointer-Networks-for-Chinese-Spelling-Check/blob/master/utils/evaluation_metrics.py
    """
    TP = 0
    FP = 0
    FN = 0
    all_predict_true_index = []
    all_gold_index = []
    for item in results:
        src, tgt, predict, d_predict = item
        # gold_index: positions where the gold text differs from the source.
        gold_index = []
        for i in range(len(list(src))):
            if src[i] == tgt[i]:
                continue
            else:
                gold_index.append(i)
        all_gold_index.append(gold_index)
        # predict_index: positions the corrector actually changed.
        predict_index = []
        for i in range(len(list(src))):
            if src[i] == predict[i]:
                continue
            else:
                predict_index.append(i)
        # Detection counting: a predicted edit position is a TP if it is a
        # gold edit position, otherwise an FP; gold positions never
        # predicted are FNs.
        each_true_index = []
        for i in predict_index:
            if i in gold_index:
                TP += 1
                each_true_index.append(i)
            else:
                FP += 1
        for i in gold_index:
            if i in predict_index:
                continue
            else:
                FN += 1
        all_predict_true_index.append(each_true_index)
    # For the detection Precision, Recall and F1
    dp, dr, detection_f1 = report_prf(TP, FP, FN,
                                      'detection', logger=logger)
    # store FN counts
    n_misreported = int(FN)
    # Reset counters for the correction phase.
    TP = 0
    FP = 0
    FN = 0
    # we only detect those correctly detected location, which is a different from the common metrics since
    # we wanna to see the precision improve by using the confusion set
    for i in range(len(all_predict_true_index)):
        if len(all_predict_true_index[i]) > 0:
            predict_words = []
            for j in all_predict_true_index[i]:
                # results[i][2] is the corrector output, results[i][1] the gold.
                predict_words.append(results[i][2][j])
                if results[i][1][j] == results[i][2][j]:
                    TP += 1
                else:
                    FP += 1
            for j in all_gold_index[i]:
                if results[i][1][j] in predict_words:
                    continue
                else:
                    FN += 1
    # For the correction Precision, Recall and F1
    cp, cr, correction_f1 = report_prf(TP, FP, FN,
                                       'correction', logger=logger)
    # common metrics to compare with other baseline methods.
    # (adds the detection misses back into FN for an end-to-end view)
    ccp, ccr, correction_cf1 = report_prf(TP, FP, FN + n_misreported,
                                          'correction_common', logger=logger)
    if not on_detected:
        correction_f1 = correction_cf1
    details = {
        'det_p': dp,
        'det_r': dr,
        'det_f1': detection_f1,
        'cor_p': cp,
        'cor_r': cr,
        'cor_f1': correction_f1,
        'common_cor_p': ccp,
        'common_cor_r': ccr,
        'common_cor_f1': correction_cf1,
    }
    return detection_f1, correction_f1, details
def nonzero_sign(x, name=None):
  """Returns the sign of x with sign(0) defined as 1 instead of 0."""
  with tf.compat.v1.name_scope(name, 'nonzero_sign', [x]):
    tensor = tf.convert_to_tensor(value=x)
    # Elements >= 0 (including exactly 0) map to +1, the rest to -1.
    is_non_negative = tf.greater_equal(tensor, 0.0)
    ones = tf.ones_like(tensor)
    return tf.compat.v1.where(is_non_negative, ones, -ones)
def tls_control_system_tdcops(tls_control_system):
    """Control system with time-dependent collapse operators"""
    base_objectives, controls, _ = tls_control_system
    # Single collapse channel whose amplitude follows the first control field.
    pulsed_collapse = [0.1 * sigmap(), controls[0]]
    c_ops = [[pulsed_collapse]]
    # Rebuild both objectives with the same |0> -> |1> transfer but with the
    # time-dependent collapse operators attached.
    objectives = [
        krotov.Objective(
            initial_state=ket('0'), target=ket('1'), H=obj.H, c_ops=c_ops
        )
        for obj in (base_objectives[0], base_objectives[1])
    ]
    controls_mapping = krotov.conversions.extract_controls_mapping(
        objectives, controls
    )
    return objectives, controls, controls_mapping
def create_new_credential(site_name, account_name, account_password):
    """Function to create a new account and its credentials"""
    # Thin factory around the Credentials model (defined elsewhere).
    return Credentials(site_name, account_name, account_password)
import os
def create_inception_graph():
  """Creates a graph from saved GraphDef file and returns a Graph object.

  Loads 'imagenet/classify_image_graph_def.pb' (TF1 frozen Inception graph)
  and imports it into the current default graph.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Session() as sess:
    model_filename = os.path.join(
        'imagenet', 'classify_image_graph_def.pb')
    # Parse the serialized GraphDef from disk.
    with gfile.FastGFile(model_filename, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      # Tensor names come from module-level constants (defined elsewhere in
      # this file); name='' keeps the original node names unprefixed.
      bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
          tf.import_graph_def(graph_def, name='', return_elements=[
              BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
              RESIZED_INPUT_TENSOR_NAME]))
  return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
from typing import Dict
def prepare_request_params(
    request_params: Dict, model_id: Text, model_data: Dict
) -> Dict:
    """ reverse hash names and correct types of input params

    Coerces each request parameter to the column types declared in
    model_data["columns_data"], then maps hashed names back to their
    originals when the model declares hashed indexes.
    """
    typed_params = correct_types(request_params, model_data["columns_data"])
    if not model_data["hashed_indexes"]:
        return typed_params
    return reverse_hash_names(model_id, typed_params)
import pandas as pd
import os
def help_full(path):
"""Health Evaluation and Linkage to Primary Care
The HELP study was a clinical trial for adult inpatients recruited from
a detoxification unit. Patients with no primary care physician were
randomized to receive a multidisciplinary assessment and a brief
motivational intervention or usual care, with the goal of linking them
to primary medical care.
A data frame with 1472 observations on the following variables.
- `ID` Subject ID
- `TIME` Interview time point
- `NUM_INTERVALS` Number of 6-month intervals from previous to
current interview
- `INT_TIME1` # of months from baseline to current interview
- `DAYS_SINCE_BL` # of days from baseline to current interview
- `INT_TIME2` # of months from previous to current interview
- `DAYS_SINCE_PREV` # of days from previous to current interview
- `PREV_TIME` Previous interview time
- `DEAD` a numeric vector
- `A1` Gender (1=Male, 2=Female)
- `A9` Years of education completed
- `A10` Marital Status (1=Married, 2=Remarried, 3=Widowed, 4=
Separated, 5=Divorced, 6=Never Married
- `A11A` Do you currently have a living mother? (0=No, 1= Yes
- `A11B` Do you currently have a living father? (0=No, 1=Yes
- `A11C` Do you currently have siblings? (0=No, 1=Yes
- `A11D` Do you currently have a partner (0=No, 1=Yes)
- `A11E` Do you currently have children? (0=No, 1=Yes)
- `A12B` Hollingshead categories (1=Major profess, 2= Lesser profess,
3=Minor profess, 4=Clerical/sales, 5=Skilled manual, 6=Semi-skilled,
7=Unskilled, 8= Homemaker, 9=No occupation)
- `A13` Usual employment pattern in last 6 months (1=Full time, 2=
Part time, 3=Student, 4=Unemployed, 5=Control envir)
- `A14A` Loved alone-last 6 mos (0=No, 1=Yes)
- `A14B` Lived w/a partner-last 6 mos (0=No, 1=Yes
- `A14C` Lived with parent(s)-last 6 mos (0=No, 1=Yes)
- `A14D` Lived w/children-last 6 mos (0=No, 1=Yes)
- `A14E` Lived w/other family-last 6 mos (0=No, 1=Yes
- `A14F` Lived w/friend(s)-last 6 mos (0=No, 1=Yes)
- `A14G` Lived w/other-last 6 mos (0=No, 1=Yes)
- `A14G_T` a factor with levels `1/2 WAY HOUSE` `3/4 HOUSE`
`ANCHOR INN` `ARMY` `ASSOCIATES` `BOARDERS`
`BOYFRIENDS MOM` `CORRECTIONAL FACILIT` `CRACK HOUSE`
`DEALER` `ENTRE FAMILIA` `FENWOOD` `GAVIN HSE`
`GIRLFRIENDS DAUGHTE` `GIRLFRIENDS SON` `GIRLFRIENDS CHILDREN`
`GIRLFRIENDS DAUGHTER` `GROUP HOME` `HALF-WAY HOUSE`
`HALFWAY HOUSE` `HALFWAY HOUSES` `HALFWAY HSE` `HOLDING UNIT`
`HOME BORDER` `HOMELESS` `HOMELESS SHELTER` `IN JAIL`
`IN PROGRAMS` `INCARCERATED` `JAIL` `JAIL HALFWAY HOUSE`
`JAIL, SHELTER` `JAIL, STREET` `JAIL/PROGRAM` `JAIL/SHELTER`
`JAILS` `LANDLADY` `LANDLORD` `LODGING HOUSE`
`MERIDIAN HOUSE` `NURSING HOME` `ON THE STREET`
`PARTNERS MOTHER` `PARTNERS CHILD` `PARTNERS CHILDREN`
`PRDGRAMS` `PRISON` `PROGRAM` `PROGRAM MTHP`
`PROGRAM ROOMMATES` `PROGRAM SOBER HOUSE` `PROGRAM-RESIDENTIAL`
`PROGRAM/HALFWAY HOUS` `PROGRAM/JAIL` `PROGRAM/SHELTER`
`PROGRAM/SHELTERS` `PROGRAMS` `PROGRAMS SUBSTANCE`
`PROGRAMS/SHELTER` `PROGRAMS/SHELTERS` `PROGRAMS/SHELTERS/DE`
`PROJECT SOAR` `RESIDENTIAL FACILITY` `RESIDENTIAL PROGRAM`
`ROOMING HOUSE` `ROOMING HOUSE (RELIG` `ROOMMATE` `ROOMMATES`
`ROOMMATES AT TRANSIT` `RYAN HOUSE` `SALVATION ARMY`
`SHELTER` `SHELTER/HALFWAY HSE` `SHELTER/HOTEL`
`SHELTER/PROGRAM` `SHELTERS` `SHELTERS/HOSPITALS`
`SHELTERS/JAIL` `SHELTERS/PROGRAMS` `SHELTERS/STREETS`
`SOBER HOUSE` `SOBER HOUSING` `SOUTH BAY JAIL` `STEPSON`
`STREET` `STREETS` `SUBSTANCE ABUSE TREA`
`TRANSITIONAL HOUSE` `VA SHELTER`
- `A15A` #nights in ovrnight shelter-last 6 mos
- `A15B` # nights on street-last 6 mos
- `A15C` #months in jail-last 6 mos
- `A16A` # months in ovrnight shelter-last 5 yrs
- `A16B` #moths on street-last 5 yrs
- `A16C` #months in jail-last 5 yrs
- `A17A` Received SSI-past 6 mos (0=No, 1=Yes)
- `A17B` Received SSDI-past 6 mos (0=No, 1=Yes)
- `A17C` Received AFDC-past 6 mos (0=No, 1=Yes)
- `A17D` Received EAEDC-past 6 mos (0=No, 1=Yes)
- `A17E` Received WIC-past 6 mos (0=No, 1=Yes)
- `A17F` Received unemployment benefits-past 6 mos (0=No, 1=Yes)
- `A17G` Received Workman's Comp-past 6 mos (0=No, 1=Yes)
- `A17H` Received Child Support-past 6 mos (0=No, 1=Yes)
- `A17I` Received other income-past 6 mos (0=No, 1=Yes)
- `A17I_T` a factor with levels `DISABLED VETERAN`
`EBT (FOOD STAMPS)` `EMERGENCY FOOD STAMP` `FOOD STAMP`
`FOOD STAMPS` `FOOD STAMPS/VETERAN` `FOOD STAMPS/VETERANS`
`INSURANCE SETTLEMENT` `PENSION CHECK` `SECTION 8`
`SERVICE CONNECTED DI` `SOCIAL SECURITY` `SSDI FOR SON`
`SURVIVORS BENEFITS` `TEMPORARY DISABILITY`
`VA BENEFITS-DISABILI` `VA COMPENSATION` `VA DISABILITY PENSIO`
`VETERAN BENEFITS` `VETERANS SERVICES` `VETERANS AFFAIRS`
- `A18` Most money made in any 1 year-last 5 yrs (1=<5000,
2=5000-10000, 3=11000-19000, 4=20000-29000, 5=30000-39000,
6=40000-49000, 7=50000+
- `B1` In general, how is your health (1=Excellent, 2=Very Good,
3=Good, 4=Fair, 5=Poor)
- `B2` Comp to 1 yr ago, how is your health now (1=Much better,
2=Somewhat better, 3=About the same, 4=Somewhat worse, 5=Much worse)
- `B3A` Does health limit you in vigorous activity (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3B` Does your health limit you in moderate activity (1=Limited a
lot, 2=Limited a little, 3=Not limited)
- `B3C` Does health limit you in lift/carry groceries (1=Limited a
lot, 2=Limited a little, 3=Not limited)
- `B3D` Hlth limit you in climb sev stair flights (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3E` Health limit you in climb 1 stair flight (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3F` Health limit you in bend/kneel/stoop (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3G` Does health limit you in walking >1 mile (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3H` Hlth limit you in walking sevrl blocks (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3I` Does health limit you in walking 1 block (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B3J` Hlth limit you in bathing/dressing self (1=Limited a lot,
2=Limited a little, 3=Not limited)
- `B4A` Cut down wrk/act due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B4B` Accomplish less due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B4C` Lim wrk/act type due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B4D` Diff perf work due to phys hlth-lst 4 wks (0=No, 1=Yes)
- `B5A` Cut wrk/act time due to emot prbs-lst 4 wks (0=No, 1=Yes)
- `B5B` Accomplish ess due to emot probs-lst 4 wks (0=No, 1=Yes)
- `B5C` <carefl w/wrk/act due to em prb-lst 4 wks (0=No, 1=Yes)
- `B6` Ext phys/em intf w/norm soc act-lst 4 wk (1-Not al all,
2=Slightly, 3=Moderately, 4=Quite a bit, 5=Extremely)
- `B7` Amount of bodily pain-past 4 wks (1=None, 2=Very mild, 3=
Mild, 4=Moderate, 5= Severe, 6= Very severe)
- `B8` Amt pain interf with norm work-last 4 wks (1=Not at all, 2=A
little bit, 3=Moderately, 4=Quite a bit, 5=Extremely
- `B9A` Did you feel full of pep-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9B` Have you been nervous-past 4 wks (1=All of the time, 2=Most
of the time, 3 = Good bit of the time, 4=Some of the time, 5=A little
of time, 6=None of the time)
- `B9C` Felt nothing could cheer you-lst 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9D` Have you felt calm/peaceful-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9E` Did you have a lot of energy-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9F` Did you feel downhearted-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9G` Did you feel worn out-past 4 wks (1=All of the time, 2=Most
of the time, 3 = Good bit of the time, 4=Some of the time, 5=A little
of time, 6=None of the time)
- `B9H` Have you been a happy pers-past 4 wks (1=All of the time,
2=Most of the time, 3 = Good bit of the time, 4=Some of the time, 5=A
little of time, 6=None of the time)
- `B9I` Did you feel tired-past 4 wks (1=All of the time, 2=Most of
the time, 3 = Good bit of the time, 4=Some of the time, 5=A little of
time, 6=None of the time)
- `B10` Amyphys/em prb intf w/soc act-lst 4 wks (1All of the time,
2=Most of the time, 3=Some of the time, 4= A lttle of time, 5= Non of
the time)
- `B11A` I seem to get sick easier than oth peop (1=Definitely true,
2=Mostly True, 3=Don't know, 4=Mostly false, 5=Definitely false)
- `B11B` I am as healthy as anybody I know (1=Definitely true,
2=Mostly true, 3=Don't know, 4=Mostly false, 5=Definitely False)
- `B11C` I expect my health to get worse (1=Definitely true, 2=Mostly
true, 3=Don't know, 3=Mostly false, 5=Definitely false)
- `B11D` My health is excellent (1=Definitely true, 2=Mostly true,
3=Don't know, 4=Mostly false, 5=Definitely false)
- `C1A` Tolf by MD had seix, epil, convuls (0=No, 1=Yes)
- `C1B` Told by MD had asth, emphys, chr lung dis (0=No, 1=Yes)
- `C1C` Told by MD had MI (0=No, 1=Yes)
- `C1D` Told by MD had CHF (0=No, 1=Yes)
- `C1E` Told by MD had other heart dis (req med) (0=No, 1=Yes)
- `C1F` Told by MD had HBP (0=No, 1=Yes)
- `C1G` Told by MD had chronic liver disease (0=No, 1=Yes)
- `C1H` Told by MD had kidney failure (0=No, 1=Yes)
- `C1I` Told by MD had chronic art, osteoarth (0=No, 1=Yes)
- `C1J` Told by MD had peripheral neuropathy (0=No, 1=Yes)
- `C1K` Ever told by MD had cancer (0=No, 1=Yes)
- `C1L` Ever told by MD had diabetes (0=No, 1=Yes)
- `C1M` Ever told by MD had stroke (0=No, 1=Yes)
- `C2A1` Have you ever had skin infections (0=No, 1=Yes)
- `C2A2` Have you had skin infections-past 6 mos (0=No, 1=Yes)
- `C2B1` Have you ever had pneumonia (0=No, 1=Yes)
- `C2B2` Have you had pneumonia-past 6 mos (0=No, 1=Yes)
- `C2C1` Have you ever had septic arthritis (0=No, 1=Yes)
- `C2C2` Have you had septic arthritis-past 6 mos (0=No, 1=Yes)
- `C2D1` Have you ever had TB (0=No, 1=Yes)
- `C2D2` Have you had TB-last 6 mos (0=No, 1=Yes)
- `C2E1` Have you ever had endocarditis (0=No, 1=Yes)
- `C2E2` Have you had endocarditis-past 6 mos (0=No, 1=Yes)
- `C2F1` Have you ever had an ulcer (0=No, 1=Yes)
- `C2F2` Have you had an ulcer-past 6 mos (0=No, 1=Yes)
- `C2G1` Have you ever had pancreatitis (0=No, 1=Yes)
- `C2G2` Have you had pancreatitis-past 6 mos (0=No, 1=Yes)
- `C2H1` Ever had abdom pain req overnt hosp stay (0=No, 1=Yes)
- `C2H2` Abdom pain req ovrnt hosp stay-lst 6 mos (0=No, 1=Yes)
- `C2I1` Have you ever vomited blood (0=No, 1=Yes)
- `C2I2` Have you vomited blood-past 6 mos (0=No, 1=Yes)
- `C2J1` Have you ever had hepatitis (0=No, 1=Yes)
- `C2J2` Have you had hepatitis-past 6 mos (0=No, 1=Yes)
- `C2K1` Ever had blood clots in legs/lungs (0=No, 1=Yes)
- `C2K2` Blood clots in legs/lungs-past 6 mos (0=No, 1=Yes)
- `C2L1` Have you ever had osteomyelitis (0=No, 1=Yes)
- `C2L2` Have you had osteomyelitis-past 6 mos (0=No, 1=Yes)
- `C2M1` Chst pain using cocaine req ER/hosp (0=No, 1=Yes)
- `C2M2` Chst pain using coc req ER/hosp-lst 6 mos (0=No, 1=Yes)
- `C2N1` Have you ever had jaundice (0=No, 1=Yes)
- `C2N2` Have you had jaundice-past 6 mos (0=No, 1=Yes)
- `C2O1` Lower back pain > 3mos req med attn (0=No, 1=Yes)
- `C2O2` Lwr bck pain >3mos req med attn-last 6 mos (0=No, 1=Yes)
- `C2P1` Ever had seizures or convulsions (0=No, 1=Yes)
- `C2P2` Had seizures or convulsions-past 6 mos (0=No, 1=Yes)
- `C2Q1` Ever had drug/alc overdose req ER attn (0=No, 1=Yes)
- `C2Q2` Drug/alc overdose req ER attn (0=No, 1=Yes)
- `C2R1` Have you ever had a gunshot wound (0=No, 1=Yes)
- `C2R2` Had a gunshot wound-past 6 mos (0=No, 1=Yes)
- `C2S1` Have you ever had a stab wound (0=No, 1=Yes)
- `C2S2` Have you had a stab wound-past 6 mos (0=No, 1=Yes)
- `C2T1` Ever had accid/falls req med attn (0=No, 1=Yes)
- `C2T2` Had accid/falls req med attn-past 6 mos (0=No, 1=Yes)
- `C2U1` Ever had fract/disloc to bones/joints (0=No, 1=Yes)
- `C2U2` Fract/disloc to bones/joints-past 6 mos (0=No, 1=Yes)
- `C2V1` Ever had injury from traffic accident (0=No, 1=Yes)
- `C2V2` Had injury from traffic accid-past 6 mos (0=No, 1=Yes)
- `C2W1` Have you ever had a head injury (0=No, 1=Yes)
- `C2W2` Have you had a head injury-past 6 mos (0=No, 1=Yes)
- `C3A1` Have you ever had syphilis (0=No, 1=Yes)
- `C3A2` # times had syphilis
- `C3A3` Have you had syphilis in last 6 mos (0=No, 1=Yes)
- `C3B1` Have you ever had gonorrhea (0=No, 1=Yes)
- `C3B2` # times had gonorrhea
- `C3B3` Have you had gonorrhea in last 6 mos (0=No, 1=Yes)
- `C3C1` Have you ever had chlamydia (0=No, 1=Yes)
- `C3C2` # of times had Chlamydia
- `C3C3` Have you had chlamydia in last 6 mos (0=No, 1=Yes)
- `C3D` Have you ever had genital warts (0=No, 1=Yes)
- `C3E` Have you ever had genital herpes (0=No, 1=Yes)
- `C3F1` Have you ever had other STD's (not HIV) (0=No, 1=Yes)
- `C3F2` # of times had other STD's (not HIV)
- `C3F3` Had other STD's (not HIV)-last 6 mos (0=No, 1=Yes)
- `C3F_T` a factor with levels `7` `CRABS`
`CRABS - TRICHONOMIS` `CRABS, HEP B` `DOESNT KNOW NAME`
`HAS HAD ALL 3 ABC` `HEP B` `HEP B, TRICAMONAS` `HEP. B`
`HEPATITIS B` `HEPATITS B` `TRICHAMONAS VAGINALA`
`TRICHAMONIS` `TRICHOMONAS` `TRICHOMONIASIS` `TRICHOMONIS`
`TRICHOMONIS VAGINITI` `TRICHOMORAS` `TRICHONOMIS`
- `C3G1` Have you ever been tested for HIV/AIDS (0=No, 1=Yes)
- `C3G2` # times tested for HIV/AIDS
- `C3G3` Have you been tested for HIV/AIDS-lst 6 mos (0=No, 1=Yes)
- `C3G4` What was the result of last test (1=Positive, 2=Negative,
3=Refued, 4=Never got result, 5=Inconclusive
- `C3H1` Have you ever had PID (0=No, 1=Yes)
- `C3H2` # of times had PID
- `C3H3` Have you had PID in last 6 mos (0=No, 1=Yes)
- `C3I` Have you ever had a Pap smear (0=No, 1=Yes)
- `C3J` Have you had a Pap smear in last 3 years (0=No, 1=Yes)
- `C3K` Are you pregnant (0=No, 1=Yes)
- `C3K_M` How many mos pregnant
- `D1` $ of times hospitalized for med probs
- `D2` Take prescr med regularly for phys prob (0=No, 1=Yes)
- `D3` # days had med probs-30 days bef detox
- `D4` How bother by med prob-30days bef detox (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `D5` How import is trtmnt for these med probs (0=Not at all,
1=Slightly, 2= Moderately, 3= Considerably, 4= Extremely
- `E2A` Detox prog for alc or drug prob-lst 6 mos (0=No, 1=Yes)
- `E2B` # times entered a detox prog-lst 6 mos
- `E2C` # nights ovrnight in detox prg-lst 6 mos
- `E3A` Holding unit for drug/alc prob-lst 6 mos (0=No, 1=Yes)
- `E3B` # times in holding unity=lst 6 mos
- `E3C` # total nights in holding unit-lst 6 mos
- `E4A` In halfway hse/resid facil-lst 6 mos (0=No, 1=Yes)
- `E4B` # times in hlfwy hse/res facil-lst 6 mos
- `E4C` Ttl nites in hlfwy hse/res fac-last 6 mos
- `E5A` In day trtmt prg for alcohol/drug-lst 6 mos (0=No, 1=Yes)
- `E5B` Total # days in day trtmt prg-lst 6 mos
- `E6` In methadone maintenance prg-lst 6 mos (0=No, 1=Yes)
- `E7A` Visit outpt prg subst ab couns-lst 6 mos (0=No, 1=Yes)
- `E7B` # visits outpt prg subst ab couns-lst 6 mos
- `E8A1` Saw MD/H care wkr re alcohol/drugs-lst 6 mos (0=No, 1=Yes)
- `E8A2` Saw Prst/Min/Rabbi re alcohol/drugs-lst 6 mos (0=No, 1=Yes)
- `E8A3` Employ Asst Prg for alcohol/drug prb-lst 6 mos (0=No, 1=Yes)
- `E8A4` Oth source cnsl for alcohol/drug prb-lst 6 mos (0=No, 1=Yes)
- `E9A` AA/NA/slf-hlp for drug/alcohol/emot-lst 6 mos (0=No, 1=Yes)
- `E9B` How often attend AA/NA/slf-hlp-lst 6 mos (1=Daily, 2=2-3
Times/week, 3=Weekly, 4=Every 2 weeks, 5=Once/month
- `E10A` have you been to med clinic-lst 6 mos (0=No, 1=Yes)
- `E10B1` # x visit ment hlth clin/prof-lst 6 mos
- `E10B2` # x visited med clin/priv MD-lst 6 mos
- `E10C19` Visited private MD-last 6 mos (0=No, 1=Yes)
- `E11A` Did you stay ovrnite/+ in hosp-lst 6 mos (0=No, 1=Yes)
- `E11B` # times ovrnight/+ in hosp-last 6 mos
- `E11C` Total # nights in hosp-last 6 mos
- `E12A` Visited Hosp ER for med care-past 6 mos (0=No, 1=Yes)
- `E12B` # times visited hosp ER-last 6 mos
- `E13` Tlt # visits to MDs-lst 2 wks bef detox
- `E14A` Recd trtmt from acupuncturist-last 6 mos (0=No, 1=Yes)
- `E14B` Recd trtmt from chiropractor-last 6 mos (0=No, 1=Yes)
- `E14C` Trtd by hol/herb/hom med prac-lst 6 mos (0=No, 1=Yes)
- `E14D` Recd trtmt from spirit healer-lst 6 mos (0=No, 1=Yes)
- `E14E` Have you had biofeedback-last 6 mos (0=No, 1=Yes)
- `E14F` Have you underwent hypnosis-lst 6 mos (0=No, 1=Yes)
- `E14G` Received other treatment-last 6 mos (0=No, 1=Yes)
- `E15A` Tried to get subst ab services-lst 6 mos (0=No, 1=Yes)
- `E15B` Always able to get subst ab servies (0=No, 1=Yes)
- `E15C1` I could not pay for services (0=No, 1=Yes)
- `E15C2` I did not know where to go for help (0=No, 1=Yes)
- `E15C3` Couldn't get to services due to transp prob (0=No, 1=Yes)
- `E15C4` The offie/clinic hrs were inconvenient (0=No, 1=Yes)
- `E15C5` Didn't speak/understnd Englsh well enough (0=No, 1=Yes)
- `E15C6` Afraid other might find out about prob (0=No, 1=Yes)
- `E15C7` My substance abuse interfered (0=No, 1=Yes)
- `E15C8` Didn't have someone to watch my children (0=No, 1=Yes)
- `E15C9` I did not want to lose my job (0=No, 1=Yes)
- `E15C10` My insurance didn't cover services (0=No, 1=Yes)
- `E15C11` There were no beds available at the prog (0=No, 1=Yes)
- `E15C12` Other reason not get sub ab services (0=No, 1=Yes)
- `E16A1` I cannot pay for services (0=No, 1=Yes)
- `E16A2` I am not eligible for free care (0=No, 1=Yes)
- `E16A3` I do not know where to go (0=No, 1=Yes)
- `E16A4` Can't get to services due to trans prob (0=No, 1=Yes)
- `E16A5` a numeric vectorOffice/clinic hours are inconvenient (0=No,
1=Yes)
- `E16A6` I don't speak/understnd enough English (0=No, 1=Yes)
- `E16A7` Afraid othrs find out about my hlth prob (0=No, 1=Yes)
- `E16A8` My substance abuse interferes (0=No, 1=Yes)
- `E16A9` I don't have someone to watch my childrn (0=No, 1=Yes)
- `E16A10` I do not want to lose my job (0=No, 1=Yes)
- `E16A11` My insurance doesn't cover charges (0=No, 1=Yes)
- `E16A12` I do not feel I need a regular MD (0=No, 1=Yes)
- `E16A13` Other reasons don't have regular MD (0=No, 1=Yes)
- `E18A` I could not pay for services (0=No, 1=Yes)
- `E18B` I did not know where to go for help (0=No, 1=Yes)
- `E18C` Couldn't get to services due to transp prob (0=No, 1=Yes)
- `E18D` The office/clinic hrs were inconvenient (0=No, 1=Yes)
- `E18F` Afraid others might find out about prob (0=No, 1=Yes)
- `E18G` My substance abuse interfered (0=No, 1=Yes)
- `E18H` Didn't have someone to watch my children (0=No, 1=Yes)
- `E18I` I did not want to lose my job (0=No, 1=Yes)
- `E18J` My insurance didn't cover services (0=No, 1=Yes)
- `E18K` There were no beds available at the prog (0=No, 1=Yes)
- `E18L` I do not need substance abuse services (0=No, 1=Yes)
- `E18M` Other reason not get sub ab services (0=No, 1=Yes)
- `F1A` Bothered by thngs not gen boethered by (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1B` My appretite was poor (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1C` Couldn't shake blues evn w/fam+frnds hlp (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1D` Felt I was just as good as other people (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1E` Had trouble keeping mind on what doing (0=Rarely/never,
1=Some of the time, 2=Occas/moderately, 3=Most of the time)
- `F1F` I felt depressed (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1G` I felt everthing I did was an effort (0=Rarely/never, 1=Some
of the time, 2=Occas/moderately, 3=Most of the time)
- `F1H` I felt hopeful about the future (0=Rarely/never, 1=Some of
the time, 2=Occas/moderately, 3=Most of the time)
- `F1I` I thought my life had been a failure (0=Rarely/never, 1=Some
of the time, 2=Occas/moderately, 3=Most of the time)
- `F1J` I felt fearful (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1K` My sleep was restless (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1L` I was happy (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1M` I talked less than usual (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1N` I felt lonely (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1O` People were unfriendly (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1P` I enjoyed life (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1Q` I had crying spells (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1R` I felt sad (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `F1S` I felt that people dislike me (0=Rarely/never, 1=Some of the
time, 2=Occas/moderately, 3=Most of the time)
- `F1T` I could not get going (0=Rarely/never, 1=Some of the time,
2=Occas/moderately, 3=Most of the time)
- `G1A` Diff contr viol beh for sig time per evr (0=No, 1=Yes)
- `G1A_30` Diff contr viol beh-sig per lst 30 days (0=No, 1=Yes)
- `G1B` Ever had thoughts of suicide (0=No, 1=Yes)
- `G1B_30` Had thoughts of suicide-lst 30 days (0=No, 1=Yes)
- `G1C` Attempted suicide ever (0=No, 1=Yes)
- `G1C_30` Attempted suicide-lst 30 days (0=No, 1=Yes)
- `G1D` Prescr med for pst/emot prob ever (0=No, 1=Yes)
- `G1D_30` Prescr med for psy/emot prob-lst 30 days (0=No, 1=Yes)
- `H1_30` # days in past 30 bef detox used alcohol
- `H1_LT` # yrs regularly used alcohol
- `H1_RT` Route of administration use alcohol (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H2_30` # days in 30 bef detox use alc to intox
- `H2_LT` # yrs regularly used alcohol to intox
- `H2_RT` Route of admin use alcohol to intox (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H3_30` # days in past 30 bef detox used heroin
- `H3_LT` # yrs regularly used heroin
- `H3_RT` Route of administration of heroin (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H4_30` # days used methadone-lst 30 bef detox
- `H4_LT` # yrs regularly used methadone
- `H4_RT` Route of administration of methadone (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H5_30` # days used opi/analg-lst 30 bef detox
- `H5_LT` # yrs regularly used oth opiates/analg
- `H5_RT` Route of admin of oth opiates/analg (0=N/A. 1=Oral,
2=Nasal, 3=Smoking, 4=Non-IV injection, 5=IV)
- `H6_30` # days in past 30 bef detox used barbit
- `H6_LT` # yrs regularly used barbiturates
- `H6_RT` Route of admin of barbiturates (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H7_30` # days used sed/hyp/trnq-lst 30 bef det
- `H7_LT` # yrs regularly used sed/hyp/trnq
- `H7_RT` Route of admin of sed/hyp/trnq (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H8_30` # days in lst 30 bef detox used cocaine
- `H8_LT` # yrs regularly used cocaine
- `H8_RT` Route of admin of cocaine (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H9_30` # days in lst 30 bef detox used amphet
- `H9_LT` # yrs regularly used amphetamines
- `H9_RT` Route of admin of amphetamines (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H10_30` # days in lst 30 bef detox used cannabis
- `H10_LT` # yrs regularly used cannabis
- `H10_RT` Route of admin of cannabis (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H11_30` # days in lst 30 bef detox used halluc
- `H11_LT` # yrs regularly used hallucinogens
- `H11_RT` Route of admin of hallucinogens (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H12_30` # days in lst 30 bef detox used inhalant
- `H12_LT` # yrs regularly used inhalants
- `H12_RT` Route of admin of inhalants (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H13_30` # days used >1 sub/day-lst 30 bef detox
- `H13_LT` # yrs regularly used >1 subst/day
- `H13_RT` Route of admin of >1 subst/day (0=N/A. 1=Oral, 2=Nasal,
3=Smoking, 4=Non-IV injection, 5=IV)
- `H14` Accord to interview w/c subst is main prob (0=No problem,
1=Alcohol, 2=Alcohol to intox, 3=Heroin, 4=Methadone, 5=Oth
opiate/analg, 6=Barbiturates, 7=Sed/hyp/tranq, 8=Cocaine,
9=Amphetamines, 10=Marij/cannabis)
- `H15A` # times had alcohol DTs
- `H15B` # times overdosed on drugs
- `H16A` $ spent on alc-lst 30 days bef detox
- `H16B` $ spent on drugs-lst 30 days bef detox
- `H17A` # days had alc prob-lst 30 days bef det
- `H17B` # days had drug prob-lst 30 days bef det
- `H18A` How troubled by alc probs-lst 30 days (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `H18B` How troubled by drug probs-lst 30 days (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `H19A` How import is trtmnt for alc probs now (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `H19B` How import is trtmnt for drug probs now (0=Not at all,
1=Slightly, 2=Moderately, 3=Considerably, 4=Extremely)
- `I1` Avg # drinks in lst 30 days bef detox
- `I2` Most drank any 1 day in lst 30 bef detox
- `I3` On days used heroin, avg # bags used
- `I4` Most bgs heroin use any 1 day-30 bef det
- `I5` Avg $ amt of heroin used per day
- `I6A` On days used cocaine, avg # bags used
- `I6B` On days used cocaine, avg # rocks used
- `I7A` Mst bgs cocaine use any 1 day-30 bef det
- `I7B` Mst rcks cocaine use any 1 day-30 bef det
- `I8` Avg $ amt of cocaine used per day
- `J1` Evr don't stop using cocaine when should (0=No, 1=Yes)
- `J2` Ever tried to cut down on cocaine (0=No, 1=Yes)
- `J3` Does cocaine take up a lot of your time (0=No, 1=Yes)
- `J4` Need use > cocaine to get some feeling (0=No, 1=Yes)
- `J5A` Get phys sick when stop using cocaine (0=No, 1=Yes)
- `J5B` Ever use cocaine to prevent getting sick (0=No, 1=Yes)
- `J6` Ever don't stop using heroin when should (0=No, 1=Yes)
- `J7` Ever tried to cut down on heroin (0=No, 1=Yes)
- `J8` Does heroin take up a lot of your time (0=No, 1=Yes)
- `J9` Need use > heroin to get some feeling (0=No, 1=Yes)
- `J10A` Get phys sick when stop using heroin (0=No, 1=Yes)
- `J10B` Ever use heroin to prevent getting sick (0=No, 1=Yes)
- `K1` Do you currently smoke cigarettes (1=Yes-every day, 2=Yes-some
days, 3=No-former smoker, 4=No-never>100 cigs)
- `K2` Avg # cigarettes smoked per day
- `K3` Considering quitting cigs w/in next 6 mo (0=No, 1=Yes)
- `L1` How often drink last time drank (1=To get high/less, 2=To get
drunk, 3=To pass out)
- `L2` Often have hangovrs Sun or Mon mornings (0=No, 1=Yes)
- `L3` Have you had the shakes when sobering (0=No, 1=Sometimes,
2=Alm evry time drink)
- `L4` Do you get phys sick as reslt of drinking (0=No, 1=Sometimes,
2=Alm evry time drink)
- `L5` Have you had the DTs (0=No, 1=Once, 2=Several times)
- `L6` When drink do you stumble/stagger/weave (0=No, 1=Sometimes,
2=Often)
- `L7` D/t drinkng felt overly hot/sweaty (0=No, 1=Once, 2=Several
times)
- `L8` As result of drinkng saw thngs not there (0=No, 1=Once,
2=Several times)
- `L9` Panic because fear not have drink if need it (0=No, 1=Yes)
- `L10` Have had blkouts as result of drinkng (0=No, never,
1=Sometimes, 2=Often, 3=Alm evry time drink)
- `L11` Do you carry bottle or keep close by (0=No, 1=Some of the
time, 2=Most of the time)
- `L12` After abstin end up drink heavily again (0=No, 1=Sometimes,
2=Almost evry time)
- `L13` Passed out due to drinking-lst 12 mos (0=No, 1=Once, 2=More
than once)
- `L14` Had convuls following period of drinkng (0=No, 1=Once,
2=Several times)
- `L15` Do you drink throughout the day (0=No, 1=Yes)
- `L16` Aftr drinkng heavily was thinkng unclear (0=No, 1=Yes, few
hrs, 2=Yes,1-2 days, 3=Yes, many days)
- `L17` D/t drinkng felt heart beat rapidly (0=No, 1=Once, 2=Several
times)
- `L18` Do you constntly think about drinkng/alc (0=No, 1=Yes)
- `L19` D/t drinkng heard things not there (0=No, 1=Once, 2= Several
times)
- `L20` Had weird/fright sensations when drinkng (0=No, 1=Once or
twice, 2=Often)
- `L21` When drinkng felt things crawl not there (0=No, 1=Once,
2=Several times)
- `L22` With respect to blackouts (0=Never had one, 1=Had for <1hr,
2=Had several hrs, 3=Had for day/+)
- `L23` Ever tried to cut down on drinking & failed (0=No, 1=Once,
2=Several times)
- `L24` Do you gulp drinks (0=No, 1=Yes)
- `L25` After taking 1 or 2 drinks can you stop (0=No, 1=Yes)
- `M1` Had hangover/felt bad aftr using alcohol/drugs (0=No, 1=Yes)
- `M2` Felt bad about self because of alcohol/drug use (0=No, 1=Yes)
- `M3` Missed days wrk/sch because of alcohol/drug use (0=No, 1=Yes)
- `M4` Fam/frinds worry/compl about alcohol/drug use (0=No, 1=Yes)
- `M5` I have enjoyed drinking/using drugs (0=No, 1=Yes)
- `M6` Qual of work suffered because of alcohol/drug use (0=No,
1=Yes)
- `M7` Parenting ability harmed by alcohol/drug use (0=No, 1=Yes)
- `M8` Trouble sleeping/nightmares aftr alcohol/drugs (0=No, 1=Yes)
- `M9` Driven motor veh while undr inf alcohol/drugs (0=No, 1=Yes)
- `M10` Using alcohol/1 drug caused > use othr drugs (0=No, 1=Yes)
- `M11` I have been sick/vomited aft alcohol/drug use (0=No, 1=Yes)
- `M12` I have been unhappy because of alcohol/drug use (0=No, 1=Yes)
- `M13` Lost weight/eaten poorly due to alcohol/drug use (0=No,
1=Yes)
- `M14` Fail to do what expected due to alcohol/drug use (0=No,
1=Yes)
- `M15` Using alcohol/drugs has helped me to relax (0=No, 1=Yes)
- `M16` Felt guilt/ashamed because of my alc drug use (0=No, 1=Yes)
- `M17` Said/done embarras thngs when on alcohol/drug (0=No, 1=Yes)
- `M18` Personality changed for worse on alcohol/drug (0=No, 1=Yes)
- `M19` Taken foolish risk when using alcohol/drugs (0=No, 1=Yes)
- `M20` Gotten into trouble because of alcohol/drug use (0=No, 1=Yes)
- `M21` Said cruel things while using alcohol/drugs (0=No, 1=Yes)
- `M22` Done impuls thngs regret due to alcohol/drug use (0=No,
1=Yes)
- `M23` Gotten in phys fights when use alcohol/drugs (0=No, 1=Yes)
- `M24` My phys health was harmed by alcohol/drug use (0=No, 1=Yes)
- `M25` Using alcohol/drug helped me have more + outlook (0=No,
1=Yes)
- `M26` I have had money probs because of my alcohol/drug use (0=No,
1=Yes)
- `M27` My love relat harmed due to my alcohol/drug use (0=No, 1=Yes)
- `M28` Smoked tobacco more when using alcohol/drugs (0=No, 1=Yes)
- `M29` My phys appearance harmed by alcohol/drug use (0=No, 1=Yes)
- `M30` My family hurt because of my alc drug use (0=No, 1=Yes)
- `M31` Close relationsp damaged due to alcohol/drug use (0=No,
1=Yes)
- `M32` Spent time in jail because of my alcohol/drug use (0=No,
1=Yes)
- `M33` My sex life suffered due to my alcohol/drug use (0=No, 1=Yes)
- `M34` Lost interst in activity due to my alcohol/drug use (0=No,
1=Yes)
- `M35` Soc life> enjoyable when using alcohol/drug (0=No, 1=Yes)
- `M36` Spirit/moral life harmed by alcohol/drug use (0=No, 1=Yes)
- `M37` Not had kind life want due to alcohol/drug use (0=No, 1=Yes)
- `M38` My alcohol/drug use in way of personal growth (0=No, 1=Yes)
- `M39` My alcohol/drug use damaged soc life/reputat (0=No, 1=Yes)
- `M40` Spent/lost too much $ because alcohol/drug use (0=No, 1=Yes)
- `M41` Arrested for DUI of alc or oth drugs (0=No, 1=Yes)
- `M42` Arrested for offenses rel to alcohol/drug use (0=No, 1=Yes)
- `M43` Lost marriage/love relat due to alcohol/drug use (0=No,
1=Yes)
- `M44` Susp/fired/left job/sch due to alcohol/drug use (0=No, 1=Yes)
- `M45` I used drugs moderately w/o having probs (0=No, 1=Yes)
- `M46` I have lost a friend due to my alcohol/drug use (0=No, 1=Yes)
- `M47` Had an accident while using alcohol/drugs (0=No, 1=Yes)
- `M48` Phys hurt/inj/burned when using alcohol/drugs (0=No, 1=Yes)
- `M49` I injured someone while using alcohol/drugs (0=No, 1=Yes)
- `M50` Damaged things/prop when using alcohol/drugs (0=No, 1=Yes)
- `N1A` My friends give me the moral support I need (0=No, 1=Yes)
- `N1B` Most people closer to friends than I am (0=No, 1=Yes)
- `N1C` My friends enjoy hearing what I think (0=No, 1=Yes)
- `N1D` I rely on my friends for emot support (0=No, 1=Yes)
- `N1E` Friend go to when down w/o feel funny later (0=No, 1=Yes)
- `N1F` Frnds and I open re what thnk about things (0=No, 1=Yes)
- `N1G` My friends sensitive to my pers needs (0=No, 1=Yes)
- `N1H` My friends good at helping me solve probs (0=No, 1=Yes)
- `N1I` have deep sharing relat w/ a # of frnds (0=No, 1=Yes)
- `N1J` When confide in frnds makes me uncomfort (0=No, 1=Yes)
- `N1K` My friends seek me out for companionship (0=No, 1=Yes)
- `N1L` Not have as int relat w/frnds as others (0=No, 1=Yes)
- `N1M` Recent good idea how to do somethng frm frnd (0=No, 1=Yes)
- `N1N` I wish my friends were much different (0=No, 1=Yes)
- `N2A` My family gives me the moral support I need (0=No, 1=Yes)
- `N2B` Good ideas of how do/make thngs from fam (0=No, 1=Yes)
- `N2C` Most peop closer to their fam than I am (0=No, 1=Yes)
- `N2D` When confide make close fam membs uncomf (0=No, 1=Yes)
- `N2E` My fam enjoys hearing about what I think (0=No, 1=Yes)
- `N2F` Membs of my fam share many of my intrsts (0=No, 1=Yes)
- `N2G` I rely on my fam for emot support (0=No, 1=Yes)
- `N2H` Fam memb go to when down w/o feel funny (0=No, 1=Yes)
- `N2I` Fam and I open about what thnk about thngs (0=No, 1=Yes)
- `N2J` My fam is sensitive to my personal needs (0=No, 1=Yes)
- `N2K` Fam memb good at helping me solve probs (0=No, 1=Yes)
- `N2L` Have deep sharing relat w/# of fam membs (0=No, 1=Yes)
- `N2M` Makes me uncomf to confide in fam membs (0=No, 1=Yes)
- `N2N` I wish my family were much different (0=No, 1=Yes)
- `O1A` # people spend tx w/who drink alc (1=None, 2= A few, 3=About
half, 4= Most, 5=All)
- `O1B` # people spend tx w/who are heavy drinkrs (1=None, 2= A few,
3=About half, 4= Most, 5=All)
- `O1C` # people spend tx w/who use drugs (1=None, 2= A few, 3=About
half, 4= Most, 5=All)
- `O1D` # peop spend tx w/who supprt your abstin (1=None, 2= A few,
3=About half, 4= Most, 5=All)
- `O2` Does live-in part/spouse drink/use drugs (0=No, 1=Yes, 2=N/A)
- `P1A` Phys abuse/assaul by fam memb/pers know (0=No, 1=Yes, 7=Not
sure)
- `P1B` Age first phys assaulted by pers know
- `P1C` Phys assaulted by pers know-last 6 mos (0=No, 1=Yes)
- `P2A` Phys abuse/assaul by stranger (0=No, 1=Yes, 7=Not sure)
- `P2B` Age first phys assaulted by stranger
- `P2C` Phys assaulted by stranger-last 6 mos (0=No, 1=Yes)
- `P3` Using drugs/alc when phys assaulted (1=Don't know, 2=Never,
3=Some cases, 4=Most cases, 5=All cases, 9=Never assaulted)
- `P4` Pers who phys assault you using alcohol/drugs (1=Don't know,
2=Never, 3=Some cases, 4=Most cases, 5=All cases, 9=Never assaulted)
- `P5A` Sex abuse/assaul by fam memb/pers know (0=No, 1= Yes, 7=Not
sure)
- `P5B` Age first sex assaulted by pers know
- `P5C` Sex assaulted by pers know-last 6 mos (0=No, 1=Yes)
- `P6A` Sex abuse/assaul by stranger (0=No, 1=Yes, 7=Not sure)
- `P6B` Age first sex assaulted by stranger
- `P6C` Sex assaulted by stranger-last 6 mos (0=No, 1=Yes)
- `P7` Using drugs/alc when sex assaulted (1=Don't know, 2=Never,
3=Some cases, 4=Most cases, 5=All cases, 9=Never assaulted)
- `P8` Person who sex assaulted you using alcohol/drugs (1=Don't
know, 2=Never, 3=Some cases, 4=Most cases, 5=All cases, 9=Never
assaulted)
- `Q1A` Have you ever injected drugs (0=No, 1=Yes)
- `Q1B` Have you injected drugs-lst 6 mos (0=No, 1=Yes)
- `Q2` Have you shared needles/works-last 6 mos (0=No/Not shot up,
3=Yes)
- `Q3` # people shared needles w/past 6 mos (0=No/Not shot up, 1=1
other person, 2=2-3 diff people, 3=4/+ diff people)
- `Q4` How often been to shoot gall/hse-lst 6 mos (0=Never, 1=Few
times or less, 2= Few times/month, 3= Once or more/week)
- `Q5` How often been to crack house-last 6 mos (0=Never, 1=Few times
or less, 2=Few times/month, 3=Once or more/week)
- `Q6` How often shared rinse-water-last 6 mos (0=Nevr/Not shot up,
1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q7` How often shared a cooker-last 6 mos (0=Nevr/Not shot up,
1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q8` How often shared a cotton-last 6 mos (0=Nevr/Not shot up,
1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q9` How often use syringe to div drugs-lst 6 mos (0=Nevr/Not shot
up, 1=Few times or less, 2=Few times/month, 3=Once or more/week)
- `Q10` How would you describe yourself (0=Straight, 1=Gay/bisexual)
- `Q11` # men had sex w/in past 6 months (0=0 men, 1=1 man, 2=2-3
men, 3=4+ men)
- `Q12` # women had sex w/in past 6 months (0=0 women, 1=1 woman,
2=2-3 women, 3=4+ women)
- `Q13` # times had sex In past 6 mos (0=Never, 1=Few times or less,
2=Few times/month, 3=Once or more/week)
- `Q14` How often had sex to get drugs-last 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q15` How often given drugs to have sex-lst 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q16` How often were you paid for sex-lst 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q17` How often you pay pers for sex-lst 6 mos (0=Never, 1=Few
times or less, 2=Few times/month, 3=Once or more/week)
- `Q18` How often use condoms during sex-lst 6 mos (0=No sex/always,
1=Most of the time, 2=Some of the time, 3=None of the time)
- `Q19` Condoms are too much of a hassle to use (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `Q20` Safer sex is always your responsibility (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `R1A` I really want to change my alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1B` Sometimes I wonder if I'm an alcoholic/addict (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1C` If I don't chng alcohol/drug probs will worsen (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1D` I started making changes in alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1E` Was using too much but managed to change (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1F` I wonder if my alcohol/drug use hurting othrs (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1G` I am a prob drinker or have drug prob (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `R1H` Already doing thngs to chnge alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1I` have changed use-trying to not slip back (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1J` I have a serious problem w/ alcohol/drugs (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1K` I wonder if I'm in contrl of alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1L` My alcohol/drug use is causing a lot of harm (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1M` Actively cutting down/stopping alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1N` Want help to not go back to alcohol/drugs (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1O` I know that I have an alcohol/drug problem (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1P` I wonder if I use alcohol/drugs too much (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1Q` I am an alcoholic or drug addict (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `R1R` I am working hard to change alcohol/drug use (1=Strongly
disagree, 2=Disagree, 3= Agree, 4=Strongly agree)
- `R1S` Some changes-want help from going back (1=Strongly disagree,
2=Disagree, 3= Agree, 4=Strongly agree)
- `S1A` At interview pt obviously depressed/withdrawn (0=No, 1=Yes)
- `S1B` At interview pt obviously hostile (0=No, 1=Yes)
- `S1C` At interview pt obviously anx/nervous (0=No, 1=Yes)
- `S1D` Trouble w/real tst/thght dis/par at interview (0=No, 1=Yes)
- `S1E` At interview pt trbl w/ compr/concen/rememb (0=No, 1=Yes)
- `S1F` At interview pt had suicidal thoughts (0=No, 1=Yes)
- `T1` Have used alc since leaving River St. (0=No, 1=Yes)
- `T1B` # days in row continued to drink
- `T1C` Longest period abstain-lst 6 mos (alc)
- `T2` Have used heroin since leaving River St (0=No, 1=Yes)
- `T2B` # days in row continued to use heroin
- `T2C` Longest period abstain-lst 6 mos (heroin)
- `T3` Have used cocaine since leaving River St (0=No, 1=Yes)
- `T3B` # days in row continued to use cocaine
- `T3C` Lngest period abstain-lst 6 mos (cocaine)
- `U1` It is important to have a regular MD (1=Strongly agree,
2=Agree, 3=Uncertain, 4=Disagree, 5=Strongly Disagree)
- `U2A` I cannot pay for services (0=No, 1=Yes)
- `U2B` I am not eligible for free care (0=No, 1=Yes)
- `U2C` I do not know where to go (0=No, 1=Yes)
- `U2D` Can't get services due to transport probs (0=No, 1=Yes)
- `U2E` Office/clinic hours are inconvenient (0=No, 1=Yes)
- `U2F` I do not speak/understand English well (0=No, 1=Yes)
- `U2G` Afraid others discover hlth prb I have (0=No, 1=Yes)
- `U2H` My substance abuse interferes (0=No, 1=Yes)
- `U2I` I do not have a babysitter (0=No, 1=Yes)
- `U2J` I do not want to lose my job (0=No, 1=Yes)
- `U2K` My insurance does not cover services (0=No, 1=Yes)
- `U2L` Medical care is not important to me (0=No, 1=Yes)
- `U2M` I do not have time (0=No, 1=Yes)
- `U2N` Med staff do not treat me with respect (0=No, 1=Yes)
- `U2O` I do not trust my doctors or nurses (0=No, 1=Yes)
- `U2P` Often been unsatisfied w/my med care (0=No, 1=Yes)
- `U2Q` Other reason hard to get regular med care (0=No, 1=Yes)
- `U2Q_T` a factor with many levels
- `U2R` a factor with levels `7` `A` `B` `C` `D` `E`
`F` `G` `H` `I` `J` `K` `L` `M` `N` `O` `P`
`Q`
- `U3A` Has MD evr talked to you about drug use (0=No, 1=Yes)
- `U3B` Has MD evr talked to you about alc use (0=No, 1=Yes)
- `U4` Is there an MD you consider your regular MD (0=No, 1=Yes)
- `U5` Have you seen any MDs in last 6 mos (0=No, 1=Yes)
- `U6A` Would you go to this MD if med prb not emer (0=No, 1=Yes)
- `U6B` Think one of these could be your regular MD (0=No, 1=Yes)
- `PCP_ID` a numeric vector
- `U7A` What type of MD is your regular MD/this MD (1=OB/GYN,
2=Family medicine, 3=Pediatrician, 4=Adolescent medicine, 5=Internal
medicine, 6=AIDS doctor, 7=Asthma doctor, 8=Pulmonary doctor,
9=Cardiologist, 10=Gastroen)
- `U7A_T` a factor with levels `ARTHRITIS DOCTOR` `CHIROPRACTOR`
`COCAINE STUDY` `DETOX DOCTOR` `DO` `EAR DOCTOR`
`EAR SPECIALIST` `EAR, NOSE, & THROAT.` `EAR/NOSE/THROAT`
`ENT` `FAMILY PHYSICIAN` `GENERAL MEDICINE`
`GENERAL PRACTICE` `GENERAL PRACTIONER` `GENERAL PRACTITIONER`
`HEAD & NECK SPECIALIST` `HERBAL/HOMEOPATHIC/ACUPUNCTURE`
`ID DOCTOR` `MAYBE GENERAL PRACTITIONER` `MEDICAL STUDENT`
`NEUROLOGIST` `NURSE` `NURSE PRACTICIONER`
`NURSE PRACTITIONER` `ONCOLOGIST` `PRENATAL` `PRIMARY`
`PRIMARY CAAE` `PRIMARY CARE` `PRIMARY CARE DOCTOR`
`PRIMERY CARE` `THERAPIST` `UROLOGIST` `WOMENS CLINIC BMC`
- `U8A` Only saw this person once (0=Only saw once)
- `U8B` Saw this person for <6 mos (1=<6 mos)
- `U8C` Saw this person for 6 mos-1 year (2=Betwn 6 mos & 1 yr)
- `U8D` Saw this person for 1-2 years (3=1-2 years)
- `U8E` Saw this person for 3-5 years (4=3-5 years)
- `U8F` Saw this person for more than 5 years (5=>5 years)
- `U10A` # times been to regular MDs office-pst 6 mos
- `U10B` # times saw regular MD in office-pst 6 mos
- `U10C` # times saw oth prof in office-pst 6 mos
- `U11` Rate convenience of MD office location (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U12` Rate hours MD office open for med appts (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U13` Usual wait for appt when sick (unsched) (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U14` Time wait for appt to start at MD office (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U15A` Do you pay for any/all of MD visits (0=No, 1=Yes)
- `U15B` How rate amt of $ you pay for MD visits (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U16A` Do you pay for any/all of prescript meds (0=No, 1=Yes)
- `U16B` Rate amt $ pay for meds/prescript trtmnts (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U17` Ever skip meds/trtmnts because too expensive (1=Yes, often,
2=Yes, occasionally, 3=No, never)
- `U18A` Ability to reach MD office by phone (1=Very poor, 2=Poor,
3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U18B` Ability to speak to MD by phone if need (1=Very poor,
2=Poor, 3=Fair, 4=Good, 5=Very good, 6=Excellent)
- `U19` How often see regular MD when have regular check-up
(1=Always, 2=Almost always, 3=A lot of the time, 4=Some of the time,
5=Almost never, 6=Never)
- `U20` When sick + go to MD how often see regular MD (1=Always,
2=Almost always, 3=A lot of the time, 4=Some of the time, 5=Almost
never, 6=Never)
- `U21A` How thorough MD exam to check hlth prb (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U21B` How often question if MD diagnosis right (1=Always, 2=Almost
always, 3=A lot of the time, 4=Some of the time, 5=Almost never,
6=Never)
- `U22A` Thoroughness of MD questions re symptoms (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22B` Attn MD gives to what you have to say (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22C` MD explanations of hlth prbs/trtmnts need (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22D` MD instrcts re sympt report/further care (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U22E` MD advice in decisions about your care (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U23` How often leave MD office w/unanswd quests (1=Always,
2=Almost always, 3=A lot of the time, 4=Some of the time, 5=Almost
never, 6=Never)
- `U24A` Amount of time your MD spends w/you (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24B` MDs patience w/ your questions/worries (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24C` MDs friendliness and warmth toward you (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24D` MDs caring and concern for you (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U24E` MDs respect for you (1=Very poor, 2= Poor, 3=Fair, 4=Good,
5= Very good, 6= Excellent)
- `U25A` Reg MD ever talked to you about smoking (0=No, 1=Yes)
- `U25B` Reg MD ever talked to you about alc use (0=No, 1=Yes)
- `U25C` Reg MD ever talk to you about seat belt use (0=No, 1=Yes)
- `U25D` Reg MD ever talked to you about diet (0=No, 1=Yes)
- `U25E` Reg MD ever talked to you about exercise (0=No, 1=Yes)
- `U25F` Reg MD ever talked to you about stress (0=No, 1=Yes)
- `U25G` Reg MD ever talked to you about safe sex (0=No, 1=Yes)
- `U25H` Reg MD ever talked to you about drug use (0=No, 1=Yes)
- `U25I` Reg MD ever talked to you about HIV testing (0=No, 1=Yes)
- `U26A` Cut/quit smoking because of MDs advice (0=No, 1=Yes)
- `U26B` Tried to drink less alcohol because of MD advice (0=No,
1=Yes)
- `U26C` Wore my seat belt more because of MDs advice (0=No, 1=Yes)
- `U26D` Changed diet because of MDs advice (0=No, 1=Yes)
- `U26E` Done more exercise because MDs advice (0=No, 1=Yes)
- `U26F` Relax/reduce stress because of MDs advice (0=No, 1=Yes)
- `U26G` Practiced safer sex because of MDs advice (0=No, 1=Yes)
- `U26H` Tried to cut down/quit drugs because MD advice (0=No,
1=Yes)
- `U26I` Got HIV tested because of MDs advice (0=No, 1=Yes)
- `U27A` I can tell my MD anything (1=Strongly agree, 2= Agree, 3=
Not sure, 4=Disagree, 5=Strongly disagree)
- `U27B` My MD pretends to know thngs if not sure (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27C` I trust my MDs judgement re my med care (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27D` My MD cares > about < costs than my hlth (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27E` My MD always tell truth about my health (1=Strongly agree,
2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27F` My MD cares as much as I about my hlth (1=Strongly agree, 2=
Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U27G` My MD would try to hide a mistake in trtmt (1=Strongly
agree, 2= Agree, 3= Not sure, 4=Disagree, 5=Strongly disagree)
- `U28` How much do you trst this MD (0=Not at all, 1=1, 2=2, 3=3,
4=4, 5=5, 6=6, 7=7, 8=8, 9=9, 10=Completely)
- `U29A` MDs knowledge of your entire med history (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U29B` MD knowldg of your respons-home/work/sch (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U29C` MD knowldg of what worries you most-hlth (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U29D` MDs knowledge of you as a person (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U30` MD would know what want done if unconsc (1=Strongly agree,
2=Agree, 3=Not sure, 4= Disagree, 5=Strongly disagree)
- `U31` Oth MDs/RNs who play role in your care (0=No, 1=Yes)
- `U32A` Their knowledge of you as a person (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32B` The quality of care they provide (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32C` Coordination betw them and your regular MD (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32D` Their expl of your hlth prbs/trtmts need (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U32D_T` N/A, only my regular MD does this
- `U33` Amt regular MD knows about care from others (1=Knows
everything, 2=Knows almost everything, 3=Knows some things, 4=Knows
very little, 5=Knows nothing)
- `U34` Has MD ever recommended you see MD specialist (0=No, 1=Yes)
- `U35A` How helpful MD in deciding on specialist (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35B` How helpful MD getting appt w/specialist (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35C` MDs involvmt when you trtd by specialist (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35D` MDs communic w/your specialists/oth MDs (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35E` MD help in explain what specialists said (1=Very poor, 2=
Poor, 3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U35F` Quality of specialists MD sent you to (1=Very poor, 2= Poor,
3=Fair, 4=Good, 5= Very good, 6= Excellent)
- `U36` How many minutes to get to MDs office (1=<15, 2=16-30,
3=31-60, 4=More than 60)
- `U37` When sick+call how long take to see you (1=Same day, 2=Next
day, 3=In 2-3 days, 4=In 4-5 days, 5=in >5 days)
- `U38` How many minutes late appt usually begin (1=None, 2=<5
minutes, 3=6-10 minutes, 4=11-20 minutes, 5=21-30 minutes, 6=31-45
minutes, 7=>45 minutes)
- `U39` How satisfied are you w/your regular MD (1=Completely
satisfied, 2=Very satisfied, 3=Somewhat satisfied, 4=Neither,
5=Somewhat dissatisfied, 6=Very dissatisfied, 7=Completely
dissatisfied)
- `V1` Evr needed to drink much more to get effect (0=No, 1=Yes)
- `V2` Evr find alc had < effect than once did (0=No, 1=Yes)
- `Z1` Breath Alcohol Concentration:1st test
- `Z2` Breath Alcohol Concentration:2nd test
- `AGE` Age in years
- `REALM` REALM score
- `E16A_RT` Barrier to regular MD: red tape (0=No, 1=Yes)
- `E16A_IB` Barrier to regular MD: internal barriers (0=No, 1=Yes)
- `E16A_TM` Barrier to regular MD: time restrictions (0=No, 1=Yes)
- `E16A_DD` Barrier to regular MD: dislike docs/system (0=No, 1=Yes)
- `GROUP` Randomization Group (0=Control, 1=Clinic)
- `MMSEC` MMSEC
- `PRIM_SUB` First drug of choice (0=None, 1=Alcohol, 2=Cocaine,
3=Heroin, 4=Barbiturates, 5=Benzos, 6=Marijuana, 7=Methadone,
8=Opiates)
- `SECD_SUB` Second drug of choice (0=None, 1=Alcohol, 2=Cocaine,
3=Heroin, 4=Barbiturates, 5=Benzos, 6=Marijuana, 7=Methadone,
8=Opiates)
- `ALCOHOL` 1st/2nd drug of choice=Alcohol (0=No, 1=Yes)
- `COC_HER` 1st/2nd drug of choice=cocaine or heroin (0=No, 1=Yes)
- `REALM2` REALM score (dichotomous) (1=0-60, 2=61-66)
- `REALM3` REALM score (categorical) (1=0-44), 2=45-60), 3=61-66)
- `RACE` Race (recode) (1=Afr Amer/Black, 2=White, 3=Hispanic,
4=Other)
- `RACE2` Race (recode) (1=White, 2=Minority)
- `BIRTHPLC` Where born (recode) (0=USA, 1=Foreign)
- `PRIMLANG` First language (recode) (0=English, 1=Other lang)
- `MD_LANG` Lang prefer to speak to MD (recode) (0=English, 1=Other
lang)
- `HS_GRAD` High school graduate (0=No, 1=Yes)
- `MAR_STAT` Marital status (recode) (0=Married, 1=Not married)
- `A12B_REC` Hollingshead category (recode) (0=Cat 1,2,3, 1=Cat
4,5,6, 2=Cat 7,8,9)
- `UNEMPLOY` Usually unemployed last 6m (0=No, 1=Yes)
- `ALONE6M` Usually lived alone past 6m y/n (0=No, 1=Yes)
- `HOMELESS` Homeless-shelter/street past 6 m (0=No, 1=Yes)
- `JAIL_MOS` Total months in jail past 5 years
- `JAIL_5YR` Any jail time past 5 years y/n (0=No, 1=Yes)
- `GOV_SUPP` Received government support past 6 m (0=No, 1=Yes)
- `A18_REC1` Most money made in 1 yr (recode) (0=$19,000 or less,
1=$20,000-$49,000, 2=$50,000 or more)
- `A18_REC2` Most money made-continuous recode
- `STD_EVER` Ever had an STD y/n (0=No, 1=Yes)
- `STD_6M` Had an STD past 6m y/n (0=No, 1=Yes)
- `CHR_SUM` Sum chronic medican conds/HIV ever
- `CHR_EVER` Chronic medical conds/HIV-ever y/n (0=No, 1=Yes)
- `EPI_SUM` Sum episodic (C2A-C2O, C2R-C2U, STD)-6m
- `EPI_6M` Episodic (C2A-C2O,C2R-C2U, STD)-6m y/n (0=No, 1=Yes)
- `EPI_6M2B` Episodic(C2A-C2O)-6m y/n (0=No, 1=Yes)
- `SER_INJ` Recent (6m) serious injury y/n (0=No, 1=Yes)
- `D3_REC` Any medical problems past 30d y/n (0=No, 1=Yes)
- `D4_REC` Bothered by medical problems y/n (0=No, 1=Yes)
- `D5_REC` Medical trtmt is important y/n (0=No, 1=Yes)
- `ANY_INS` Did you have health insurance past 6 m (0=No, 1=Yes)
- `FRML_SAT` Formal substance abuse treatment y/n (0=No, 1=Yes)
- `E10B1_R` Mental health treatment past 6m y/n (0=No, 1=Yes)
- `E10B2_R` Med clinic/private MD past 6m y/n (0=No, 1=Yes)
- `ALT_TRT` Alternative tratments y/n (0=No, 1=Yes)
- `ANY_UTIL` Amy recent health utilization (0=No, 1=Yes)
- `NUM_BARR` # of perceived barriers to linkage
- `G1B_REC` Suicidal thoughts past 30 days y/n (0=No, 1=Yes)
- `G1D_REC` Prescribed psych meds past 30 days y/n (0=No, 1=Yes)
- `PRIMSUB2` First drug of choice (no marijuana) (0=None, 1=Alcohol,
2=Cocaine, 3=Heroin, 4=Barbituates, 5=Benzos, 6=Marijuana,
7=Methadone, 8=Opiates)
- `ALCQ_30` Total number drinks past 30 days
- `H2_PRB` Problem sub: alc to intox (0=No, 1=Yes)
- `H3_PRB` Problem sub: heroin (0=No, 1=Yes)
- `H4_PRB` Problem sub: methadone (0=No, 1=Yes)
- `H5_PRB` Problem sub: oth opiates/analg (0=No, 1=Yes)
- `H6_PRB` Problem sub: barbituates (0=No, 1=Yes)
- `H7_PRB` Problem sub: sedat/hyp/tranq (0=No, 1=Yes)
- `H8_PRB` Problem sub: cocaine (0=No, 1=Yes)
- `H9_PRB` Problem sub: amphetamines (0=No, 1=Yes)
- `H10_PRB` Problem sub: marijuana, cannabis (0=No, 1=Yes)
- `H11_PRB` Problem sub: hallucinogens (0=No, 1=Yes)
- `H12_PRB` Problem sub: inhalants (0=No, 1=Yes)
- `POLYSUB` Polysubstance abuser y/n (0=No, 1=Yes)
- `SMOKER` Current smoker (every/some days) y/n (0=No, 1=Yes)
- `O1B_REC` Family/friends heavy drinkers y/n (0=No, 1=Yes)
- `O1C_REC` Family/friends use drugs y/n (0=No, 1=Yes)
- `O1D_REC` Family/fiends support abst. y/n (0=No, 1=Yes)
- `O2_REC` Live-in partner drinks/drugs y/n (0=No, 1=Yes)
- `PHYABUSE` Physical abuse-stranger or family (0=No, 1=Yes)
- `SEXABUSE` Sexual abuse-stranger or family (0=No, 1=Yes)
- `PHSXABUS` Any abuse (0=No, 1=Yes)
- `ABUSE2` Type of abuse (0=No abuse, 1=Physical only, 2=Sexual only,
3=Physical and sexual)
- `ABUSE3` Type of abuse (0=No abuse, 1=Physical only, 2=Sexual +/-
physical (0=No, 1=Yes)
- `CURPHYAB` Current abuse-physical (0=No, 1=Yes)
- `CURSEXAB` Current abuse-sexual (0=No, 1=Yes)
- `CURPHYSEXAB` Curent abuse-physical or sexual (0=No abuse,
1=Physical only, 2=Sexual +/- physical)
- `FAMABUSE` Family abuse-physical or sexual (0=No, 1=Yes)
- `STRABUSE` Stranger abuse-physical or sexual (0=No, 1=Yes)
- `ABUSE` Abuse-physical or sexual (0=No abuse, 1= Family abuse, 2=
Stranger only abuse)
- `RAWPF` Raw SF-36 physical functioning
- `PF` SF-36 physical functioning (0-100)
- `RAWRP` Raw SF-36 role-physical
- `RP` SF-36 role physical (0-100)
- `RAWBP` Raw SF-36 pain index
- `BP` SF-36 pain index (0-100)
- `RAWGH` Raw SF-36 general health perceptions
- `GH` SF-36 general health perceptions (0-100)
- `RAWVT` Raw SF-36 vitality
- `VT` SF-36 vitality 0-100)
- `RAWSF` Raw SF-36 social functioning
- `SF` SF-36 social functioning (0-100)
- `RAWRE` Raw SF-36 role-emotional
- `RE` SF-36 role-emotional (0-100)
- `RAWMH` Raw SF-36 mental health index
- `MH` SF-36 mental health index (0-100)
- `HT` Raw SF-36 health transition item
- `PCS` Standardized physical component scale-00
- `MCS` Standardized mental component scale-00
- `CES_D` CES-D score, measure of depressive symptoms, high scores
are worse
- `CESD_CUT` CES-D score > 21 y/n (0=No, 1=Yes)
- `C_MS` ASI-Composite medical status
- `C_AU` ASI-Composite score for alcohol use
- `C_DU` ASI-Composite score for drug use
- `CUAD_C` CUAD-Cocaine
- `CUAD_H` CUAD-Heroin
- `RAW_RE` SOCRATES-Rocognition-Raw
- `DEC_RE` SOCRATES-Recognition-Decile
- `RAW_AM` SOCRATES-Ambivalence-Raw
- `DEC_AM` SOCRATES-Ambivalence-Decile
- `RAW_TS` SOCRATES-Taking steps-Raw
- `DEC_TS` SOCRATES-Taking steps-Decile
- `RAW_ADS` ADS score
- `PHYS` InDUC-2L-Physical-Raw
- `PHYS2` InDUC-2L-Physical 9Raw (w/o M48)
- `INTER` InDUC-2L-Interpersonal-Raw
- `INTRA` InDUC-2L-Intrapersonal-Raw
- `IMPUL` InDUL-2L-Impulse control-Raw
- `IMPUL2` InDUC-2L-Impulse control-Raw (w/0 M23)
- `SR` InDUC-2L-Social responsibility-Raw
- `CNTRL` InDUC-2L-Control score
- `INDTOT` InDUC-2LTotal drlnC sore-Raw
- `INDTOT2` InDUC-2L-Total drlnC-Raw- w/o M23 and M48
- `PSS_FR` Perceived social support-friends
- `PSS_FA` Perceived social support-family
- `DRUGRISK` RAB-Drug risk total
- `SEXRISK` RAB-Sex risk total
- `TOTALRAB` RAB-Total RAB sore
- `RABSCALE` RAB scale sore
- `CHR_6M` Chronic medical conds/HIV-past 6m y/n (0=No, 1=Yes)
- `RCT_LINK` Did subject link to primary care (RCT)–This time point
(0=No, 1=Yes)
- `REG_MD` Did subject report having regular doctor–This time point
(0=No, 1=Yes)
- `ANY_VIS` # visits to regular doctor's office–This time point
- `ANY_VIS_CUMUL` Cumulative # visits to regular doctor's office
- `PC_REC` Primary care received: Linked & #visits (0=Not linked,
1=Linked, 1 visit, 2=Linked, 2+ visits)
- `PC_REC7` Primary cared received: linked & # visits (0=Not linked,
1=Linked, 1 visit, 2=Linked, 2 visits, 3=Linked, 3 visits, 4=Linked,
4 visits, 5= Linked, 5 visits, 6=Linked, 6+visits)
- `SATREAT` Any BSAS substance abuse this time point (0=No, 1=Yes)
- `DRINKSTATUS` Drank alcohol since leaving detox-6m
- `DAYSDRINK` Time (days) from baseline to first drink since leaving
detox-6m
- `ANYSUBSTATUS` Used alcohol, heroin, or cocaine since leaving
detox-6m
- `DAYSANYSUB` time (days) from baseline to first alcohol, heroin, or
cocaine since leaving detox-6m
- `LINKSTATUS` Linked to primary care within 12 months (by
administrative record)
- `DAYSLINK` Time (days) to linkage to primary care within 12 months
(by administrative record)
http://www.math.smith.edu/help
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `help_full.csv`.
Returns:
Tuple of np.ndarray `x_train` with 1472 rows and 788 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'help_full.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/mosaicData/HELPfull.csv'
maybe_download_and_extract(path, url,
save_file_name='help_full.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | ec84932a5fb6cceeb86530145bd136890ed60568 | 22,868 |
from typing import List
def get_all_listening_ports() -> List[int]:
    """
    Returns all tcp port numbers in LISTEN state (on any address).

    Reads port state from /proc/net/tcp (IPv4 sockets only).  State code
    '0A' is TCP_LISTEN; the port is the hex suffix of the local_address
    column.

    :return: list of listening port numbers (duplicates possible when a
        port is bound on several addresses).
    """
    res = []
    with open('/proc/net/tcp', 'r') as file:
        next(file, None)  # skip the header row; default guards an empty file
        for line in file:
            # split() copes with the variable-width, space-padded columns;
            # split(' ') yields empty fields for the right-aligned "sl" column.
            fields = line.split()
            if len(fields) < 4:
                continue
            # fields[1] is "hexaddr:hexport", fields[3] is the socket state.
            if fields[3] == '0A':
                res.append(int(fields[1].split(':')[1], 16))
    return res  # | cfc1b4b93358954ad802ce3727bd9d424ef9d136 | 22,869
async def mock_race_result() -> dict:
    """Create a mock race-result object.

    Returns a plain dict mimicking a persisted race-result document:
    a finish-line result with two contestants ranked by the referenced
    time events.  (The trailing record metadata previously fused onto the
    closing brace was a syntax error; it is preserved as a comment.)
    """
    return {
        "id": "race_result_1",
        "race_id": "190e70d5-0933-4af0-bb53-1d705ba7eb95",
        "timing_point": "Finish",
        "no_of_contestants": 2,
        "ranking_sequence": ["time_event_1", "time_event_2"],
        "status": 0,
    }  # | 33a2889bcb2665642a3e5743128a478bb103a82b | 22,870
import re
import binascii
def qr_to_install_code(qr_code: str) -> tuple[zigpy.types.EUI64, bytes]:
    """Try to parse the QR code.

    Each pattern in QR_CODES is expected to capture the device IEEE
    address as a hex string in group 1 and the install code in group 2.

    if successful, return a tuple of a EUI64 address and install code.
    Raises vol.Invalid when no pattern matches.
    """
    for code_pattern in QR_CODES:
        # Patterns are written for re.VERBOSE (whitespace/comments allowed).
        match = re.search(code_pattern, qr_code, re.VERBOSE)
        if match is None:
            continue
        ieee_hex = binascii.unhexlify(match[1])
        # Byte order is reversed when constructing the EUI64.
        ieee = zigpy.types.EUI64(ieee_hex[::-1])
        install_code = match[2]
        # install_code sanity check
        install_code = convert_install_code(install_code)
        return ieee, install_code
    raise vol.Invalid(f"couldn't convert qr code: {qr_code}") | 91ec3f90385e95b94c47c338f56b26315ff12e99 | 22,871
def vwr(scene, analyzer, test_number, workbook=None, sheet_format=None, agg_dict=None):
    """
    Calculates Variability Weighted Return (VWR).
    :param scene: Settings dict; keys "save_db" and "save_excel" select
        which outputs are produced.
    :param analyzer: Backtest analyzer.
    :param test_number: Identifier of the current test run, added to the
        DataFrame via add_key_to_df.
    :param workbook: Excel workbook to be saved to disk.
    :param sheet_format: Dictionary holding formatting information such as col width, font etc.
    :param agg_dict: Collects the dictionary outputs from backtrader for using in platting.
    :return workbook: Excel workbook to be saved to disk.
    :return agg_dict: Updated aggregation dict (returned together with workbook).
    """
    # Get the drawdowns auto ordered nested dictionary
    vwr_dict = analyzer.get_analysis()
    columns = [
        "vwr",
    ]
    if scene["save_db"]:
        # One row of analyzer values, tagged with the test number.
        df = pd.DataFrame(vwr_dict.values(), index=vwr_dict.keys()).T
        df = add_key_to_df(df, test_number)
        agg_dict["vwr"] = df
    if scene["save_excel"]:
        worksheet = workbook.add_worksheet("vwr")
        worksheet.write_row(0, 0, columns)
        worksheet.set_row(0, None, sheet_format["header_format"])
        worksheet.set_column("A:A", sheet_format["x_wide"], sheet_format["align_left"])
        worksheet.set_column("B:B", sheet_format["medium"], sheet_format["align_left"])
        # One key/value pair per row below the header.
        for i, (k, v) in enumerate(vwr_dict.items()):
            worksheet.write_row(i + 1, 0, [k])
            worksheet.write_row(i + 1, 1, [v])
    return workbook, agg_dict | 7fa8c9794e443be91d0cf246c209dfdc86e19f54 | 22,872
def dec2hms(dec):
    """Convert decimal degrees to an (hour, minute, second) tuple.

    24 hours span 360 degrees, so 15 degrees == 1 hour.
    ADW: This should really be replaced by astropy

    Parameters
    ----------
    dec : float
        Angle in decimal degrees.

    Returns
    -------
    (hour, minute, second) : (int, int, float)
        Truncated hours and minutes plus fractional seconds.
    """
    DEGREE = 360.
    HOUR = 24.
    MINUTE = 60.
    # (removed unused SECOND constant)
    dec = float(dec)
    fhour = dec*(HOUR/DEGREE)
    hour = int(fhour)
    fminute = (fhour - hour)*MINUTE
    minute = int(fminute)
    second = (fminute - minute)*MINUTE
    return (hour, minute, second)  # | 4c2c564631d431d908f66486af40e380598f2724 | 22,873
def logfpsd(data, rate, window, noverlap, fmin, bins_per_octave):
    """Compute a log-frequency power spectral density.

    Computes an ordinary linear-frequency power spectral density, then
    multiplies by a matrix of Gaussian bumps that maps linear FFT bins
    onto logarithmically spaced frequency bins.

    Returns the log-frequency PSD (n_times, n_logbins), the centers of the
    frequency bins, and the time points.

    Adapted from Matlab code by Dan Ellis (Columbia):
    http://www.ee.columbia.edu/ln/rosa/matlab/sgram/logfsgram.m

    NOTE(review): `specgram` is expected from this module's imports
    (matplotlib-style signature) -- confirm.
    """
    stft, linfreqs, times = specgram(data, window, Fs=rate, noverlap=noverlap)
    # construct matrix for mapping to log-frequency space
    fratio = 2**(1/bins_per_octave)  # ratio between adjacent frequencies
    # Cast to int: np.floor returns a float, but nbins is used as an
    # integer repeat/arange count below.
    nbins = int(np.floor(np.log((rate/2)/fmin)/np.log(fratio)))
    # Integer division: window/2+1 is a float in Python 3 and breaks
    # np.repeat's integer `repeats` argument.
    nfftbins = window//2 + 1
    logffreqs = fmin*np.exp(np.log(2)*np.arange(nbins)/bins_per_octave)
    logfbws = logffreqs*(fratio-1)
    # No log bin may be narrower than one FFT bin.
    logfbws = np.maximum(logfbws, rate/window)
    bandoverlapconstant = 0.5475  # controls adjacent band overlap. set by hand by Dan Ellis
    freqdiff = (np.repeat(logffreqs[:,np.newaxis],nfftbins,axis=1) - np.repeat(linfreqs[np.newaxis,:],nbins,axis=0))
    freqdiff = freqdiff / np.repeat(bandoverlapconstant*logfbws[:,np.newaxis],nfftbins,axis=1)
    mapping = np.exp(-0.5*freqdiff**2)
    # Normalize each row of the mapping to unit energy.
    rowEs = np.sqrt(2*np.sum(mapping**2,axis=1))
    mapping = mapping/np.repeat(rowEs[:,np.newaxis],nfftbins,axis=1)
    # perform mapping
    logfpsd = np.sqrt(np.dot(mapping,(np.abs(stft)**2)))
    return logfpsd.T, logffreqs, times  # | cf9a9ee3a248760e6bfa36a82ae782d607269b10 | 22,874
def ppg_dual_double_frequency_template(width):
    """
    EXPOSE
    Generate a PPG template by summing 2 sine waveforms.
    The first waveform has double the second waveform's frequency.
    The sum is min-max scaled to [0, 1].
    :param width: the sample size of the generated waveform
    :return: a 1-D numpy array of PPG waveform
        having diastolic peak at the low position
    """
    t = np.linspace(0, 1, width, False)  # 1 second
    sig = np.sin(2 * np.pi * 2 * t - np.pi / 2) + \
        np.sin(2 * np.pi * 1 * t - np.pi / 6)
    # Min-max scale with plain numpy -- equivalent to sklearn's
    # MinMaxScaler().fit_transform(...).reshape(-1) without the dependency.
    lo = sig.min()
    rng = sig.max() - lo
    if rng == 0:
        # Constant signal (e.g. width == 1): MinMaxScaler maps it to 0.
        return np.zeros_like(sig)
    return (sig - lo) / rng  # | 9c04afb7687e19a96fb84bf1d5367dc79ce6ceea | 22,875
import os
import fnmatch
def _run_fast_scandir(dir, fn_glob):
    """
    Quickly scan nested directories to get a list of filenames that match the fn_glob string.
    Modified from https://stackoverflow.com/a/59803793/2441026
    (faster than os.walk or glob methods, and allows filename matching in subdirectories).

    Parameters
    ----------
    dir : str
        full path to the input directory
    fn_glob : str
        glob-style filename pattern

    Outputs
    -------
    subfolders : list
        list of strings of all nested subdirectories
    files : list
        list of strings containing full paths to each file matching the filename pattern
    """
    subfolders, files = [], []
    for entry in os.scandir(dir):
        # Skip dunder/hidden entries (e.g. __pycache__, .git, dotfiles).
        if entry.name.startswith(("__", ".")):
            continue
        if entry.is_dir():
            subfolders.append(entry.path)
        elif entry.is_file() and fnmatch.fnmatch(entry.name, fn_glob):
            files.append(entry.path)
    # Recurse into the direct subfolders; iterate a snapshot so extending
    # `subfolders` below does not affect the loop.  (The original reused
    # the name `dir` here, shadowing the parameter and the builtin.)
    for sub in list(subfolders):
        sf, f = _run_fast_scandir(sub, fn_glob)
        subfolders.extend(sf)
        files.extend(f)
    return subfolders, files  # | 79f95bb312663c7870ec71cc93a0b6000c08ebdd | 22,876
def str_input(prompt: str) -> str:
    """Prompt user for string value.

    A single space is appended so the cursor is visually separated from
    the prompt text.

    Args:
        prompt (str): Prompt to display.

    Returns:
        str: User string response.
    """
    return input(f"{prompt} ")  # | ac6c3c694adf227fcc1418574d4875d7fa637541 | 22,877
def action(ra_deg, dec_deg, d_kpc, pm_ra_masyr, pm_dec_masyr, v_los_kms,
           verbose=False):
    """Convert equatorial coordinates and kinematics to Galactocentric
    cylindrical phase-space coordinates and compute orbital actions.

    parameters:
    ----------
    ra_deg: (float)
        RA in degrees.
    dec_deg: (float)
        Dec in degrees.
    d_kpc: (float)
        Distance in kpc.
    pm_ra_masyr: (float)
        RA proper motion in mas/yr.
    pm_dec_masyr: (float)
        Dec proper motion in mas/yr.
    v_los_kms: (float)
        RV in kms.
    verbose: (bool)
        If True, print the derived cylindrical coordinates and velocities.
    returns:
    ------
    (R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms, jR, lz, jz):
        cylindrical coordinates and velocities, radial action, vertical
        angular momentum and vertical action.
    """
    ra_rad = ra_deg * (np.pi / 180.)  # RA [rad]
    dec_rad = dec_deg * (np.pi / 180.)  # dec [rad]
    # Galactocentric position of the Sun:
    X_gc_sun_kpc = 8.  # [kpc]
    Z_gc_sun_kpc = 0.025  # [kpc]
    # Galactocentric velocity of the Sun:
    vX_gc_sun_kms = -9.58  # = -U [kms]
    vY_gc_sun_kms = 10.52 + 220.  # = V+v_circ(R_Sun) [kms]
    vZ_gc_sun_kms = 7.01  # = W [kms]
    # a. convert spatial coordinates (ra,dec,d) to (R,z,phi)
    # (ra,dec) --> Galactic coordinates (l,b):
    lb = bovy_coords.radec_to_lb(ra_rad, dec_rad, degree=False, epoch=2000.0)
    l_rad = lb[0]
    b_rad = lb[1]
    # (l,b,d) --> Galactocentric cartesian coordinates (x,y,z):
    xyz = bovy_coords.lbd_to_XYZ(l_rad, b_rad, d_kpc, degree=False)
    x_kpc = xyz[0]
    y_kpc = xyz[1]
    z_kpc = xyz[2]
    # (x,y,z) --> Galactocentric cylindrical coordinates (R,z,phi):
    Rzphi = bovy_coords.XYZ_to_galcencyl(x_kpc, y_kpc, z_kpc,
                                         Xsun=X_gc_sun_kpc, Zsun=Z_gc_sun_kpc)
    R_kpc = Rzphi[0]
    phi_rad = Rzphi[1]
    z_kpc = Rzphi[2]
    # b. convert velocities (pm_ra,pm_dec,vlos) to (vR,vz,vT)
    # (pm_ra,pm_dec) --> (pm_l,pm_b):
    pmlpmb = bovy_coords.pmrapmdec_to_pmllpmbb(pm_ra_masyr, pm_dec_masyr,
                                               ra_rad, dec_rad, degree=False,
                                               epoch=2000.0)
    pml_masyr = pmlpmb[0]
    pmb_masyr = pmlpmb[1]
    # (v_los,pm_l,pm_b) & (l,b,d) --> (vx,vy,vz):
    vxvyvz = bovy_coords.vrpmllpmbb_to_vxvyvz(v_los_kms, pml_masyr, pmb_masyr,
                                              l_rad, b_rad, d_kpc, XYZ=False,
                                              degree=False)
    vx_kms = vxvyvz[0]
    vy_kms = vxvyvz[1]
    vz_kms = vxvyvz[2]
    # (vx,vy,vz) & (x,y,z) --> (vR,vT,vz):
    vRvTvZ = bovy_coords.vxvyvz_to_galcencyl(vx_kms, vy_kms, vz_kms, R_kpc,
                                             phi_rad, z_kpc,
                                             vsun=[vX_gc_sun_kms,
                                                   vY_gc_sun_kms,
                                                   vZ_gc_sun_kms],
                                             galcen=True)
    vR_kms = vRvTvZ[0]
    vT_kms = vRvTvZ[1]
    vz_kms = vRvTvZ[2]
    if verbose:
        print("R = ", R_kpc, "\t kpc")
        print("phi = ", phi_rad, "\t rad")
        print("z = ", z_kpc, "\t kpc")
        print("v_R = ", vR_kms, "\t km/s")
        print("v_T = ", vT_kms, "\t km/s")
        print("v_z = ", vz_kms, "\t km/s")
    jR, lz, jz = calc_actions(R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms)
    return R_kpc, phi_rad, z_kpc, vR_kms, vT_kms, vz_kms, jR, lz, jz  # | 0e707bff67cee3c909213f14181927a14d5d5656 | 22,878
def getProcWithParent(host,targetParentPID,procname):
    """Return a (parentPID, procPID) tuple for `procname` under the given parent.

    Runs `ps -ef` remotely on `host`, greps for `procname` and scans the
    output for a process whose parent PID equals `targetParentPID`.
    Returns (0, 0) when no such process is found or a line cannot be parsed.
    """
    cmdStr="ps -ef | grep '%s' | grep -v grep" % (procname)
    cmd=Command("ps",cmdStr,ctxt=REMOTE,remoteHost=host)
    cmd.run(validateAfter=True)
    sout=cmd.get_results().stdout
    logger.info(cmd.get_results().printResult())
    if sout is None:
        return (0,0)
    for line in sout.split('\n'):
        if line == '':
            continue
        fields=line.lstrip(' ').split()
        if len(fields) < 3:
            # NOTE: aborts the whole scan on the first malformed line
            # (original behavior, kept for compatibility).
            logger.info("not enough fields line: '%s'" % line)
            return (0,0)
        # ps -ef columns: UID PID PPID ...; reuse the already-split fields
        # instead of re-splitting the line twice.
        procPID=int(fields[1])
        parentPID=int(fields[2])
        if parentPID == targetParentPID:
            return (parentPID,procPID)
    logger.info("couldn't find process with name: %s which is a child of PID: %s" % (procname,targetParentPID))
    return (0,0)  # | 77f4f13eb381dc840eee26875724cd6e1cdf1e57 | 22,879
def temporal_autocorrelation(array):
    """Computes temporal autocorrelation of array.

    `array` is expected to be an xarray object with dims
    (time, sample, x, model) and a variable `u` -- see the transpose below.
    Returns an xarray.Dataset with variable `t_corr` holding the
    autocorrelation at subsampled lags.
    """
    dt = array['time'][1] - array['time'][0]
    length = array.sizes['time']
    # Evaluate lags roughly once per unit time (at least every time step).
    subsample = max(1, int(1. / dt))
    def _autocorrelation(array):
        def _corr(x, d):
            del x
            # Correlate the signal with itself shifted by d steps in time.
            arr1 = jnp.roll(array, d, 0)
            ans = arr1 * array
            # Zero out wrapped-around entries (time index < d) before summing.
            ans = jnp.sum(
                jnp.where(
                    jnp.arange(length).reshape(-1, 1, 1, 1) >= d, ans / length, 0),
                axis=0)
            return d, ans
        _, full_result = jax.lax.scan(_corr, 0, jnp.arange(0, length, subsample))
        return full_result
    full_result = jax.jit(_autocorrelation)(
        jnp.array(array.transpose('time', 'sample', 'x', 'model').u))
    full_result = xarray.Dataset(
        data_vars=dict(t_corr=(['time', 'sample', 'x', 'model'], full_result)),
        coords={
            'dt': np.array(array.time[slice(None, None, subsample)]),
            'sample': array.sample,
            'x': array.x,
            'model': array.model
        })
    return full_result | 69640da51fa94edd92e793f2c86ac34090e70a28 | 22,880
import json
def kv_detail(request, kv_class, kv_pk):
    """
    GET to:
    /core/keyvalue/api/<kv_class>/<kv_pk>/detail/
    Returns a single KV instance (JSON bundle), or a JSON failure payload
    with HTTP 404 when the pk does not exist.
    """
    # Resolve the concrete model class, then its related key/value model.
    Klass = resolve_class(kv_class)
    KVKlass = Klass.keyvalue_set.related.model
    try:
        kv = KVKlass.objects.get(pk=kv_pk)
    except KVKlass.DoesNotExist:
        return HttpResponse(
            status=404, content=json.dumps({'success': False})
        )
    return HttpResponse(
        json.dumps(kv.get_bundle())
    ) | bd8961c25e39f8540b57753b8f923229e77ae795 | 22,881
from typing import Protocol
# NOTE(review): this typing.Protocol import shadows the ORM `Protocol`
# model queried below (typing.Protocol has no `.query`) -- confirm which
# Protocol is intended here.
import json
def opentrons_protocol(protocol_id):
    """Get OpenTrons representation of a protocol.

    Renders the stored protocol JSON as a downloadable OpenTrons python
    script; redirects with a flash message when the protocol is missing
    or not accessible to the current user.
    """
    current_protocol = Protocol.query.filter_by(id=protocol_id).first()
    if not current_protocol:
        flash('No such specification!', 'danger')
        return redirect('.')
    # Private protocols are only visible to their owner.
    if current_protocol.user != current_user and not current_protocol.public:
        flash('Not your project!', 'danger')
        return redirect('.')
    if not current_protocol.protocol:
        return ""
    protocol_object = json.loads(current_protocol.protocol)
    converter = OpenTrons()
    resp = make_response(converter.convert(protocol_object, current_protocol.name, current_protocol.description))
    # Serve as a plain-text attachment named after the protocol.
    resp.headers['Content-Type'] = "text"
    resp.headers['Content-Disposition'] = "attachment; filename=" + current_protocol.name + "-opentrons.py"
    return resp | 3a8cc4355763f788f01bb1d95aa43f5cb249a68f | 22,882
import os
def get_link_external():
    """ Return True if we should link to system BLAS / LAPACK
    If True, attempt to link to system BLAS / LAPACK. Otherwise, compile
    lapack_lite, and link to that.
    First check ``setup.cfg`` file for section ``[lapack]`` key ``external``.
    If this value undefined, then get string from environment variable
    NIPY_EXTERNAL_LAPACK.
    If value from ``setup.cfg`` or environment variable is not 'False' or '0',
    then return True.
    """
    # SETUP_FILE / SECTION / KEY / EXTERNAL_LAPACK_VAR are module-level
    # constants defined elsewhere in this file.
    config = ConfigParser()
    try:
        config.read(SETUP_FILE)
        external_link = config.get(SECTION, KEY)
    except (IOError, KeyError, NoOptionError, NoSectionError):
        # Config missing or incomplete: fall back to the environment.
        external_link = os.environ.get(EXTERNAL_LAPACK_VAR)
    if external_link is None:
        # Neither source set a value: default to bundled lapack_lite.
        return False
    # Any value other than '0'/'false' (case-insensitive) means "external".
    return external_link.lower() not in ('0', 'false') | 2a6c867bf3cebf7966951f19f9beddddd22f0fad | 22,883
def flag(request, comment_id, next=None):
    """
    Flags a comment. Confirmation on GET, action on POST.
    Templates: `comments/flag.html`,
    Context:
        comment
            the flagged `comments.comment` object
    """
    comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
    # Flag on POST
    if request.method == 'POST':
        # get_or_create keeps flagging idempotent per (comment, user).
        flag, created = comments.models.CommentFlag.objects.get_or_create(
            comment = comment,
            user = request.user,
            flag = comments.models.CommentFlag.SUGGEST_REMOVAL
        )
        # Notify listeners (e.g. moderation) that a flag was recorded.
        signals.comment_was_flagged.send(
            sender = comment.__class__,
            comment = comment,
            flag = flag,
            created = created,
            request = request,
        )
        return next_redirect(request.POST.copy(), next, flag_done, c=comment.pk)
    # Render a form on GET
    else:
        return render_to_response('comments/flag.html',
            {'comment': comment, "next": next},
            template.RequestContext(request)
        ) | e0997313c13446150ed3a0f402b2df74089aa4e9 | 22,884
from typing import List
def get_nodes_for_homek8s_group(inventory, group_name) -> List[str]:
    """Return the nodes' names of the given group from the inventory as a list.

    The group's 'hosts' mapping may be None (an empty YAML section) or an
    empty dict; both yield an empty list.
    """
    hosts_dict = inventory['all']['children']['homek8s']['children'][group_name]['hosts']
    # Iterating the mapping yields its keys (the node names).
    return list(hosts_dict) if hosts_dict else []  # | 806394259816ec4311e69dcd46e7b111c7ca0652 | 22,885
def is_blank_or_none(value: str):
    """
    Returns True if the specified string is whitespace, empty or None.
    :param value: the string to check
    :return: True if the specified string is whitespace, empty or None

    Bug fixed: the previous implementation returned ``"".__eq__(value.strip())``,
    which yields NotImplemented (a truthy value) whenever ``value.strip()``
    was not a str (e.g. objects with a non-str strip()); non-string inputs
    are now explicitly handled.
    """
    if not isinstance(value, str):
        # Only None counts as "blank" among non-strings (mirrors the old
        # AttributeError fallback for ints, etc.).
        return value is None
    return not value.strip()  # | 062e1ab33fc5043435af9e97cdf2443ffc4625bd | 22,886
import typing
def __get_play_widget(function: typing.Any) -> typing.Any:
    """Generate play widget.

    `widgets` is expected to be ipywidgets, imported at module level.

    :param function: Function to associate with Play.
    :return: Play widget.
    """
    play = widgets.interactive(
        function,
        i=widgets.Play(
            value=0,
            min=0,
            max=500,  # i steps through 0..500
            step=1,
            interval=5000,  # pause between steps (ms in ipywidgets) -- confirm
            description="Press play",
            disabled=False,
        ),
    )
    return play | 5bb63256f84f1c6f50e2ae007a08e6e794535bc5 | 22,887
def add_data_to_profile(id, profile_id, read_only, tree_identifier, folder_path=None, web_session=None):
    """Shares data to user group
    Args:
        id (int): The id of the data
        profile_id (int): The id of profile
        read_only (int): The flag that specifies whether the data is read only
        tree_identifier (str): The identifier of the tree
        folder_path (str): The folder path f.e. "/scripts/server1"
        web_session (object): The webserver session object, optional. Will be
            passed in my the webserver automatically
    Returns:
        The id of the folder to which the data was shared.
    """
    if tree_identifier.strip() == "":
        raise MSGException(Error.CORE_INVALID_PARAMETER, f"Parameter 'tree_identifier' cannot be empty.")
    # Blank or root ("/") folder paths mean "share at the tree root".
    if folder_path.strip() == "" or folder_path.strip() == "/":
        folder_path = None
    with BackendDatabase(web_session) as db:
        with BackendTransaction(db):
            privileges = backend.get_user_privileges_for_data(db, id, web_session.user_id)
            # Lower read_only value == stronger privilege; take the strongest.
            # NOTE(review): min() raises ValueError when `privileges` is
            # empty -- confirm callers guarantee at least one entry.
            max_privilege = min([p['read_only'] for p in privileges])
            # We check if the user is owner of given profile
            if backend.get_profile_owner(db, profile_id) == web_session.user_id:
                if max_privilege <= read_only:
                    profile_root_folder_id = backend.get_root_folder_id(db, tree_identifier, 'profile', profile_id)
                    if profile_root_folder_id is None:
                        # First share into this tree: create it lazily.
                        profile_root_folder_id = backend.create_profile_data_tree(db, tree_identifier, profile_id)
                    if folder_path:
                        folder_profile_id, _ = backend.create_folder(db, folder_path, profile_root_folder_id)
                    else:
                        folder_profile_id = profile_root_folder_id
                    backend.add_data_to_folder(db, id, folder_profile_id, read_only)
                else:
                    raise MSGException(Error.MODULES_SHARING_WITH_HIGHER_PERMISSIONS, "Cannot assign data to profile with higher permission than user has.")
            else:
                raise MSGException(Error.MODULES_USER_HAVE_NO_PRIVILEGES, "User have no privileges to perform operation.")
    return folder_profile_id | 489d246b90506b7581ee8aef66c7f5f2ba6b9b88 | 22,888
def get_activation_function(activation):
    """
    Gets an activation function module given the name of the activation.

    :param activation: The name of the activation function; one of
        'ReLU', 'LeakyReLU', 'PReLU', 'tanh', 'SELU', 'ELU'.
    :return: The activation function module (a fresh torch.nn.Module
        instance per call, as before).
    :raises ValueError: If the activation name is not supported.
    """
    # Dispatch table of zero-argument factories replaces the if/elif chain.
    factories = {
        'ReLU': nn.ReLU,
        'LeakyReLU': lambda: nn.LeakyReLU(0.1),  # negative_slope = 0.1
        'PReLU': nn.PReLU,
        'tanh': nn.Tanh,
        'SELU': nn.SELU,
        'ELU': nn.ELU,
    }
    try:
        return factories[activation]()
    except KeyError:
        raise ValueError('Activation "{}" not supported.'.format(activation)) from None
# | ae5d8e91667e2dc4fe34eb1ac96cff329d542103 | 22,889
def get_value_from_time(a_node="", idx=0):
    """
    gets the value from the time supplied.
    :param a_node: MFn.kAnimCurve node.
    :param idx: <int> the time index.
    :return: <tuple> data.
    """
    # Pair the key's time (converted to seconds) with the key's value.
    # NOTE(review): the trailing comma keeps this a 2-tuple; kept as-is.
    return OpenMaya.MTime(a_node.time(idx).value(), OpenMaya.MTime.kSeconds).value(), a_node.value(idx), | 77c6cb47c4381df3537754dc66e03ef4366557de | 22,890
def getrinputs(rtyper, graph):
    """Return the list of reprs of the input arguments to the 'graph'."""
    # One repr per graph input variable, in argument order.
    return [rtyper.bindingrepr(v) for v in graph.getargs()] | bb0f8861a29cd41af59432f267f07ff67601460c | 22,891
import os
def _apply_mask(head_file, mask_file, write_dir=None,
                caching=False, terminal_output='allatonce'):
    """
    Apply a binary mask to an image with FSL's ApplyMask.

    Parameters
    ----------
    head_file : str
        Path to the image to mask.
    mask_file : str
        Path to the image mask to apply.  Must contain exactly two
        distinct values, one of which is 0 (background).
    write_dir : str or None, optional
        Path to the folder to save the output file to. If None, the folder
        of the head file is used.
    caching : bool, optional
        Wether or not to use caching.
    terminal_output : one of {'stream', 'allatonce', 'file', 'none'}
        Control terminal output :
        'stream' : displays to terminal immediately,
        'allatonce' : waits till command is finished to display output,
        'file' : writes output to file
        'none' : output is ignored

    Returns
    -------
    path to brain extracted image.

    Raises
    ------
    ValueError
        If the mask is not binary or its affine differs from `head_file`'s.
    """
    if write_dir is None:
        write_dir = os.path.dirname(head_file)
    if caching:
        memory = Memory(write_dir)
        apply_mask = memory.cache(fsl.ApplyMask)
        apply_mask.interface().set_default_terminal_output(terminal_output)
    else:
        apply_mask = fsl.ApplyMask(terminal_output=terminal_output).run
    # Check mask is binary
    mask_img = nibabel.load(mask_file)
    # NOTE(review): get_data() is deprecated in nibabel (get_fdata());
    # kept to avoid changing the dtype seen by np.unique.
    mask = mask_img.get_data()
    values = np.unique(mask)
    if len(values) == 2:
        # If there are 2 different values, one of them must be 0 (background)
        if 0 not in values:
            raise ValueError('Background of the mask must be represented with'
                             '0. Given mask contains: %s.' % values)
    else:
        # Any other count of distinct values (1, or more than 2) is invalid.
        # (The original `elif len(values) != 2` was a redundant re-test.)
        raise ValueError('Given mask is not made of 2 values: %s'
                         '. Cannot interpret as true or false' % values)
    try:
        np.testing.assert_array_equal(nibabel.load(mask_file).affine,
                                      nibabel.load(head_file).affine)
    except AssertionError:
        raise ValueError('Given mask {0} and file {1} do not have the same '
                         'affine'.format(mask_file, head_file))
    out_apply_mask = apply_mask(in_file=head_file,
                                mask_file=mask_file,
                                out_file=fname_presuffix(head_file,
                                                         suffix='_masked',
                                                         newpath=write_dir))
    return out_apply_mask.outputs.out_file  # | fe2964ad62aa466f6ca92c0329e15ef80ca48460 | 22,892
def mars_reshape(x_i):
    """
    Reshape a channel-first video stack into channel-last frames:
    (n_stacks, C, T, H, W) -> (n_stacks * T, H, W, C).

    Generalizes the original fixed (n, 3, 16, 112, 112) layout by reading
    the dimensions from the input; backward compatible for those inputs.

    :param x_i: 5-D numpy array in (stacks, channels, frames, height, width).
    :return: 4-D numpy array in (stacks * frames, height, width, channels).
    """
    _, c, _, h, w = x_i.shape
    return np.transpose(x_i, (0, 2, 3, 4, 1)).reshape((-1, h, w, c))  # | d842d4d9b865feadf7c56e58e66d09c4a3389edf | 22,893
def Rz_to_coshucosv(R,z,delta=1.):
    """
    NAME:
       Rz_to_coshucosv
    PURPOSE:
       calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
    INPUT:
       R - radius
       z - height
       delta= focus
    OUTPUT:
       (cosh(u),cos(v))
    HISTORY:
       2012-11-27 - Written - Bovy (IAS)
    """
    # Distances to the two foci at (R, z) = (0, +/-delta).  np.sqrt replaces
    # the former sc.sqrt (top-level scipy.sqrt was deprecated and removed).
    d1 = np.sqrt((z+delta)**2.+R**2.)
    d2 = np.sqrt((z-delta)**2.+R**2.)
    # Prolate confocal relations:
    #   cosh(u) = (d1 + d2) / (2*delta),  cos(v) = (d1 - d2) / (2*delta)
    coshu = 0.5/delta*(d1+d2)
    cosv = 0.5/delta*(d1-d2)
    return (coshu,cosv)  # | f01ed002c09e488d89cfd4089343b16346dfb5fd | 22,894
import ast
import numpy
def rpFFNET_createdict(cf,ds,series):
    """ Creates a dictionary in ds to hold information about the FFNET data used
    to gap fill the tower data.

    cf is the control file, ds the data structure and series the label of
    the series being gap filled.  Returns the ffnet_info dictionary, or
    None (implicitly) when the series or its drivers are unusable."""
    # get the section of the control file containing the series
    section = pfp_utils.get_cfsection(cf,series=series,mode="quiet")
    # return without doing anything if the series isn't in a control file section
    if len(section)==0:
        logger.error("ERUsingFFNET: Series "+series+" not found in control file, skipping ...")
        return
    # check that none of the drivers have missing data
    driver_list = ast.literal_eval(cf[section][series]["ERUsingFFNET"]["drivers"])
    target = cf[section][series]["ERUsingFFNET"]["target"]
    for label in driver_list:
        data,flag,attr = pfp_utils.GetSeriesasMA(ds,label)
        if numpy.ma.count_masked(data)!=0:
            logger.error("ERUsingFFNET: driver "+label+" contains missing data, skipping target "+target)
            return
    # create the dictionary keys for this series
    ffnet_info = {}
    # site name
    ffnet_info["site_name"] = ds.globalattributes["site_name"]
    # source series for ER
    opt = pfp_utils.get_keyvaluefromcf(cf, [section,series,"ERUsingFFNET"], "source", default="Fc")
    ffnet_info["source"] = opt
    # target series name
    ffnet_info["target"] = cf[section][series]["ERUsingFFNET"]["target"]
    # list of drivers
    ffnet_info["drivers"] = ast.literal_eval(cf[section][series]["ERUsingFFNET"]["drivers"])
    # name of ffnet output series in ds
    ffnet_info["output"] = cf[section][series]["ERUsingFFNET"]["output"]
    # results of best fit for plotting later on
    ffnet_info["results"] = {"startdate":[],"enddate":[],"No. points":[],"r":[],
                             "Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
                             "Avg (obs)":[],"Avg (FFNET)":[],
                             "Var (obs)":[],"Var (FFNET)":[],"Var ratio":[],
                             "m_ols":[],"b_ols":[]}
    # create an empty series in ds if the SOLO output series doesn't exist yet
    if ffnet_info["output"] not in ds.series.keys():
        data,flag,attr = pfp_utils.MakeEmptySeries(ds,ffnet_info["output"])
        pfp_utils.CreateSeries(ds,ffnet_info["output"],data,flag,attr)
    # create the merge directory in the data structure
    if "merge" not in dir(ds): ds.merge = {}
    if "standard" not in ds.merge.keys(): ds.merge["standard"] = {}
    # create the dictionary keys for this series
    ds.merge["standard"][series] = {}
    # output series name
    ds.merge["standard"][series]["output"] = series
    # source
    ds.merge["standard"][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
    # create an empty series in ds if the output series doesn't exist yet
    if ds.merge["standard"][series]["output"] not in ds.series.keys():
        data,flag,attr = pfp_utils.MakeEmptySeries(ds,ds.merge["standard"][series]["output"])
        pfp_utils.CreateSeries(ds,ds.merge["standard"][series]["output"],data,flag,attr)
    return ffnet_info | 60646de63106895eeb716f763c49195b9c5459e8 | 22,895
def svn_client_relocate(*args):
    """
    svn_client_relocate(char dir, char from_prefix, char to_prefix, svn_boolean_t recurse,
        svn_client_ctx_t ctx, apr_pool_t pool) -> svn_error_t
    """
    # Thin SWIG wrapper: forwards directly to the C binding in _client.
    return _client.svn_client_relocate(*args) | 55e78e311d461e5a20e5cd04778e6c8431a6d990 | 22,896
def get_pairs(image1, image2, global_shift, current_objects, record, params):
""" Given two images, this function identifies the matching objects and
pairs them appropriately. See disparity function. """
nobj1 = np.max(image1)
nobj2 = np.max(image2)
if nobj1 == 0:
print('No echoes found in the first scan.')
return
elif nobj2 == 0:
zero_pairs = np.zeros(nobj1)
return zero_pairs
obj_match = locate_all_objects(image1,
image2,
global_shift,
current_objects,
record,
params)
pairs = match_pairs(obj_match, params)
return pairs | 5013764c7e2a1d5e12abc2107ffdbfca640f1423 | 22,897 |
def getComparedVotes(request):
"""
* @api {get} /getComparedVotes/?people_same={people_same_ids}&parties_same={parties_same_ids}&people_different={people_different_ids}&parties_different={parties_different_ids} List all votes where selected MPs/PGs voted the same/differently
* @apiName getComparedVotes
* @apiGroup Session
* @apiParam {people_same_ids} Comma separated list of Parladata ids for MPs who voted the same
* @apiParam {parties_same_ids} Comma separated list of Parladata ids for PGs who voted the same
* @apiParam {people_different_ids} Comma separated list of Parladata ids for MPs who voted differently
* @apiParam {parties_different_ids} Comma separated list of Parladata ids for PGs who voted the differently
* @apiSuccess {Integer} total Total number of votes so far
* @apiSuccess {Object[]} results List of votes that satisfy the supplied criteria
* @apiSuccess {Object} results.session Session data for this vote
* @apiSuccess {String} results.session.name Name of session.
* @apiSuccess {Date} results.session.date_ts Date and time of session.
* @apiSuccess {Date} results.session.date Date of session.
* @apiSuccess {Integer} results.session.id Id of session.
* @apiSuccess {Boolean} results.session.in_review Return true or false if session is in review.
* @apiSuccess {Object[]} results.session.orgs Organization object
* @apiSuccess {String} results.session.orgs.acronym Organization acronym
* @apiSuccess {Boolean} results.session.orgs.is_coalition True of False if organization is in coalition
* @apiSuccess {Integer} results.session.orgs.id Id of organization
* @apiSuccess {Integer} results.session.orgs.name Name of organization
* @apiSuccess {Object} results.results Results for this vote
* @apiSuccess {Integer} results.results.abstain Number of abstentions
* @apiSuccess {Integer} results.results.against Number of MPs who voted against the motion
* @apiSuccess {Integer} results.results.not_present Number of MPs who weren't present at the vote
* @apiSuccess {Integer} results.results.votes_for Number of MPs who voted for the motion
* @apiSuccess {date} results.results.date The date of the vote
* @apiSuccess {String} results.results.text The text of the motion which was voted upon
* @apiSuccess {String[]} results.results.tags List of tags that belong to this motion
* @apiSuccess {Boolean} results.results.is_outlier Is this vote a weird one (flame icon)?
* @apiSuccess {Boolean} results.results.result Did the motion pass?
* @apiExample {curl} Example:
curl -i https://analize.parlameter.si/v1/s/getComparedVotes/?people_same=&parties_same=1&people_different=&parties_different=2
* @apiSuccessExample {json} Example response:
{
"total": 2155,
"results": [{
"session": {
"name": "44. izredna seja",
"date_ts": "2017-05-30T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "30. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9587,
"in_review": false
},
"results": {
"abstain": 0,
"against": 0,
"motion_id": 7260,
"date": "09.06.2017",
"text": "Dnevni red v celoti",
"tags": ["Proceduralna glasovanja"],
"is_outlier": false,
"not_present": 34,
"votes_for": 56,
"result": true
}
}, {
"session": {
"name": "44. izredna seja",
"date_ts": "2017-05-30T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "30. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9587,
"in_review": false
},
"results": {
"abstain": 0,
"against": 34,
"motion_id": 7258,
"date": "09.06.2017",
"text": "Priporo\u010dilo Vladi RS v zvezi z okoljsko katastrofo, ki jo je povzro\u010dil po\u017ear v podjetju Kemis d.o.o. - Amandma: k 5. to\u010dki 9.6.2017 [SDS - Poslanska skupina Slovenske demokratske stranke]",
"tags": ["Odbor za infrastrukturo, okolje in prostor"],
"is_outlier": false,
"not_present": 35,
"votes_for": 21,
"result": false
}
}, {
"session": {
"name": "30. redna seja",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 4,
"against": 18,
"motion_id": 7219,
"date": "30.05.2017",
"text": "Zakon o dopolnitvi Zakona o omejevanju uporabe toba\u010dnih in povezanih izdelkov - Glasovanje o zakonu v celoti",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 16,
"votes_for": 52,
"result": true
}
}, {
"session": {
"name": "30. redna seja",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 6,
"against": 23,
"motion_id": 7218,
"date": "30.05.2017",
"text": "Zakon o spremembah in dopolnitvah Zakona o zdravstveni dejavnosti - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 19,
"votes_for": 42,
"result": true
}
}, {
"session": {
"name": "30. redna seja",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 6,
"against": 23,
"motion_id": 7218,
"date": "30.05.2017",
"text": "Zakon o spremembah in dopolnitvah Zakona o zdravstveni dejavnosti - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 19,
"votes_for": 42,
"result": true
}
}, {
"session": {
"name": "30. redna seja",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 3,
"against": 22,
"motion_id": 7217,
"date": "30.05.2017",
"text": "Priporo\u010dilo v zvezi s problematiko slovenskega zdravstva - Eviden\u010dni sklep MDT 30.5.2017",
"tags": ["Odbor za zdravstvo"],
"is_outlier": false,
"not_present": 14,
"votes_for": 51,
"result": true
}
}, {
"session": {
"name": "30. redna seja",
"date_ts": "2017-05-22T02:00:00",
"orgs": [{
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
}],
"date": "22. 5. 2017",
"org": {
"acronym": "DZ",
"is_coalition": false,
"id": 95,
"name": "Dr\u017eavni zbor"
},
"id": 9580,
"in_review": true
},
"results": {
"abstain": 2,
"against": 51,
"motion_id": 7216,
"date": "30.05.2017",
"text": "Zakon o spremembah in dopolnitvah Zakona o pokojninskem in invalidskem zavarovanju - Eviden\u010dni sklep o primernosti predloga zakona 30.5.2017",
"tags": ["Odbor za delo, dru\u017eino, socialne zadeve in invalide"],
"is_outlier": false,
"not_present": 13,
"votes_for": 24,
"result": false
}
}]
}
"""
people_same = request.GET.get('people_same')
parties_same = request.GET.get('parties_same')
people_different = request.GET.get('people_different')
parties_different = request.GET.get('parties_different')
if people_same != '':
people_same_list = people_same.split(',')
else:
people_same_list = []
if parties_same != '':
parties_same_list = parties_same.split(',')
else:
parties_same_list = []
if people_different != '':
people_different_list = people_different.split(',')
else:
people_different_list = []
if parties_different != '':
parties_different_list = parties_different.split(',')
else:
parties_different_list = []
if len(people_same_list) + len(parties_same_list) == 0:
return HttpResponse('Need at least one same to compare.')
if len(people_same_list) + len(parties_same_list) < 2 and len(people_different_list) + len(parties_different_list) < 1:
return HttpResponse('Not enough to compare.')
beginning = 'SELECT * FROM '
select_same_people = ''
select_same_parties = ''
match_same_people_ballots = ''
match_same_people_persons = ''
match_same_people_options = ''
match_same_parties_ballots = ''
match_same_parties_organizations = ''
match_same_parties_options = ''
select_different_people = ''
select_different_parties = ''
match_different_people_ballots = ''
match_different_people_persons = ''
match_different_people_options = ''
match_different_parties_ballots = ''
match_different_parties_organizations = ''
match_different_parties_options = ''
# select for same people DONE
for i, e in enumerate(people_same_list):
if i < len(people_same_list) - 1:
select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s, ' % (select_same_people, str(i), str(i), str(i))
else:
select_same_people = '%s parlaseje_ballot b%s, parlaseje_activity a%s, parlaposlanci_person p%s' % (select_same_people, str(i), str(i), str(i))
# select for same parties DONE
for i, e in enumerate(parties_same_list):
if i < len(parties_same_list) - 1:
select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s, ' % (select_same_parties, str(i), str(i))
else:
select_same_parties = '%s parlaseje_ballot pb%s, parlaskupine_organization o%s' % (select_same_parties, str(i), str(i))
# select for different people DONE
for i, e in enumerate(people_different_list):
if i < len(people_different_list) - 1:
select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s, ' % (select_different_people, str(i), str(i), str(i))
else:
select_different_people = '%s parlaseje_ballot db%s, parlaseje_activity da%s, parlaposlanci_person dp%s' % (select_different_people, str(i), str(i), str(i))
# select for different parties DONE
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s, ' % (select_different_parties, str(i), str(i))
else:
select_different_parties = '%s parlaseje_ballot dpb%s, parlaskupine_organization do%s' % (select_different_parties, str(i), str(i))
# match same people ballots by vote id DONE
# if only one person was passed, match_same_people_ballots will remain an empty string
for i, e in enumerate(people_same_list):
if i != 0:
if i < len(people_same_list) - 1:
match_same_people_ballots = '%s b0.vote_id = b%s.vote_id AND ' % (match_same_people_ballots, str(i))
else:
match_same_people_ballots = '%s b0.vote_id = b%s.vote_id' % (match_same_people_ballots, str(i))
# match same parties ballots by vote id DONE
# if only one same party was passed match_same_parties_ballots will remain an empty string
if len(people_same_list) == 0:
# no same people were passed to the API
pass
if len(parties_same_list) == 0:
# no same parties were passed
return HttpResponse('You need to pass at least one "same" person or party.')
elif len(parties_same_list) == 1:
# only one same party was passed, there is nothing to match yet
match_same_parties_ballots = ''
else:
# more than one same party was passed
for i, e in enumerate(parties_same_list):
if i != 0:
# ignore the first one, because all others will be compared with it
if i < len(parties_same_list) - 1:
# not last
match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
else:
# last
match_same_parties_ballots = '%s pb0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
elif len(people_same_list) > 0:
# one or more same people were passed
for i, e in enumerate(parties_same_list):
# do not ignore the first one, because all will be compared to the first person ballot
if i < len(parties_same_list) - 1:
# not last
match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id AND ' % (match_same_parties_ballots, str(i))
else:
# last
match_same_parties_ballots = '%s b0.vote_id = pb%s.vote_id' % (match_same_parties_ballots, str(i))
# match same people with persons DONE
for i, e in enumerate(people_same_list):
if i < len(people_same_list) - 1:
match_same_people_persons = '%s b%s.activity_ptr_id = a%s.id AND a%s.person_id = p%s.id AND p%s.id_parladata = %s AND ' % (match_same_people_persons, str(i), str(i), str(i), str(i), str(i), e)
else:
match_same_people_persons = '%s b%s.activity_ptr_id = a%s.id AND a%s.person_id = p%s.id AND p%s.id_parladata = %s' % (match_same_people_persons, str(i), str(i), str(i), str(i), str(i), e)
# match same parties with organizations DONE
for i, e in enumerate(parties_same_list):
if i < len(parties_same_list) -1:
match_same_parties_organizations = '%s pb%s.org_voter_id = o%s.id AND o%s.id_parladata = %s AND ' % (match_same_parties_organizations, str(i), str(i), str(i), e)
else:
match_same_parties_organizations = '%s pb%s.org_voter_id = o%s.id AND o%s.id_parladata = %s' % (match_same_parties_organizations, str(i), str(i), str(i), e)
# match same people based on options DONE
for i, e in enumerate(people_same_list):
if i != 0:
if i != len(people_same_list) - 1:
match_same_people_options = '%s b0.option = b%s.option AND ' % (match_same_people_options, str(i))
else:
match_same_people_options = '%s b0.option = b%s.option' % (match_same_people_options, str(i))
# match same parties based on options
for i, e in enumerate(parties_same_list):
if i == 0:
if select_same_people != '':
if len(parties_same_list) > 1:
match_same_parties_options = '%s b0.option = pb0.option AND ' % (match_same_parties_options)
else:
match_same_parties_options = '%s b0.option = pb0.option ' % (match_same_parties_options)
else:
if i != len(parties_same_list) - 1:
match_same_parties_options = '%s pb0.option = pb%s.option AND ' % (match_same_parties_options, str(i))
else:
match_same_parties_options = '%s pb0.option = pb%s.option' % (match_same_parties_options, str(i))
# compare different people and parties
if len(people_same_list) > 0:
# we compare with same people
# match different people ballots by vote id
for i, e in enumerate(people_different_list):
if i < len(people_different_list) - 1:
match_different_people_ballots = '%s b0.vote_id = db%s.vote_id AND ' % (match_different_people_ballots, str(i))
else:
match_different_people_ballots = '%s b0.vote_id = db%s.vote_id' % (match_different_people_ballots, str(i))
# match different parties ballots by vote id
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
match_different_parties_ballots = '%s b0.vote_id = dpb%s.vote_id AND ' % (match_different_parties_ballots, str(i))
else:
match_different_parties_ballots = '%s b0.vote_id = dpb%s.vote_id' % (match_different_parties_ballots, str(i))
# match different people based on options
for i, e in enumerate(people_different_list):
if i != len(people_different_list) - 1:
match_different_people_options = '%s b0.option != db%s.option AND ' % (match_different_people_options, str(i))
else:
match_different_people_options = '%s b0.option != db%s.option' % (match_different_people_options, str(i))
# match different parties based on options
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
match_different_parties_options = '%s b0.option != dpb%s.option AND ' % (match_different_parties_options, str(i))
else:
match_different_parties_options = '%s b0.option != dpb%s.option ' % (match_different_parties_options, str(i))
else:
# we compare with same parties
# match different people ballots by vote id
for i, e in enumerate(people_different_list):
if i < len(people_different_list) - 1:
match_different_people_ballots = '%s pb0.vote_id = db%s.vote_id AND ' % (match_different_people_ballots, str(i))
else:
match_different_people_ballots = '%s pb0.vote_id = db%s.vote_id' % (match_different_people_ballots, str(i))
# match different parties ballots by vote id
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
match_different_parties_ballots = '%s pb0.vote_id = dpb%s.vote_id AND ' % (match_different_parties_ballots, str(i))
else:
match_different_parties_ballots = '%s pb0.vote_id = dpb%s.vote_id' % (match_different_parties_ballots, str(i))
# match different people based on options
for i, e in enumerate(people_different_list):
if i != len(people_different_list) - 1:
match_different_people_options = '%s pb0.option != db%s.option AND ' % (match_different_people_options, str(i))
else:
match_different_people_options = '%s pb0.option != db%s.option' % (match_different_people_options, str(i))
# match different parties based on options
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
match_different_parties_options = '%s pb0.option != dpb%s.option AND ' % (match_different_parties_options, str(i))
else:
match_different_parties_options = '%s pb0.option != dpb%s.option ' % (match_different_parties_options, str(i))
# match different people with person
for i, e in enumerate(people_different_list):
if i < len(people_different_list) - 1:
match_different_people_persons = '%s db%s.activity_ptr_id = da%s.id AND da%s.person_id = dp%s.id AND dp%s.id_parladata = %s AND ' % (match_different_people_persons, str(i), str(i), str(i), str(i), str(i), e)
else:
match_different_people_persons = '%s db%s.activity_ptr_id = da%s.id AND da%s.person_id = dp%s.id AND dp%s.id_parladata = %s' % (match_different_people_persons, str(i), str(i), str(i), str(i), str(i), e)
# match different parties with organizations
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
match_different_parties_organizations = '%s dpb%s.org_voter_id = do%s.id AND do%s.id_parladata = %s AND ' % (match_different_parties_organizations, str(i), str(i), str(i), e)
else:
match_different_parties_organizations = '%s dpb%s.org_voter_id = do%s.id AND do%s.id_parladata = %s' % (match_different_parties_organizations, str(i), str(i), str(i), e)
query = beginning
q_selectors_list = [select_same_people, select_same_parties, select_different_people, select_different_parties]
q_selectors_list_clean = [s for s in q_selectors_list if s != '']
q_selectors = ', '.join(q_selectors_list_clean)
print 'q_selectors ' + q_selectors
query = query + ' ' + q_selectors + ' WHERE'
q_match_ballots_list = [match_same_people_ballots, match_same_parties_ballots, match_different_people_ballots, match_different_parties_ballots]
q_match_ballots_list_clean = [s for s in q_match_ballots_list if s != '']
q_match_ballots = ' AND '.join(q_match_ballots_list_clean)
print 'q_match_ballots ' + q_match_ballots
# query = query + ' ' + q_match_ballots + ' AND'
q_match_options_list = [match_same_people_options, match_same_parties_options, match_different_people_options, match_different_parties_options]
q_match_options_list_clean = [s for s in q_match_options_list if s != '']
q_match_options = ' AND '.join(q_match_options_list_clean)
print 'q_match_options ' + q_match_options
# query = query + ' ' + q_match_options + ' AND'
q_match_persons_list = [match_same_people_persons, match_different_people_persons]
q_match_persons_list_clean = [s for s in q_match_persons_list if s != '']
q_match_persons = ' AND '.join(q_match_persons_list_clean)
print 'q_match_persons ' + q_match_persons
# query = query + ' ' + q_match_persons + ' AND'
q_match_organizations_list = [match_same_parties_organizations, match_different_parties_organizations]
q_match_organizations_list_clean = [s for s in q_match_organizations_list if s != '']
q_match_organizations = ' AND '.join(q_match_organizations_list_clean)
print 'q_match_organizations ' + q_match_organizations
# query = query + ' ' + q_match_organizations
after_where_list = [q_match_ballots, q_match_options, q_match_persons, q_match_organizations]
after_where_list_clean = [s for s in after_where_list if s != '']
after_where = ' AND '.join(after_where_list_clean)
query = query + after_where
if request.GET.get('special'):
# exclude 'ni'
exclude_ni_people_same = ''
exclude_ni_parties_same = ''
exclude_ni_people_different = ''
exclude_ni_parties_different = ''
for i, e in enumerate(people_same_list):
if i < len(people_same_list) - 1:
exclude_ni_people_same = '%s b%s.option != \'ni\' AND ' % (exclude_ni_people_same, i)
else:
exclude_ni_people_same = '%s b%s.option != \'ni\'' % (exclude_ni_people_same, i)
for i, e in enumerate(parties_same_list):
if i < len(parties_same_list) - 1:
exclude_ni_parties_same = '%s pb%s.option != \'ni\' AND ' % (exclude_ni_parties_same, i)
else:
exclude_ni_parties_same = '%s pb%s.option != \'ni\'' % (exclude_ni_parties_same, i)
for i, e in enumerate(people_different_list):
if i < len(people_different_list) - 1:
exclude_ni_people_different = '%s db%s.option != \'ni\' AND ' % (exclude_ni_people_different, i)
else:
exclude_ni_people_different = '%s db%s.option != \'ni\'' % (exclude_ni_people_different, i)
for i, e in enumerate(parties_different_list):
if i < len(parties_different_list) - 1:
exclude_ni_parties_different = '%s dpb%s.option != \'ni\' AND ' % (exclude_ni_parties_different, i)
else:
exclude_ni_parties_different = '%s dpb%s.option != \'ni\'' % (exclude_ni_parties_different, i)
exclude_ni_list = [exclude_ni_people_same, exclude_ni_parties_same, exclude_ni_people_different, exclude_ni_parties_different]
exclude_ni_list_clean = [s for s in exclude_ni_list if s != '']
exclude_ni = ' AND '.join(exclude_ni_list_clean)
query = query + ' AND ' + exclude_ni
# return HttpResponse(query)
print query
print 'STATEMENT PARTS:'
print 'select_same_people ' + select_same_people
print 'select_same_parties ' + select_same_parties
print 'match_same_people_ballots ' + match_same_people_ballots
print 'match_same_people_persons ' + match_same_people_persons
print 'match_same_people_options ' + match_same_people_options
print 'match_same_parties_ballots ' + match_same_parties_ballots
print 'match_same_parties_organizations ' + match_same_parties_organizations
print 'match_same_parties_options ' + match_same_parties_options
print 'select_different_people ' + select_different_people
print 'select_different_parties ' + select_different_parties
print 'match_different_people_ballots ' + match_different_people_ballots
print 'match_different_people_persons ' + match_different_people_persons
print 'match_different_people_options ' + match_different_people_options
print 'match_different_parties_ballots ' + match_different_parties_ballots
print 'match_different_parties_organizations ' + match_different_parties_organizations
print 'match_different_parties_options ' + match_different_parties_options
ballots = Ballot.objects.raw(query)
session_ids = set([b.vote.session.id for b in ballots])
sessions = {}
for s in session_ids:
sessions[s] = Session.objects.get(id=s).getSessionData()
print '[SESSION IDS:]'
print set(session_ids)
out = {
'total': Vote.objects.all().count(),
'results': []
}
for ballot in ballots:
out['results'].append({
'session': sessions[ballot.vote.session.id],
'results': {
'motion_id': ballot.vote.id_parladata,
'text': ballot.vote.motion,
'votes_for': ballot.vote.votes_for,
'against': ballot.vote.against,
'abstain': ballot.vote.abstain,
'not_present': ballot.vote.not_present,
'result': ballot.vote.result,
'is_outlier': ballot.vote.is_outlier,
'tags': ballot.vote.tags,
'date': ballot.start_time.strftime(API_DATE_FORMAT)
}
})
return JsonResponse(out, safe=False) | e49b2e1b181761e56795868a3dd6ff5a0452cd05 | 22,898 |
def get_bits(register, index, length=1):
"""
Get selected bit(s) from register while masking out the rest.
Returns as boolean if length==1
:param register: Register value
:type register: int
:param index: Start index (from right)
:type index: int
:param length: Number of bits (default 1)
:type length: int
:return: Selected bit(s)
:rtype: Union[int, bool]
"""
result = (register >> index) & ((1 << length) - 1)
if length == 1:
return result == 1
return result | 0663d925c2c74ece359a430392881cf24b75a575 | 22,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.