_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def cors_allow_any(request, response):
    """
    Add headers permitting CORS requests from any origin, with or without
    credentials, with any headers.

    Returns the (possibly modified) response object.
    """
    meta = request.META
    origin = meta.get('HTTP_ORIGIN')
    if not origin:
        # Not a cross-origin request; nothing to do.
        return response

    # From the CORS spec: The string "*" cannot be used for a resource that
    # supports credentials, so echo the requesting origin back instead.
    response['Access-Control-Allow-Origin'] = origin
    patch_vary_headers(response, ['Origin'])
    response['Access-Control-Allow-Credentials'] = 'true'

    if request.method == 'OPTIONS':
        requested_headers = meta.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS')
        if requested_headers is not None:
            response['Access-Control-Allow-Headers'] = requested_headers
        response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'

    return response
def create_token(user, client, scope, id_token_dic=None):
    """
    Build and populate a Token object (not persisted here).

    Access and refresh tokens are fresh random UUID hex strings; the
    expiration is OIDC_TOKEN_EXPIRE seconds from now. The id_token payload
    is only stored when one is provided.

    Returns the Token object.
    """
    token = Token()
    token.user = user
    token.client = client
    token.scope = scope
    token.access_token = uuid.uuid4().hex
    if id_token_dic is not None:
        token.id_token = id_token_dic
    token.refresh_token = uuid.uuid4().hex
    token.expires_at = timezone.now() + timedelta(
        seconds=settings.get('OIDC_TOKEN_EXPIRE'))
    return token
def create_code(user, client, scope, nonce, is_authentication,
                code_challenge=None, code_challenge_method=None):
    """
    Build and populate a Code object (not persisted here).

    The PKCE challenge fields are only stored when both the challenge and
    its method are provided. The expiration is OIDC_CODE_EXPIRE seconds
    from now.

    Returns the Code object.
    """
    code = Code()
    code.user = user
    code.client = client
    code.scope = scope
    code.nonce = nonce
    code.is_authentication = is_authentication
    code.code = uuid.uuid4().hex
    if code_challenge and code_challenge_method:
        code.code_challenge = code_challenge
        code.code_challenge_method = code_challenge_method
    code.expires_at = timezone.now() + timedelta(
        seconds=settings.get('OIDC_CODE_EXPIRE'))
    return code
def get_client_alg_keys(client):
    """
    Return the list of signing keys associated with the given client.

    RS256 clients use every stored RSA key; HS256 clients use a symmetric
    key built from the client secret.

    Raises Exception for an unsupported algorithm, or when no RSA key
    has been configured.
    """
    alg = client.jwt_alg
    if alg == 'HS256':
        return [SYMKey(key=client.client_secret, alg=alg)]
    if alg == 'RS256':
        keys = [jwk_RSAKey(key=importKey(rsakey.key), kid=rsakey.kid)
                for rsakey in RSAKey.objects.all()]
        if not keys:
            raise Exception('You must add at least one RSA Key.')
        return keys
    raise Exception('Unsupported key algorithm.')
def read_gbasis(basis_lines, fname):
    '''Reads gbasis-formatted file data and converts it to a dictionary with the
    usual BSE fields

    Note that the gbasis format does not store all the fields we
    have, so some fields are left blank

    Parameters
    ----------
    basis_lines : list of str
        Lines of the gbasis file
    fname : str
        Name of the file being read (unused; kept for a uniform reader interface)
    '''
    # Lines starting with these characters are comments
    skipchars = '!#'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    i = 0
    bs_name = None  # basis name from the first header; all headers must agree
    while i < len(basis_lines):
        # Element header line: "<element symbol>:<basis set name>"
        line = basis_lines[i]
        lsplt = line.split(':')
        elementsym = lsplt[0]

        if bs_name is None:
            bs_name = lsplt[1]
        elif lsplt[1] != bs_name:
            raise RuntimeError("Multiple basis sets in a file")

        # Element Z-numbers are stored as strings in BSE data
        element_Z = lut.element_Z_from_sym(elementsym)
        element_Z = str(element_Z)

        if not element_Z in bs_data['elements']:
            bs_data['elements'][element_Z] = {}

        element_data = bs_data['elements'][element_Z]

        if not 'electron_shells' in element_data:
            element_data['electron_shells'] = []

        # Next line: maximum angular momentum for this element
        i += 1
        max_am = int(basis_lines[i].strip())

        # One shell block per angular momentum, in strictly increasing order
        i += 1
        for am in range(0, max_am + 1):
            # Shell header: "<am char> <nprim> <ngeneral>"
            lsplt = basis_lines[i].split()
            shell_am = lut.amchar_to_int(lsplt[0])
            nprim = int(lsplt[1])
            ngen = int(lsplt[2])

            if shell_am[0] != am:
                raise RuntimeError("AM out of order in gbasis?")

            # s and p shells are identical in cartesian and spherical forms
            if max(shell_am) <= 1:
                func_type = 'gto'
            else:
                func_type = 'gto_spherical'

            shell = {
                'function_type': func_type,
                'region': '',
                'angular_momentum': shell_am
            }

            exponents = []
            coefficients = []

            # One primitive per line: exponent followed by ngen coefficients
            i += 1
            for j in range(nprim):
                # Convert fortran-style exponents ('D'/'d') to python style ('E')
                line = basis_lines[i].replace('D', 'E')
                line = line.replace('d', 'E')
                lsplt = line.split()
                if len(lsplt) != (ngen + 1):
                    raise RuntimeError("Incorrect number of general contractions in gbasis")
                exponents.append(lsplt[0])
                coefficients.append(lsplt[1:])
                i += 1

            shell['exponents'] = exponents

            # We need to transpose the coefficient matrix
            # (we store a matrix with primitives being the column index and
            # general contraction being the row index)
            shell['coefficients'] = list(map(list, zip(*coefficients)))

            element_data['electron_shells'].append(shell)

    return bs_data
def read_molcas(basis_lines, fname):
    '''Reads molcas-formatted file data and converts it to a dictionary with the
    usual BSE fields

    Note that the molcas format does not store all the fields we
    have, so some fields are left blank

    Parameters
    ----------
    basis_lines : list of str
        Lines of the molcas file
    fname : str
        Name of the file being read (used only in error output)
    '''
    # Lines starting with these characters are comments
    skipchars = '*#$'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    i = 0
    while i < len(basis_lines):
        # Element header looks like "/<symbol>.<basis>...."
        line = basis_lines[i]
        if not line.startswith('/'):
            raise RuntimeError("Expecting line starting with /")

        line_splt = line[1:].split('.')
        elementsym = line_splt[0]

        # Element Z-numbers are stored as strings in BSE data
        element_Z = lut.element_Z_from_sym(elementsym)
        element_Z = str(element_Z)

        if not element_Z in bs_data['elements']:
            bs_data['elements'][element_Z] = {}

        element_data = bs_data['elements'][element_Z]

        if "ecp" in line.lower():
            # ECP parsing is stubbed out (see dead code below for a start)
            raise NotImplementedError("MolCAS ECPs not supported")
            #if not 'ecp_potentials' in element_data:
            #    element_data['ecp_potentials'] = []
            #i += 1
            #line = basis_lines[i]
            #lsplt = line.split('=')
            #maxam = int(lsplt[2])
            #n_elec = int(lsplt[1].split()[0])
            #amlist = [maxam]
            #amlist.extend(list(range(0, maxam)))
            #i += 1
            #for shell_am in amlist:
            #    shell_am2 = lut.amchar_to_int(basis_lines[i][0])[0]
            #    if shell_am2 != shell_am:
            #        raise RuntimeError("AM not in expected order?")
            #    i += 1
            #    ecp_shell = {
            #        'ecp_type': 'scalar',
            #        'angular_momentum': [shell_am],
            #    }
            #    ecp_exponents = []
            #    ecp_rexponents = []
            #    ecp_coefficients = []
            #    while i < len(basis_lines) and basis_lines[i][0].isalpha() is False:
            #        lsplt = basis_lines[i].split()
            #        ecp_exponents.append(lsplt[2])
            #        ecp_rexponents.append(int(lsplt[1]))
            #        ecp_coefficients.append(lsplt[0])
            #        i += 1
            #    ecp_shell['r_exponents'] = ecp_rexponents
            #    ecp_shell['gaussian_exponents'] = ecp_exponents
            #    ecp_shell['coefficients'] = [ecp_coefficients]
            #    element_data['ecp_potentials'].append(ecp_shell)
            #element_data['ecp_electrons'] = n_elec
        else:
            if not 'electron_shells' in element_data:
                element_data['electron_shells'] = []

            # Skip two comment lines (usually ref)
            i += 3

            # Skip over an options block
            line = basis_lines[i]
            if line.lower() == 'options':
                while basis_lines[i].lower() != 'endoptions':
                    i += 1
                i += 1

            # Header: second field is the max angular momentum
            lsplt = basis_lines[i].split()
            max_am = int(lsplt[1])

            i += 1
            for shell_am in range(max_am + 1):
                # Shell header: "<nprim>,<ngeneral>" (comma or space separated)
                lsplt = basis_lines[i].replace(',', ' ').split()
                nprim = int(lsplt[0])
                ngen = int(lsplt[1])
                i += 1

                # s and p shells are identical in cartesian and spherical forms
                if shell_am <= 1:
                    func_type = 'gto'
                else:
                    func_type = 'gto_spherical'

                shell = {
                    'function_type': func_type,
                    'region': '',
                    'angular_momentum': [shell_am]
                }

                exponents = []
                coefficients = []

                # Exponents: nprim values, possibly wrapped over several lines
                j = 0
                while j < nprim:
                    # Convert fortran-style exponents ('D'/'d') to python style
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    lsplt = line.split()
                    exponents.extend(lsplt)
                    i += 1
                    j += len(lsplt)

                # Coefficients: one line per primitive, ngen values per line
                for j in range(nprim):
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    lsplt = line.split()
                    if len(lsplt) != ngen:
                        print(fname)
                        print(line)
                        raise RuntimeError("Unexpected number of coefficients")
                    coefficients.append(lsplt)
                    i += 1

                shell['exponents'] = exponents

                # We need to transpose the coefficient matrix
                # (we store a matrix with primitives being the column index and
                # general contraction being the row index)
                shell['coefficients'] = list(map(list, zip(*coefficients)))

                element_data['electron_shells'].append(shell)

            # Skip energies?
            # (a count line followed by that many whitespace-separated values)
            to_skip = int(basis_lines[i].strip())
            skipped = 0
            i += 1
            while skipped < to_skip:
                skipped += len(basis_lines[i].split())
                i += 1

    return bs_data
def _read_plain_json(file_path, check_bse):
    """
    Reads a JSON file

    A simple wrapper around json.load that only takes the file name.
    Transparently handles bz2-compressed files ('.bz2' suffix).

    If the file does not exist, an exception is thrown.
    If the file does exist, but there is a problem with the JSON formatting,
    the filename is added to the exception information.

    If check_bse is True, this function also makes sure the 'molssi_bse_schema'
    key exists in the file.

    Parameters
    ----------
    file_path : str
        Full path to the file to read
    check_bse: bool
        If True, check to make sure the bse schema information is included.
        If not found, an exception is raised
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError('JSON file \'{}\' does not exist, is not '
                                'readable, or is not a file'.format(file_path))

    # Pick the opener based on compression; both accept text mode + encoding
    opener = bz2.open if file_path.endswith('.bz2') else open

    try:
        with opener(file_path, 'rt', encoding=_default_encoding) as handle:
            js = json.load(handle)
    except json.decoder.JSONDecodeError as ex:
        raise RuntimeError("File {} contains JSON errors".format(file_path)) from ex

    if check_bse is True:
        # Check for molssi_bse_schema key
        if 'molssi_bse_schema' not in js:
            raise RuntimeError('File {} does not appear to be a BSE JSON file'.format(file_path))

    return js
def _write_plain_json(file_path, js):
    """
    Write information to a JSON file

    This makes sure files are created with the proper encoding and consistent
    indenting. Transparently writes bz2-compressed files ('.bz2' suffix).

    Parameters
    ----------
    file_path : str
        Full path to the file to write to. It will be overwritten if it exists
    js : dict
        JSON information to write
    """
    # Pick the opener based on compression; both accept text mode + encoding
    opener = bz2.open if file_path.endswith('.bz2') else open

    # ensure_ascii=False prevents the json writer from escaping everything
    with opener(file_path, 'wt', encoding=_default_encoding) as handle:
        json.dump(js, handle, indent=2, ensure_ascii=False)
def read_notes_file(file_path):
    """
    Return the contents of a notes file.

    If the notes file does not exist, None is returned.
    """
    if os.path.isfile(file_path):
        with open(file_path, 'r', encoding=_default_encoding) as handle:
            return handle.read()
    return None
q34509 | _whole_basis_types | train | def _whole_basis_types(basis):
'''
Get a list of all the types of features in this basis set.
'''
all_types = set()
for v in basis['elements'].values():
if 'electron_shells' in v:
for sh in v['electron_shells']:
all_types.add(sh['function_type'])
if 'ecp_potentials' in v:
for pot in v['ecp_potentials']:
all_types.add(pot['ecp_type'])
return sorted(list(all_types)) | python | {
"resource": ""
} |
def compose_elemental_basis(file_relpath, data_dir):
    """
    Creates an 'elemental' basis from an elemental json file

    This function reads the info from the given file, and reads all the component
    basis set information from the files listed therein. It then composes all the
    information together into one 'elemental' basis dictionary

    Parameters
    ----------
    file_relpath : str
        Path of the elemental json file, relative to data_dir
    data_dir : str
        Root directory containing the basis set data files
    """
    # Do a simple read of the json
    el_bs = fileio.read_json_basis(os.path.join(data_dir, file_relpath))

    # construct a list of all files to read
    component_files = set()
    for k, v in el_bs['elements'].items():
        component_files.update(set(v['components']))

    # Read all the data from these files into a big dictionary
    # (maps component file path -> component basis data)
    component_map = {k: fileio.read_json_basis(os.path.join(data_dir, k)) for k in component_files}

    # Use the basis_set_description for the reference description
    # (wrap each element's raw reference keys with the component's description)
    for k, v in component_map.items():
        for el, el_data in v['elements'].items():
            el_data['references'] = [{
                'reference_description': v['description'],
                'reference_keys': el_data['references']
            }]

    # Compose on a per-element basis
    for k, v in el_bs['elements'].items():
        # 'components' is consumed here and removed from the output
        components = v.pop('components')

        # all of the component data for this element
        el_comp_data = []
        for c in components:
            centry = component_map[c]['elements']
            if k not in centry:
                raise RuntimeError('File {} does not contain element {}'.format(c, k))
            el_comp_data.append(centry[k])

        # merge all the data
        v = manip.merge_element_data(None, el_comp_data)
        el_bs['elements'][k] = v

    return el_bs
def compose_table_basis(file_relpath, data_dir):
    """
    Creates a 'table' basis from an table json file

    This function reads the info from the given file, and reads all the elemental
    basis set information from the files listed therein. It then composes all the
    information together into one 'table' basis dictionary

    Note that the data returned from this function will not be shared, even if
    the function is called again with the same arguments.

    Parameters
    ----------
    file_relpath : str
        Path of the table json file, relative to data_dir
    data_dir : str
        Root directory containing the basis set data files
    """
    # Do a simple read of the json
    file_path = os.path.join(data_dir, file_relpath)
    table_bs = fileio.read_json_basis(file_path)

    # construct a list of all elemental files to read
    element_files = set(table_bs['elements'].values())

    # Create a map of the elemental basis data
    # (maps file path to data contained in that file)
    element_map = {k: compose_elemental_basis(k, data_dir) for k in element_files}

    # Replace the basis set for all elements in the table basis with the data
    # from the elemental basis
    for k, entry in table_bs['elements'].items():
        data = element_map[entry]

        if k not in data['elements']:
            raise KeyError('File {} does not contain element {}'.format(entry, k))

        table_bs['elements'][k] = data['elements'][k]

    # Add the version to the dictionary
    # (table file names end with ".<version>.table.json", so the version is
    # the third-from-last dot-separated field)
    file_base = os.path.basename(file_relpath)
    table_bs['version'] = file_base.split('.')[-3]

    # Add whether the entire basis is spherical or cartesian
    table_bs['function_types'] = _whole_basis_types(table_bs)

    # Read and merge in the metadata
    # This file must be in the same location as the table file
    meta_dirpath, table_filename = os.path.split(file_path)
    meta_filename = table_filename.split('.')[0] + '.metadata.json'
    meta_filepath = os.path.join(meta_dirpath, meta_filename)
    bs_meta = fileio.read_json_basis(meta_filepath)
    table_bs.update(bs_meta)

    # Remove the molssi schema (which isn't needed here)
    table_bs.pop('molssi_bse_schema')

    return table_bs
def create_skel(role):
    '''
    Create the skeleton of a dictionary or JSON file

    A dictionary is returned that contains the "molssi_bse_schema"
    key and other required keys, depending on the role.

    role can be 'component', 'element', 'table', or 'metadata'

    Raises
    ------
    RuntimeError
        If the role is not recognized
    '''
    role = role.lower()
    if role not in _skeletons:
        # Bug fix: the message previously contained a literal "{}" because
        # .format(role) was never called
        raise RuntimeError(
            "Role {} not found. Should be 'component', 'element', 'table', or 'metadata'".format(role))

    # Deep copy so callers may freely modify the returned skeleton
    return copy.deepcopy(_skeletons[role])
def process_notes(notes, ref_data):
    '''Add reference information to the bottom of a notes file

    Every key from ref_data that is mentioned in the notes text has its
    full reference rendered and appended. If no keys are mentioned, the
    notes are returned unchanged.
    '''
    mentioned = {key for key in ref_data.keys() if key in notes}
    if not mentioned:
        return notes

    # Assemble the appended block piecewise and join at the end
    parts = ['\n\n'
             '-------------------------------------------------\n'
             ' REFERENCES MENTIONED ABOVE\n'
             ' (not necessarily references for the basis sets)\n'
             '-------------------------------------------------\n']

    for key in sorted(mentioned):
        rendered = references.reference_text(ref_data[key])
        parts.append(key + '\n')
        parts.append(textwrap.indent(rendered, ' ' * 4))
        parts.append('\n\n')

    return notes + ''.join(parts)
q34514 | _validate_extra_component | train | def _validate_extra_component(bs_data):
'''Extra checks for component basis files'''
assert len(bs_data['elements']) > 0
# Make sure size of the coefficient matrix matches the number of exponents
for el in bs_data['elements'].values():
if not 'electron_shells' in el:
continue
for s in el['electron_shells']:
nprim = len(s['exponents'])
if nprim <= 0:
raise RuntimeError("Invalid number of primitives: {}".format(nprim))
for g in s['coefficients']:
if nprim != len(g):
raise RuntimeError("Number of coefficients doesn't match number of primitives ({} vs {}".format(
len(g), nprim))
# If more than one AM is given, that should be the number of
# general contractions
nam = len(s['angular_momentum'])
if nam > 1:
ngen = len(s['coefficients'])
if ngen != nam:
raise RuntimeError("Number of general contractions doesn't match combined AM ({} vs {}".format(
ngen, nam)) | python | {
"resource": ""
} |
def validate_data(file_type, bs_data):
    """
    Validate basis set data against the schema for the given file type

    Parameters
    ----------
    file_type : str
        Type of file to read. May be 'component', 'element', 'table', or 'references'
    bs_data:
        Data to be validated

    Raises
    ------
    RuntimeError
        If the file_type is not valid (and/or a schema doesn't exist)
    ValidationError
        If the given data does not pass validation
    """
    if file_type not in _validate_map:
        raise RuntimeError("{} is not a valid file_type".format(file_type))

    # Generic JSON-schema validation first...
    jsonschema.validate(bs_data, api.get_schema(file_type))

    # ...then the extra per-type checks the schema cannot express
    _validate_map[file_type](bs_data)
def validate_file(file_type, file_path):
    """
    Validate a file against a schema

    Parameters
    ----------
    file_type : str
        Type of file to read. May be 'component', 'element', 'table', or 'references'
    file_path:
        Full path to the file to be validated

    Raises
    ------
    RuntimeError
        If the file_type is not valid (and/or a schema doesn't exist)
    ValidationError
        If the given file does not pass validation
    FileNotFoundError
        If the file given by file_path doesn't exist
    """
    # Read without the BSE-schema check; validate_data does the real work
    contents = fileio._read_plain_json(file_path, False)
    validate_data(file_type, contents)
def validate_data_dir(data_dir):
    """
    Validates all files in a data_dir
    """
    all_meta, all_table, all_element, all_component = fileio.get_all_filelist(data_dir)

    # Validate each file list against its corresponding schema type
    groups = (('metadata', all_meta), ('table', all_table),
              ('element', all_element), ('component', all_component))

    for file_type, file_list in groups:
        for relpath in file_list:
            validate_file(file_type, os.path.join(data_dir, relpath))
def sort_basis_dict(bs):
    """Sorts a basis set dictionary into a standard order

    This, for example, allows the written file to be more easily read by humans by,
    for example, putting the name and description before more detailed fields.

    This is generally for cosmetic reasons. However, users will generally like things
    in a consistent order

    Sorting recurses into nested dictionaries and into dictionaries inside
    lists, but the lists themselves are never reordered.
    """
    # yapf: disable
    _keyorder = [
        # Schema stuff
        'molssi_bse_schema', 'schema_type', 'schema_version',

        # Auxiliary block
        'jkfit', 'jfit', 'rifit', 'admmfit', 'dftxfit', 'dftjfit',

        # Basis set metadata
        'name', 'names', 'aliases', 'flags', 'family', 'description', 'role', 'auxiliaries',
        'notes', 'function_types',

        # Reference stuff
        'reference_description', 'reference_keys',

        # Version metadata
        'version', 'revision_description',

        # Sources of components
        'data_source',

        # Elements and data
        'elements', 'references', 'ecp_electrons',
        'electron_shells', 'ecp_potentials', 'components',

        # Shell information
        'function_type', 'region', 'angular_momentum', 'exponents',
        'coefficients',
        'ecp_type', 'angular_momentum', 'r_exponents', 'gaussian_exponents',
        'coefficients'
    ]
    # yapf: enable

    # Add integers for the elements (being optimistic that element 150 will be found someday)
    _keyorder.extend([str(x) for x in range(150)])

    # Sort top-level keys by their first occurrence in _keyorder.
    # NOTE(review): a key missing from _keyorder raises ValueError from .index
    # — presumably all keys appearing in BSE data are listed; confirm.
    bs_sorted = sorted(bs.items(), key=lambda x: _keyorder.index(x[0]))
    if _use_odict:
        bs_sorted = OrderedDict(bs_sorted)
    else:
        bs_sorted = dict(bs_sorted)

    for k, v in bs_sorted.items():
        # If this is a dictionary, sort recursively
        # If this is a list, sort each element but DO NOT sort the list itself.
        if isinstance(v, dict):
            bs_sorted[k] = sort_basis_dict(v)
        elif isinstance(v, list):
            # Note - the only nested list is with coeffs, which shouldn't be sorted
            # (so we don't have to recurse into lists of lists)
            bs_sorted[k] = [sort_basis_dict(x) if isinstance(x, dict) else x for x in v]

    return bs_sorted
def sort_shell(shell, use_copy=True):
    """
    Sort a basis set shell into a standard order

    Primitives are ordered by decreasing exponent, then (stably) by the
    index of the first general contraction in which the primitive has a
    nonzero coefficient. The general contraction columns are then ordered
    by the index of their first nonzero coefficient.

    If use_copy is True, the input shell is not modified.
    """
    if use_copy:
        shell = copy.deepcopy(shell)

    # Transpose of coefficients: tmp_c[p] lists the coefficients of
    # primitive p across all general contractions
    tmp_c = list(map(list, zip(*shell['coefficients'])))

    # For each primitive, find the index of the first nonzero coefficient
    # (None if the primitive is zero everywhere; int() on that would raise)
    nonzero_idx = [next((i for i, x in enumerate(c) if float(x) != 0.0), None) for c in tmp_c]

    # Zip together exponents and coeffs for sorting
    tmp = zip(shell['exponents'], tmp_c, nonzero_idx)

    # Sort by decreasing value of exponent
    tmp = sorted(tmp, key=lambda x: -float(x[0]))

    # Now (stable) sort by first non-zero coefficient
    tmp = sorted(tmp, key=lambda x: int(x[2]))

    # Unpack, and re-transpose the coefficients
    tmp_c = [x[1] for x in tmp]
    shell['exponents'] = [x[0] for x in tmp]

    # Now sort the columns of the coefficient by index of first nonzero coefficient
    tmp_c = list(map(list, zip(*tmp_c)))
    nonzero_idx = [next((i for i, x in enumerate(c) if float(x) != 0.0), None) for c in tmp_c]
    tmp = zip(tmp_c, nonzero_idx)
    tmp = sorted(tmp, key=lambda x: int(x[1]))
    tmp_c = [x[0] for x in tmp]

    shell['coefficients'] = tmp_c
    return shell
def sort_shells(shells, use_copy=True):
    """
    Sort a list of basis set shells into a standard order

    The order within a shell is by decreasing value of the exponent.

    The shell list itself is ordered by increasing angular momentum, then
    by decreasing number of primitives, then by decreasing number of
    general contractions, then by decreasing value of the largest exponent.

    If use_copy is True, the input shells are not modified.
    """
    if use_copy:
        shells = copy.deepcopy(shells)

    # Sort primitives within each shell (copying already handled above)
    normalized = [sort_shell(sh, False) for sh in shells]

    def _shell_sort_key(sh):
        return (max(sh['angular_momentum']),
                -len(sh['exponents']),
                -len(sh['coefficients']),
                -float(max(sh['exponents'])))

    return sorted(normalized, key=_shell_sort_key)
def sort_potentials(potentials, use_copy=True):
    """
    Sort a list of ECP potentials into a standard order

    The order within a potential is not modified.

    The list is ordered by increasing angular momentum, except that the
    highest angular momentum is moved to the front.

    If use_copy is True, the input potentials are not modified.
    """
    if use_copy:
        potentials = copy.deepcopy(potentials)

    ordered = sorted(potentials, key=lambda pot: pot['angular_momentum'])

    # Highest AM (last after sorting) goes first
    return [ordered[-1]] + ordered[:-1]
def sort_basis(basis, use_copy=True):
    """
    Sorts all the information in a basis set into a standard order

    If use_copy is True, the input basis set is not modified.
    """
    if use_copy:
        basis = copy.deepcopy(basis)

    # Sort the shells/potentials of each element, then the keys of the
    # whole dictionary
    for element_data in basis['elements'].values():
        if 'electron_shells' in element_data:
            element_data['electron_shells'] = sort_shells(element_data['electron_shells'], False)
        if 'ecp_potentials' in element_data:
            element_data['ecp_potentials'] = sort_potentials(element_data['ecp_potentials'], False)

    return sort_basis_dict(basis)
def sort_single_reference(ref_entry):
    """Sorts a dictionary containing data for a single reference into a standard order
    """
    # yapf: disable
    _keyorder = [
        # Schema stuff
        # This function gets called on the schema 'entry', too
        'schema_type', 'schema_version',

        # Type of the entry
        'type',

        # Actual publication info
        'authors', 'title', 'booktitle', 'series', 'editors', 'journal',
        'institution', 'volume', 'number', 'page', 'year', 'note', 'publisher',
        'address', 'isbn', 'doi'
    ]
    # yapf: enable

    ordered = sorted(ref_entry.items(), key=lambda kv: _keyorder.index(kv[0]))
    return OrderedDict(ordered) if _use_odict else dict(ordered)
def sort_references_dict(refs):
    """Sorts a reference dictionary into a standard order

    The keys of the references are also sorted, and the keys for the data for each
    reference are put in a more canonical order.
    """
    refs_sorted = OrderedDict() if _use_odict else dict()

    # Insert the schema entry first so it leads the output; the loop below
    # overwrites it with its fully-sorted version
    refs_sorted['molssi_bse_schema'] = refs['molssi_bse_schema']

    # Entries sorted by reference key (author1985a, etc)
    for key in sorted(refs.keys()):
        refs_sorted[key] = sort_single_reference(refs[key])

    return refs_sorted
def read_dalton(basis_lines, fname):
    '''Reads Dalton-formatted file data and converts it to a dictionary with the
    usual BSE fields

    Note that the dalton format does not store all the fields we
    have, so some fields are left blank

    Parameters
    ----------
    basis_lines : list of str
        Lines of the dalton file
    fname : str
        Name of the file being read (unused; kept for a uniform reader interface)
    '''
    # Lines starting with these characters are comments
    skipchars = '$'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    i = 0
    # NOTE(review): if a line does not start with 'a ' here, i is never
    # advanced and this loop would spin forever — presumably the first
    # non-comment line always is an element header; confirm.
    while i < len(basis_lines):
        # Element header: "a <Z-number>"
        line = basis_lines[i]
        if line.lower().startswith('a '):
            element_Z = line.split()[1]
            i += 1

            # Shell am is strictly increasing (I hope)
            shell_am = 0

            # Everything up to the next element header belongs to this element
            while i < len(basis_lines) and not basis_lines[i].lower().startswith('a '):
                # Shell header: "<nprim> <ngeneral>"
                line = basis_lines[i]
                nprim, ngen = line.split()

                if not element_Z in bs_data['elements']:
                    bs_data['elements'][element_Z] = {}
                if not 'electron_shells' in bs_data['elements'][element_Z]:
                    bs_data['elements'][element_Z]['electron_shells'] = []

                element_data = bs_data['elements'][element_Z]

                # s and p shells are identical in cartesian and spherical forms
                if shell_am <= 1:
                    func_type = 'gto'
                else:
                    func_type = 'gto_spherical'

                shell = {
                    'function_type': func_type,
                    'region': '',
                    'angular_momentum': [shell_am]
                }

                exponents = []
                coefficients = []

                # One primitive per line: exponent followed by coefficients
                i += 1
                for _ in range(int(nprim)):
                    # Convert fortran-style exponents ('D'/'d') to python style
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    lsplt = line.split()
                    exponents.append(lsplt[0])
                    coefficients.append(lsplt[1:])
                    i += 1

                shell['exponents'] = exponents

                # We need to transpose the coefficient matrix
                # (we store a matrix with primitives being the column index and
                # general contraction being the row index)
                shell['coefficients'] = list(map(list, zip(*coefficients)))

                # Make sure the number of general contractions is >0
                # (This error was found in some bad files)
                if int(ngen) <= 0:
                    raise RuntimeError("Number of general contractions is not greater than zero for element " + str(element_Z))

                # Make sure the number of general contractions match the heading line
                if len(shell['coefficients']) != int(ngen):
                    raise RuntimeError("Number of general contractions does not equal what was given for element " + str(element_Z))

                element_data['electron_shells'].append(shell)
                shell_am += 1

    return bs_data
def find_range(coeffs):
    '''
    Find the range in a list of coefficients where the coefficient is nonzero

    Returns a (first, last) index pair. Raises ValueError when every
    coefficient is zero.
    '''
    flags = [float(c) != 0 for c in coeffs]
    first = flags.index(True)
    # Index of the last True, found by scanning a reversed copy
    last = len(flags) - 1 - flags[::-1].index(True)
    return first, last
q34527 | _ref_bib | train | def _ref_bib(key, ref):
'''Convert a single reference to bibtex format
'''
s = ''
s += '@{}{{{},\n'.format(ref['type'], key)
entry_lines = []
for k, v in ref.items():
if k == 'type':
continue
# Handle authors/editors
if k == 'authors':
entry_lines.append(' author = {{{}}}'.format(' and '.join(v)))
elif k == 'editors':
entry_lines.append(' editor = {{{}}}'.format(' and '.join(v)))
else:
entry_lines.append(' {} = {{{}}}'.format(k, v))
s += ',\n'.join(entry_lines)
s += '\n}'
return s | python | {
"resource": ""
} |
def write_bib(refs):
    '''Converts references to bibtex

    The output starts with the library citation block, followed by a
    commented element/description -> key index, and finally one bibtex
    entry per unique reference key.
    '''
    full_str = ''

    # Library citation block at the top, inside a banner of '%'
    lib_citation_desc, lib_citations = get_library_citation()

    full_str += '%' * 80 + '\n'
    full_str += textwrap.indent(lib_citation_desc, '% ')
    full_str += '%' * 80 + '\n\n'

    for k, r in lib_citations.items():
        full_str += _ref_bib(k, r) + '\n\n'

    full_str += '%' * 80 + '\n'
    full_str += "% References for the basis set\n"
    full_str += '%' * 80 + '\n'

    # First, write out the element, description -> key mapping
    # Also make a dict of unique reference to output
    unique_refs = {}

    for ref in refs:
        full_str += '% {}\n'.format(compact_elements(ref['elements']))

        for ri in ref['reference_info']:
            full_str += '% {}\n'.format(ri['reference_description'])

            refdata = ri['reference_data']

            if len(refdata) == 0:
                full_str += '% (...no reference...)\n%\n'
            else:
                rkeys = [x[0] for x in ri['reference_data']]
                full_str += '% {}\n%\n'.format(' '.join(rkeys))

            # Remember every reference seen; later keys overwrite earlier
            # ones (the data for a given key is the same everywhere)
            for k, r in refdata:
                unique_refs[k] = r

    full_str += '\n\n'

    # Go through them sorted alphabetically by key
    for k, r in sorted(unique_refs.items(), key=lambda x: x[0]):
        full_str += '{}\n\n'.format(_ref_bib(k, r))

    return full_str
def write_turbomole(basis):
    '''Converts a basis set to Turbomole format

    (The previous docstring said "Gaussian format"; this writer emits
    Turbomole $basis/$ecp blocks.)
    '''
    s = '$basis\n'
    s += '*\n'

    # TM basis sets are completely uncontracted
    basis = manip.uncontract_general(basis, True)
    basis = manip.uncontract_spdf(basis, 0, False)
    basis = sort.sort_basis(basis, False)

    # Elements for which we have electron basis
    electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]

    # Elements for which we have ECP
    ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]

    # Electron Basis
    if len(electron_elements) > 0:
        for z in electron_elements:
            data = basis['elements'][z]

            sym = lut.element_sym_from_Z(z, False)
            s += '{} {}\n'.format(sym, basis['name'])
            s += '*\n'

            for shell in data['electron_shells']:
                exponents = shell['exponents']
                coefficients = shell['coefficients']
                ncol = len(coefficients) + 1
                nprim = len(exponents)

                am = shell['angular_momentum']
                amchar = lut.amint_to_char(am, hij=True)
                s += ' {} {}\n'.format(nprim, amchar)

                # Column positions for the exponent + coefficient matrix
                point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
                s += printing.write_matrix([exponents, *coefficients], point_places, convert_exp=True)
            s += '*\n'

    # Write out ECP
    if len(ecp_elements) > 0:
        s += '$ecp\n'
        s += '*\n'
        for z in ecp_elements:
            data = basis['elements'][z]

            sym = lut.element_sym_from_Z(z)
            s += '{} {}-ecp\n'.format(sym, basis['name'])
            s += '*\n'

            max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
            max_ecp_amchar = lut.amint_to_char([max_ecp_am], hij=True)

            # Sort lowest->highest, then put the highest at the beginning
            ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
            ecp_list.insert(0, ecp_list.pop())

            s += ' ncore = {} lmax = {}\n'.format(data['ecp_electrons'], max_ecp_am)

            for pot in ecp_list:
                rexponents = pot['r_exponents']
                gexponents = pot['gaussian_exponents']
                coefficients = pot['coefficients']

                am = pot['angular_momentum']
                amchar = lut.amint_to_char(am, hij=True)

                # The highest AM is written bare; others as "<am>-<max am>"
                if am[0] == max_ecp_am:
                    s += '{}\n'.format(amchar)
                else:
                    s += '{}-{}\n'.format(amchar, max_ecp_amchar)

                point_places = [9, 23, 32]
                s += printing.write_matrix([*coefficients, rexponents, gexponents], point_places, convert_exp=True)
            s += '*\n'
    s += '$end\n'
    return s
def compact_references(basis_dict, ref_data):
    """
    Creates a mapping of elements to reference keys

    A list is returned, with each element of the list being a dictionary
    with entries 'reference_info' containing data for (possibly) multiple
    references, and 'elements' which is a list of element Z numbers that
    those references apply to.

    Parameters
    ----------
    basis_dict : dict
        Dictionary containing basis set information
    ref_data : dict
        Dictionary containing all reference information
    """
    grouped = []

    # Walk elements in order of atomic number (keys are strings), grouping
    # elements that share identical reference information
    for z, eldata in sorted(basis_dict['elements'].items(), key=lambda kv: int(kv[0])):
        # refinfo is a list of dict:
        # { 'reference_description': str, 'reference_keys': [keys] }
        refinfo = eldata['references']

        existing = next((g for g in grouped if g['reference_info'] == refinfo), None)
        if existing is None:
            grouped.append({'reference_info': refinfo, 'elements': [z]})
        else:
            existing['elements'].append(z)

    # Attach the actual reference data for each group; the raw keys are
    # stored alongside the data, so they are no longer needed
    for group in grouped:
        for refinfo in group['reference_info']:
            refinfo['reference_data'] = [(k, ref_data[k]) for k in refinfo['reference_keys']]
            del refinfo['reference_keys']

    return grouped
q34531 | reference_text | train | def reference_text(ref):
'''Convert a single reference to plain text format
Parameters
----------
ref : dict
Information about a single reference
'''
ref_wrap = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 8)
s = ''
if ref['type'] == 'unpublished':
s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
s += ref_wrap.fill(ref['title']) + '\n'
s += ref_wrap.fill(ref['note']) + '\n'
elif ref['type'] == 'article':
s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
s += ref_wrap.fill(ref['title']) + '\n'
s += '{}, {}, {} ({})'.format(ref['journal'], ref['volume'], ref['page'], ref['year'])
s += '\n' + ref['doi']
elif ref['type'] == 'incollection':
s += ref_wrap.fill(', '.join(ref['authors']))
s += ref_wrap.fill('\n{}'.format(ref['title']))
s += ref_wrap.fill('\nin \'{}\''.format(ref['booktitle']))
if 'editors' in ref:
s += ref_wrap.fill('\ned. ' + ', '.join(ref['editors']))
if 'series' in ref:
s += '\n{}, {}, {} ({})'.format(ref['series'], ref['volume'], ref['page'], ref['year'])
if 'doi' in ref:
s += '\n' + ref['doi']
elif ref['type'] == 'techreport':
s += ref_wrap.fill(', '.join(ref['authors']))
s += ref_wrap.fill('\n{}'.format(ref['title']))
s += '\n\'{}\''.format(ref['institution'])
s += '\nTechnical Report {}'.format(ref['number'])
s += '\n{}'.format(ref['year'])
if 'doi' in ref:
s += '\n' + ref['doi']
elif ref['type'] == 'misc':
s += ref_wrap.fill(', '.join(ref['authors'])) + '\n'
s += ref_wrap.fill(ref['title'])
if 'note' in ref:
s += '\n' + ref['note']
if 'doi' in ref:
s += '\n' + ref['doi']
else:
raise RuntimeError('Cannot handle reference type {}'.format(ref['type']))
return s | python | {
"resource": ""
} |
q34532 | _determine_leftpad | train | def _determine_leftpad(column, point_place):
'''Find how many spaces to put before a column of numbers
so that all the decimal points line up
This function takes a column of decimal numbers, and returns a
vector containing the number of spaces to place before each number
so that (when possible) the decimal points line up.
Parameters
----------
column : list
Numbers that will be printed as a column
point_place : int
Number of the character column to put the decimal point
'''
# Find the number of digits before the decimal
ndigits_left = [_find_point(x) for x in column]
# find the padding per entry, filtering negative numbers
return [max((point_place - 1) - x, 0) for x in ndigits_left] | python | {
"resource": ""
} |
q34533 | electron_shell_str | train | def electron_shell_str(shell, shellidx=None):
'''Return a string representing the data for an electron shell
If shellidx (index of the shell) is not None, it will also be printed
'''
am = shell['angular_momentum']
amchar = lut.amint_to_char(am)
amchar = amchar.upper()
shellidx_str = ''
if shellidx is not None:
shellidx_str = 'Index {} '.format(shellidx)
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s = "Shell: {}Region: {}: AM: {}\n".format(shellidx_str, shell['region'], amchar)
s += "Function: {}\n".format(shell['function_type'])
s += write_matrix([exponents, *coefficients], point_places)
return s | python | {
"resource": ""
} |
q34534 | ecp_pot_str | train | def ecp_pot_str(pot):
'''Return a string representing the data for an ECP potential
'''
am = pot['angular_momentum']
amchar = lut.amint_to_char(am)
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
point_places = [0, 10, 33]
s = 'Potential: {} potential\n'.format(amchar)
s += 'Type: {}\n'.format(pot['ecp_type'])
s += write_matrix([rexponents, gexponents, *coefficients], point_places)
return s | python | {
"resource": ""
} |
q34535 | element_data_str | train | def element_data_str(z, eldata):
'''Return a string with all data for an element
This includes shell and ECP potential data
Parameters
----------
z : int or str
Element Z-number
eldata: dict
Data for the element to be printed
'''
sym = lut.element_sym_from_Z(z, True)
cs = contraction_string(eldata)
if cs == '':
cs = '(no electron shells)'
s = '\nElement: {} : {}\n'.format(sym, cs)
if 'electron_shells' in eldata:
for shellidx, shell in enumerate(eldata['electron_shells']):
s += electron_shell_str(shell, shellidx) + '\n'
if 'ecp_potentials' in eldata:
s += 'ECP: Element: {} Number of electrons: {}\n'.format(sym, eldata['ecp_electrons'])
for pot in eldata['ecp_potentials']:
s += ecp_pot_str(pot) + '\n'
return s | python | {
"resource": ""
} |
q34536 | component_basis_str | train | def component_basis_str(basis, elements=None):
'''Print a component basis set
If elements is not None, only the specified elements will be printed
(see :func:`bse.misc.expand_elements`)
'''
s = "Description: " + basis['description'] + '\n'
eldata = basis['elements']
# Filter to the given elements
if elements is None:
elements = list(eldata.keys())
else:
elements = expand_elements(elements, True)
# Add the str for each element
for z in elements:
s += element_data_str(z, eldata[z]) + '\n'
return s | python | {
"resource": ""
} |
q34537 | write_molpro | train | def write_molpro(basis):
'''Converts a basis set to Molpro format
'''
# Uncontract all, and make as generally-contracted as possible
basis = manip.uncontract_spdf(basis, 0, True)
basis = manip.make_general(basis, False)
basis = sort.sort_basis(basis, True)
s = ''
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
if len(electron_elements) > 0:
# basis set starts with a string
s += 'basis={\n'
# Electron Basis
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z).upper()
s += '!\n'
s += '! {:20} {}\n'.format(lut.element_name_from_Z(z), misc.contraction_string(data))
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
am = shell['angular_momentum']
amchar = lut.amint_to_char(am).lower()
s += '{}, {} , {}\n'.format(amchar, sym, ', '.join(exponents))
for c in coefficients:
first, last = find_range(c)
s += 'c, {}.{}, {}\n'.format(first + 1, last + 1, ', '.join(c[first:last + 1]))
s += '}\n'
# Write out ECP
if len(ecp_elements) > 0:
s += '\n\n! Effective core Potentials\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z).lower()
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += 'ECP, {}, {}, {} ;\n'.format(sym, data['ecp_electrons'], max_ecp_am)
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am).lower()
s += '{};'.format(len(rexponents))
if am[0] == max_ecp_am:
s += ' ! ul potential\n'
else:
s += ' ! {}-ul potential\n'.format(amchar)
for p in range(len(rexponents)):
s += '{},{},{};\n'.format(rexponents[p], gexponents[p], coefficients[0][p])
return s | python | {
"resource": ""
} |
q34538 | convert_basis | train | def convert_basis(basis_dict, fmt, header=None):
'''
Returns the basis set data as a string representing
the data in the specified output format
'''
# make converters case insensitive
fmt = fmt.lower()
if fmt not in _converter_map:
raise RuntimeError('Unknown basis set format "{}"'.format(fmt))
converter = _converter_map[fmt]
# Determine if the converter supports all the types in the basis_dict
if converter['valid'] is not None:
ftypes = set(basis_dict['function_types'])
if ftypes > converter['valid']:
raise RuntimeError('Converter {} does not support all function types: {}'.format(fmt, str(ftypes)))
# Actually do the conversion
ret_str = converter['function'](basis_dict)
if header is not None and fmt != 'json':
comment_str = _converter_map[fmt]['comment']
header_str = comment_str + comment_str.join(header.splitlines(True))
ret_str = header_str + '\n\n' + ret_str
# HACK - Psi4 requires the first non-comment line be spherical/cartesian
# so we have to add that before the header
if fmt == 'psi4':
types = basis_dict['function_types']
harm_type = 'spherical' if 'spherical_gto' in types else 'cartesian'
ret_str = harm_type + '\n\n' + ret_str
return ret_str | python | {
"resource": ""
} |
q34539 | get_formats | train | def get_formats(function_types=None):
'''
Returns the available formats mapped to display name.
This is returned as an ordered dictionary, with the most common
at the top, followed by the rest in alphabetical order
If a list is specified for function_types, only those formats
supporting the given function types will be returned.
'''
if function_types is None:
return {k: v['display'] for k, v in _converter_map.items()}
ftypes = [x.lower() for x in function_types]
ftypes = set(ftypes)
ret = []
for fmt, v in _converter_map.items():
if v['valid'] is None or ftypes <= v['valid']:
ret.append(fmt)
return ret | python | {
"resource": ""
} |
q34540 | get_format_extension | train | def get_format_extension(fmt):
'''
Returns the recommended extension for a given format
'''
if fmt is None:
return 'dict'
fmt = fmt.lower()
if fmt not in _converter_map:
raise RuntimeError('Unknown basis set format "{}"'.format(fmt))
return _converter_map[fmt]['extension'] | python | {
"resource": ""
} |
q34541 | _make_graph | train | def _make_graph(bsname, version=None, data_dir=None):
'''
Create a DOT graph file of the files included in a basis set
'''
if not graphviz_avail:
raise RuntimeError("graphviz package is not installed")
data_dir = api.fix_data_dir(data_dir)
md = api._get_basis_metadata(bsname, data_dir)
if version is None:
version = md['latest_version']
else:
version = str(version)
if not version in md['versions']:
raise RuntimeError("Version {} of {} doesn't exist".format(version, bsname))
gr = graphviz.Digraph(comment='Basis Set Graph: ' + bsname)
# Read the table file
table_path = os.path.join(data_dir, md['versions'][version]['file_relpath'])
table_data = fileio.read_json_basis(table_path)
table_edges = {}
for el, entry in table_data['elements'].items():
if entry not in table_edges:
table_edges[entry] = []
table_edges[entry].append(el)
for k, v in table_edges.items():
gr.edge(bsname, k, label=compact_elements(v))
# Element file
for elfile in table_edges.keys():
element_path = os.path.join(data_dir, elfile)
element_data = fileio.read_json_basis(element_path)
element_edges = {}
for el, components in element_data['elements'].items():
components = components['components']
components_str = '\n'.join(components)
# skip if this element for the table basis doesn't come from this file
if el not in table_data['elements']:
continue
if table_data['elements'][el] != elfile:
continue
if components_str not in element_edges:
element_edges[components_str] = []
element_edges[components_str].append(el)
for k, v in element_edges.items():
if len(v):
gr.edge(elfile, k, label=compact_elements(v))
return gr | python | {
"resource": ""
} |
q34542 | get_library_citation | train | def get_library_citation():
'''Return a descriptive string and reference data for what users of the library should cite'''
all_ref_data = api.get_reference_data()
lib_refs_data = {k: all_ref_data[k] for k in _lib_refs}
return (_lib_refs_desc, lib_refs_data) | python | {
"resource": ""
} |
q34543 | format_columns | train | def format_columns(lines, prefix=''):
'''
Create a simple column output
Parameters
----------
lines : list
List of lines to format. Each line is a tuple/list with each
element corresponding to a column
prefix : str
Characters to insert at the beginning of each line
Returns
-------
str
Columnated output as one big string
'''
if len(lines) == 0:
return ''
ncols = 0
for l in lines:
ncols = max(ncols, len(l))
if ncols == 0:
return ''
# We only find the max strlen for all but the last col
maxlen = [0] * (ncols - 1)
for l in lines:
for c in range(ncols - 1):
maxlen[c] = max(maxlen[c], len(l[c]))
fmtstr = prefix + ' '.join(['{{:{x}}}'.format(x=x) for x in maxlen])
fmtstr += ' {}'
return [fmtstr.format(*l) for l in lines] | python | {
"resource": ""
} |
q34544 | write_nwchem | train | def write_nwchem(basis):
'''Converts a basis set to NWChem format
'''
# Uncontract all but SP
basis = manip.uncontract_spdf(basis, 1, True)
basis = sort.sort_basis(basis, True)
s = ''
# Elements for which we have electron basis
electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
# Elements for which we have ECP
ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]
if len(electron_elements) > 0:
# basis set starts with a string
s += 'BASIS "ao basis" PRINT\n'
# Electron Basis
for z in electron_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, True)
s += '#BASIS SET: {}\n'.format(misc.contraction_string(data))
for shell in data['electron_shells']:
exponents = shell['exponents']
coefficients = shell['coefficients']
ncol = len(coefficients) + 1
am = shell['angular_momentum']
amchar = lut.amint_to_char(am).upper()
s += '{} {}\n'.format(sym, amchar)
point_places = [8 * i + 15 * (i - 1) for i in range(1, ncol + 1)]
s += printing.write_matrix([exponents, *coefficients], point_places)
s += 'END\n'
# Write out ECP
if len(ecp_elements) > 0:
s += '\n\nECP\n'
for z in ecp_elements:
data = basis['elements'][z]
sym = lut.element_sym_from_Z(z, True)
max_ecp_am = max([x['angular_momentum'][0] for x in data['ecp_potentials']])
# Sort lowest->highest, then put the highest at the beginning
ecp_list = sorted(data['ecp_potentials'], key=lambda x: x['angular_momentum'])
ecp_list.insert(0, ecp_list.pop())
s += '{} nelec {}\n'.format(sym, data['ecp_electrons'])
for pot in ecp_list:
rexponents = pot['r_exponents']
gexponents = pot['gaussian_exponents']
coefficients = pot['coefficients']
am = pot['angular_momentum']
amchar = lut.amint_to_char(am).upper()
if am[0] == max_ecp_am:
s += '{} ul\n'.format(sym)
else:
s += '{} {}\n'.format(sym, amchar)
point_places = [0, 10, 33]
s += printing.write_matrix([rexponents, gexponents, *coefficients], point_places)
s += 'END\n'
return s | python | {
"resource": ""
} |
q34545 | contraction_string | train | def contraction_string(element):
"""
Forms a string specifying the contractions for an element
ie, (16s,10p) -> [4s,3p]
"""
# Does not have electron shells (ECP only?)
if 'electron_shells' not in element:
return ""
cont_map = dict()
for sh in element['electron_shells']:
nprim = len(sh['exponents'])
ngeneral = len(sh['coefficients'])
# is a combined general contraction (sp, spd, etc)
is_spdf = len(sh['angular_momentum']) > 1
for am in sh['angular_momentum']:
# If this a general contraction (and not combined am), then use that
ncont = ngeneral if not is_spdf else 1
if am not in cont_map:
cont_map[am] = (nprim, ncont)
else:
cont_map[am] = (cont_map[am][0] + nprim, cont_map[am][1] + ncont)
primstr = ""
contstr = ""
for am in sorted(cont_map.keys()):
nprim, ncont = cont_map[am]
if am != 0:
primstr += ','
contstr += ','
primstr += str(nprim) + lut.amint_to_char([am])
contstr += str(ncont) + lut.amint_to_char([am])
return "({}) -> [{}]".format(primstr, contstr) | python | {
"resource": ""
} |
q34546 | expand_elements | train | def expand_elements(compact_el, as_str=False):
"""
Create a list of integers given a string or list of compacted elements
This is partly the opposite of compact_elements, but is more flexible.
compact_el can be a list or a string. If compact_el is a list, each element is processed individually
as a string (meaning list elements can contain commas, ranges, etc)
If compact_el is a string, it is split by commas and then each section is processed.
In all cases, element symbols (case insensitive) and Z numbers (as integers or strings)
can be used interchangeably. Ranges are also allowed in both lists and strings.
Some examples:
"H-Li,C-O,Ne" will return [1, 2, 3, 6, 7, 8, 10]
"H-N,8,Na-12" will return [1, 2, 3, 4, 5, 6, 7, 8, 11, 12]
['C', 'Al-15,S', 17, '18'] will return [6, 13, 14, 15, 16, 17, 18]
If as_str is True, the list will contain strings of the integers
(ie, the first example above will return ['1', '2', '3', '6', '7', '8', '10']
"""
# If an integer, just return it
if isinstance(compact_el, int):
if as_str is True:
return [str(compact_el)]
else:
return [compact_el]
# If compact_el is a list, make it a comma-separated string
if isinstance(compact_el, list):
compact_el = [str(x) for x in compact_el]
compact_el = [x for x in compact_el if len(x) > 0]
compact_el = ','.join(compact_el)
# Find multiple - or ,
# Also replace all whitespace with spaces
compact_el = re.sub(r',+', ',', compact_el)
compact_el = re.sub(r'-+', '-', compact_el)
compact_el = re.sub(r'\s+', '', compact_el)
# Find starting with or ending with comma and strip them
compact_el = compact_el.strip(',')
# Check if I was passed an empty string or list
if len(compact_el) == 0:
return []
# Find some erroneous patterns
# -, and ,-
if '-,' in compact_el:
raise RuntimeError("Malformed element string")
if ',-' in compact_el:
raise RuntimeError("Malformed element string")
# Strings ends or begins with -
if compact_el.startswith('-') or compact_el.endswith('-'):
raise RuntimeError("Malformed element string")
# x-y-z
if re.search(r'\w+-\w+-\w+', compact_el):
raise RuntimeError("Malformed element string")
# Split on commas
tmp_list = compact_el.split(',')
# Now go over each one and replace elements with ints
el_list = []
for el in tmp_list:
if not '-' in el:
el_list.append(_Z_from_str(el))
else:
begin, end = el.split('-')
begin = _Z_from_str(begin)
end = _Z_from_str(end)
el_list.extend(list(range(begin, end + 1)))
if as_str is True:
return [str(x) for x in el_list]
else:
return el_list | python | {
"resource": ""
} |
q34547 | elements_in_files | train | def elements_in_files(filelist):
'''Get a list of what elements exist in JSON files
This works on table, element, and component data files
Parameters
----------
filelist : list
A list of paths to json files
Returns
-------
dict
Keys are the file path, value is a compacted element string of
what elements are in that file
'''
ret = {}
for fpath in filelist:
filedata = fileio.read_json_basis(fpath)
els = list(filedata['elements'].keys())
ret[fpath] = misc.compact_elements(els)
return ret | python | {
"resource": ""
} |
q34548 | _fix_uncontracted | train | def _fix_uncontracted(basis):
'''
Forces the contraction coefficient of uncontracted shells to 1.0
'''
for el in basis['elements'].values():
if 'electron_shells' not in el:
continue
for sh in el['electron_shells']:
if len(sh['coefficients']) == 1 and len(sh['coefficients'][0]) == 1:
sh['coefficients'][0][0] = '1.0000000'
# Some uncontracted shells don't have a coefficient
if len(sh['coefficients']) == 0:
sh['coefficients'].append(['1.0000000'])
return basis | python | {
"resource": ""
} |
q34549 | write_bsedebug | train | def write_bsedebug(basis):
'''Converts a basis set to BSE Debug format
'''
s = ''
for el, eldata in basis['elements'].items():
s += element_data_str(el, eldata)
return s | python | {
"resource": ""
} |
q34550 | _bsecurate_cli_get_reader_formats | train | def _bsecurate_cli_get_reader_formats(args):
'''Handles the get-file-types subcommand'''
all_formats = curate.get_reader_formats()
if args.no_description:
liststr = all_formats.keys()
else:
liststr = format_columns(all_formats.items())
return '\n'.join(liststr) | python | {
"resource": ""
} |
q34551 | _bsecurate_cli_elements_in_files | train | def _bsecurate_cli_elements_in_files(args):
'''Handles the elements-in-files subcommand'''
data = curate.elements_in_files(args.files)
return '\n'.join(format_columns(data.items())) | python | {
"resource": ""
} |
q34552 | _bsecurate_cli_component_file_refs | train | def _bsecurate_cli_component_file_refs(args):
'''Handles the component-file-refs subcommand'''
data = curate.component_file_refs(args.files)
s = ''
for cfile, cdata in data.items():
s += cfile + '\n'
rows = []
for el, refs in cdata:
rows.append((' ' + el, ' '.join(refs)))
s += '\n'.join(format_columns(rows)) + '\n\n'
return s | python | {
"resource": ""
} |
q34553 | _bsecurate_cli_print_component_file | train | def _bsecurate_cli_print_component_file(args):
'''Handles the print-component-file subcommand'''
data = fileio.read_json_basis(args.file)
return printing.component_basis_str(data, elements=args.elements) | python | {
"resource": ""
} |
q34554 | _bsecurate_cli_compare_basis_sets | train | def _bsecurate_cli_compare_basis_sets(args):
'''Handles compare-basis-sets subcommand'''
ret = curate.compare_basis_sets(args.basis1, args.basis2, args.version1, args.version2, args.uncontract_general,
args.data_dir, args.data_dir)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE" | python | {
"resource": ""
} |
q34555 | _bsecurate_cli_compare_basis_files | train | def _bsecurate_cli_compare_basis_files(args):
'''Handles compare-basis-files subcommand'''
ret = curate.compare_basis_files(args.file1, args.file2, args.readfmt1, args.readfmt2, args.uncontract_general)
if ret:
return "No difference found"
else:
return "DIFFERENCES FOUND. SEE ABOVE" | python | {
"resource": ""
} |
q34556 | _bsecurate_cli_view_graph | train | def _bsecurate_cli_view_graph(args):
'''Handles the view-graph subcommand'''
curate.view_graph(args.basis, args.version, args.data_dir)
return '' | python | {
"resource": ""
} |
q34557 | _bsecurate_cli_make_graph_file | train | def _bsecurate_cli_make_graph_file(args):
'''Handles the make-graph-file subcommand'''
curate.make_graph_file(args.basis, args.outfile, args.render, args.version, args.data_dir)
return '' | python | {
"resource": ""
} |
q34558 | element_data_from_Z | train | def element_data_from_Z(Z):
'''Obtain elemental data given a Z number
An exception is thrown if the Z number is not found
'''
# Z may be a str
if isinstance(Z, str) and Z.isdecimal():
Z = int(Z)
if Z not in _element_Z_map:
raise KeyError('No element data for Z = {}'.format(Z))
return _element_Z_map[Z] | python | {
"resource": ""
} |
q34559 | element_data_from_sym | train | def element_data_from_sym(sym):
'''Obtain elemental data given an elemental symbol
The given symbol is not case sensitive
An exception is thrown if the symbol is not found
'''
sym_lower = sym.lower()
if sym_lower not in _element_sym_map:
raise KeyError('No element data for symbol \'{}\''.format(sym))
return _element_sym_map[sym_lower] | python | {
"resource": ""
} |
q34560 | element_data_from_name | train | def element_data_from_name(name):
'''Obtain elemental data given an elemental name
The given name is not case sensitive
An exception is thrown if the name is not found
'''
name_lower = name.lower()
if name_lower not in _element_name_map:
raise KeyError('No element data for name \'{}\''.format(name))
return _element_name_map[name_lower] | python | {
"resource": ""
} |
q34561 | element_name_from_Z | train | def element_name_from_Z(Z, normalize=False):
'''Obtain an element's name from its Z number
An exception is thrown if the Z number is not found
If normalize is True, the first letter will be capitalized
'''
r = element_data_from_Z(Z)[2]
if normalize:
return r.capitalize()
else:
return r | python | {
"resource": ""
} |
q34562 | element_sym_from_Z | train | def element_sym_from_Z(Z, normalize=False):
'''Obtain an element's symbol from its Z number
An exception is thrown if the Z number is not found
If normalize is True, the first letter will be capitalized
'''
r = element_data_from_Z(Z)[0]
if normalize:
return r.capitalize()
else:
return r | python | {
"resource": ""
} |
q34563 | convert_references | train | def convert_references(ref_data, fmt):
'''
Returns the basis set references as a string representing
the data in the specified output format
'''
# Make fmt case insensitive
fmt = fmt.lower()
if fmt not in _converter_map:
raise RuntimeError('Unknown reference format "{}"'.format(fmt))
# Sort the data for all references
for elref in ref_data:
for rinfo in elref['reference_info']:
rdata = rinfo['reference_data']
rinfo['reference_data'] = [(k, sort.sort_single_reference(v)) for k, v in rdata]
# Actually do the conversion
ret_str = _converter_map[fmt]['function'](ref_data)
return ret_str | python | {
"resource": ""
} |
q34564 | _get_basis_metadata | train | def _get_basis_metadata(name, data_dir):
'''Get metadata for a single basis set
If the basis doesn't exist, an exception is raised
'''
# Transform the name into an internal representation
tr_name = misc.transform_basis_name(name)
# Get the metadata for all basis sets
metadata = get_metadata(data_dir)
if not tr_name in metadata:
raise KeyError("Basis set {} does not exist".format(name))
return metadata[tr_name] | python | {
"resource": ""
} |
q34565 | _header_string | train | def _header_string(basis_dict):
'''Creates a header with information about a basis set
Information includes description, revision, etc, but not references
'''
tw = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
header = '-' * 70 + '\n'
header += ' Basis Set Exchange\n'
header += ' Version ' + version() + '\n'
header += ' ' + _main_url + '\n'
header += '-' * 70 + '\n'
header += ' Basis set: ' + basis_dict['name'] + '\n'
header += tw.fill(' Description: ' + basis_dict['description']) + '\n'
header += ' Role: ' + basis_dict['role'] + '\n'
header += tw.fill(' Version: {} ({})'.format(basis_dict['version'],
basis_dict['revision_description'])) + '\n'
header += '-' * 70 + '\n'
return header | python | {
"resource": ""
} |
q34566 | get_basis | train | def get_basis(name,
elements=None,
version=None,
fmt=None,
uncontract_general=False,
uncontract_spdf=False,
uncontract_segmented=False,
make_general=False,
optimize_general=False,
data_dir=None,
header=True):
'''Obtain a basis set
This is the main function for getting basis set information.
This function reads in all the basis data and returns it either
as a string or as a python dictionary.
Parameters
----------
name : str
Name of the basis set. This is not case sensitive.
elements : str or list
List of elements that you want the basis set for.
Elements can be specified by Z-number (int or str) or by symbol (str).
If this argument is a str (ie, '1-3,7-10'), it is expanded into a list.
Z numbers and symbols (case insensitive) can be used interchangeably
(see :func:`bse.misc.expand_elements`)
If an empty string or list is passed, or if None is passed (the default),
all elements for which the basis set is defined are included.
version : int or str
Obtain a specific version of this basis set. By default,
the latest version is returned.
fmt: str
The desired output format of the basis set. By default,
basis set information is returned as a python dictionary. Otherwise,
if a format is specified, a string is returned.
Use :func:`bse.api.get_formats` to programmatically obtain the available
formats. The `fmt` argument is not case sensitive.
Available formats are
* nwchem
* gaussian94
* psi4
* gamess_us
* turbomole
* json
uncontract_general : bool
If True, remove general contractions by duplicating the set
of primitive exponents with each vector of coefficients.
Primitives with zero coefficient are removed, as are duplicate shells.
uncontract_spdf : bool
If True, remove general contractions with combined angular momentum (sp, spd, etc)
by duplicating the set of primitive exponents with each vector of coefficients.
Primitives with zero coefficient are removed, as are duplicate shells.
uncontract_segmented : bool
If True, remove segmented contractions by duplicating each primitive into new shells.
Each coefficient is set to 1.0
make_general : bool
If True, make the basis set as generally-contracted as possible. There will be one
shell per angular momentum (for each element)
optimize_general : bool
Optimize by removing general contractions that contain uncontracted
functions (see :func:`bse.manip.optimize_general`)
data_dir : str
Data directory with all the basis set information. By default,
it is in the 'data' subdirectory of this project.
Returns
-------
str or dict
The basis set in the desired format. If `fmt` is **None**, this will be a python
dictionary. Otherwise, it will be a string.
'''
data_dir = fix_data_dir(data_dir)
bs_data = _get_basis_metadata(name, data_dir)
# If version is not specified, use the latest
if version is None:
version = bs_data['latest_version']
else:
version = str(version) # Version may be an int
if not version in bs_data['versions']:
raise KeyError("Version {} does not exist for basis {}".format(version, name))
# Compose the entire basis set (all elements)
file_relpath = bs_data['versions'][version]['file_relpath']
basis_dict = compose.compose_table_basis(file_relpath, data_dir)
# Set the name (from the global metadata)
# Only the list of all names will be returned from compose_table_basis
basis_dict['name'] = bs_data['display_name']
# Handle optional arguments
if elements is not None:
# Convert to purely a list of strings that represent integers
elements = misc.expand_elements(elements, True)
# Did the user pass an empty string or empty list? If so, include
# all elements
if len(elements) != 0:
bs_elements = basis_dict['elements']
# Are elements part of this basis set?
for el in elements:
if not el in bs_elements:
elsym = lut.element_sym_from_Z(el)
raise KeyError("Element {} (Z={}) not found in basis {} version {}".format(
elsym, el, name, version))
# Set to only the elements we want
basis_dict['elements'] = {k: v for k, v in bs_elements.items() if k in elements}
# Note that from now on, the pipleline is going to modify basis_dict. That is ok,
# since we are returned a unique instance from compose_table_basis
needs_pruning = False
if optimize_general:
basis_dict = manip.optimize_general(basis_dict, False)
needs_pruning = True
# uncontract_segmented implies uncontract_general
if uncontract_segmented:
basis_dict = manip.uncontract_segmented(basis_dict, False)
needs_pruning = True
elif uncontract_general:
basis_dict = manip.uncontract_general(basis_dict, False)
needs_pruning = True
if uncontract_spdf:
basis_dict = manip.uncontract_spdf(basis_dict, 0, False)
needs_pruning = True
if make_general:
basis_dict = manip.make_general(basis_dict, False)
needs_pruning = True
# Remove dead and duplicate shells
if needs_pruning:
basis_dict = manip.prune_basis(basis_dict, False)
# If fmt is not specified, return as a python dict
if fmt is None:
return basis_dict
if header:
header_str = _header_string(basis_dict)
else:
header_str = None
return converters.convert_basis(basis_dict, fmt, header_str) | python | {
"resource": ""
} |
def lookup_basis_by_role(primary_basis, role, data_dir=None):
    '''Look up the name of an auxiliary basis set for a primary basis and role.

    Parameters
    ----------
    primary_basis : str
        The primary (orbital) basis set whose auxiliary basis set is
        desired. Not case sensitive.
    role : str
        Desired role/type of auxiliary basis set. Use
        :func:`bse.api.get_roles` to programmatically obtain the
        available roles. Not case sensitive.
        Available roles are

            * jfit
            * jkfit
            * rifit
            * admmfit

    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.

    Returns
    -------
    str
        The name of the auxiliary basis set for the given primary basis
        and role.
    '''

    data_dir = fix_data_dir(data_dir)
    role = role.lower()

    # Validate the requested role before touching any metadata
    if role not in get_roles():
        raise RuntimeError("Role {} is not a valid role".format(role))

    metadata = _get_basis_metadata(primary_basis, data_dir)
    available_aux = metadata['auxiliaries']

    if role not in available_aux:
        raise RuntimeError("Role {} doesn't exist for {}".format(role, primary_basis))

    return available_aux[role]
"resource": ""
} |
def get_metadata(data_dir=None):
    '''Obtain the metadata for all basis sets.

    The metadata includes information such as the display name of each
    basis set, its versions, and what elements it covers. It is read
    from the METADATA.json file located in `data_dir`.

    Parameters
    ----------
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.
    '''

    data_dir = fix_data_dir(data_dir)
    return fileio.read_metadata(os.path.join(data_dir, "METADATA.json"))
"resource": ""
} |
def get_reference_data(data_dir=None):
    '''Obtain information for all stored references.

    Returns a nested dictionary with the data for every reference,
    read from the REFERENCES.json file in `data_dir`.
    '''

    data_dir = fix_data_dir(data_dir)
    return fileio.read_references(os.path.join(data_dir, 'REFERENCES.json'))
"resource": ""
} |
def get_basis_family(basis_name, data_dir=None):
    '''Look up the family that a given basis set belongs to.'''

    data_dir = fix_data_dir(data_dir)
    metadata = _get_basis_metadata(basis_name, data_dir)
    return metadata['family']
"resource": ""
} |
def get_families(data_dir=None):
    '''Return a sorted list of all basis set families.'''

    data_dir = fix_data_dir(data_dir)
    metadata = get_metadata(data_dir)

    # Collect the unique family names across all basis sets
    return sorted({entry['family'] for entry in metadata.values()})
"resource": ""
} |
def filter_basis_sets(substr=None, family=None, role=None, data_dir=None):
    '''Filter basis sets by some criteria.

    All parameters are ANDed together and are not case sensitive.

    Parameters
    ----------
    substr : str
        Substring to search for in the basis set name
    family : str
        Family the basis set belongs to
    role : str
        Role of the basis set
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.

    Returns
    -------
    dict
        Basis set metadata that matches the search criteria
    '''

    data_dir = fix_data_dir(data_dir)
    candidates = get_metadata(data_dir)

    # Family and role are stored lowercase (enforced by schema/validation)
    if family:
        family = family.lower()
        if family not in get_families(data_dir):
            raise RuntimeError("Family '{}' is not a valid family".format(family))
        candidates = {name: md for name, md in candidates.items() if md['family'] == family}

    if role:
        role = role.lower()
        if role not in get_roles():
            raise RuntimeError("Role '{}' is not a valid role".format(role))
        candidates = {name: md for name, md in candidates.items() if md['role'] == role}

    if substr:
        substr = substr.lower()
        candidates = {
            name: md
            for name, md in candidates.items() if substr in name or substr in md['display_name']
        }

    return candidates
"resource": ""
} |
def _family_notes_path(family, data_dir):
    '''Construct the path to the notes file for a basis set family.'''

    data_dir = fix_data_dir(data_dir)
    family = family.lower()

    if family not in get_families(data_dir):
        raise RuntimeError("Family '{}' does not exist".format(family))

    # Family notes live in NOTES.<family> inside the data directory
    return os.path.join(data_dir, 'NOTES.' + family)
"resource": ""
} |
def _basis_notes_path(name, data_dir):
    '''Construct the path to the notes file for a basis set.'''

    data_dir = fix_data_dir(data_dir)
    metadata = _get_basis_metadata(name, data_dir)

    # The notes file shares the basis base file name, with a .notes extension
    return os.path.join(data_dir, metadata['basename'] + '.notes')
"resource": ""
} |
def get_family_notes(family, data_dir=None):
    '''Return a string with the notes about a basis set family.

    If no notes are found, an empty string is processed in their place.
    '''

    notes_path = _family_notes_path(family, data_dir)
    raw_notes = fileio.read_notes_file(notes_path)
    if raw_notes is None:
        raw_notes = ""

    # Resolve any reference placeholders inside the notes text
    return notes.process_notes(raw_notes, get_reference_data(data_dir))
"resource": ""
} |
def has_family_notes(family, data_dir=None):
    '''Determine whether notes exist for a given family.

    Returns True if they exist, False otherwise.
    '''

    return os.path.isfile(_family_notes_path(family, data_dir))
"resource": ""
} |
def get_basis_notes(name, data_dir=None):
    '''Return a string with the notes about a specific basis set.

    If no notes are found, an empty string is returned.
    '''

    notes_path = _basis_notes_path(name, data_dir)
    raw_notes = fileio.read_notes_file(notes_path)
    if raw_notes is None:
        return ""

    # Resolve any reference placeholders inside the notes text
    return notes.process_notes(raw_notes, get_reference_data(data_dir))
"resource": ""
} |
def has_basis_notes(family, data_dir=None):
    '''Determine whether notes exist for a given basis set.

    Returns True if they exist, False otherwise.

    NOTE(review): despite its name, the first argument is a basis set
    name (it is forwarded to _basis_notes_path), not a family — the
    parameter name is kept for backward compatibility.
    '''

    return os.path.isfile(_basis_notes_path(family, data_dir))
"resource": ""
} |
def get_schema(schema_type):
    '''Get a schema that can validate BSE JSON files.

    `schema_type` selects which kind of BSE JSON file will be validated,
    and can be 'component', 'element', 'table', 'metadata', or 'references'.
    '''

    schema_path = os.path.join(_default_schema_dir, "{}-schema.json".format(schema_type))

    if not os.path.isfile(schema_path):
        raise RuntimeError('Schema file \'{}\' does not exist, is not readable, or is not a file'.format(schema_path))

    return fileio.read_schema(schema_path)
"resource": ""
} |
def _cli_check_data_dir(data_dir):
    '''Validate that a data directory exists and holds a METADATA.json file.

    A value of None is passed through unchanged; otherwise the expanded
    path is returned.
    '''

    if data_dir is None:
        return None

    # Allow ~ and environment variables in the supplied path
    expanded = os.path.expandvars(os.path.expanduser(data_dir))

    if not os.path.isdir(expanded):
        raise RuntimeError("Data directory '{}' does not exist or is not a directory".format(expanded))
    if not os.path.isfile(os.path.join(expanded, 'METADATA.json')):
        raise RuntimeError("Data directory '{}' does not contain a METADATA.json file".format(expanded))

    return expanded
"resource": ""
} |
def _cli_check_format(fmt):
    '''Validate a basis set format, raising a helpful exception if unknown.

    None is passed through; otherwise the lower-cased format is returned.
    '''

    if fmt is None:
        return None

    fmt = fmt.lower()
    if fmt not in api.get_formats():
        raise RuntimeError("Format '" + fmt + "' does not exist.\n"
                           "For a complete list of formats, use the 'bse list-formats' command")

    return fmt
"resource": ""
} |
def _cli_check_ref_format(fmt):
    '''Validate a reference format, raising a helpful exception if unknown.

    None is passed through; otherwise the lower-cased format is returned.
    '''

    if fmt is None:
        return None

    fmt = fmt.lower()
    if fmt not in api.get_reference_formats():
        raise RuntimeError("Reference format '" + fmt + "' does not exist.\n"
                           "For a complete list of formats, use the 'bse list-ref-formats' command")

    return fmt
"resource": ""
} |
def _cli_check_role(role):
    '''Checks that a basis set role exists and if not, raises a helpful exception

    None is passed through; otherwise the lower-cased role is returned.
    '''

    if role is None:
        return None

    role = role.lower()
    if not role in api.get_roles():
        # Bug fix: the message previously said "Role format", a copy-paste
        # from the reference-format checker; this function validates roles
        errstr = "Role '" + role + "' does not exist.\n"
        errstr += "For a complete list of roles, use the 'bse list-roles' command"
        raise RuntimeError(errstr)

    return role
"resource": ""
} |
def _cli_check_basis(name, data_dir):
    '''Validate a basis set name, raising a helpful exception if unknown.

    None is passed through; otherwise the transformed name is returned.
    '''

    if name is None:
        return None

    name = misc.transform_basis_name(name)
    if name not in api.get_metadata(data_dir):
        raise RuntimeError("Basis set '" + name + "' does not exist.\n"
                           "For a complete list of basis sets, use the 'bse list-basis-sets' command")

    return name
"resource": ""
} |
def _cli_check_family(family, data_dir):
    '''Validate a basis set family, raising a helpful exception if unknown.

    None is passed through; otherwise the lower-cased family is returned.
    '''

    if family is None:
        return None

    family = family.lower()
    if family not in api.get_families(data_dir):
        raise RuntimeError("Basis set family '" + family + "' does not exist.\n"
                           "For a complete list of families, use the 'bse list-families' command")

    return family
"resource": ""
} |
def _cli_check_readfmt(readfmt):
    '''Validate a reader file type, raising a helpful exception if unknown.

    None is passed through; otherwise the lower-cased file type is returned.
    '''

    if readfmt is None:
        return None

    readfmt = readfmt.lower()
    if readfmt not in curate.get_reader_formats():
        raise RuntimeError("Reader for file type '" + readfmt + "' does not exist.\n"
                           "For a complete list of file types, use the 'bsecurate get-reader-formats' command")

    return readfmt
"resource": ""
} |
def _create_readme(fmt, reffmt):
    '''
    Creates the readme file for the bundle

    Returns a str representing the readme file
    '''

    # datetime.utcnow() is deprecated; an aware UTC datetime produces the
    # same formatted timestamp string
    now = datetime.datetime.now(datetime.timezone.utc)
    timestamp = now.strftime('%Y-%m-%d %H:%M:%S UTC')

    # yapf: disable
    outstr = _readme_str.format(timestamp=timestamp,
                                bsever=api.version(),
                                fmt=fmt, reffmt=reffmt)
    # yapf: enable

    return outstr
"resource": ""
} |
def _add_to_tbz(tfile, filename, data_str):
    '''
    Adds string data to a tarfile under the given file name
    '''

    # tarfile can only add from file objects, so wrap the UTF-8 encoded
    # string in an in-memory buffer (see https://stackoverflow.com/a/52724508)
    payload = data_str.encode('utf-8')
    info = tarfile.TarInfo(name=filename)
    info.size = len(payload)
    tfile.addfile(tarinfo=info, fileobj=io.BytesIO(payload))
"resource": ""
} |
def _bundle_generic(bfile, addhelper, fmt, reffmt, data_dir):
    '''
    Loop over all basis sets and add data to an archive

    Parameters
    ----------
    bfile : object
        An object that gets passed through to the addhelper function
    addhelper : function
        A function that takes bfile and adds data to the bfile
    fmt : str
        Format of the basis set to create
    reffmt : str
        Format to use for the references
    data_dir : str
        Data directory with all the basis set information.

    Returns
    -------
    None
    '''
    # File extensions for the requested basis-set and reference formats
    ext = converters.get_format_extension(fmt)
    refext = refconverters.get_format_extension(reffmt)
    # All files in the archive are placed under a single subdirectory
    subdir = 'basis_set_bundle-' + fmt + '-' + reffmt
    readme_path = os.path.join(subdir, 'README.txt')
    addhelper(bfile, readme_path, _create_readme(fmt, reffmt))
    # One basis-set file and one reference file per (basis, version) pair
    for name, data, notes in _basis_data_iter(fmt, reffmt, data_dir):
        for ver, verdata in data.items():
            filename = misc.basis_name_to_filename(name)
            basis_filepath = os.path.join(subdir, '{}.{}{}'.format(filename, ver, ext))
            ref_filename = os.path.join(subdir, '{}.{}.ref{}'.format(filename, ver, refext))
            bsdata, refdata = verdata
            addhelper(bfile, basis_filepath, bsdata)
            addhelper(bfile, ref_filename, refdata)
            # Per-basis notes are only written when they exist
            if len(notes) > 0:
                notes_filename = os.path.join(subdir, filename + '.notes')
                addhelper(bfile, notes_filename, notes)
    # Family-level notes are stored once per family
    for fam in api.get_families(data_dir):
        fam_notes = api.get_family_notes(fam, data_dir)
        if len(fam_notes) > 0:
            fam_notes_filename = os.path.join(subdir, fam + '.family_notes')
            addhelper(bfile, fam_notes_filename, fam_notes)
"resource": ""
} |
def create_bundle(outfile, fmt, reffmt, archive_type=None, data_dir=None):
    '''
    Create a single archive file containing all basis
    sets in a given format

    Parameters
    ----------
    outfile : str
        Path to the file to create. Existing files will be overwritten
    fmt : str
        Format of the basis set to archive (nwchem, turbomole, ...)
    reffmt : str
        Format of the basis set references to archive (nwchem, turbomole, ...)
    archive_type : str
        Type of archive to create. Can be 'zip' or 'tbz'. Default is
        None, which will autodetect based on the outfile name
    data_dir : str
        Data directory with all the basis set information. By default,
        it is in the 'data' subdirectory of this project.

    Returns
    -------
    None
    '''

    if archive_type is None:
        outfile_lower = outfile.lower()

        # Autodetect the archive type from the output file extension
        for k, v in _bundle_types.items():
            if outfile_lower.endswith(v['extension']):
                archive_type = k
                break
        else:
            raise RuntimeError("Cannot autodetect archive type from file name: {}".format(os.path.basename(outfile)))

    else:
        archive_type = archive_type.lower()
        if not archive_type in _bundle_types:
            # Bug fix: the offending archive type was missing from the
            # message (the '{}' placeholder was never filled in)
            raise RuntimeError("Archive type '{}' is not valid.".format(archive_type))

    _bundle_types[archive_type]['handler'](outfile, fmt, reffmt, data_dir)
"resource": ""
} |
def get_archive_types():
    '''
    Return information related to the types of archives available
    '''

    # Strip the (internal) handler callables from a deep copy so callers
    # only see the descriptive fields
    info = copy.deepcopy(_bundle_types)
    for entry in info.values():
        del entry['handler']
    return info
"resource": ""
} |
def merge_element_data(dest, sources, use_copy=True):
    """
    Merges the basis set data for an element from multiple sources
    into dest.

    The destination is not modified, and a (shallow) copy of dest is returned
    with the data from sources added. The 'electron_shells' and 'references'
    lists are copied before being appended to, so the lists held by dest are
    never mutated.

    If use_copy is True, then the data merged into dest will be a (deep)
    copy of that found in sources. Otherwise, data may be shared between
    the return value and sources.
    """

    if dest is not None:
        ret = dest.copy()
    else:
        ret = {}

    # Bug fix: a plain shallow copy shares the list objects with dest, so
    # extending/appending below would modify dest as well. Rebind them to
    # copies first.
    if 'electron_shells' in ret:
        ret['electron_shells'] = list(ret['electron_shells'])
    if 'references' in ret:
        ret['references'] = list(ret['references'])

    if use_copy:
        sources = copy.deepcopy(sources)

    # Note that we are not copying notes/data_sources
    for s in sources:
        if 'electron_shells' in s:
            if 'electron_shells' not in ret:
                ret['electron_shells'] = []
            ret['electron_shells'].extend(s['electron_shells'])
        if 'ecp_potentials' in s:
            # An ECP may only come from a single source
            if 'ecp_potentials' in ret:
                raise RuntimeError('Cannot overwrite existing ECP')
            ret['ecp_potentials'] = s['ecp_potentials']
            ret['ecp_electrons'] = s['ecp_electrons']
        if 'references' in s:
            if 'references' not in ret:
                ret['references'] = []
            # Keep references unique, preserving order
            for ref in s['references']:
                if not ref in ret['references']:
                    ret['references'].append(ref)

    return ret
"resource": ""
} |
def prune_shell(shell, use_copy=True):
    """
    Removes exact duplicates of primitives, and condenses duplicate exponents
    into general contractions

    Also removes primitives if all coefficients are zero

    If use_copy is True, the input shell is not modified. (Bug fix: this
    parameter was previously accepted but ignored, and the input was
    always modified in place.)
    """

    if use_copy:
        # Only the 'exponents' and 'coefficients' entries are rebound below,
        # so a shallow copy of the dict is enough to protect the input
        shell = shell.copy()

    new_exponents = []
    new_coefficients = []

    exponents = shell['exponents']
    nprim = len(exponents)

    # transpose of the coefficient matrix (one row per primitive)
    coeff_t = list(map(list, zip(*shell['coefficients'])))

    # Group coefficient rows by exponent value
    ex_groups = []
    for i in range(nprim):
        for ex in ex_groups:
            if float(exponents[i]) == float(ex[0]):
                ex[1].append(coeff_t[i])
                break
        else:
            ex_groups.append((exponents[i], [coeff_t[i]]))

    # Now collapse within groups
    for ex in ex_groups:
        if len(ex[1]) == 1:
            # only add if there is a nonzero contraction coefficient
            if not all([float(x) == 0.0 for x in ex[1][0]]):
                new_exponents.append(ex[0])
                new_coefficients.append(ex[1][0])
            continue

        # ex[1] contains rows of coefficients. The length of ex[1]
        # is the number of times the exponent is duplicated. Columns represent
        # general contractions. We want to find the non-zero coefficient in
        # each column, if it exists. The result is a single row with a length
        # representing the number of general contractions
        new_coeff_row = []

        # so take yet another transpose (columns of this group)
        ex_coeff = list(map(list, zip(*ex[1])))
        for g in ex_coeff:
            nonzero = [x for x in g if float(x) != 0.0]
            if len(nonzero) > 1:
                raise RuntimeError("Exponent {} is duplicated within a contraction".format(ex[0]))

            if len(nonzero) == 0:
                new_coeff_row.append(g[0])
            else:
                new_coeff_row.append(nonzero[0])

        # only add if there is a nonzero contraction coefficient anywhere
        # for this exponent
        if not all([float(x) == 0.0 for x in new_coeff_row]):
            new_exponents.append(ex[0])
            new_coefficients.append(new_coeff_row)

    # take the transpose again, putting the general contraction
    # as the slowest index
    new_coefficients = list(map(list, zip(*new_coefficients)))

    shell['exponents'] = new_exponents
    shell['coefficients'] = new_coefficients

    return shell
"resource": ""
} |
def prune_basis(basis, use_copy=True):
    """
    Removes primitives that have a zero coefficient, and
    removes duplicate primitives and shells

    This only finds EXACT duplicates, and is meant to be used
    after other manipulations

    If use_copy is True, the input basis set is not modified.
    """

    if use_copy:
        basis = copy.deepcopy(basis)

    for eldata in basis['elements'].values():
        if 'electron_shells' not in eldata:
            continue

        # Prune each shell in place (we own the data at this point)
        pruned = [prune_shell(sh, False) for sh in eldata.pop('electron_shells')]

        # Keep only the first occurrence of each shell
        unique = []
        for sh in pruned:
            if sh not in unique:
                unique.append(sh)
        eldata['electron_shells'] = unique

    return basis
"resource": ""
} |
def uncontract_spdf(basis, max_am=0, use_copy=True):
    """
    Removes sp, spd, spdf, etc, contractions from a basis set

    The general contractions are replaced by uncontracted versions.
    Contractions up to max_am will be left in place. For example,
    if max_am = 1, spd will be split into sp and d.

    The returned basis may have functions with coefficients of zero and
    may have duplicate shells.

    If use_copy is True, the input basis set is not modified.
    """

    if use_copy:
        basis = copy.deepcopy(basis)

    for eldata in basis['elements'].values():
        if 'electron_shells' not in eldata:
            continue

        split_shells = []
        for sh in eldata['electron_shells']:
            am_list = sh['angular_momentum']
            coefs = sh['coefficients']

            if len(am_list) <= 1:
                # Not a combined (sp, spd, ...) shell - keep as-is
                split_shells.append(sh)
                continue

            # Shell keeping the angular momenta that stay combined (<= max_am)
            kept = sh.copy()
            kept['angular_momentum'] = []
            kept['coefficients'] = []

            for am, contraction in zip(am_list, coefs):
                if am > max_am:
                    # Split this general contraction into its own shell
                    single = sh.copy()
                    single['angular_momentum'] = [am]
                    single['coefficients'] = [contraction]
                    split_shells.append(single)
                else:
                    kept['angular_momentum'].append(am)
                    kept['coefficients'].append(contraction)

            # The kept (possibly still combined) shell always goes first
            split_shells.insert(0, kept)

        eldata['electron_shells'] = split_shells

    return basis
"resource": ""
} |
def uncontract_general(basis, use_copy=True):
    """
    Removes the general contractions from a basis set

    The returned basis may have functions with coefficients of zero and
    may have duplicate shells.

    If use_copy is True, the input basis set is not modified.
    """

    if use_copy:
        basis = copy.deepcopy(basis)

    for eldata in basis['elements'].values():
        if 'electron_shells' not in eldata:
            continue

        expanded = []
        for sh in eldata['electron_shells']:
            # Combined (sp, spd, ...) shells are left for uncontract_spdf,
            # and shells with a single contraction need no expansion
            if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
                expanded.append(sh)
            elif len(sh['angular_momentum']) == 1:
                # One new shell per general contraction
                for contraction in sh['coefficients']:
                    newsh = sh.copy()
                    newsh['coefficients'] = [contraction]
                    expanded.append(newsh)

        eldata['electron_shells'] = expanded

    # If use_copy is True, we already made our deep copy - prune in place
    return prune_basis(basis, False)
"resource": ""
} |
def uncontract_segmented(basis, use_copy=True):
    """
    Removes the segmented contractions from a basis set

    This implicitly removes general contractions as well,
    but will leave sp, spd, ... orbitals alone

    The returned basis may have functions with coefficients of zero and
    may have duplicate shells.

    If use_copy is True, the input basis set is not modified.
    """

    if use_copy:
        basis = copy.deepcopy(basis)

    for eldata in basis['elements'].values():
        if 'electron_shells' not in eldata:
            continue

        primitives = []
        for sh in eldata['electron_shells']:
            nam = len(sh['angular_momentum'])

            # One new single-primitive shell per exponent, with unit
            # coefficients (one row per angular momentum)
            for ex in sh['exponents']:
                newsh = sh.copy()
                newsh['exponents'] = [ex]
                newsh['coefficients'] = [["1.00000000"] for _ in range(nam)]
                primitives.append(newsh)

        eldata['electron_shells'] = primitives

    return basis
"resource": ""
} |
def make_general(basis, use_copy=True):
    """
    Makes one large general contraction for each angular momentum

    If use_copy is True, the input basis set is not modified.

    The output of this function is not pretty. If you want to make it nicer,
    use sort_basis afterwards.
    """

    zero = '0.00000000'

    # Split combined (sp, spd, ...) shells first so every shell carries a
    # single angular momentum; this call also handles the use_copy semantics
    basis = uncontract_spdf(basis, 0, use_copy)

    for k, el in basis['elements'].items():
        if not 'electron_shells' in el:
            continue

        # See what we have
        all_am = []
        for sh in el['electron_shells']:
            if not sh['angular_momentum'] in all_am:
                all_am.append(sh['angular_momentum'])

        all_am = sorted(all_am)

        newshells = []
        for am in all_am:
            # Start an empty combined shell for this angular momentum
            newsh = {
                'angular_momentum': am,
                'exponents': [],
                'coefficients': [],
                'region': '',
                'function_type': None,
            }

            # Do exponents first
            for sh in el['electron_shells']:
                if sh['angular_momentum'] != am:
                    continue
                newsh['exponents'].extend(sh['exponents'])

            # Number of primitives in the new shell
            nprim = len(newsh['exponents'])

            # cur_prim tracks how many primitives have been consumed so far;
            # it determines the zero padding above each contraction column
            cur_prim = 0
            for sh in el['electron_shells']:
                if sh['angular_momentum'] != am:
                    continue

                if newsh['function_type'] is None:
                    newsh['function_type'] = sh['function_type']

                # Make sure the shells we are merging have the same function types
                ft1 = newsh['function_type']
                ft2 = sh['function_type']

                # Check if one function type is the subset of another
                # (should handle gto/gto_spherical, etc)
                if ft1 not in ft2 and ft2 not in ft1:
                    raise RuntimeError("Cannot make general contraction of different function types")

                ngen = len(sh['coefficients'])

                # Each general contraction becomes a column, zero-padded above
                # (cur_prim entries) and below (up to nprim entries)
                for g in range(ngen):
                    coef = [zero] * cur_prim
                    coef.extend(sh['coefficients'][g])
                    coef.extend([zero] * (nprim - len(coef)))
                    newsh['coefficients'].append(coef)

                cur_prim += len(sh['exponents'])

            newshells.append(newsh)

        el['electron_shells'] = newshells

    return basis
"resource": ""
} |
def optimize_general(basis, use_copy=True):
    """
    Optimizes the general contraction using the method of Hashimoto et al

    If use_copy is True, the input basis set is not modified.

    .. seealso :: | T. Hashimoto, K. Hirao, H. Tatewaki
                  | 'Comment on Dunning's correlation-consistent basis set'
                  | Chemical Physics Letters v243, Issues 1-2, pp. 190-192 (1995)
                  | https://doi.org/10.1016/0009-2614(95)00807-G
    """

    if use_copy:
        basis = copy.deepcopy(basis)

    for k, el in basis['elements'].items():
        if not 'electron_shells' in el:
            continue

        elshells = el.pop('electron_shells')
        el['electron_shells'] = []
        for sh in elshells:
            exponents = sh['exponents']
            coefficients = sh['coefficients']
            nprim = len(exponents)
            nam = len(sh['angular_momentum'])

            # Only shells with a single angular momentum and more than one
            # general contraction can be optimized; others pass through
            if nam > 1 or len(coefficients) < 2:
                el['electron_shells'].append(sh)
                continue

            # First, find columns (general contractions) with a single non-zero value
            single_columns = [idx for idx, c in enumerate(coefficients) if _is_single_column(c)]

            # Find the corresponding rows that have a value in one of these columns
            # Note that at this stage, the row may have coefficients in more than one
            # column. That is ok, we are going to split it off anyway
            single_rows = []
            for col_idx in single_columns:
                col = coefficients[col_idx]
                for row_idx in range(nprim):
                    if float(col[row_idx]) != 0.0:
                        single_rows.append(row_idx)

            # Split those out into new shells, and remove them from the
            # original shell
            new_shells_single = []
            for row_idx in single_rows:
                newsh = copy.deepcopy(sh)
                newsh['exponents'] = [exponents[row_idx]]
                newsh['coefficients'] = [['1.00000000000']]
                new_shells_single.append(newsh)

            exponents = [x for idx, x in enumerate(exponents) if idx not in single_rows]
            coefficients = [x for idx, x in enumerate(coefficients) if idx not in single_columns]
            coefficients = [[x for idx, x in enumerate(col) if not idx in single_rows] for col in coefficients]

            # Remove Zero columns
            #coefficients = [ x for x in coefficients if not _is_zero_column(x) ]

            # Find contiguous rectangular blocks of the remaining coefficient
            # matrix; each block becomes its own shell
            new_shells = []
            while len(exponents) > 0:
                block_rows, block_cols = _find_block(coefficients)

                # add as a new shell
                newsh = copy.deepcopy(sh)
                newsh['exponents'] = [exponents[i] for i in block_rows]
                newsh['coefficients'] = [[coefficients[colidx][i] for i in block_rows] for colidx in block_cols]
                new_shells.append(newsh)

                # Remove from the original exponent/coefficient set
                exponents = [x for idx, x in enumerate(exponents) if idx not in block_rows]
                coefficients = [x for idx, x in enumerate(coefficients) if idx not in block_cols]
                coefficients = [[x for idx, x in enumerate(col) if not idx in block_rows] for col in coefficients]

            # I do this order to mimic the output of the original BSE
            el['electron_shells'].extend(new_shells)
            el['electron_shells'].extend(new_shells_single)

        # Fix coefficients for completely uncontracted shells to 1.0
        for sh in el['electron_shells']:
            if len(sh['coefficients']) == 1 and len(sh['coefficients'][0]) == 1:
                sh['coefficients'][0][0] = '1.0000000'

    return basis
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.