_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q19100
|
SDOSPlotter.dos_plot_data
|
train
|
def dos_plot_data(self, yscale=1, xmin=-6., xmax=6., colours=None,
                  plot_total=True, legend_cutoff=3, subplot=False,
                  zero_to_efermi=True, cache=None):
    """Get the plotting data.

    Args:
        yscale (:obj:`float`, optional): Scaling factor for the y-axis.
        xmin (:obj:`float`, optional): The minimum energy to mask the
            energy and density of states data (reduces plotting load).
        xmax (:obj:`float`, optional): The maximum energy to mask the
            energy and density of states data (reduces plotting load).
        colours (:obj:`dict`, optional): Use custom colours for specific
            element and orbital combinations. Specified as a :obj:`dict` of
            :obj:`dict` of the colours. For example::

                {
                    'Sn': {'s': 'r', 'p': 'b'},
                    'O': {'s': '#000000'}
                }

            The colour can be a hex code, series of rgb value, or any other
            format supported by matplotlib.
        plot_total (:obj:`bool`, optional): Plot the total density of
            states. Defaults to ``True``.
        legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
            maximum density of states within the plotting range) for an
            elemental orbital to be labelled in the legend. This prevents
            the legend from containing labels for orbitals that have very
            little contribution in the plotting range.
        subplot (:obj:`bool`, optional): Plot the density of states for
            each element on separate subplots. Defaults to ``False``.
        zero_to_efermi (:obj:`bool`, optional): Normalise the plot such
            that the Fermi level is set as 0 eV.
        cache (:obj:`dict`, optional): Cache object tracking how colours
            have been assigned to orbitals. The format is the same as the
            "colours" dict. This defaults to the module-level
            sumo.plotting.colour_cache object, but an empty dict can be
            used as a fresh cache. This object will be modified in-place.

    Returns:
        dict: The plotting data. Formatted with the following keys:

            "energies" (:obj:`numpy.ndarray`)
                The energies.
            "mask" (:obj:`numpy.ndarray`)
                A mask used to trim the density of states data and
                prevent unwanted data being included in the output file.
            "lines" (:obj:`list`)
                A :obj:`list` of :obj:`dict` containing the density data
                and some metadata. Each line :obj:`dict` contains the keys:

                    "label" (:obj:`str`)
                        The label for the legend.
                    "dens" (:obj:`numpy.ndarray`)
                        The density of states data.
                    "colour" (:obj:`str`)
                        The colour of the line.
                    "alpha" (:obj:`float`)
                        The alpha value for line fill.

            "ymin" (:obj:`float`)
                The minimum y-axis limit.
            "ymax" (:obj:`float`)
                The maximum y-axis limit.
    """
    if cache is None:
        # default to the module-level cache so colour assignments stay
        # consistent across successive plots (NOTE: mutated in-place)
        cache = colour_cache
    # mask needed to prevent unwanted data in pdf and for finding y limit
    dos = self._dos
    pdos = self._pdos
    eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies
    # pad the window by 0.05 eV so lines are not clipped at the axis edge
    mask = (eners >= xmin - 0.05) & (eners <= xmax + 0.05)
    plot_data = {'mask': mask, 'energies': eners}
    spins = dos.densities.keys()
    ymax = 0

    if plot_total:
        # use the rcParams text colour for the total DOS if available,
        # falling back to black
        if 'text.color' in matplotlib.rcParams:
            tdos_colour = matplotlib.rcParams['text.color']
            if tdos_colour is None:
                tdos_colour = 'k'
        else:
            tdos_colour = 'k'
        lines = []
        tdos = {'label': 'Total DOS', 'dens': dos.densities,
                'colour': tdos_colour, 'alpha': 0.15}
        # subplot data formatted as a list of lists of dicts, with each
        # list of dicts being plotted on a separate graph, if only one list
        # then solo plot
        lines.append([tdos])
        dmax = max([max(d[mask]) for d in dos.densities.values()])
        ymax = dmax if dmax > ymax else ymax
    elif not subplot:
        lines = [[]]  # need a blank list to add lines into
    else:
        lines = []

    # TODO: Fix broken behaviour if plot_total is off
    # (ymax is still 0 at this point, so cutoff is always 0 then)
    cutoff = (legend_cutoff / 100.) * (ymax / 1.05)

    for el, el_pdos in pdos.items():
        el_lines = []
        for orb in sort_orbitals(el_pdos):
            dmax = max([max(d[mask])
                        for d in el_pdos[orb].densities.values()])
            ymax = dmax if dmax > ymax else ymax
            # only label orbitals that contribute significantly in range
            label = None if dmax < cutoff else '{} ({})'.format(el, orb)
            colour, cache = get_cached_colour(el, orb, colours,
                                              cache=cache)
            el_lines.append({'label': label, 'alpha': 0.25,
                             'colour': colour,
                             'dens': el_pdos[orb].densities})
        if subplot:
            lines.append(el_lines)
        else:
            lines[0].extend(el_lines)

    # scale the y limit to leave headroom above the tallest line
    # (empty_space is a module-level factor — presumably ~1.1; confirm)
    ymax = ymax * empty_space / yscale
    # spin-polarised data is plotted symmetrically about zero
    ymin = 0 if len(spins) == 1 else -ymax
    plot_data.update({'lines': lines, 'ymax': ymax, 'ymin': ymin})
    return plot_data
|
python
|
{
"resource": ""
}
|
q19101
|
get_projections_by_branches
|
train
|
def get_projections_by_branches(bs, selection, normalise=None):
    """Return the orbital projections for each branch of a band structure.

    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        selection (list): A list of :obj:`tuple` or :obj:`string`
            identifying which projections to return. A projection may be
            given as an (element, orbital) pair, e.g.::

                [('Sn', 's'), ('Bi', 'p'), ('S', 'p')]

            A bare element string combines all of that element's orbitals
            into one projection, e.g. ``'S'`` in::

                [('Bi', 's'), ('Bi', 'p'), 'S']

            Several orbitals may also be combined explicitly, e.g.::

                [('Bi', 's'), ('Bi', 'p'), ('S', ('s', 'p', 'd'))]

        normalise (:obj:`str`, optional): Normalisation scheme for the
            projections. ``'all'`` normalises against the sum of all
            projections, ``'select'`` against the sum of the selected
            projections, and ``None`` (the default) performs no
            normalisation.

    Returns:
        list: The orbital projections for each branch of the band
        structure, in the same order as ``selection``, formatted as::

            [ [ {spin: projections} ], [ {spin: projections} ], ... ]

        Here spin is a :obj:`pymatgen.electronic_structure.core.Spin` and
        projections is a :obj:`numpy.array` indexed as
        ``projections[band_index][kpoint_index]``. If the band structure
        contains no projections, an array of zeros is returned per spin.
    """
    spins = bs.bands.keys()
    projections = get_projections(bs, selection, normalise=normalise)

    branches = []
    for branch in bs.branches:
        first = branch['start_index']
        last = branch['end_index'] + 1

        # slice each projection down to the k-points of this branch
        branch_proj = deepcopy(projections)
        for idx, spin in it.product(range(len(projections)), spins):
            branch_proj[idx][spin] = projections[idx][spin][:, first:last]

        branches.append(branch_proj)
    return branches
|
python
|
{
"resource": ""
}
|
q19102
|
get_projections
|
train
|
def get_projections(bs, selection, normalise=None):
    """Returns orbital projections from a band structure.

    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        selection (list): A list of :obj:`tuple` or :obj:`string`
            identifying which projections to return. Projections can be
            specified by both element and orbital, for example::

                [('Bi', 's'), ('Bi', 'p'), ('S', 'p')]

            If just the element is specified then all the orbitals of
            that element are combined. For example, the following will
            combine all the S orbitals into a single projection::

                [('Bi', 's'), ('Bi', 'p'), 'S']

            Particular orbitals can also be combined, for example::

                [('Bi', 's'), ('Bi', 'p'), ('S', ('s', 'p', 'd'))]

        normalise (:obj:`str`, optional): Normalisation scheme for the
            projections. Options are:

            * ``'all'``: Projections normalised against the sum of all
              other projections.
            * ``'select'``: Projections normalised against the sum of the
              selected projections.
            * ``None``: No normalisation performed.

            Defaults to ``None``.

    Returns:
        list: A ``list`` of orbital projections, in the same order as
        specified in ``selection``, with the format::

            [ {spin: projections}, {spin: projections} ... ]

        Where spin is a :obj:`pymatgen.electronic_structure.core.Spin`
        object and projections is a :obj:`numpy.array` of::

            projections[band_index][kpoint_index]

        If there are no projections in the band structure, then an array
        of zeros is returned for each spin.
    """
    spins = bs.bands.keys()
    nbands = bs.nb_bands
    nkpts = len(bs.kpoints)

    # if we are to normalise the data later we need access to all projections
    elements = bs.structure.symbol_set
    all_orbitals = ['s', 'p', 'd', 'f']

    # dictio has the form: {'el1': [s, p, d, f], 'el2': [s, p, d, f]...}
    dictio = {el: all_orbitals for el in elements}

    # bs.get_projections_on_elements_and_orbitals() returns the data in a
    # really frustrating format, namely:
    #   {spin: [band_index][kpoint_index]{element: {orbital: projection}}}
    all_proj = bs.get_projections_on_elements_and_orbitals(dictio)

    # make a defaultdict of defaultdicts
    dict_proj = defaultdict(lambda: defaultdict(dict))

    # BUGFIX: each spin needs its OWN zero array. The previous
    # dict(zip(spins, [np.zeros(...)] * len(spins))) repeated a single
    # array object, so the in-place "+=" below mixed the spin channels
    # together for spin-polarised band structures.
    sum_proj = {spin: np.zeros((nbands, nkpts)) for spin in spins}

    # store the projections for all elements and orbitals in a useable format
    for spin, element, orbital in it.product(spins, elements, all_orbitals):
        # convert data to [nb][nk][projection]
        el_orb_proj = [[all_proj[spin][nb][nk][element][orbital]
                        for nk in range(nkpts)] for nb in range(nbands)]
        dict_proj[element][orbital][spin] = np.array(el_orb_proj)

        if normalise == 'all':
            # reuse the array just built rather than re-coercing the list
            sum_proj[spin] += dict_proj[element][orbital][spin]

    # now go through the selected orbitals and extract what's needed
    spec_proj = []
    for spec in selection:
        if isinstance(spec, str):
            # spec is just an element type, therefore sum all orbitals
            element = spec
            orbitals = all_orbitals
        else:
            element, orbitals = spec
            # even if there is only one orbital, make sure we can loop over it
            orbitals = tuple(orbitals)

        # again, each spin must get an independent zero array (see above)
        proj = {spin: np.zeros((nbands, nkpts)) for spin in spins}
        for spin, orbital in it.product(spins, orbitals):
            proj[spin] += dict_proj[element][orbital][spin]

            if normalise == 'select':
                sum_proj[spin] += dict_proj[element][orbital][spin]

        spec_proj.append(proj)

    if normalise:
        # to prevent warnings/errors relating to divide by zero,
        # catch warnings and surround divide with np.nan_to_num
        with np.errstate(divide='ignore', invalid='ignore'):
            for spin, i in it.product(spins, range(len(spec_proj))):
                spec_proj[i][spin] = np.nan_to_num(spec_proj[i][spin] /
                                                   sum_proj[spin])
    return spec_proj
|
python
|
{
"resource": ""
}
|
q19103
|
SBSPlotter._makeplot
|
train
|
def _makeplot(self, ax, fig, data, zero_to_efermi=True,
              vbm_cbm_marker=False, ymin=-6., ymax=6.,
              height=None, width=None,
              dos_plotter=None, dos_options=None, dos_label=None,
              aspect=None):
    """Tidy the band structure & add the density of states if required."""
    # when plotting absolute energies, mark the Fermi level explicitly
    if not zero_to_efermi:
        ax.axhline(self._bs.efermi, color=rcParams['ytick.color'])

    # x limits span the whole k-point path
    ax.set_xlim(0, data['distances'][-1][-1])

    # metals on an absolute scale are windowed about the Fermi level
    offset = (self._bs.efermi
              if self._bs.is_metal() and not zero_to_efermi else 0)
    ax.set_ylim(offset + ymin, offset + ymax)

    if vbm_cbm_marker:
        for point in data['cbm']:
            ax.scatter(point[0], point[1], color='C2', marker='o', s=100)
        for point in data['vbm']:
            ax.scatter(point[0], point[1], color='C3', marker='o', s=100)

    if dos_plotter:
        # the DOS is drawn on the second set of axes in the figure
        dos_ax = fig.axes[1]
        if not dos_options:
            dos_options = {}
        dos_options.update({'xmin': ymin, 'xmax': ymax})
        self._makedos(dos_ax, dos_plotter, dos_options, dos_label=dos_label)
    else:
        # keep correct aspect ratio for axes based on canvas size
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        if width is None:
            width = rcParams['figure.figsize'][0]
        if height is None:
            height = rcParams['figure.figsize'][1]
        if not aspect:
            aspect = height / width
        ax.set_aspect(aspect * ((x1 - x0) / (y1 - y0)))
|
python
|
{
"resource": ""
}
|
q19104
|
SBSPlotter._makedos
|
train
|
def _makedos(self, ax, dos_plotter, dos_options, dos_label=None):
    """Plot the DOS next to the bands (mirrors SDOSPlotter.get_plot)."""
    # don't use first 4 colours; these are the band structure line colours
    dos_cycle = cycler(
        'color', rcParams['axes.prop_cycle'].by_key()['color'][4:])
    with context({'axes.prop_cycle': dos_cycle}):
        plot_data = dos_plotter.dos_plot_data(**dos_options)
        mask = plot_data['mask']
        energies = plot_data['energies'][mask]

        # a single density entry means only one spin channel is present
        if len(plot_data['lines'][0][0]['dens']) == 1:
            spins = [Spin.up]
        else:
            spins = [Spin.up, Spin.down]

        for line_set in plot_data['lines']:
            for line, spin in it.product(line_set, spins):
                if spin == Spin.up:
                    label = line['label']
                    densities = line['dens'][spin][mask]
                else:
                    # spin-down is mirrored below zero and left unlabelled
                    label = ""
                    densities = -line['dens'][spin][mask]
                ax.fill_betweenx(energies, densities, 0, lw=0,
                                 facecolor=line['colour'],
                                 alpha=line['alpha'])
                ax.plot(densities, energies, label=label,
                        color=line['colour'])

        # x and y axis reversed versus normal dos plotting
        ax.set_ylim(dos_options['xmin'], dos_options['xmax'])
        ax.set_xlim(plot_data['ymin'], plot_data['ymax'])

        if dos_label is not None:
            ax.set_xlabel(dos_label)

        ax.set_xticklabels([])
        ax.legend(loc=2, frameon=False, ncol=1, bbox_to_anchor=(1., 1.))
|
python
|
{
"resource": ""
}
|
q19105
|
Kpath.correct_structure
|
train
|
def correct_structure(self, atol=1e-8):
    """Determine if the structure matches the standard primitive structure.

    The standard primitive will be different between seekpath and pymatgen
    high-symmetry paths, but this is handled by the specific subclasses.

    Args:
        atol (:obj:`float`, optional): Absolute tolerance used to compare
            the input structure with the primitive standard structure.

    Returns:
        bool: ``True`` if the structure is the same as the standard
        primitive, otherwise ``False``.
    """
    input_lattice = self.structure.lattice.matrix
    standard_lattice = self.prim.lattice.matrix
    return np.allclose(input_lattice, standard_lattice, atol=atol)
|
python
|
{
"resource": ""
}
|
q19106
|
Kpath.get_kpoints
|
train
|
def get_kpoints(self, line_density=20, cart_coords=False, phonopy=False):
    r"""Return a list of k-points and labels along the high-symmetry path.

    The format of the returned data will be different if phonopy is
    ``True`` or ``False``. This is because phonopy requires the labels and
    kpoints to be provided in a different format than kgen.

    Adapted from
    :obj:`pymatgen.symmetry.bandstructure.HighSymmKpath.get_kpoints`.

    Args:
        line_density (:obj:`int`, optional): Density of k-points along the
            path.
        cart_coords (:obj:`bool`, optional): Whether the k-points are
            returned in cartesian or reciprocal coordinates. Defaults to
            ``False`` (fractional coordinates).
        phonopy (:obj:`bool`, optional): Format the k-points and labels for
            use with phonopy. Defaults to ``False``.

    Returns:
        tuple: A :obj:`tuple` of the k-points along the high-symmetry path,
        and k-point labels. Returned as ``(kpoints, labels)``.

        If ``phonopy == False``, then:

            * ``kpoints`` is a :obj:`numpy.ndarray` of the k-point
              coordinates along the high-symmetry path. For example::

                  [[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0], [0.5, 0, 0.25],
                   [0.5, 0, 0.5]]

            * ``labels`` is a :obj:`list` of the high symmetry labels for
              each k-point (will be an empty :obj:`str` if the k-point has
              no label). For example::

                  ['\Gamma', '', 'X', '', 'Y']

        If ``phonopy == True``, then:

            * ``kpoints`` is a :obj:`list` of :obj:`numpy.ndarray`
              containing the k-points for each branch of the band
              structure. This means that the first and last k-points of a
              particular branch may be repeated. For example::

                  [[[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0]],
                   [[0.5, 0, 0], [0.5, 0, 0.25], [0.5, 0, 0.5]]]

            * ``labels`` is a :obj:`list` of the high symmetry labels.
              For example::

                  ['\Gamma', 'X', 'Y']
    """
    list_k_points = []
    sym_point_labels = []
    recip_lattice = self.structure.lattice.reciprocal_lattice
    for b in self.path:
        for i in range(1, len(b)):
            start = np.array(self.kpoints[b[i - 1]])
            end = np.array(self.kpoints[b[i]])
            # number of interpolated points scales with the cartesian
            # reciprocal-space distance between the segment end points
            distance = np.linalg.norm(
                recip_lattice.get_cartesian_coords(start) -
                recip_lattice.get_cartesian_coords(end))
            nb = int(np.ceil(distance * line_density))
            sym_point_labels.extend([b[i - 1]] + [''] * (nb - 1))
            # phonopy expects the final point of each segment to be
            # included explicitly, so sample one extra point
            limit = nb + 1 if phonopy else nb
            # NOTE: renamed the comprehension variable (was "i", which
            # shadowed the enclosing loop variable)
            kpts = [recip_lattice.get_cartesian_coords(start)
                    + float(j) / float(nb) *
                    (recip_lattice.get_cartesian_coords(end)
                     - recip_lattice.get_cartesian_coords(start))
                    for j in range(0, limit)]
            if phonopy:
                list_k_points.append(kpts)
            else:
                list_k_points.extend(kpts)

        # append last k-point to avoid repetition as in pymatgen
        if not phonopy:
            # for VASP we label every k-point. If a k-point has no
            # high-symmetry label then just use an empty string.
            sym_point_labels.append(b[-1])
            list_k_points.append(recip_lattice.get_cartesian_coords(end))

    if phonopy:
        # For phonopy, the labels for any discontinuities should be
        # combined. For example if the route is X -> Y | Z -> R, the path
        # will be [['X', 'Y'], ['Z', 'R']], and the labels should be
        # ['X', 'Z', 'R']
        sym_point_labels = []
        for i, path_branch in enumerate(self.path):
            for n, label in enumerate(path_branch):
                if i != 0 and n == 0:
                    sym_point_labels[-1] += " | {}".format(label)
                else:
                    sym_point_labels.append(label)

    if cart_coords:
        return list_k_points, sym_point_labels

    # convert to fractional coordinates (removed a redundant no-op
    # "frac_k_points = frac_k_points" self-assignment here)
    if phonopy:
        frac_k_points = [[recip_lattice.get_fractional_coords(k)
                          for k in p] for p in list_k_points]
    else:
        frac_k_points = [recip_lattice.get_fractional_coords(k)
                         for k in list_k_points]
    return frac_k_points, sym_point_labels
|
python
|
{
"resource": ""
}
|
q19107
|
Kpath.get_lattice_type
|
train
|
def get_lattice_type(number):
    """Return the lattice crystal system.

    Hexagonal cells are differentiated into rhombohedral and hexagonal
    lattices.

    Args:
        number (int): The international space group number.

    Returns:
        str: The lattice crystal system.
    """
    # trigonal space groups with a rhombohedral lattice
    rhombohedral_groups = (146, 148, 155, 160, 161, 166, 167)
    if number in rhombohedral_groups:
        return "rhombohedral"

    systems = (('triclinic', 1, 2), ('monoclinic', 3, 15),
               ('orthorhombic', 16, 74), ('tetragonal', 75, 142),
               ('trigonal', 143, 167), ('hexagonal', 168, 194),
               ('cubic', 195, 230))
    crystal_system = None
    for name, low, high in systems:
        if low <= number <= high:
            crystal_system = name
            break

    # remaining trigonal groups have a hexagonal lattice
    if crystal_system == "trigonal":
        return "hexagonal"
    return crystal_system
|
python
|
{
"resource": ""
}
|
q19108
|
get_fitting_data
|
train
|
def get_fitting_data(bs, spin, band_id, kpoint_id, num_sample_points=3):
    """Extract fitting data for band extrema based on spin, kpoint and band.

    Searches forward and backward from the extrema point, but will only
    sample the data if there are enough points in that direction.

    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        spin (:obj:`~pymatgen.electronic_structure.core.Spin`): Which spin
            channel to sample.
        band_id (int): Index of the band to sample.
        kpoint_id (int): Index of the kpoint to sample.
        num_sample_points (int, optional): Number of k-points to sample on
            either side of the extremum. Defaults to ``3``.

    Returns:
        list: The data necessary to calculate the effective mass, along with
        some metadata. Formatted as a :obj:`list` of :obj:`dict`, each with
        the keys:

            'energies' (:obj:`numpy.ndarray`)
                Band eigenvalues in eV.
            'distances' (:obj:`numpy.ndarray`)
                Distances of the k-points in reciprocal space.
            'band_id' (:obj:`int`)
                The index of the band,
            'spin' (:obj:`~pymatgen.electronic_structure.core.Spin`)
                The spin channel
            'start_kpoint' (:obj:`int`)
                The index of the k-point at which the band extrema occurs
            'end_kpoint' (:obj:`int`)
                The k-point towards which the data has been sampled.
    """
    # branch data provides data about the start and end points
    # of specific band paths
    branch_data = [b for b in bs.get_branch(kpoint_id)
                   if b['index'] == kpoint_id][0]
    start_kpoint = bs.kpoints[kpoint_id]
    fitting_data = []

    # check to see if there are enough points to sample from first
    # check in the forward direction
    if kpoint_id + num_sample_points <= branch_data['end_index']:
        # calculate sampling limits
        start_id = kpoint_id
        end_id = kpoint_id + num_sample_points + 1

        energies = np.array(bs.bands[spin][band_id][start_id:end_id].copy())
        dists = np.array(bs.distance[start_id:end_id].copy())

        # normalise eigenvalues and distances to the extremum
        energies -= bs.bands[spin][band_id][kpoint_id]
        dists -= bs.distance[kpoint_id]

        # mirror the data about the extremum to make fitting more reliable
        energies = np.concatenate([energies[::-1], energies[1:]])
        dists = np.concatenate([-dists[::-1], dists[1:]])

        end_kpoint = bs.kpoints[branch_data['end_index']]
        fitting_data.append({'energies': energies, 'distances': dists,
                             'band_id': band_id, 'spin': spin,
                             'start_kpoint': start_kpoint,
                             'end_kpoint': end_kpoint})

    # check in the backward direction
    if kpoint_id - num_sample_points >= branch_data['start_index']:
        # calculate sampling limits
        start_id = kpoint_id - num_sample_points
        end_id = kpoint_id + 1

        # wrap in np.array for consistency with the forward branch
        energies = np.array(bs.bands[spin][band_id][start_id:end_id].copy())
        dists = np.array(bs.distance[start_id:end_id].copy())

        # normalise eigenvalues and distances to the extremum
        energies -= bs.bands[spin][band_id][kpoint_id]
        dists -= bs.distance[kpoint_id]

        # mirror the data about the extremum (which is the LAST point of
        # this slice) to make fitting more reliable
        energies = np.concatenate([energies[:-1], energies[::-1]])
        dists = np.concatenate([dists[:-1], -dists[::-1]])

        end_kpoint = bs.kpoints[branch_data['start_index']]
        fitting_data.append({'energies': energies, 'distances': dists,
                             'band_id': band_id, 'spin': spin,
                             'start_kpoint': start_kpoint,
                             'end_kpoint': end_kpoint})
    return fitting_data
|
python
|
{
"resource": ""
}
|
q19109
|
fit_effective_mass
|
train
|
def fit_effective_mass(distances, energies, parabolic=True):
    """Fit the effective mass using a parabolic or nonparabolic fit.

    Args:
        distances (:obj:`numpy.ndarray`): The x-distances between k-points
            in reciprocal Angstroms, normalised to the band extrema.
        energies (:obj:`numpy.ndarray`): The band eigenvalues normalised
            to the eigenvalue of the band extrema.
        parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
            edges. If ``False`` then nonparabolic fitting will be
            attempted. Defaults to ``True``.

    Returns:
        float: The effective mass in units of electron rest mass,
        :math:`m_0`.
    """
    if parabolic:
        # curvature is twice the coefficient of the quadratic term
        quadratic_coeff = np.polyfit(distances, energies, 2)[0]
        c = 2 * quadratic_coeff
    else:
        # use a nonparabolic description of the bands
        def dispersion(x, alpha, d):
            return (np.sqrt(4 * alpha * d * x ** 2 + 1) - 1) / (2 * alpha)

        # bound alpha > 1e-8, since alpha = 0 causes an error
        fit_bounds = ((1e-8, -np.inf), (np.inf, np.inf))
        popt, _ = curve_fit(dispersion, distances, energies, p0=[1., 1.],
                            bounds=fit_bounds)
        c = 2 * popt[1]

    # coefficient is currently in eV/Angstrom^2/h_bar^2;
    # convert to atomic units (Hartree/bohr^2/h_bar^2)
    return (angstrom_to_bohr ** 2 / eV_to_hartree) / c
|
python
|
{
"resource": ""
}
|
q19110
|
get_path_data
|
train
|
def get_path_data(structure, mode='bradcrack', symprec=0.01, spg=None,
                  line_density=60, cart_coords=False, kpt_list=None,
                  labels=None, phonopy=False):
    r"""Get the k-point path, coordinates and symmetry labels for a structure.

    If a manual :obj:`list` of kpoints is supplied using the ``kpt_list``
    variable, the ``mode`` option will be ignored.

    The format of the returned data will be different if phonopy is ``True``
    or ``False``. This is because phonopy requires the labels and kpoints to
    be provided in a different format than kgen.

    Args:
        structure (:obj:`~pymatgen.core.structure.Structure`): The structure.
        mode (:obj:`str`, optional): Method used for calculating the
            high-symmetry path. The options are:

            bradcrack
                Use the paths from Bradley and Cracknell. See [brad]_.
            pymatgen
                Use the paths from pymatgen. See [curt]_.
            seekpath
                Use the paths from SeeK-path. See [seek]_.

        symprec (:obj:`float`, optional): The tolerance for determining the
            crystal symmetry.
        spg (:obj:`~pymatgen.symmetry.groups.SpaceGroup`, optional): Space
            group used to override the symmetry determined by spglib. This
            is not recommended and only provided for testing purposes.
            This option will only take effect when ``mode = 'bradcrack'``.
        line_density (:obj:`int`, optional): Density of k-points along the
            path.
        cart_coords (:obj:`bool`, optional): Whether the k-points are
            returned in cartesian or reciprocal coordinates. Defaults to
            ``False`` (fractional coordinates).
        kpt_list (:obj:`list`, optional): List of k-points to use, formatted
            as a list of subpaths, each containing a list of fractional
            k-points. For example::

                [ [[0., 0., 0.], [0., 0., 0.5]],
                  [[0.5, 0., 0.], [0.5, 0.5, 0.]] ]

            Will return points along ``0 0 0 -> 0 0 1/2 | 1/2 0 0
            -> 1/2 1/2 0``
        labels (:obj:`list`, optional): The k-point labels. These should
            be provided as a :obj:`list` of :obj:`str` for each subpath of
            the overall path. For example::

                [ ['Gamma', 'Z'], ['X', 'M'] ]

            combined with the above example for ``kpt_list`` would indicate
            the path: Gamma -> Z | X -> M. If no labels are provided,
            letters from A -> Z will be used instead.
        phonopy (:obj:`bool`, optional): Format the k-points and labels for
            use with phonopy. Defaults to ``False``.

    Returns:
        tuple: A tuple of a :obj:`~sumo.symmetry.kpath` object, the k-points
        along the high-symmetry path, and the k-point labels. Returned as
        ``(kpath, kpoints, labels)``.

        The type of ``kpath`` object will depend on the value of ``mode``
        and whether ``kpt_list`` is set.

        If ``phonopy == False``, then:

            * ``kpoints`` is a :obj:`numpy.ndarray` of the k-point
              coordinates along the high-symmetry path. For example::

                  [[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0], [0.5, 0, 0.25],
                   [0.5, 0, 0.5]]

            * ``labels`` is a :obj:`list` of the high symmetry labels for
              each k-point (will be an empty :obj:`str` if the k-point has
              no label). For example::

                  ['\Gamma', '', 'X', '', 'Y']

        If ``phonopy == True``, then:

            * ``kpoints`` is a :obj:`list` of :obj:`numpy.ndarray`
              containing the k-points for each branch of the band
              structure. This means that the first and last k-points of a
              particular branch may be repeated. For example::

                  [[[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0]],
                   [[0.5, 0, 0], [0.5, 0, 0.25], [0.5, 0, 0.5]]]

            * ``labels`` is a :obj:`list` of the high symmetry labels.
              For example::

                  ['\Gamma', 'X', 'Y']

    Raises:
        ValueError: If ``mode`` is not one of the recognised options and no
            ``kpt_list`` is given.
    """
    from sumo.symmetry import (BradCrackKpath, SeekpathKpath, PymatgenKpath,
                               CustomKpath)

    spg = _get_space_group_object(spg, mode)

    if kpt_list:
        kpath = CustomKpath(structure, kpt_list, labels, symprec=symprec)
    elif mode == 'bradcrack':
        kpath = BradCrackKpath(structure, symprec=symprec, spg=spg)
    elif mode == 'seekpath':
        kpath = SeekpathKpath(structure, symprec=symprec)
    elif mode == 'pymatgen':
        kpath = PymatgenKpath(structure, symprec=symprec)
    else:
        # previously an unknown mode fell through and raised a confusing
        # NameError when kpath was first used
        raise ValueError('Unrecognised high-symmetry path mode: '
                         '{}'.format(mode))

    # BUGFIX: cart_coords was accepted but never forwarded, so cartesian
    # output could not actually be requested through this function
    kpoints, labels = kpath.get_kpoints(line_density=line_density,
                                        cart_coords=cart_coords,
                                        phonopy=phonopy)
    path_str = kpath.path_string
    kpt_dict = kpath.kpoints

    logging.info('Structure information:')
    logging.info('\tSpace group number: {}'.format(kpath._spg_data['number']))
    logging.info('\tInternational symbol: {}'.format(kpath.spg_symbol))
    logging.info('\tLattice type: {}'.format(kpath.lattice_type))
    logging.info('\nk-point path:\n\t{}'.format(path_str))
    logging.info('\nk-points:')
    for label, kpoint in kpt_dict.items():
        coord_str = ' '.join(['{}'.format(c) for c in kpoint])
        logging.info('\t{}: {}'.format(label, coord_str))
    return kpath, kpoints, labels
|
python
|
{
"resource": ""
}
|
q19111
|
write_kpoint_files
|
train
|
def write_kpoint_files(filename, kpoints, labels, make_folders=False,
                       ibzkpt=None, kpts_per_split=None, directory=None,
                       cart_coords=False):
    r"""Write the k-points data to VASP KPOINTS files.

    Folders are named as 'split-01', 'split-02', etc ...
    KPOINTS files are named KPOINTS_band_split_01 etc ...

    Args:
        filename (:obj:`str`): Path to VASP structure file.
        kpoints (:obj:`numpy.ndarray`): The k-point coordinates along the
            high-symmetry path. For example::

                [[0, 0, 0], [0.25, 0, 0], [0.5, 0, 0], [0.5, 0, 0.25],
                 [0.5, 0, 0.5]]

        labels (:obj:`list`): The high symmetry labels for each k-point
            (will be an empty :obj:`str` if the k-point has no label). For
            example::

                ['\Gamma', '', 'X', '', 'Y']

        make_folders (:obj:`bool`, optional): Generate folders and copy in
            required files (INCAR, POTCAR, POSCAR, and possibly CHGCAR) from
            the current directory.
        ibzkpt (optional): A k-points object providing ``kpts``,
            ``kpts_weights`` and ``labels`` attributes — e.g. a pymatgen
            ``Kpoints`` instance read from an IBZKPT file (NOT a path
            string). If set, the generated k-points will be appended to
            the k-points in this object and given a weight of 0. This is
            necessary for hybrid band structure calculations.
        kpts_per_split (:obj:`int`, optional): If set, the k-points are split
            into separate k-point files (or folders) each containing the
            number of k-points specified. This is useful for hybrid band
            structure calculations where it is often intractable to
            calculate all k-points in the same calculation.
        directory (:obj:`str`, optional): The output file directory.
        cart_coords (:obj:`bool`, optional): Whether the k-points are
            provided in cartesian or reciprocal coordinates. Defaults to
            ``False`` (fractional coordinates).
    """
    if kpts_per_split:
        # chunk the path into fixed-size pieces
        kpt_splits = [kpoints[i:i+kpts_per_split] for
                      i in range(0, len(kpoints), kpts_per_split)]
        label_splits = [labels[i:i+kpts_per_split] for
                        i in range(0, len(labels), kpts_per_split)]
    else:
        kpt_splits = [kpoints]
        label_splits = [labels]

    if cart_coords:
        coord_type = 'cartesian'
        style = Kpoints.supported_modes.Cartesian
    else:
        coord_type = 'reciprocal'
        style = Kpoints.supported_modes.Reciprocal

    kpt_files = []
    for kpt_split, label_split in zip(kpt_splits, label_splits):
        if ibzkpt:
            # hybrid calculation so set k-point weights to 0
            kpt_weights = ibzkpt.kpts_weights + [0] * len(kpt_split)
            kpt_split = ibzkpt.kpts + kpt_split
            label_split = [''] * len(ibzkpt.labels) + label_split
        else:
            # non-SCF calculation so set k-point weights to 1
            kpt_weights = [1] * len(kpt_split)

        # the comment records the high-symmetry path of this split
        segment = ' -> '.join([label for label in label_split if label])
        kpt_file = Kpoints(comment=segment, num_kpts=len(kpt_split),
                           kpts=kpt_split, kpts_weights=kpt_weights,
                           style=style, coord_type=coord_type,
                           labels=label_split)
        kpt_files.append(kpt_file)

    # zero-padding width so that folder names sort correctly
    pad = int(math.floor(math.log10(len(kpt_files)))) + 2

    if make_folders:
        for i, kpt_file in enumerate(kpt_files):
            folder = 'split-{}'.format(str(i+1).zfill(pad))
            if directory:
                folder = os.path.join(directory, folder)

            try:
                os.makedirs(folder)
            except OSError as e:
                # refuse to clobber an existing calculation directory
                if e.errno == errno.EEXIST:
                    logging.error("\nERROR: Folders already exist, won't "
                                  "overwrite.")
                    sys.exit()
                else:
                    raise

            kpt_file.write_file(os.path.join(folder, 'KPOINTS'))
            # CHGCAR is only needed for the non-hybrid (non-SCF) workflow
            vasp_files = [filename, "INCAR", "POTCAR", "job"]
            vasp_files += [] if ibzkpt else ['CHGCAR']
            for vasp_file in vasp_files:
                if os.path.isfile(vasp_file):
                    shutil.copyfile(vasp_file,
                                    os.path.join(folder, vasp_file))
    else:
        for i, kpt_file in enumerate(kpt_files):
            if len(kpt_files) > 1:
                kpt_filename = 'KPOINTS_band_split_{:0d}'.format(i + 1)
            else:
                kpt_filename = 'KPOINTS_band'
            if directory:
                kpt_filename = os.path.join(directory, kpt_filename)
            kpt_file.write_file(kpt_filename)
|
python
|
{
"resource": ""
}
|
q19112
|
styled_plot
|
train
|
def styled_plot(*style_sheets):
    """Return a decorator that will apply matplotlib style sheets to a plot.

    ``style_sheets`` is a base set of styles, which will be ignored if
    ``no_base_style`` is set in the decorated function arguments.

    The style will further be overwritten by any styles in the ``style``
    optional argument of the decorated function.

    Args:
        style_sheets (:obj:`list`, :obj:`str`, or :obj:`dict`): Any
            matplotlib supported definition of a style sheet. Can be a
            list of style sheets.
    """
    import functools

    def decorator(get_plot):
        # BUGFIX: without functools.wraps the decorated plotting function
        # lost its __name__/__doc__, breaking introspection and docs
        @functools.wraps(get_plot)
        def wrapper(*args, fonts=None, style=None, no_base_style=False,
                    **kwargs):
            # start from the base styles unless explicitly disabled
            if no_base_style:
                list_style = []
            else:
                list_style = list(style_sheets)

            # user-supplied styles override the base styles
            if style is not None:
                if isinstance(style, list):
                    list_style += style
                else:
                    list_style += [style]

            # font overrides take the highest precedence
            if fonts is not None:
                list_style += [{'font.family': 'sans-serif',
                                'font.sans-serif': fonts}]

            matplotlib.pyplot.style.use(list_style)
            return get_plot(*args, **kwargs)
        return wrapper
    return decorator
|
python
|
{
"resource": ""
}
|
q19113
|
power_tick
|
train
|
def power_tick(val, pos, times_sign=r'\times'):
    """Custom power ticker function."""
    if val == 0:
        return r'$\mathregular{0}$'
    # the exponent of the magnitude; sign is carried by the coefficient
    exponent = int(np.log10(abs(val)))
    coeff = val / 10**exponent
    return r'$\mathregular{{{:.1f} {} 10^{:2d}}}$'.format(coeff,
                                                          times_sign,
                                                          exponent)
|
python
|
{
"resource": ""
}
|
q19114
|
rgbline
|
train
|
def rgbline(x, y, red, green, blue, alpha=1, linestyles="solid",
            linewidth=2.5):
    """Get a RGB coloured line for plotting.

    Args:
        x (list): x-axis data.
        y (list): y-axis data (can be multidimensional array).
        red (list): Red data (must have same shape as ``y``).
        green (list): Green data (must have same shape as ``y``).
        blue (list): blue data (must have same shape as ``y``).
        alpha (:obj:`list`, :obj:`int`, or :obj:`float`, optional): Alpha
            (transparency) data (must have same shape as ``y`` or be a
            single number applied to every band).
        linestyles (:obj:`str`, optional): Linestyle for plot. Options are
            ``"solid"`` or ``"dotted"``.
        linewidth (:obj:`float`, optional): Width of the plotted lines.

    Returns:
        :obj:`matplotlib.collections.LineCollection`: The coloured line
        segments.
    """
    y = np.array(y)
    if len(y.shape) == 1:
        # promote single-band data to a 1 x n array so the loop below can
        # treat the single- and multi-band cases identically
        y = np.array([y])
        red = np.array([red])
        green = np.array([green])
        blue = np.array([blue])
        alpha = np.array([alpha])
    elif isinstance(alpha, (int, float)):
        # a scalar alpha (int *or* float; the old int-only check broke
        # float alphas with multi-band data) applies to every band
        alpha = [alpha] * len(y)

    seg = []
    colours = []
    for yy, rr, gg, bb, aa in zip(y, red, green, blue, alpha):
        # build the (n-1) line segments joining consecutive points
        pts = np.array([x, yy]).T.reshape(-1, 1, 2)
        seg.extend(np.concatenate([pts[:-1], pts[1:]], axis=1))

        # colour each segment with the midpoint of its endpoint colours
        nseg = len(x) - 1
        r = [0.5 * (rr[i] + rr[i + 1]) for i in range(nseg)]
        g = [0.5 * (gg[i] + gg[i + 1]) for i in range(nseg)]
        b = [0.5 * (bb[i] + bb[i + 1]) for i in range(nseg)]
        # np.float was removed in numpy >= 1.24; use the builtin float
        a = np.ones(nseg, float) * aa
        colours.extend(list(zip(r, g, b, a)))

    lc = LineCollection(seg, colors=colours, rasterized=True,
                        linewidth=linewidth, linestyles=linestyles)
    return lc
|
python
|
{
"resource": ""
}
|
q19115
|
broaden_eps
|
train
|
def broaden_eps(dielectric, sigma):
    """Apply gaussian broadening to the dielectric response.

    Args:
        dielectric (tuple): The high-frequency dielectric data, following
            the same format as
            :attr:`pymatgen.io.vasp.outputs.Vasprun.dielectric`: a tuple of
            ``([energies], [[real_xx, ..., real_xz]],
            [[imag_xx, ..., imag_xz]])``.
        sigma (float): Standard deviation for gaussian broadening.

    Returns:
        tuple: The broadened response as ``(energies, real, imag)``, where
        the real and imaginary tensors have one column per component.
    """
    energies = dielectric[0]
    # sigma is in energy units; convert it to grid points using the average
    # spacing of the energy mesh
    nspace = len(energies) - 1
    spacing = sum(b - a for a, b in zip(energies[:-1], energies[1:])) / nspace
    grid_sigma = sigma / spacing

    # broaden each of the six tensor components independently
    real = [gaussian_filter1d(np.array(dielectric[1])[:, col], grid_sigma)
            for col in range(6)]
    imag = [gaussian_filter1d(np.array(dielectric[2])[:, col], grid_sigma)
            for col in range(6)]
    return (energies, np.array(real).T, np.array(imag).T)
|
python
|
{
"resource": ""
}
|
q19116
|
write_files
|
train
|
def write_files(abs_data, basename='absorption', prefix=None, directory=None):
    """Write the absorption or loss spectra to a file.

    Note that this function expects to receive an iterable series of spectra.

    Args:
        abs_data (tuple): Series (either :obj:`list` or :obj:`tuple`) of
            optical absorption or loss spectra. Each spectrum is a tuple of
            ``([energies], [alpha])`` for averaged data, or
            ``([energies], [alpha_xx, alpha_yy, alpha_zz])`` otherwise.
        basename (:obj:`str`, optional): Base of the output file names.
        prefix (:obj:`str`, optional): Prefix for file names.
        directory (:obj:`str`, optional): The directory in which to save
            files.
    """
    prefix_txt = '{}_'.format(prefix) if prefix else ''
    multiple = len(abs_data) > 1
    for index, spectrum in enumerate(abs_data):
        # number the files only when more than one spectrum is written
        suffix = '_{}'.format(index + 1) if multiple else ''
        filename = prefix_txt + basename + suffix + '.dat'
        if directory:
            filename = os.path.join(directory, filename)

        energies, alpha = spectrum[0], spectrum[1]
        if len(alpha.shape) == 2:
            # anisotropic data: one column per cartesian direction
            header = 'energy(eV) alpha_xx alpha_yy alpha_zz'
            out = np.concatenate((energies[:, None], alpha), axis=1)
        else:
            # averaged data: a single absorption column
            header = 'energy(eV) alpha'
            out = np.stack((energies, alpha), axis=1)
        np.savetxt(filename, out, header=header)
|
python
|
{
"resource": ""
}
|
q19117
|
load_phonopy
|
train
|
def load_phonopy(filename, structure, dim, symprec=0.01, primitive_matrix=None,
                 factor=VaspToTHz, symmetrise=True, born=None, write_fc=False):
    """Load phonopy output and return an ``phonopy.Phonopy`` object.
    Args:
        filename (str): Path to phonopy output. Can be any of ``FORCE_SETS``,
            ``FORCE_CONSTANTS``, or ``force_constants.hdf5``.
        structure (:obj:`~pymatgen.core.structure.Structure`): The unitcell
            structure.
        dim (list): The supercell size, as a :obj:`list` of :obj:`float`.
        symprec (:obj:`float`, optional): The tolerance for determining the
            crystal symmetry.
        primitive_matrix (:obj:`list`, optional): The transformation matrix
            from the conventional to primitive cell. Only required when the
            conventional cell was used as the starting structure. Should be
            provided as a 3x3 :obj:`list` of :obj:`float`.
        factor (:obj:`float`, optional): The conversion factor for phonon
            frequency. Defaults to :obj:`phonopy.units.VaspToTHz`.
        symmetrise (:obj:`bool`, optional): Symmetrise the force constants.
            Defaults to ``True``.
        born (:obj:`str`, optional): Path to file containing Born effective
            charges. Should be in the same format as the file produced by the
            ``phonopy-vasp-born`` script provided by phonopy.
        write_fc (:obj:`bool` or :obj:`str`, optional): Write the force
            constants to disk. If ``True``, a ``FORCE_CONSTANTS`` file will be
            written. Alternatively, if set to ``"hdf5"``, a
            ``force_constants.hdf5`` file will be written. Defaults to
            ``False`` (force constants not written).
    Returns:
        :obj:`phonopy.Phonopy`: A phonopy object with force constants (and,
        if ``born`` was given, non-analytical correction parameters) set.
        Exits the program if the force constant data does not match ``dim``.
    """
    unitcell = get_phonopy_structure(structure)
    num_atom = unitcell.get_number_of_atoms()
    # expected number of atoms in the supercell: det(dim) unit cells
    num_satom = determinant(dim) * num_atom
    phonon = Phonopy(unitcell, dim, primitive_matrix=primitive_matrix,
                     factor=factor, symprec=symprec)
    if 'FORCE_CONSTANTS' == filename or '.hdf5' in filename:
        # if force constants exist, use these to avoid recalculating them
        if '.hdf5' in filename:
            fc = file_IO.read_force_constants_hdf5(filename)
        elif 'FORCE_CONSTANTS' == filename:
            fc = file_IO.parse_FORCE_CONSTANTS(filename=filename)
        # sanity check: the force constant matrix must match the supercell
        if fc.shape[0] != num_satom:
            msg = ("\nNumber of atoms in supercell is not consistent with the "
                   "matrix shape of\nforce constants read from {}.\nPlease"
                   "carefully check --dim.")
            logging.error(msg.format(filename))
            sys.exit()
        phonon.set_force_constants(fc)
    elif 'FORCE_SETS' == filename:
        # load the force sets from file and calculate force constants
        # NOTE(review): parse_FORCE_SETS() reads the default path rather than
        # ``filename`` -- works here only because filename == 'FORCE_SETS'
        fs = file_IO.parse_FORCE_SETS()
        if fs['natom'] != num_satom:
            msg = ("\nNumber of atoms in supercell is not consistent with the "
                   "the data in FORCE_SETS\nPlease carefully check --dim.")
            logging.error(msg.format(filename))
            sys.exit()
        phonon.set_displacement_dataset(fs)
        logging.info("Calculating force constants...")
        phonon.produce_force_constants()
    if born:
        # load born parameters from a file
        nac_params = file_IO.parse_BORN(unitcell, filename=born)
        # set the nac unit conversion factor manual, specific to VASP
        nac_params['factor'] = Hartree * Bohr
        phonon.set_nac_params(nac_params)
    if symmetrise:
        phonon.symmetrize_force_constants()
    # optionally persist the (possibly symmetrised) force constants
    if write_fc == 'hdf5':
        file_IO.write_force_constants_to_hdf5(phonon.get_force_constants())
        logging.info("Force constants written to force_constants.hdf5.")
    elif write_fc:
        file_IO.write_FORCE_CONSTANTS(phonon.get_force_constants())
        logging.info("Force constants written to FORCE_CONSTANTS.")
    return phonon
|
python
|
{
"resource": ""
}
|
q19118
|
load_dos
|
train
|
def load_dos(vasprun, elements=None, lm_orbitals=None, atoms=None,
             gaussian=None, total_only=False, log=False,
             adjust_fermi=True):
    """Load a vasprun and extract the total and projected density of states.
    Args:
        vasprun (str): Path to a vasprun.xml or vasprun.xml.gz file or
            a :obj:`pymatgen.io.vasp.outputs.Vasprun` object.
        elements (:obj:`dict`, optional): The elements and orbitals to extract
            from the projected density of states. Should be provided as a
            :obj:`dict` with the keys as the element names and corresponding
            values as a :obj:`tuple` of orbitals. For example, the following
            would extract the Bi s, px, py and d orbitals::
                {'Bi': ('s', 'px', 'py', 'd')}
            If an element is included with an empty :obj:`tuple`, all orbitals
            for that species will be extracted. If ``elements`` is not set or
            set to ``None``, all elements for all species will be extracted.
        lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into
            their lm contributions (e.g. p -> px, py, pz). Should be provided
            as a :obj:`dict`, with the elements names as keys and a
            :obj:`tuple` of orbitals as the corresponding values. For example,
            the following would be used to decompose the oxygen p and d
            orbitals::
                {'O': ('p', 'd')}
        atoms (:obj:`dict`, optional): Which atomic sites to use when
            calculating the projected density of states. Should be provided as
            a :obj:`dict`, with the element names as keys and a :obj:`tuple` of
            :obj:`int` specifying the atomic indices as the corresponding
            values. The elemental projected density of states will be summed
            only over the atom indices specified. If an element is included
            with an empty :obj:`tuple`, then all sites for that element will
            be included. The indices are 0 based for each element specified in
            the POSCAR. For example, the following will calculate the density
            of states for the first 4 Sn atoms and all O atoms in the
            structure::
                {'Sn': (1, 2, 3, 4), 'O': (, )}
            If ``atoms`` is not set or set to ``None`` then all atomic sites
            for all elements will be considered.
        gaussian (:obj:`float`, optional): Broaden the density of states using
            convolution with a gaussian function. This parameter controls the
            sigma or standard deviation of the gaussian distribution.
        total_only (:obj:`bool`, optional): Only extract the total density of
            states. Defaults to ``False``.
        log (:obj:`bool`): Print logging messages. Defaults to ``False``.
        adjust_fermi (:obj:`bool`, optional): Shift the Fermi level to sit at
            the valence band maximum (does not affect metals).
    Returns:
        dict: The total and projected density of states. Formatted as a
        :obj:`tuple` of ``(dos, pdos)``, where ``dos`` is a
        :obj:`~pymatgen.electronic_structure.dos.Dos` object containing the
        total density of states and ``pdos`` is a :obj:`dict` of
        :obj:`dict` mapping the elements and their orbitals to
        :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For example::
            {
                'Bi': {'s': Dos, 'p': Dos ... },
                'S': {'s': Dos}
            }
    """
    # accept either a path or a pre-parsed Vasprun object
    if isinstance(vasprun, str):
        vr = Vasprun(vasprun)
    else:
        vr = vasprun
    band = vr.get_band_structure()
    dos = vr.complete_dos
    # the zero reference is the Fermi level for metals, else the VBM
    if band.is_metal():
        if log:
            logging.info('System is metallic')
        zero_point = vr.efermi
    else:
        if log:
            logging.info('Band gap: {:.3f}'.
                         format(band.get_band_gap()['energy']))
            logging.info('DOS band gap: {:.3f}'.format(dos.get_gap()))
        zero_point = band.get_vbm()['energy']
    if adjust_fermi:
        # equivalent to dos.efermi = zero_point; pins E_F to the reference
        dos.efermi -= dos.efermi - zero_point
    # NOTE(review): for Gaussian/Fermi/Methfessel smearing the energies are
    # shifted down by SIGMA -- presumably to counteract the smearing-induced
    # shift of the band edges; confirm against the sumo docs
    if vr.parameters['ISMEAR'] in [-1, 0, 1]:
        dos.energies -= vr.parameters['SIGMA']
    if gaussian:
        # broaden the total and every site/orbital projection consistently
        dos.densities = dos.get_smeared_densities(gaussian)
        for site in dos.pdos:
            for orbital in dos.pdos[site]:
                dos.pdos[site][orbital] = dos.get_site_orbital_dos(
                    site, orbital).get_smeared_densities(gaussian)
    if vr.parameters['LSORBIT']:
        # pymatgen includes the spin down channel for SOC calculations, even
        # though there is no density here. We remove this channel so the
        # plotting is easier later on.
        del dos.densities[Spin.down]
        for site in dos.pdos:
            for orbital in dos.pdos[site]:
                del dos.pdos[site][orbital][Spin.down]
    pdos = {}
    if not total_only:
        pdos = get_pdos(dos, lm_orbitals=lm_orbitals, atoms=atoms,
                        elements=elements)
    return dos, pdos
|
python
|
{
"resource": ""
}
|
q19119
|
get_pdos
|
train
|
def get_pdos(dos, lm_orbitals=None, atoms=None, elements=None):
    """Extract the projected density of states from a CompleteDos object.

    Args:
        dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The
            density of states.
        elements (:obj:`dict`, optional): The elements and orbitals to
            extract, as ``{element: (orbitals,)}``. An empty tuple selects
            all orbitals; ``None`` selects every element and orbital.
        lm_orbitals (:obj:`dict`, optional): Orbitals to decompose into lm
            contributions (e.g. p -> px, py, pz), as
            ``{element: (orbitals,)}``.
        atoms (:obj:`dict`, optional): Which sites to sum over, as
            ``{element: (indices,)}`` with indices 0-based per element in
            POSCAR order. An empty tuple selects all sites of that element;
            ``None`` selects every site of every element.

    Returns:
        dict: The projected density of states, as
        ``{element: {orbital: Dos}}``.
    """
    if not elements:
        # no selection given: take every element in the structure
        symbols = dos.structure.symbol_set
        elements = {symbol: None for symbol in symbols}

    pdos = {}
    for element in elements:
        if atoms and element not in atoms:
            continue

        # The atoms selection is zero-indexed per element, so gather every
        # site of this element in structure order before filtering.
        species = get_el_sp(element)
        element_sites = [site for site in dos.structure.sites
                         if site.specie == species]
        sites = [site for index, site in enumerate(element_sites)
                 if not atoms or (element in atoms
                                  and index in atoms[element])]

        lm = (lm_orbitals[element]
              if (lm_orbitals and element in lm_orbitals) else None)
        orbitals = (elements[element]
                    if elements and element in elements else None)
        pdos[element] = get_element_pdos(dos, element, sites, lm, orbitals)
    return pdos
|
python
|
{
"resource": ""
}
|
q19120
|
get_element_pdos
|
train
|
def get_element_pdos(dos, element, sites, lm_orbitals=None, orbitals=None):
    """Get the projected density of states for an element.

    Args:
        dos (:obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The
            density of states.
        element (str): Element symbol. E.g. 'Zn'.
        sites (tuple): The sites over which to sum the density of states.
        lm_orbitals (:obj:`tuple`, optional): Orbitals to decompose into lm
            contributions (e.g. ``('p',)`` extracts px, py and pz).
        orbitals (:obj:`tuple`, optional): Restrict extraction to these
            orbitals; ``None`` extracts all.

    Returns:
        dict: The projected density of states as ``{orbital_name: Dos}``,
        summed over ``sites``.
    """
    el_dos = {}
    for site in sites:
        # s/p/d level orbitals: keep those requested (or all if no filter)
        # and skip any that will be lm-decomposed instead
        spd = []
        for orb in dos.get_element_spd_dos(element).keys():
            wanted = (not orbitals) or orb.name in orbitals
            decomposed = lm_orbitals and orb.name in lm_orbitals
            if wanted and not decomposed:
                spd.append(orb)

        # lm-decomposed orbitals whose parent shell was requested
        lm = [orb for orb in Orbital
              if lm_orbitals and orb.name[0] in lm_orbitals]

        # accumulate this site's contribution into the running totals
        for orb in spd:
            site_dos = dos.get_site_spd_dos(site)[orb]
            if orb.name in el_dos:
                el_dos[orb.name] = el_dos[orb.name] + site_dos
            else:
                el_dos[orb.name] = site_dos
        for orb in lm:
            site_dos = dos.get_site_orbital_dos(site, orb)
            if orb.name in el_dos:
                el_dos[orb.name] = el_dos[orb.name] + site_dos
            else:
                el_dos[orb.name] = site_dos
    return el_dos
|
python
|
{
"resource": ""
}
|
q19121
|
write_files
|
train
|
def write_files(dos, pdos, prefix=None, directory=None, zero_to_efermi=True):
    """Write the density of states data to disk.

    Writes ``total_dos.dat`` plus one ``<element>_dos.dat`` file per
    element in ``pdos`` (with ``prefix`` prepended if given).

    Args:
        dos (:obj:`~pymatgen.electronic_structure.dos.Dos` or \
         :obj:`~pymatgen.electronic_structure.dos.CompleteDos`): The total
            density of states.
        pdos (dict): The projected density of states. Formatted as a
            :obj:`dict` of :obj:`dict` mapping the elements and their orbitals
            to :obj:`~pymatgen.electronic_structure.dos.Dos` objects. For
            example::
                {
                    'Bi': {'s': Dos, 'p': Dos},
                    'S': {'s': Dos}
                }
        prefix (:obj:`str`, optional): A prefix for file names.
        directory (:obj:`str`, optional): The directory in which to save
            files.
        zero_to_efermi (:obj:`bool`, optional): Normalise the energy such
            that the Fermi level is set as 0 eV.
    """
    # (spin, sign, label) triplets make formatting the data much easier
    # later; spin-down densities are written with a negative sign
    if len(dos.densities) == 1:
        sdata = [[Spin.up, 1, '']]
    else:
        sdata = [[Spin.up, 1, '(up)'], [Spin.down, -1, '(down)']]

    header = ['energy']
    eners = dos.energies - dos.efermi if zero_to_efermi else dos.energies
    tdos_data = [eners]
    for spin, sign, label in sdata:
        header.append('dos{}'.format(label))
        tdos_data.append(dos.densities[spin] * sign)
    tdos_data = np.stack(tdos_data, axis=1)

    filename = "{}_total_dos.dat".format(prefix) if prefix else 'total_dos.dat'
    if directory:
        filename = os.path.join(directory, filename)
    np.savetxt(filename, tdos_data, header=" ".join(header))

    # (removed a dead "spin = len(dos.densities)" assignment here: it was
    # never read and was immediately shadowed by the loop variable below)
    for el, el_pdos in pdos.items():
        header = ['energy']
        pdos_data = [eners]
        for orb in sort_orbitals(el_pdos):
            for spin, sign, label in sdata:
                header.append('{}{}'.format(orb, label))
                pdos_data.append(el_pdos[orb].densities[spin] * sign)
        pdos_data = np.stack(pdos_data, axis=1)

        if prefix:
            filename = '{}_{}_dos.dat'.format(prefix, el)
        else:
            filename = '{}_dos.dat'.format(el)
        if directory:
            filename = os.path.join(directory, filename)
        np.savetxt(filename, pdos_data, header=" ".join(header))
|
python
|
{
"resource": ""
}
|
q19122
|
sort_orbitals
|
train
|
def sort_orbitals(element_pdos):
    """Sort the orbitals of an element's projected density of states.

    Orbitals are returned in a canonical order (s < p < d < f, with the lm
    decomposed orbitals slotted after their parent shell). This is useful
    for plotting/saving. Orbital labels not in the canonical list are
    omitted from the result.

    Args:
        element_pdos (dict): An element's pdos, formatted as
            ``{orbital: dos}`` where dos is a
            :obj:`~pymatgen.electronic_structure.dos.Dos` object. For
            example::
                {'s': dos, 'px': dos}

    Returns:
        list: The sorted orbital labels present in ``element_pdos``.
    """
    canonical = ('s', 'p', 'py', 'pz', 'px',
                 'd', 'dxy', 'dyz', 'dz2', 'dxz', 'dx2',
                 'f', 'f_3', 'f_2', 'f_1', 'f_0', 'f1', 'f2', 'f3')
    return [label for label in canonical if label in element_pdos]
|
python
|
{
"resource": ""
}
|
q19123
|
bandstats
|
train
|
def bandstats(filenames=None, num_sample_points=3, temperature=None,
              degeneracy_tol=1e-4, parabolic=True):
    """Calculate the effective masses of the bands of a semiconductor.
    Args:
        filenames (:obj:`str` or :obj:`list`, optional): Path to vasprun.xml
            or vasprun.xml.gz file. If no filenames are provided, the code
            will search for vasprun.xml or vasprun.xml.gz files in folders
            named 'split-0*'. Failing that, the code will look for a vasprun in
            the current directory. If a :obj:`list` of vasprun files is
            provided, these will be combined into a single band structure.
        num_sample_points (:obj:`int`, optional): Number of k-points to sample
            when fitting the effective masses.
        temperature (:obj:`int`, optional): Find band edges within kB * T of
            the valence band maximum and conduction band minimum. Not currently
            implemented.
        degeneracy_tol (:obj:`float`, optional): Tolerance for determining the
            degeneracy of the valence band maximum and conduction band minimum.
        parabolic (:obj:`bool`, optional): Use a parabolic fit of the band
            edges. If ``False`` then nonparabolic fitting will be attempted.
            Defaults to ``True``.
    Returns:
        dict: The hole and electron effective masses. Formatted as a
        :obj:`dict` with keys: ``'hole_data'`` and ``'electron_data'``. The
        data is a :obj:`list` of :obj:`dict` with the keys:
        'effective_mass' (:obj:`float`)
            The effective mass in units of electron rest mass, :math:`m_0`.
        'energies' (:obj:`numpy.ndarray`)
            Band eigenvalues in eV.
        'distances' (:obj:`numpy.ndarray`)
            Distances of the k-points in reciprocal space.
        'band_id' (:obj:`int`)
            The index of the band,
        'spin' (:obj:`~pymatgen.electronic_structure.core.Spin`)
            The spin channel
        'start_kpoint' (:obj:`int`)
            The index of the k-point at which the band extrema occurs
        'end_kpoint' (:obj:`int`)
            The k-point towards which the data has been sampled.
    """
    if not filenames:
        filenames = find_vasprun_files()
    elif isinstance(filenames, str):
        filenames = [filenames]
    # parse each vasprun and merge the band structures into one object
    bandstructures = []
    for vr_file in filenames:
        vr = BSVasprun(vr_file, parse_projected_eigen=False)
        bs = vr.get_band_structure(line_mode=True)
        bandstructures.append(bs)
    bs = get_reconstructed_band_structure(bandstructures)
    # effective masses are only meaningful for systems with a band gap
    if bs.is_metal():
        logging.error('ERROR: System is metallic!')
        sys.exit()
    _log_band_gap_information(bs)
    vbm_data = bs.get_vbm()
    cbm_data = bs.get_cbm()
    logging.info('\nValence band maximum:')
    _log_band_edge_information(bs, vbm_data)
    logging.info('\nConduction band minimum:')
    _log_band_edge_information(bs, cbm_data)
    if parabolic:
        logging.info('\nUsing parabolic fitting of the band edges')
    else:
        logging.info('\nUsing nonparabolic fitting of the band edges')
    if temperature:
        logging.error('ERROR: This feature is not yet supported!')
    else:
        # Work out where the hole and electron band edges are.
        # Fortunately, pymatgen does this for us. Points at which to calculate
        # the effective mass are identified as a tuple of:
        # (spin, band_index, kpoint_index)
        hole_extrema = []
        for spin, bands in vbm_data['band_index'].items():
            hole_extrema.extend([(spin, band, kpoint) for band in bands
                                 for kpoint in vbm_data['kpoint_index']])
        elec_extrema = []
        for spin, bands in cbm_data['band_index'].items():
            elec_extrema.extend([(spin, band, kpoint) for band in bands
                                 for kpoint in cbm_data['kpoint_index']])
        # extract the data we need for fitting from the band structure
        hole_data = []
        for extrema in hole_extrema:
            hole_data.extend(get_fitting_data(bs, *extrema,
                             num_sample_points=num_sample_points))
        elec_data = []
        for extrema in elec_extrema:
            elec_data.extend(get_fitting_data(bs, *extrema,
                             num_sample_points=num_sample_points))
    # calculate the effective masses and log the information
    logging.info('\nHole effective masses:')
    for data in hole_data:
        eff_mass = fit_effective_mass(data['distances'], data['energies'],
                                      parabolic=parabolic)
        data['effective_mass'] = eff_mass
        _log_effective_mass_data(data, bs.is_spin_polarized, mass_type='m_h')
    logging.info('\nElectron effective masses:')
    for data in elec_data:
        eff_mass = fit_effective_mass(data['distances'], data['energies'],
                                      parabolic=parabolic)
        data['effective_mass'] = eff_mass
        _log_effective_mass_data(data, bs.is_spin_polarized)
    return {'hole_data': hole_data, 'electron_data': elec_data}
|
python
|
{
"resource": ""
}
|
q19124
|
_log_band_gap_information
|
train
|
def _log_band_gap_information(bs):
    """Log data about the direct and indirect band gaps.
    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure whose gaps are reported via ``logging``.
    """
    bg_data = bs.get_band_gap()
    # only report the indirect gap separately when the gap is not direct
    if not bg_data['direct']:
        logging.info('Indirect band gap: {:.3f} eV'.format(bg_data['energy']))
    direct_data = bs.get_direct_band_gap_dict()
    if bs.is_spin_polarized:
        # the direct gap is the smaller of the two spin channels
        direct_bg = min((spin_data['value']
                        for spin_data in direct_data.values()))
        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))
        # report the location of the direct gap for each spin channel
        for spin, spin_data in direct_data.items():
            direct_kindex = spin_data['kpoint_index']
            # kpt_str is a module-level template -- presumably formats the
            # fractional coordinates passed as ``k``
            direct_kpoint = bs.kpoints[direct_kindex].frac_coords
            direct_kpoint = kpt_str.format(k=direct_kpoint)
            eq_kpoints = bs.get_equivalent_kpoints(direct_kindex)
            k_indices = ', '.join(map(str, eq_kpoints))
            # add 1 to band indices to be consistent with VASP band numbers.
            b_indices = ', '.join([str(i+1) for i in spin_data['band_indices']])
            logging.info('  {}:'.format(spin.name.capitalize()))
            logging.info('    k-point: {}'.format(direct_kpoint))
            logging.info('    k-point indices: {}'.format(k_indices))
            logging.info('    Band indices: {}'.format(b_indices))
    else:
        # non-spin-polarised: all data lives in the Spin.up channel
        direct_bg = direct_data[Spin.up]['value']
        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))
        direct_kindex = direct_data[Spin.up]['kpoint_index']
        direct_kpoint = kpt_str.format(k=bs.kpoints[direct_kindex].frac_coords)
        k_indices = ', '.join(map(str,
                                  bs.get_equivalent_kpoints(direct_kindex)))
        # add 1 to band indices to be consistent with VASP band numbers
        b_indices = ', '.join([str(i+1) for i in
                               direct_data[Spin.up]['band_indices']])
        logging.info('  k-point: {}'.format(direct_kpoint))
        logging.info('  k-point indices: {}'.format(k_indices))
        logging.info('  Band indices: {}'.format(b_indices))
|
python
|
{
"resource": ""
}
|
q19125
|
_log_band_edge_information
|
train
|
def _log_band_edge_information(bs, edge_data):
    """Log data about the valence band maximum or conduction band minimum.
    Args:
        bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
            The band structure.
        edge_data (dict): The :obj:`dict` from ``bs.get_vbm()`` or
            ``bs.get_cbm()``
    """
    if bs.is_spin_polarized:
        # list the band indices of each spin channel, annotated with the
        # channel name; indices are 1-based to match VASP conventions
        spins = edge_data['band_index'].keys()
        b_indices = [', '.join([str(i+1) for i in
                     edge_data['band_index'][spin]])
                     + '({})'.format(spin.name.capitalize()) for spin in spins]
        b_indices = ', '.join(b_indices)
    else:
        b_indices = ', '.join([str(i+1) for i in
                               edge_data['band_index'][Spin.up]])
    kpoint = edge_data['kpoint']
    kpoint_str = kpt_str.format(k=kpoint.frac_coords)
    k_indices = ', '.join(map(str, edge_data['kpoint_index']))
    # a labelled k-point is a high-symmetry point; otherwise describe the
    # edge by the branch it falls on
    if kpoint.label:
        k_loc = kpoint.label
    else:
        branch = bs.get_branch(edge_data['kpoint_index'][0])[0]
        k_loc = 'between {}'.format(branch['name'])
    logging.info('  Energy: {:.3f} eV'.format(edge_data['energy']))
    logging.info('  k-point: {}'.format(kpoint_str))
    logging.info('  k-point location: {}'.format(k_loc))
    logging.info('  k-point indices: {}'.format(k_indices))
    logging.info('  Band indices: {}'.format(b_indices))
|
python
|
{
"resource": ""
}
|
q19126
|
_log_effective_mass_data
|
train
|
def _log_effective_mass_data(data, is_spin_polarized, mass_type='m_e'):
    """Log one effective mass result with its band and k-point path.

    Args:
        data (dict): The effective mass data, with keys ``'effective_mass'``
            (float, units of electron rest mass), ``'band_id'`` (int),
            ``'spin'`` (:obj:`~pymatgen.electronic_structure.core.Spin`),
            ``'start_kpoint'`` and ``'end_kpoint'`` (the band extremum and
            the k-point towards which the fit was sampled).
        is_spin_polarized (bool): Whether the system is spin polarized.
        mass_type (:obj:`str`, optional): Label for the logged mass
            (e.g. ``'m_e'`` or ``'m_h'``).
    """
    spin_txt = ' ({})'.format(data['spin'].name) if is_spin_polarized else ''
    # band indices are reported 1-based to be consistent with VASP
    band_str = 'band {}{}'.format(data['band_id'] + 1, spin_txt)

    start = data['start_kpoint']
    end = data['end_kpoint']

    # describe the sampling direction as "start -> end", attaching
    # high-symmetry labels where available
    path = kpt_str.format(k=start.frac_coords)
    if start.label:
        path += ' ({})'.format(start.label)
    path += ' -> ' + kpt_str.format(k=end.frac_coords)
    if end.label:
        path += ' ({})'.format(end.label)

    logging.info('  {}: {:.3f} | {} | {}'.format(mass_type,
                                                 data['effective_mass'],
                                                 band_str, path))
|
python
|
{
"resource": ""
}
|
q19127
|
SPhononBSPlotter._makeplot
|
train
|
    def _makeplot(self, ax, fig, data, ymin=None, ymax=None, height=6,
                  width=6, dos=None, color=None):
        """Utility method to tidy phonon band structure diagrams.

        Sets the axis limits, optionally draws a dashed line at zero
        frequency when imaginary modes are shown, plots an accompanying
        phonon DOS panel if given, and fixes the aspect ratio otherwise.

        Args:
            ax: The matplotlib axes holding the band structure.
            fig: The parent figure (its second axes is used for the DOS).
            data (dict): Plot data with at least ``'frequency'`` and
                ``'distances'`` entries.
            ymin/ymax (float, optional): y-axis limits; derived from the
                frequency range (plus 5 % padding) when not given.
            height/width (float, optional): Figure dimensions used for the
                aspect ratio; fall back to rcParams when ``None``.
            dos (optional): Phonon DOS data to plot alongside the bands.
            color (optional): Line colour; defaults to ``'C0'``.
        """
        # Define colours
        if color is None:
            color = 'C0'  # Default to first colour in matplotlib series
        # set x and y limits
        tymax = ymax if (ymax is not None) else max(flatten(data['frequency']))
        tymin = ymin if (ymin is not None) else min(flatten(data['frequency']))
        pad = (tymax - tymin) * 0.05
        # self.imag_tol -- presumably the threshold below which modes are
        # treated as imaginary; frequencies above it clamp the axis at zero
        if ymin is None:
            ymin = 0 if tymin >= self.imag_tol else tymin - pad
        ymax = ymax if ymax else tymax + pad
        ax.set_ylim(ymin, ymax)
        ax.set_xlim(0, data['distances'][-1][-1])
        # mark zero frequency when imaginary modes are visible
        if ymin < 0:
            dashline = True
            ax.axhline(0, color=rcParams['grid.color'], linestyle='--',
                       dashes=dashes,
                       zorder=0,
                       linewidth=rcParams['ytick.major.width'])
        else:
            dashline = False
        if dos is not None:
            self._plot_phonon_dos(dos, ax=fig.axes[1], color=color,
                                  dashline=dashline)
        else:
            # keep correct aspect ratio; match axis to canvas
            x0, x1 = ax.get_xlim()
            y0, y1 = ax.get_ylim()
            if width is None:
                width = rcParams['figure.figsize'][0]
            if height is None:
                height = rcParams['figure.figsize'][1]
            ax.set_aspect((height/width) * ((x1-x0)/(y1-y0)))
|
python
|
{
"resource": ""
}
|
q19128
|
find_vasprun_files
|
train
|
def find_vasprun_files():
    """Search for vasprun files from the current directory.

    The precedence order for file locations is:

    1. First search for folders named: 'split-0*'
    2. Else, look in the current directory.

    The split folder names should always be zero based, therefore easily
    sortable.

    Returns:
        list: Paths to the vasprun files found (one per folder). Exits the
        program if any folder lacks a vasprun file.
    """
    folders = sorted(glob.glob('split-*')) or ['.']

    filenames = []
    for folder in folders:
        # prefer the plain vasprun, falling back to the gzipped variant
        candidates = (os.path.join(folder, 'vasprun.xml'),
                      os.path.join(folder, 'vasprun.xml.gz'))
        for candidate in candidates:
            if os.path.exists(candidate):
                filenames.append(candidate)
                break
        else:
            logging.error('ERROR: No vasprun.xml found in {}!'.format(folder))
            sys.exit()
    return filenames
|
python
|
{
"resource": ""
}
|
q19129
|
save_data_files
|
train
|
def save_data_files(vr, bs, prefix=None, directory=None):
    """Write the band structure data files to disk.

    Args:
        vr (`Vasprun`): Pymatgen `Vasprun` object.
        bs (`BandStructureSymmLine`): Calculated band structure.
        prefix (`str`, optional): Prefix for data file.
        directory (`str`, optional): Directory in which to save the data.

    Returns:
        The filename of the written data file.
    """
    basename = '{}_band.dat'.format(prefix) if prefix else 'band.dat'
    filename = os.path.join(directory if directory else '.', basename)

    # energies are referenced to the VBM, or to the Fermi level for metals
    zero = vr.efermi if bs.is_metal() else bs.get_vbm()['energy']

    # spin-up bands first; spin-down appended only for polarised runs
    spins = [Spin.up]
    if bs.is_spin_polarized:
        spins.append(Spin.down)

    with open(filename, 'w') as f:
        f.write('#k-distance eigenvalue[eV]\n')
        for spin in spins:
            for band in bs.bands[spin]:
                # one blank line between bands (gnuplot-style blocks)
                for dist, energy in zip(bs.distance, band):
                    f.write('{:.8f} {:.8f}\n'.format(dist, energy - zero))
                f.write('\n')
    return filename
|
python
|
{
"resource": ""
}
|
q19130
|
save_data_files
|
train
|
def save_data_files(bs, prefix=None, directory=None):
    """Write the phonon band structure data files to disk.

    Args:
        bs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`):
            The phonon band structure.
        prefix (:obj:`str`, optional): Prefix for data file.
        directory (:obj:`str`, optional): Directory in which to save the data.

    Returns:
        str: The filename of the written data file.
    """
    basename = '{}_phonon_band.dat'.format(prefix) if prefix else 'phonon_band.dat'
    out_path = os.path.join(directory if directory else '.', basename)
    with open(out_path, 'w') as f:
        f.write('#k-distance frequency[THz]\n')
        for band in bs.bands:
            # One "distance frequency" row per k-point, then a blank
            # line to separate bands.
            rows = ['{:.8f} {:.8f}\n'.format(d, e)
                    for d, e in zip(bs.distance, band)]
            f.write(''.join(rows))
            f.write('\n')
    return out_path
|
python
|
{
"resource": ""
}
|
q19131
|
kgen
|
train
|
def kgen(filename='POSCAR', directory=None, make_folders=False, symprec=0.01,
         kpts_per_split=None, ibzkpt=None, spg=None, density=60,
         mode='bradcrack', cart_coords=False, kpt_list=None, labels=None):
    """Generate KPOINTS files for VASP band structure calculations.
    This script provides a wrapper around several frameworks used to generate
    k-points along a high-symmetry path. The paths found in Bradley and
    Cracknell, SeeK-path, and pymatgen are all supported.
    It is important to note that the standard primitive cell symmetry is
    different between SeeK-path and pymatgen. If the correct the structure
    is not used, the high-symmetry points (and band path) may be invalid.
    Args:
        filename (:obj:`str`, optional): Path to VASP structure file. Default
            is ``POSCAR``.
        directory (:obj:`str`, optional): The output file directory.
        make_folders (:obj:`bool`, optional): Generate folders and copy in
            required files (INCAR, POTCAR, POSCAR, and possibly CHGCAR) from
            the current directory.
        symprec (:obj:`float`, optional): The precision used for determining
            the cell symmetry.
        kpts_per_split (:obj:`int`, optional): If set, the k-points are split
            into separate k-point files (or folders) each containing the number
            of k-points specified. This is useful for hybrid band structure
            calculations where it is often intractable to calculate all
            k-points in the same calculation.
        ibzkpt (:obj:`str`, optional): Path to IBZKPT file. If set, the
            generated k-points will be appended to the k-points in this file
            and given a weight of 0. This is necessary for hybrid band
            structure calculations.
        spg (:obj:`str` or :obj:`int`, optional): The space group international
            number or symbol to override the symmetry determined by spglib.
            This is not recommended and only provided for testing purposes.
            This option will only take effect when ``mode = 'bradcrack'``.
        density (:obj:`int`, optional): Density of k-points along the
            path.
        mode (:obj:`str`, optional): Method used for calculating the
            high-symmetry path. The options are:
            bradcrack
                Use the paths from Bradley and Cracknell. See [brad]_.
            pymatgen
                Use the paths from pymatgen. See [curt]_.
            seekpath
                Use the paths from SeeK-path. See [seek]_.
        cart_coords (:obj:`bool`, optional): Whether the k-points are returned
            in cartesian or reciprocal coordinates. Defaults to ``False``
            (fractional coordinates).
        kpt_list (:obj:`list`, optional): List of k-points to use, formatted as
            a list of subpaths, each containing a list of fractional k-points.
            For example::
                [ [[0., 0., 0.], [0., 0., 0.5]],
                  [[0.5, 0., 0.], [0.5, 0.5, 0.]] ]
            Will return points along ``0 0 0 -> 0 0 1/2 | 1/2 0 0
            -> 1/2 1/2 0``
        labels (:obj:`list`, optional): The k-point labels. These should
            be provided as a :obj:`list` of :obj:`str` for each subpath of the
            overall path. For example::
                [ ['Gamma', 'Z'], ['X', 'M'] ]
            combined with the above example for ``kpt_list`` would indicate the
            path: Gamma -> Z | X -> M. If no labels are provided, letters from
            A -> Z will be used instead. If a label begins with '@' it will be
            concealed when plotting with sumo-bandplot.
    """
    poscar = Poscar.from_file(filename)
    kpath, kpoints, labels = get_path_data(poscar.structure, mode=mode,
                                           symprec=symprec, kpt_list=kpt_list,
                                           labels=labels, spg=spg,
                                           line_density=density)
    logging.info('\nk-point label indices:')
    for i, label in enumerate(labels):
        if label:
            logging.info('\t{}: {}'.format(label, i+1))
    # Warn when the input structure is not the standardised primitive cell
    # expected by the chosen path convention; save the expected cell so the
    # user can rerun with it.
    if not kpt_list and not np.allclose(poscar.structure.lattice.matrix,
                                        kpath.prim.lattice.matrix):
        prim_filename = '{}_prim'.format(os.path.basename(filename))
        kpath.prim.to(filename=prim_filename)
        logging.error("\nWARNING: The input structure does not match the "
                      "expected standard\nprimitive symmetry, the path may be "
                      "incorrect! Use at your own risk.\n\nThe correct "
                      "symmetry primitive structure has been saved as {}.".
                      format(prim_filename))
    ibz = _parse_ibzkpt(ibzkpt)
    # Interactive prompt: hybrid runs with many k-points are usually split.
    if make_folders and ibz and kpts_per_split is None:
        logging.info("\nFound {} total kpoints in path, do you want to "
                     "split them up? (y/n)".format(len(kpoints)))
        if input()[0].lower() == 'y':
            logging.info("How many kpoints per file?")
            kpts_per_split = int(input())
    write_kpoint_files(filename, kpoints, labels, make_folders=make_folders,
                       ibzkpt=ibz, kpts_per_split=kpts_per_split,
                       directory=directory, cart_coords=cart_coords)
|
python
|
{
"resource": ""
}
|
q19132
|
dosplot
|
train
|
def dosplot(filename=None, prefix=None, directory=None, elements=None,
            lm_orbitals=None, atoms=None, subplot=False, shift=True,
            total_only=False, plot_total=True, legend_on=True,
            legend_frame_on=False, legend_cutoff=3., gaussian=None, height=6.,
            width=8., xmin=-6., xmax=6., num_columns=2, colours=None, yscale=1,
            xlabel='Energy (eV)', ylabel='Arb. units',
            style=None, no_base_style=False,
            image_format='pdf', dpi=400, plt=None, fonts=None):
    """A script to plot the density of states from a vasprun.xml file.
    Args:
        filename (:obj:`str`, optional): Path to a vasprun.xml file (can be
            gzipped).
        prefix (:obj:`str`, optional): Prefix for file names.
        directory (:obj:`str`, optional): The directory in which to save files.
        elements (:obj:`dict`, optional): The elements and orbitals to extract
            from the projected density of states. Should be provided as a
            :obj:`dict` with the keys as the element names and corresponding
            values as a :obj:`tuple` of orbitals. For example, the following
            would extract the Bi s, px, py and d orbitals::
                {'Bi': ('s', 'px', 'py', 'd')}
            If an element is included with an empty :obj:`tuple`, all orbitals
            for that species will be extracted. If ``elements`` is not set or
            set to ``None``, all elements for all species will be extracted.
        lm_orbitals (:obj:`dict`, optional): The orbitals to decompose into
            their lm contributions (e.g. p -> px, py, pz). Should be provided
            as a :obj:`dict`, with the elements names as keys and a
            :obj:`tuple` of orbitals as the corresponding values. For example,
            the following would be used to decompose the oxygen p and d
            orbitals::
                {'O': ('p', 'd')}
        atoms (:obj:`dict`, optional): Which atomic sites to use when
            calculating the projected density of states. Should be provided as
            a :obj:`dict`, with the element names as keys and a :obj:`tuple` of
            :obj:`int` specifying the atomic indices as the corresponding
            values. The elemental projected density of states will be summed
            only over the atom indices specified. If an element is included
            with an empty :obj:`tuple`, then all sites for that element will
            be included. The indices are 0 based for each element specified in
            the POSCAR. For example, the following will calculate the density
            of states for the first 4 Sn atoms and all O atoms in the
            structure::
                {'Sn': (1, 2, 3, 4), 'O': ()}
            If ``atoms`` is not set or set to ``None`` then all atomic sites
            for all elements will be considered.
        subplot (:obj:`bool`, optional): Plot the density of states for each
            element on separate subplots. Defaults to ``False``.
        shift (:obj:`bool`, optional): Shift the energies such that the valence
            band maximum (or Fermi level for metals) is at 0 eV. Defaults to
            ``True``.
        total_only (:obj:`bool`, optional): Only extract the total density of
            states. Defaults to ``False``.
        plot_total (:obj:`bool`, optional): Plot the total density of states.
            Defaults to ``True``.
        legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults
            to ``True``.
        legend_frame_on (:obj:`bool`, optional): Plot a frame around the
            graph legend. Defaults to ``False``.
        legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
            maximum density of states within the plotting range) for an
            elemental orbital to be labelled in the legend. This prevents
            the legend from containing labels for orbitals that have very
            little contribution in the plotting range.
        gaussian (:obj:`float`, optional): Broaden the density of states using
            convolution with a gaussian function. This parameter controls the
            sigma or standard deviation of the gaussian distribution.
        height (:obj:`float`, optional): The height of the plot.
        width (:obj:`float`, optional): The width of the plot.
        xmin (:obj:`float`, optional): The minimum energy on the x-axis.
        xmax (:obj:`float`, optional): The maximum energy on the x-axis.
        num_columns (:obj:`int`, optional): The number of columns in the
            legend.
        colours (:obj:`dict`, optional): Use custom colours for specific
            element and orbital combinations. Specified as a :obj:`dict` of
            :obj:`dict` of the colours. For example::
                {
                    'Sn': {'s': 'r', 'p': 'b'},
                    'O': {'s': '#000000'}
                }
            The colour can be a hex code, series of rgb value, or any other
            format supported by matplotlib.
        xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy)
        ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS)
        yscale (:obj:`float`, optional): Scaling factor for the y-axis.
        style (:obj:`list` or :obj:`str`, optional): (List of) matplotlib style
            specifications, to be composed on top of Sumo base style.
        no_base_style (:obj:`bool`, optional): Prevent use of sumo base style.
            This can make alternative styles behave more predictably.
        image_format (:obj:`str`, optional): The image file format. Can be any
            format supported by matplotlib, including: png, jpg, pdf, and svg.
            Defaults to pdf.
        dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
            the image.
        plt (:obj:`matplotlib.pyplot`, optional): A
            :obj:`matplotlib.pyplot` object to use for plotting.
        fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
            a single font, specified as a :obj:`str`, or several fonts,
            specified as a :obj:`list` of :obj:`str`.
    Returns:
        A matplotlib pyplot object.
    """
    # Auto-discover a vasprun in the working directory if none was given.
    if not filename:
        if os.path.exists('vasprun.xml'):
            filename = 'vasprun.xml'
        elif os.path.exists('vasprun.xml.gz'):
            filename = 'vasprun.xml.gz'
        else:
            logging.error('ERROR: No vasprun.xml found!')
            sys.exit()
    dos, pdos = load_dos(filename, elements, lm_orbitals, atoms, gaussian,
                         total_only)
    save_files = False if plt else True  # don't save if pyplot object provided
    plotter = SDOSPlotter(dos, pdos)
    plt = plotter.get_plot(subplot=subplot, width=width, height=height,
                           xmin=xmin, xmax=xmax, yscale=yscale,
                           colours=colours, plot_total=plot_total,
                           legend_on=legend_on, num_columns=num_columns,
                           legend_frame_on=legend_frame_on,
                           xlabel=xlabel, ylabel=ylabel,
                           legend_cutoff=legend_cutoff, dpi=dpi, plt=plt,
                           fonts=fonts, style=style,
                           no_base_style=no_base_style)
    if save_files:
        basename = 'dos.{}'.format(image_format)
        filename = '{}_{}'.format(prefix, basename) if prefix else basename
        if directory:
            filename = os.path.join(directory, filename)
        plt.savefig(filename, format=image_format, dpi=dpi,
                    bbox_inches='tight')
        # Also dump the raw DOS data alongside the image.
        write_files(dos, pdos, prefix=prefix, directory=directory)
    else:
        return plt
|
python
|
{
"resource": ""
}
|
q19133
|
_atoms
|
train
|
def _atoms(atoms_string):
"""Parse the atom string.
Args:
atoms_string (str): The atoms to plot, in the form ``"C.1.2.3,"``.
Returns:
dict: The atomic indices over which to sum the DOS. Formatted as::
{Element: [atom_indices]}.
Indices are zero indexed for each atomic species. If an element symbol
is included with an empty list, then all sites for that species are
considered.
"""
atoms = {}
for split in atoms_string.split(','):
sites = split.split('.')
el = sites.pop(0)
sites = list(map(int, sites))
atoms[el] = np.array(sites) - 1
return atoms
|
python
|
{
"resource": ""
}
|
q19134
|
f
|
train
|
def f(s):
    """
    Basic support for 3.6's f-strings, in 3.5!
    Formats "s" using appropriate globals and locals
    dictionaries. This f-string:
        f"hello a is {a}"
    simply becomes
        f("hello a is {a}")
    In other words, just throw parentheses around the
    string, and you're done!
    Implemented internally using str.format_map().
    This means it doesn't support expressions:
        f("two minus three is {2-3}")
    And it doesn't support function calls:
        f("how many elements? {len(my_list)}")
    But most other f-string features work.
    """
    # Build the caller's effective namespace: builtins, overridden by
    # the caller's globals, overridden by its locals.
    caller = sys._getframe(1)
    namespace = dict(builtins.__dict__)
    namespace.update(caller.f_globals)
    namespace.update(caller.f_locals)
    return s.format_map(namespace)
|
python
|
{
"resource": ""
}
|
q19135
|
which
|
train
|
def which(cmd, path="PATH"):
    """Find cmd on PATH.

    Args:
        cmd (str): Command name or path to look up. If it already exists
            as given, it is returned unchanged.
        path (str): Name of the environment variable holding the
            colon-separated search path. Defaults to ``"PATH"``.

    Returns:
        str or None: ``cmd`` itself if it exists as given, the first
        matching path found on the search path, or ``None``.
    """
    # Guard against empty input; cmd[0] below would raise IndexError.
    if not cmd:
        return None
    if os.path.exists(cmd):
        return cmd
    # An absolute path that doesn't exist can't be found via PATH lookup.
    if cmd[0] == '/':
        return None
    # os.getenv may return None when the variable is unset; treat that
    # as an empty search path instead of crashing on .split().
    search = os.getenv(path) or ''
    for segment in search.split(":"):
        program = os.path.normpath(os.path.join(segment, cmd))
        if os.path.exists(program):
            return program
    return None
|
python
|
{
"resource": ""
}
|
q19136
|
help
|
train
|
def help(subcommand=None):
    """
    Print help for subcommands.
    Prints the help text for the specified subcommand.
    If subcommand is not specified, prints one-line summaries for every command.
    """
    # No subcommand: print the overview and a one-line summary per command.
    if not subcommand:
        print("blurb version", __version__)
        print()
        print("Management tool for CPython Misc/NEWS and Misc/NEWS.d entries.")
        print()
        print("Usage:")
        print("    blurb [subcommand] [options...]")
        print()
        # print list of subcommands
        summaries = []
        longest_name_len = -1
        for name, fn in subcommands.items():
            # Names starting with '-' are hidden (option-like aliases).
            if name.startswith('-'):
                continue
            longest_name_len = max(longest_name_len, len(name))
            if not fn.__doc__:
                error("help is broken, no docstring for " + fn.__name__)
            # The summary is the first line of the command's docstring.
            fields = fn.__doc__.lstrip().split("\n")
            if not fields:
                first_line = "(no help available)"
            else:
                first_line = fields[0]
            summaries.append((name, first_line))
        summaries.sort()
        print("Available subcommands:")
        print()
        for name, summary in summaries:
            print(" ", name.ljust(longest_name_len), " ", summary)
        print()
        print("If blurb is run without any arguments, this is equivalent to 'blurb add'.")
        sys.exit(0)
    # Specific subcommand: build a usage line from its signature, then
    # print its full docstring.
    fn = get_subcommand(subcommand)
    doc = fn.__doc__.strip()
    if not doc:
        error("help is broken, no docstring for " + subcommand)
    options = []
    positionals = []
    nesting = 0
    for name, p in inspect.signature(fn).parameters.items():
        if p.kind == inspect.Parameter.KEYWORD_ONLY:
            # Keyword-only params render as [-x|--xyz] option flags.
            short_option = name[0]
            options.append(f(" [-{short_option}|--{name}]"))
        elif p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD:
            positionals.append(" ")
            # Defaulted positionals are wrapped in nested [ ... ] brackets.
            has_default = (p.default != inspect._empty)
            if has_default:
                positionals.append("[")
                nesting += 1
            positionals.append(f("<{name}>"))
    positionals.append("]" * nesting)
    parameters = "".join(options + positionals)
    print(f("blurb {subcommand}{parameters}"))
    print()
    print(doc)
    sys.exit(0)
|
python
|
{
"resource": ""
}
|
q19137
|
release
|
train
|
def release(version):
    """
    Move all new blurbs to a single blurb file for the release.
    This is used by the release manager when cutting a new release.
    """
    if version == ".":
        # harvest version number from dirname of repo
        # I remind you, we're in the Misc subdir right now
        version = os.path.basename(root)
    # Refuse to merge into a version file that already exists.
    existing_filenames = glob_blurbs(version)
    if existing_filenames:
        error("Sorry, can't handle appending 'next' files to an existing version (yet).")
    output = f("Misc/NEWS.d/{version}.rst")
    filenames = glob_blurbs("next")
    blurbs = Blurbs()
    date = current_date()
    if not filenames:
        # No pending blurbs: record an explicit "no changes" entry.
        print(f("No blurbs found. Setting {version} as having no changes."))
        body = f("There were no new changes in version {version}.\n")
        metadata = {"no changes": "True", "bpo": "0", "section": "Library", "date": date, "nonce": nonceify(body)}
        blurbs.append((metadata, body))
    else:
        no_changes = None
        count = len(filenames)
        print(f('Merging {count} blurbs to "{output}".'))
        for filename in filenames:
            if not filename.endswith(".rst"):
                continue
            blurbs.load_next(filename)
        # Stamp the release date onto the first (most recent) entry.
        metadata = blurbs[0][0]
        metadata['release date'] = date
    print("Saving.")
    blurbs.save(output)
    git_add_files.append(output)
    flush_git_add_files()
    how_many = len(filenames)
    print(f("Removing {how_many} 'next' files from git."))
    git_rm_files.extend(filenames)
    flush_git_rm_files()
    # sanity check: ensuring that saving/reloading the merged blurb file works.
    blurbs2 = Blurbs()
    blurbs2.load(output)
    assert blurbs2 == blurbs, f("Reloading {output} isn't reproducible?!")
    print()
    print("Ready for commit.")
python
|
{
"resource": ""
}
|
q19138
|
Blurbs.load
|
train
|
def load(self, filename, *, metadata=None):
    """
    Read a blurb file.
    Broadly equivalent to blurb.parse(open(filename).read()).
    """
    with open(filename, "rt", encoding="utf-8") as fp:
        contents = fp.read()
    self.parse(contents, metadata=metadata, filename=filename)
|
python
|
{
"resource": ""
}
|
q19139
|
Blurbs._parse_next_filename
|
train
|
def _parse_next_filename(filename):
    """
    Parses a "next" filename into its equivalent blurb metadata.
    Returns a dict.
    """
    # Only the last two path components matter: <section dir>/<basename>.
    pieces = filename.split(os.sep)
    section, filename = pieces[-2:]
    section = unsanitize_section(section)
    assert section in sections, f("Unknown section {section}")
    # Basename layout: <date>.<bpo-NNN ...>.<nonce>.rst
    fields = [piece.strip() for piece in filename.split(".")]
    assert len(fields) >= 4, f("Can't parse 'next' filename! filename {filename!r} fields {fields}")
    assert fields[-1] == "rst"
    metadata = {"date": fields[0], "nonce": fields[-2], "section": section}
    for field in fields[1:-2]:
        for keyword in ("bpo",):
            _, matched, tail = field.partition(keyword + "-")
            if matched:
                metadata[keyword] = tail.strip()
                break
        else:
            assert False, "Found unparsable field in 'next' filename: " + repr(field)
    return metadata
|
python
|
{
"resource": ""
}
|
q19140
|
Blurbs.save_split_next
|
train
|
def save_split_next(self):
    """
    Save out blurbs created from "blurb split".
    They don't have dates, so we have to get creative.
    """
    written = []
    # Fake "dates" are zero-padded sequence numbers; the extra leading
    # zero guarantees they sort after all newly created blurbs.
    pad = int(math.ceil(math.log(len(self), 10))) + 1
    counter = 1
    scratch = Blurbs()
    while self:
        metadata, body = self.pop()
        metadata['date'] = str(counter).rjust(pad, '0')
        if 'release date' in metadata:
            del metadata['release date']
        scratch.append((metadata, body))
        target = scratch._extract_next_filename()
        scratch.save(target)
        scratch.clear()
        written.append(target)
        counter += 1
    return written
|
python
|
{
"resource": ""
}
|
q19141
|
cherry_pick_cli
|
train
|
def cherry_pick_cli(
    ctx, dry_run, pr_remote, abort, status, push, config_path, commit_sha1, branches
):
    """cherry-pick COMMIT_SHA1 into target BRANCHES."""
    click.echo("\U0001F40D \U0001F352 \u26CF")
    chosen_config_path, config = load_config(config_path)
    try:
        cherry_picker = CherryPicker(
            pr_remote,
            commit_sha1,
            branches,
            dry_run=dry_run,
            push=push,
            config=config,
            chosen_config_path=chosen_config_path,
        )
    except InvalidRepoException:
        click.echo(f"You're not inside a {config['repo']} repo right now! \U0001F645")
        sys.exit(-1)
    except ValueError as exc:
        ctx.fail(exc)
    # `abort` is tri-state: None means neither --abort nor --continue was
    # passed; True aborts the in-flight pick, False continues it.
    if abort is not None:
        if abort:
            cherry_picker.abort_cherry_pick()
        else:
            cherry_picker.continue_cherry_pick()
    elif status:
        click.echo(cherry_picker.status())
    else:
        # Normal path: perform the backport; known failures exit nonzero.
        try:
            cherry_picker.backport()
        except BranchCheckoutException:
            sys.exit(-1)
        except CherryPickException:
            sys.exit(-1)
|
python
|
{
"resource": ""
}
|
q19142
|
get_base_branch
|
train
|
def get_base_branch(cherry_pick_branch):
    """
    return '2.7' from 'backport-sha-2.7'
    raises ValueError if the specified branch name is not of a form that
    cherry_picker would have created
    """
    prefix, sha, base_branch = cherry_pick_branch.split("-", 2)
    if prefix != "backport":
        raise ValueError(
            'branch name is not prefixed with "backport-". Is this a cherry_picker branch?'
        )
    if re.match("[0-9a-f]{7,40}", sha) is None:
        raise ValueError(f"branch name has an invalid sha: {sha}")
    # Raises ValueError when the sha isn't a commit present in this repo.
    validate_sha(sha)
    # Raises ValueError when base_branch carries no version number.
    version_from_branch(base_branch)
    return base_branch
|
python
|
{
"resource": ""
}
|
q19143
|
validate_sha
|
train
|
def validate_sha(sha):
    """
    Validate that a hexdigest sha is a valid commit in the repo
    raises ValueError if the sha does not reference a commit within the repo
    """
    try:
        subprocess.check_output(["git", "log", "-r", sha],
                                stderr=subprocess.STDOUT)
    except subprocess.SubprocessError:
        raise ValueError(
            f"The sha listed in the branch name, {sha}, is not present in the repository"
        )
|
python
|
{
"resource": ""
}
|
q19144
|
version_from_branch
|
train
|
def version_from_branch(branch):
"""
return version information from a git branch name
"""
try:
return tuple(
map(
int,
re.match(r"^.*(?P<version>\d+(\.\d+)+).*$", branch)
.groupdict()["version"]
.split("."),
)
)
except AttributeError as attr_err:
raise ValueError(
f"Branch {branch} seems to not have a version in its name."
) from attr_err
|
python
|
{
"resource": ""
}
|
q19145
|
get_current_branch
|
train
|
def get_current_branch():
    """
    Return the current branch
    """
    out = subprocess.check_output(
        ["git", "rev-parse", "--abbrev-ref", "HEAD"],
        stderr=subprocess.STDOUT,
    )
    return out.decode("utf-8").strip()
|
python
|
{
"resource": ""
}
|
q19146
|
normalize_commit_message
|
train
|
def normalize_commit_message(commit_message):
    """
    Return a tuple of title and body from the commit message
    """
    # Title is the first line; body is everything after it, with leading
    # blank lines stripped.
    title, _, body = commit_message.partition("\n")
    return title, body.lstrip("\n")
|
python
|
{
"resource": ""
}
|
q19147
|
is_git_repo
|
train
|
def is_git_repo():
    """Check whether the current folder is a Git repo."""
    try:
        subprocess.run(("git", "rev-parse", "--git-dir"),
                       stdout=subprocess.DEVNULL, check=True)
    except subprocess.CalledProcessError:
        return False
    return True
|
python
|
{
"resource": ""
}
|
q19148
|
find_config
|
train
|
def find_config(revision):
    """Locate and return the default config for current revison."""
    if not is_git_repo():
        return None
    cfg_path = f"{revision}:.cherry_picker.toml"
    # cat-file -t tells us whether the path exists (as a blob) at that rev.
    try:
        out = subprocess.check_output(("git", "cat-file", "-t", cfg_path),
                                      stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return None
    return cfg_path if out.strip().decode("utf-8") == "blob" else None
|
python
|
{
"resource": ""
}
|
q19149
|
load_config
|
train
|
def load_config(path=None):
    """Choose and return the config path and it's contents as dict."""
    # NOTE: Initially I wanted to inherit Path to encapsulate Git access
    # there but there's no easy way to subclass pathlib.Path :(
    head_sha = get_sha1_from("HEAD")
    revision = head_sha
    # Precedence: explicit argument > path saved in Git config > discovery.
    saved_config_path = load_val_from_git_cfg("config_path")
    if not path and saved_config_path is not None:
        path = saved_config_path
    if path is None:
        path = find_config(revision=revision)
    else:
        # Qualify a bare file path with the HEAD sha so that it can be
        # read from Git object storage as "<revision>:<path>".
        if ":" not in path:
            path = f"{head_sha}:{path}"
        revision, _col, _path = path.partition(":")
        if not revision:
            revision = head_sha
    config = DEFAULT_CONFIG
    if path is not None:
        config_text = from_git_rev_read(path)
        d = toml.loads(config_text)
        # Layer the file's values on top of the built-in defaults.
        config = config.new_child(d)
    return path, config
|
python
|
{
"resource": ""
}
|
q19150
|
save_cfg_vals_to_git_cfg
|
train
|
def save_cfg_vals_to_git_cfg(**cfg_map):
    """Save a set of options into Git config."""
    for suffix, value in cfg_map.items():
        # Keyword names use underscores; Git config keys use dashes.
        key = f'cherry-picker.{suffix.replace("_", "-")}'
        subprocess.check_call(("git", "config", "--local", key, value),
                              stderr=subprocess.STDOUT)
|
python
|
{
"resource": ""
}
|
q19151
|
wipe_cfg_vals_from_git_cfg
|
train
|
def wipe_cfg_vals_from_git_cfg(*cfg_opts):
    """Remove a set of options from Git config."""
    for suffix in cfg_opts:
        # Keyword names use underscores; Git config keys use dashes.
        key = f'cherry-picker.{suffix.replace("_", "-")}'
        subprocess.check_call(("git", "config", "--local", "--unset-all", key),
                              stderr=subprocess.STDOUT)
|
python
|
{
"resource": ""
}
|
q19152
|
load_val_from_git_cfg
|
train
|
def load_val_from_git_cfg(cfg_key_suffix):
    """Retrieve one option from Git config."""
    cfg_key = f'cherry-picker.{cfg_key_suffix.replace("_", "-")}'
    try:
        raw = subprocess.check_output(
            ("git", "config", "--local", "--get", cfg_key),
            stderr=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError:
        # Key absent (or not in a repo): behave as "no value stored".
        return None
    return raw.strip().decode("utf-8")
|
python
|
{
"resource": ""
}
|
q19153
|
from_git_rev_read
|
train
|
def from_git_rev_read(path):
    """Retrieve given file path contents of certain Git revision.

    Args:
        path: A ``<revision>:<path>`` identifier, e.g.
            ``"HEAD:.cherry_picker.toml"``.

    Returns:
        str: The trailing-whitespace-stripped file contents.

    Raises:
        ValueError: If ``path`` lacks a ``:`` separator or Git fails.
    """
    if ":" not in path:
        raise ValueError("Path identifier must start with a revision hash.")
    # NOTE(review): "-t" asks `git show` for tree/type info; presumably the
    # intent is a plain `git show <rev>:<path>` -- confirm against callers.
    cmd = "git", "show", "-t", path
    try:
        return subprocess.check_output(cmd).rstrip().decode("utf-8")
    except subprocess.CalledProcessError:
        raise ValueError
|
python
|
{
"resource": ""
}
|
q19154
|
CherryPicker.set_paused_state
|
train
|
def set_paused_state(self):
    """Save paused progress state into Git config."""
    chosen = self.chosen_config_path
    # Persist the config path so a later --continue finds the same config.
    if chosen is not None:
        save_cfg_vals_to_git_cfg(config_path=chosen)
    set_state(WORKFLOW_STATES.BACKPORT_PAUSED)
|
python
|
{
"resource": ""
}
|
q19155
|
CherryPicker.upstream
|
train
|
def upstream(self):
    """Get the remote name to use for upstream branches
    Uses "upstream" if it exists, "origin" otherwise
    """
    try:
        subprocess.check_output(
            ["git", "remote", "get-url", "upstream"],
            stderr=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError:
        # No remote named "upstream" configured.
        return "origin"
    return "upstream"
|
python
|
{
"resource": ""
}
|
q19156
|
CherryPicker.checkout_default_branch
|
train
|
def checkout_default_branch(self):
    """ git checkout default branch """
    set_state(WORKFLOW_STATES.CHECKING_OUT_DEFAULT_BRANCH)
    self.run_cmd(("git", "checkout", self.config["default_branch"]))
    set_state(WORKFLOW_STATES.CHECKED_OUT_DEFAULT_BRANCH)
|
python
|
{
"resource": ""
}
|
q19157
|
CherryPicker.create_gh_pr
|
train
|
def create_gh_pr(self, base_branch, head_branch, *, commit_message, gh_auth):
    """
    Create PR in GitHub

    Args:
        base_branch: Target branch the PR merges into.
        head_branch: The backport branch on the user's fork.
        commit_message: Original commit message; first line becomes the
            PR title, the remainder the PR body.
        gh_auth: GitHub OAuth token used for the API request.
    """
    request_headers = sansio.create_headers(self.username, oauth_token=gh_auth)
    title, body = normalize_commit_message(commit_message)
    # NOTE(review): when prefix_commit is falsy the "[X.Y]" prefix is added
    # to the PR title here; presumably otherwise it was already added to
    # the commit message itself -- confirm against the commit path.
    if not self.prefix_commit:
        title = f"[{base_branch}] {title}"
    data = {
        "title": title,
        "body": body,
        "head": f"{self.username}:{head_branch}",
        "base": base_branch,
        "maintainer_can_modify": True,
    }
    url = CREATE_PR_URL_TEMPLATE.format(config=self.config)
    response = requests.post(url, headers=request_headers, json=data)
    if response.status_code == requests.codes.created:
        click.echo(f"Backport PR created at {response.json()['html_url']}")
    else:
        # Surface the failure details; no exception is raised here.
        click.echo(response.status_code)
        click.echo(response.text)
|
python
|
{
"resource": ""
}
|
q19158
|
CherryPicker.open_pr
|
train
|
def open_pr(self, url):
    """
    open url in the web browser
    """
    if self.dry_run:
        # Dry runs only announce what would happen.
        click.echo(f"  dry-run: Create new PR: {url}")
        return
    click.echo("Backport PR URL:")
    click.echo(url)
    webbrowser.open_new_tab(url)
|
python
|
{
"resource": ""
}
|
q19159
|
CherryPicker.cleanup_branch
|
train
|
def cleanup_branch(self, branch):
    """Remove the temporary backport branch.
    Switch to the default branch before that.
    """
    set_state(WORKFLOW_STATES.REMOVING_BACKPORT_BRANCH)
    self.checkout_default_branch()
    try:
        self.delete_branch(branch)
    except subprocess.CalledProcessError:
        click.echo(f"branch {branch} NOT deleted.")
        set_state(WORKFLOW_STATES.REMOVING_BACKPORT_BRANCH_FAILED)
        return
    click.echo(f"branch {branch} has been deleted.")
    set_state(WORKFLOW_STATES.REMOVED_BACKPORT_BRANCH)
|
python
|
{
"resource": ""
}
|
q19160
|
CherryPicker.get_state_and_verify
|
train
|
def get_state_and_verify(self):
    """Return the run progress state stored in the Git config.
    Raises ValueError if the retrieved state is not of a form that
    cherry_picker would have stored in the config.
    """
    try:
        state = get_state()
    except KeyError as ke:
        # get_state() failed because the stored name isn't a known state.
        # Build a tiny stand-in object exposing a ``name`` attribute so
        # the error message below can still render the unknown name.
        class state:
            name = str(ke.args[0])
    if state not in self.ALLOWED_STATES:
        raise ValueError(
            f"Run state cherry-picker.state={state.name} in Git config "
            "is not known.\nPerhaps it has been set by a newer "
            "version of cherry-picker. Try upgrading.\n"
            "Valid states are: "
            f'{", ".join(s.name for s in self.ALLOWED_STATES)}. '
            "If this looks suspicious, raise an issue at "
            "https://github.com/python/core-workflow/issues/new.\n"
            "As the last resort you can reset the runtime state "
            "stored in Git config using the following command: "
            "`git config --local --remove-section cherry-picker`"
        )
    return state
|
python
|
{
"resource": ""
}
|
q19161
|
openDatFile
|
train
|
def openDatFile(datpath):
    '''
    Open a file-like object using a pkg relative path.

    Example:
        fd = openDatFile('foopkg.barpkg/wootwoot.bin')

    Args:
        datpath (str): Dotted package name and filename separated by ``/``.

    Returns:
        A binary file object, or implicitly None when the package module
        is not backed by a regular file on disk.
    '''
    pkgname, filename = datpath.split('/', 1)
    pkgmod = s_dyndeps.getDynMod(pkgname)
    # are we a regular file?
    pkgfile = os.path.abspath(pkgmod.__file__)
    if os.path.isfile(pkgfile):
        dirname = os.path.dirname(pkgfile)
        datname = os.path.join(dirname, filename)
        return open(datname, 'rb')
    # NOTE(review): falls through returning None for non-file modules
    # (e.g. zip imports) -- presumably callers handle that; confirm.
|
python
|
{
"resource": ""
}
|
q19162
|
scrape
|
train
|
def scrape(text, ptype=None):
    '''
    Scrape types from a blob of text and return node tuples.

    Args:
        text (str): Text to scrape.
        ptype (str): Optional ptype to scrape. If present, only scrape rules
            which match the provided type.

    Yields:
        (str, str): Tuples of type, valu strings.
    '''
    for form, rule, info in scrape_types:
        # Honour the optional type filter (truthiness check as before).
        if ptype and ptype != form:
            continue
        regx = regexes.get(form)
        for valu in regx.findall(text):
            yield form, valu
|
python
|
{
"resource": ""
}
|
q19163
|
en
|
train
|
def en(item):
    '''
    Use msgpack to serialize a compatible python object.
    Args:
        item (obj): The object to serialize
    Notes:
        String objects are encoded using utf8 encoding. In order to handle
        potentially malformed input, ``unicode_errors='surrogatepass'`` is set
        to allow encoding bad input strings.
    Returns:
        bytes: The serialized bytes in msgpack format.
    '''
    # Fall back to the one-shot API when no shared Packer is available.
    if pakr is None: # pragma: no cover
        return msgpack.packb(item, use_bin_type=True, unicode_errors='surrogatepass')
    try:
        return pakr.pack(item)
    except Exception:
        # A failed pack can leave the shared Packer holding partial state;
        # reset it before re-raising so later calls are not corrupted.
        pakr.reset()
        raise
|
python
|
{
"resource": ""
}
|
q19164
|
un
|
train
|
def un(byts):
    '''
    Use msgpack to de-serialize a python object.
    Args:
        byts (bytes): The bytes to de-serialize
    Notes:
        String objects are decoded using utf8 encoding. In order to handle
        potentially malformed input, ``unicode_errors='surrogatepass'`` is set
        to allow decoding bad input strings.
        Msgpack arrays are returned as tuples (``use_list=False``).
    Returns:
        obj: The de-serialized object
    '''
    # This uses a subset of unpacker_kwargs
    return msgpack.loads(byts, use_list=False, raw=False, unicode_errors='surrogatepass')
|
python
|
{
"resource": ""
}
|
q19165
|
iterfd
|
train
|
def iterfd(fd):
    '''
    Generator which unpacks a file object of msgpacked content.

    Args:
        fd: File object to consume data from.

    Notes:
        String objects are decoded using utf8 encoding. In order to handle
        potentially malformed input, ``unicode_errors='surrogatepass'`` is set
        (via ``unpacker_kwargs``) to allow decoding bad input strings.

    Yields:
        Objects from a msgpack stream.
    '''
    yield from msgpack.Unpacker(fd, **unpacker_kwargs)
|
python
|
{
"resource": ""
}
|
q19166
|
iterfile
|
train
|
def iterfile(path, since=-1):
    '''
    Generator which yields msgpack objects from a file path.

    Args:
        path: File path to open and consume data from.
        since (int): Zero-based index of the last item to skip; only items
            after it are yielded.

    Notes:
        String objects are decoded using utf8 encoding. In order to handle
        potentially malformed input, ``unicode_errors='surrogatepass'`` is set
        (via ``unpacker_kwargs``) to allow decoding bad input strings.

    Yields:
        Objects from a msgpack stream.
    '''
    with io.open(path, 'rb') as fd:
        unpk = msgpack.Unpacker(fd, **unpacker_kwargs)
        for indx, mesg in enumerate(unpk):
            if indx > since:
                yield mesg
|
python
|
{
"resource": ""
}
|
q19167
|
dumpfile
|
train
|
def dumpfile(item, path):
    '''
    Dump an object to a file by path.

    Args:
        item (object): The object to serialize.
        path (str): The file path to save.

    Returns:
        None
    '''
    byts = en(item)
    with io.open(path, 'wb') as fd:
        fd.write(byts)
|
python
|
{
"resource": ""
}
|
q19168
|
Unpk.feed
|
train
|
def feed(self, byts):
    '''
    Feed bytes to the unpacker and return completed objects.
    Args:
        byts (bytes): Bytes to unpack.
    Notes:
        It is intended that this function is called multiple times with
        bytes from some sort of a stream, as it will unpack and return
        objects as they are available.
    Returns:
        list: List of tuples containing the item size and the unpacked item.
    '''
    self.unpk.feed(byts)

    items = []
    while True:
        try:
            obj = self.unpk.unpack()
        except msgpack.exceptions.OutOfData:
            # No complete object remains in the buffer.
            return items

        tell = self.unpk.tell()
        # The item size is the delta of the stream position.
        items.append((tell - self.size, obj))
        self.size = tell
|
python
|
{
"resource": ""
}
|
q19169
|
SynModule._onCoreModuleLoad
|
train
|
def _onCoreModuleLoad(self, event):
    '''
    Clear the cached model rows and rebuild them only if they have been loaded already.
    '''
    if self._modelRuntsByBuid:
        # Discard previously cached data.  It will be computed upon the
        # next lift that needs it.
        self._modelRuntsByBuid = {}
        self._modelRuntsByPropValu = collections.defaultdict(list)
|
python
|
{
"resource": ""
}
|
q19170
|
Log.encodeMsg
|
train
|
def encodeMsg(self, mesg):
    '''Get byts for a message'''
    fmt = self.locs.get('log:fmt')

    if fmt == 'jsonl':
        # One sorted-key JSON document per line.
        return (json.dumps(mesg, sort_keys=True) + '\n').encode()

    if fmt == 'mpk':
        return s_msgpack.en(mesg)

    mesg = f'Unknown encoding format: {fmt}'
    raise s_exc.SynErr(mesg=mesg)
|
python
|
{
"resource": ""
}
|
q19171
|
imeicsum
|
train
|
def imeicsum(text):
    '''
    Calculate the imei check byte.

    Args:
        text (str): The first 14 digits of an IMEI.

    Returns:
        str: The single Luhn check digit as a string.
    '''
    digs = []
    for i in range(14):
        v = int(text[i])
        if i % 2:
            # Every second digit is doubled per the Luhn algorithm.
            v *= 2
        # A doubled value may be two digits; accumulate each digit separately.
        digs.extend(int(x) for x in str(v))

    remd = sum(digs) % 10
    chek = 0 if remd == 0 else 10 - remd
    return str(chek)
|
python
|
{
"resource": ""
}
|
q19172
|
executor
|
train
|
async def executor(func, *args, **kwargs):
    '''
    Execute a function in an executor thread.

    Args:
        func: The callable to execute.
        *args: Positional arguments for the callable.
        **kwargs: Keyword arguments for the callable.

    Returns:
        The return value of the callable.
    '''
    loop = asyncio.get_running_loop()

    def _call():
        # Bind args/kwargs into a zero-arg callable for run_in_executor.
        return func(*args, **kwargs)

    return await loop.run_in_executor(None, _call)
|
python
|
{
"resource": ""
}
|
q19173
|
varget
|
train
|
def varget(name, defval=None, task=None):
    '''
    Access a task local variable by name

    Precondition:
        If task is None, this must be called from task context
    '''
    taskdict = _taskdict(task)

    # NoValu sentinel distinguishes "missing" from a stored None.
    valu = taskdict.get(name, s_common.NoValu)
    if valu is not s_common.NoValu:
        return valu

    ctor = _TaskDictCtors.get(name)
    if ctor is None:
        return defval

    # Lazily construct and cache the default for this name.
    valu = ctor()
    taskdict[name] = valu
    return valu
|
python
|
{
"resource": ""
}
|
q19174
|
getPhoneInfo
|
train
|
def getPhoneInfo(numb):
    '''
    Walk the phone info tree to find the best-match info for the given number.
    Example:
        info = getPhoneInfo(17035551212)
        country = info.get('cc')
    '''
    info = {}
    node = phonetree

    # Descend the tree one digit at a time, remembering the deepest
    # node that carried a populated info dict.
    for char in str(numb):
        node = node[2].get(char)
        if node is None:
            break
        if node[1]:
            info = node[1]

    return info
|
python
|
{
"resource": ""
}
|
q19175
|
Hive.dict
|
train
|
async def dict(self, full):
    '''
    Open a HiveDict at the given full path.

    Args:
        full (tuple): The full hive path for the node.

    Returns:
        HiveDict: A HiveDict bound to the node at the given path.
    '''
    # Ensure the node exists (creating it if needed) before wrapping it.
    node = await self.open(full)
    return await HiveDict.anit(self, node)
|
python
|
{
"resource": ""
}
|
q19176
|
Hive.add
|
train
|
async def add(self, full, valu):
    '''
    Atomically increments a node's value.
    '''
    node = await self.open(full)

    oldv = node.valu
    newv = oldv + valu

    # Persist the incremented value, then notify listeners.
    node.valu = await self.storNodeValu(full, newv)
    await node.fire('hive:set', path=full, valu=valu, oldv=oldv)

    return newv
|
python
|
{
"resource": ""
}
|
q19177
|
Hive.pop
|
train
|
async def pop(self, full):
    '''
    Remove and return the value for the given node.

    Returns None if no node exists at the given path.
    '''
    node = self.nodes.get(full)
    if node is not None:
        return await self._popHiveNode(node)
|
python
|
{
"resource": ""
}
|
q19178
|
CryoTank.puts
|
train
|
async def puts(self, items, seqn=None):
    '''
    Add the structured data from items to the CryoTank.
    Args:
        items (list): A list of objects to store in the CryoTank.
        seqn (iden, offs): An iden / offset pair to record.
    Returns:
        int: The ending offset of the items or seqn.
    '''
    size = 0
    # Persist in fixed-size batches so a huge items list does not
    # monopolize the event loop between awaits.
    for chunk in s_common.chunks(items, 1000):
        metrics = self._items.save(chunk)
        self._metrics.add(metrics)
        await self.fire('cryotank:puts', numrecords=len(chunk))
        size += len(chunk)
        # Explicitly yield control to the event loop between batches.
        await asyncio.sleep(0)

    if seqn is not None:
        iden, offs = seqn
        # Advance the consumer offset by the number of items written.
        self.setOffset(iden, offs + size)

    return size
|
python
|
{
"resource": ""
}
|
q19179
|
CryoTank.metrics
|
train
|
async def metrics(self, offs, size=None):
    '''
    Yield metrics rows starting at offset.
    Args:
        offs (int): The index offset.
        size (int): The maximum number of records to yield.
    Yields:
        ((int, dict)): An index offset, info tuple for metrics.
    '''
    count = 0
    for indx, item in self._metrics.iter(offs):
        # A size of None means "no limit".
        if size is not None and count >= size:
            return
        count += 1
        yield indx, item
|
python
|
{
"resource": ""
}
|
q19180
|
CryoTank.slice
|
train
|
async def slice(self, offs, size=None, iden=None):
    '''
    Yield a number of items from the CryoTank starting at a given offset.
    Args:
        offs (int): The index of the desired datum (starts at 0)
        size (int): The max number of items to yield.
        iden (str): Optional consumer iden whose offset is recorded.
    Yields:
        ((index, object)): Index and item values.
    '''
    if iden is not None:
        # Track this consumer's read position.
        self.setOffset(iden, offs)

    count = 0
    for indx, item in self._items.iter(offs):
        if size is not None and count >= size:
            return
        count += 1
        yield indx, item
|
python
|
{
"resource": ""
}
|
q19181
|
CryoTank.rows
|
train
|
async def rows(self, offs, size=None, iden=None):
    '''
    Yield a number of raw items from the CryoTank starting at a given offset.
    Args:
        offs (int): The index of the desired datum (starts at 0)
        size (int): The max number of items to yield.
        iden (str): Optional consumer iden whose offset is recorded.
    Yields:
        ((indx, bytes)): Index and msgpacked bytes.
    '''
    if iden is not None:
        # Track this consumer's read position.
        self.setOffset(iden, offs)

    count = 0
    for indx, byts in self._items.rows(offs):
        if size is not None and count >= size:
            return
        count += 1
        yield indx, byts
|
python
|
{
"resource": ""
}
|
q19182
|
CryoTank.info
|
train
|
async def info(self):
    '''
    Returns information about the CryoTank instance.
    Returns:
        dict: A dict containing items and metrics indexes.
    '''
    stat = self._items.stat()
    indx = self._items.index()
    metrics = self._metrics.index()
    return {'indx': indx, 'metrics': metrics, 'stat': stat}
|
python
|
{
"resource": ""
}
|
q19183
|
CryoCell.init
|
train
|
async def init(self, name, conf=None):
    '''
    Generate a new CryoTank with a given name or get an reference to an existing CryoTank.
    Args:
        name (str): Name of the CryoTank.
        conf (dict): Optional configuration passed to a newly created tank.
    Returns:
        CryoTank: A CryoTank instance.
    '''
    # Return the cached tank if one already exists for this name.
    tank = self.tanks.get(name)
    if tank is not None:
        return tank

    iden = s_common.guid()
    logger.info('Creating new tank: %s', name)

    # Tanks are stored on disk under a guid rather than the user name.
    path = s_common.genpath(self.dirn, 'tanks', iden)

    tank = await CryoTank.anit(path, conf)

    # Record the name -> (iden, conf) mapping so the tank can be reopened.
    node = await self.names.open((name,))
    await node.set((iden, conf))

    self.tanks.put(name, tank)
    return tank
|
python
|
{
"resource": ""
}
|
q19184
|
hashitem
|
train
|
def hashitem(item):
    '''
    Generate a uniq hash for the JSON compatible primitive data structure.

    Args:
        item: A JSON compatible primitive data structure.

    Returns:
        str: A hex digest string.
    '''
    # Normalize first so logically-equal structures serialize identically.
    norm = normitem(item)
    byts = s_msgpack.en(norm)
    # NOTE: md5 is used here for deduplication/identity only, not security.
    return hashlib.md5(byts).hexdigest()
|
python
|
{
"resource": ""
}
|
q19185
|
getVolInfo
|
train
|
def getVolInfo(*paths):
    '''
    Retrieve volume usage info for the given path.

    Args:
        *paths: Path segments to join into the target path.

    Returns:
        dict: Keys free, used and total, all in bytes.
    '''
    path = os.path.expanduser(os.path.join(*paths))

    vfs = os.statvfs(path)

    # f_bavail counts blocks available to unprivileged users.
    total = vfs.f_blocks * vfs.f_frsize
    free = vfs.f_bavail * vfs.f_frsize

    return {
        'free': free,
        'used': total - free,
        'total': total,
    }
|
python
|
{
"resource": ""
}
|
q19186
|
parse_cmd_string
|
train
|
def parse_cmd_string(text, off, trim=True):
    '''
    Parse in a command line string which may be quoted.

    Args:
        text (str): The text being parsed.
        off (int): The offset to begin parsing at.
        trim (bool): If True, skip leading whitespace first.

    Returns:
        The parsed value and new offset (presumably a (valu, off) tuple,
        matching parse_string/parse_list -- confirm against those helpers).
    '''
    if trim:
        _, off = nom(text, off, whites)
    # Quoted strings and parenthesized lists get dedicated parsers;
    # anything else is consumed as a bare whitespace-delimited token.
    if isquote(text, off):
        return parse_string(text, off, trim=trim)
    if nextchar(text, off, '('):
        return parse_list(text, off)
    return meh(text, off, whites)
|
python
|
{
"resource": ""
}
|
q19187
|
parse_valu
|
train
|
def parse_valu(text, off=0):
    '''
    Special syntax for the right side of equals in a macro

    Args:
        text (str): The text being parsed.
        off (int): The offset to begin parsing at.

    Returns:
        (valu, off): The parsed value and the new offset.
    '''
    _, off = nom(text, off, whites)

    # Lists and quoted strings get dedicated parsers.
    if nextchar(text, off, '('):
        return parse_list(text, off)

    if isquote(text, off):
        return parse_string(text, off)

    # since it's not quoted, we can assume we are bound by both
    # white space and storm syntax chars ( ) , =
    valu, off = meh(text, off, valmeh)

    # for now, give it a shot as an int... maybe eventually
    # we'll be able to disable this completely, but for now
    # lets maintain backward compatibility...
    try:
        # NOTE: this is ugly, but faster than parsing the string
        # (base 0 allows 0x / 0o / 0b prefixed literals too)
        valu = int(valu, 0)
    except ValueError:
        pass

    return valu, off
|
python
|
{
"resource": ""
}
|
q19188
|
Parser.editunivset
|
train
|
def editunivset(self):
    '''
    Parse a universal property assignment edit, e.g.:

        .foo = bar

    Returns:
        s_ast.EditPropSet: An AST node with the universal property and
        value as children.
    '''
    self.ignore(whitespace)
    # Universal properties are introduced by a leading dot.
    if not self.nextstr('.'):
        self._raiseSyntaxExpects('.')
    univ = self.univprop()
    self.ignore(whitespace)
    self.nextmust('=')
    self.ignore(whitespace)
    valu = self.valu()
    return s_ast.EditPropSet(kids=(univ, valu))
|
python
|
{
"resource": ""
}
|
q19189
|
parse
|
train
|
def parse(text, base=None, chop=False):
    '''
    Parse a time string into an epoch millis value.

    Args:
        text (str): A date/time string; all non-digit characters are ignored.
        base: Unused placeholder for relative time offsets.
        chop (bool): If True, truncate the digit string to 17 characters.

    Returns:
        int: Epoch time in milliseconds.

    Raises:
        s_exc.BadTypeValu: If the digit string length matches no known format.
    '''
    #TODO: use base to facilitate relative time offsets
    text = text.strip().lower()
    text = ''.join(c for c in text if c.isdigit())

    if chop:
        text = text[:17]

    # Map the digit-string length to a strptime format.
    fmts = {
        4: '%Y',
        6: '%Y%m',
        8: '%Y%m%d',
        10: '%Y%m%d%H',
        12: '%Y%m%d%H%M',
        14: '%Y%m%d%H%M%S',
        15: '%Y%m%d%H%M%S%f',
        16: '%Y%m%d%H%M%S%f',
        17: '%Y%m%d%H%M%S%f',
    }

    fmt = fmts.get(len(text))
    if fmt is None:
        raise s_exc.BadTypeValu(valu=text, name='time',
                                mesg='Unknown time format')

    dt = datetime.datetime.strptime(text, fmt)
    epoch = datetime.datetime(1970, 1, 1)
    return int((dt - epoch).total_seconds() * 1000)
|
python
|
{
"resource": ""
}
|
q19190
|
repr
|
train
|
def repr(tick, pack=False):
    '''
    Return a date string for an epoch-millis timestamp.
    Args:
        tick (int): The timestamp in milliseconds since the epoch.
        pack (bool): If True, return the packed digits-only form.
    Returns:
        (str): A date time string
    '''
    # The max int64 value is a sentinel meaning "unknown / unbounded".
    if tick == 0x7fffffffffffffff:
        return '?'

    dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=tick)
    millis = dt.microsecond // 1000

    if pack:
        return f'{dt.year}{dt.month:02d}{dt.day:02d}{dt.hour:02d}{dt.minute:02d}{dt.second:02d}{millis:03d}'

    return f'{dt.year}/{dt.month:02d}/{dt.day:02d} {dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}.{millis:03d}'
|
python
|
{
"resource": ""
}
|
q19191
|
delta
|
train
|
def delta(text):
    '''
    Parse a simple time delta string and return the delta.

    Args:
        text (str): A string such as "+2 days" or "- 30 min".

    Returns:
        int: The delta in the base unit scale defined by timeunits
        (presumably milliseconds -- confirm against the timeunits table).

    Raises:
        s_exc.BadTypeValu: If the unit text is not in timeunits.
    '''
    text = text.strip().lower()

    _, offs = _noms(text, 0, ' \t\r\n')

    # Optional leading sign; default is positive.
    sign = '+'
    if text and text[0] in ('+', '-'):
        sign = text[0]
        offs += 1

    _, offs = _noms(text, offs, ' \t\r\n')

    # Consume the numeric magnitude, then the unit text.
    sizetext, offs = _noms(text, offs, '0123456789')

    _, offs = _noms(text, offs, ' \t\r\n')

    unittext = text[offs:]

    size = int(sizetext, 0)

    if sign == '-':
        size = -size

    base = timeunits.get(unittext)
    if base is None:
        mesg = f'unknown time delta units: {unittext}'
        raise s_exc.BadTypeValu(name='time', valu=text, mesg=mesg)

    return size * base
|
python
|
{
"resource": ""
}
|
q19192
|
TinFoilHat.enc
|
train
|
def enc(self, byts, asscd=None):
    '''
    Encrypt the given bytes and return an envelope dict in msgpack form.
    Args:
        byts (bytes): The message to be encrypted.
        asscd (bytes): Extra data that needs to be authenticated (but not encrypted).
    Returns:
        bytes: The encrypted message. This is a msgpacked dictionary
        containing the IV, ciphertext, and associated data.
    '''
    # A fresh random 16-byte nonce per message; AES-GCM nonces must
    # never be reused with the same key.
    iv = os.urandom(16)
    encryptor = AESGCM(self.ekey)
    # AES-GCM output includes the authentication tag.
    byts = encryptor.encrypt(iv, byts, asscd)
    envl = {'iv': iv, 'data': byts, 'asscd': asscd}
    return s_msgpack.en(envl)
|
python
|
{
"resource": ""
}
|
q19193
|
TinFoilHat.dec
|
train
|
def dec(self, byts):
    '''
    Decode an envelope dict and decrypt the given bytes.
    Args:
        byts (bytes): Bytes to decrypt.
    Returns:
        bytes: Decrypted message, or None if decryption/authentication fails.
    '''
    envl = s_msgpack.un(byts)
    # Missing fields fall back to empty bytes; decryption will then fail
    # authentication rather than raising a KeyError.
    iv = envl.get('iv', b'')
    asscd = envl.get('asscd', b'')
    data = envl.get('data', b'')

    decryptor = AESGCM(self.ekey)

    try:
        data = decryptor.decrypt(iv, data, asscd)
    except Exception:
        # Best-effort decrypt: log and signal failure with None so the
        # caller decides how to handle a bad message.
        logger.exception('Error decrypting data')
        return None

    return data
|
python
|
{
"resource": ""
}
|
q19194
|
CryptSeq.encrypt
|
train
|
def encrypt(self, mesg):
    '''
    Wrap a message with a sequence number and encrypt it.
    Args:
        mesg: The mesg to encrypt.
    Returns:
        bytes: The encrypted message.
    '''
    # Each message carries a monotonically increasing sequence number
    # so the receiver can detect replay or reordering.
    seqn = next(self._tx_sn)
    rv = self._tx_tinh.enc(s_msgpack.en((seqn, mesg)))
    return rv
|
python
|
{
"resource": ""
}
|
q19195
|
CryptSeq.decrypt
|
train
|
def decrypt(self, ciphertext):
    '''
    Decrypt a message, validating its sequence number is as we expect.
    Args:
        ciphertext (bytes): The message to decrypt and verify.
    Returns:
        mesg: A mesg.
    Raises:
        s_exc.CryptoErr: If the message decryption fails or the sequence number was unexpected.
    '''
    plaintext = self._rx_tinh.dec(ciphertext)
    if plaintext is None:
        logger.error('Message decryption failure')
        raise s_exc.CryptoErr(mesg='Message decryption failure')

    # Advance our expected sequence number even before comparison; a
    # mismatch is fatal for the session either way.
    seqn = next(self._rx_sn)

    sn, mesg = s_msgpack.un(plaintext)
    if sn != seqn:
        # Out-of-order or replayed message.
        logger.error('Message out of sequence: got %d expected %d', sn, seqn)
        raise s_exc.CryptoErr(mesg='Message out of sequence', expected=seqn, got=sn)

    return mesg
|
python
|
{
"resource": ""
}
|
q19196
|
parseSemver
|
train
|
def parseSemver(text):
    '''
    Parse a Semantic Version string into is component parts.
    Args:
        text (str): A text string to parse into semver components. This string has whitespace and leading 'v'
        characters stripped off of it.
    Examples:
        Parse a string into it semvar parts::
            parts = parseSemver('v1.2.3')
    Returns:
        dict: The dictionary will contain the keys 'major', 'minor' and 'patch' pointing to integer values.
        The dictionary may also contain keys for 'build' and 'pre' information if that data is parsed out
        of a semver string. None is returned if the string is not a valid Semver string.
    '''
    # eat whitespace and leading chars common on version strings
    txt = text.strip().lstrip('vV')
    ret = {}
    m = semver_re.match(txt)
    if not m:
        return None
    d = m.groupdict()
    ret['major'] = int(d.get('maj'))
    ret['minor'] = int(d.get('min'))
    ret['patch'] = int(d.get('pat'))
    pre = d.get('pre')
    bld = d.get('bld')
    if pre:
        # Validate pre
        # Semver pre-release identifiers are dot-separated; each must be
        # non-empty, and numeric identifiers must not have leading zeros.
        parts = pre.split('.')
        for part in parts:
            if not part:
                return None
            try:
                int(part)
            except ValueError:
                # Non-numeric identifiers are allowed as-is.
                continue
            else:
                if part[0] == '0' and len(part) > 1:
                    return None
        ret['pre'] = pre
    if bld:
        # Validate bld
        # Build metadata identifiers only need to be non-empty.
        parts = bld.split('.')
        for part in parts:
            if not part:
                return None
        ret['build'] = bld
    return ret
|
python
|
{
"resource": ""
}
|
q19197
|
unpackVersion
|
train
|
def unpackVersion(ver):
    '''
    Unpack a system normalized integer representing a software version into its component parts.

    Args:
        ver (int): System normalized integer value to unpack into a tuple.

    Returns:
        (int, int, int): A tuple containing the major, minor and patch values shifted out of the integer.
    '''
    # The packed layout is major|minor|patch, 20 bits each from high to low.
    return tuple((ver >> shift) & mask20 for shift in (40, 20, 0))
|
python
|
{
"resource": ""
}
|
q19198
|
fmtVersion
|
train
|
def fmtVersion(*vsnparts):
    '''
    Join a string of parts together with a . separator.

    Args:
        *vsnparts: Version components to join.

    Returns:
        str: The dot-joined, lowercased version string.

    Raises:
        s_exc.BadTypeValu: If no version parts are provided.
    '''
    if not vsnparts:
        raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion',
                                mesg='Not enough version parts to form a version string with.',)

    return '.'.join(str(part).lower() for part in vsnparts)
|
python
|
{
"resource": ""
}
|
q19199
|
SlabDict.set
|
train
|
def set(self, name, valu):
    '''
    Set a name in the SlabDict.

    Args:
        name (str): The key name.
        valu (obj): A msgpack compatible value.

    Returns:
        None
    '''
    lkey = self.pref + name.encode('utf8')
    # Persist to the slab first, then mirror into the in-memory cache.
    self.slab.put(lkey, s_msgpack.en(valu), db=self.db)
    self.info[name] = valu
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.