_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def _create_equivalence_transform(equiv):
    """Compute an equivalence transformation that transforms this compound
    to another compound's coordinate system.

    Parameters
    ----------
    equiv : np.ndarray, shape=(n, 3), dtype=float
        Array of equivalent points.

    Returns
    -------
    T : CoordinateTransform
        Transform that maps this point cloud to the other point cloud's
        coordinates system.

    Raises
    ------
    ValueError
        If an entry of ``equiv`` is not a 2-tuple of Compounds.
    """
    from mbuild.compound import Compound
    # Accumulate positions in plain lists and stack once at the end:
    # calling np.vstack inside the loop (the previous implementation)
    # reallocates the whole array each iteration and is quadratic.
    self_points = []
    other_points = []
    for pair in equiv:
        if not isinstance(pair, tuple) or len(pair) != 2:
            raise ValueError('Equivalence pair not a 2-tuple')
        if not (isinstance(pair[0], Compound) and isinstance(pair[1], Compound)):
            raise ValueError('Equivalence pair type mismatch: pair[0] is a {0} '
                             'and pair[1] is a {1}'.format(type(pair[0]),
                                                           type(pair[1])))
        if not pair[0].children:
            # Leaf compounds contribute their own position.
            self_points.append(pair[0].pos)
            other_points.append(pair[1].pos)
        else:
            # Container compounds contribute every particle (ports included).
            for atom0 in pair[0]._particles(include_ports=True):
                self_points.append(atom0.pos)
            for atom1 in pair[1]._particles(include_ports=True):
                other_points.append(atom1.pos)
    # reshape(-1, 3) also yields the correct (0, 3) shape for empty input.
    self_points = np.array(self_points).reshape(-1, 3)
    other_points = np.array(other_points).reshape(-1, 3)
    T = RigidTransform(self_points, other_points)
    return T
def _choose_correct_port(from_port, to_port):
    """Chooses the direction when using an equivalence transform on two Ports.

    Each Port object actually contains 2 sets of 4 atoms, either of which can be
    used to make a connection with an equivalence transform. This function
    chooses the set of 4 atoms that makes the anchor atoms not overlap which is
    the intended behavior for most use-cases.

    TODO: -Increase robustness for cases where the anchors are a different
           distance from their respective ports.
          -Provide options in `force_overlap` to override this behavior.

    Parameters
    ----------
    from_port : mb.Port
    to_port : mb.Port

    Returns
    -------
    equivalence_pairs : tuple of Ports, shape=(2,)
        Technically, a tuple of the Ports' sub-Compounds ('up' or 'down')
        that are used to make the correct connection between components.
    """
    # First we try matching the two 'up' ports.
    T1 = _create_equivalence_transform([(from_port['up'], to_port['up'])])
    new_position = T1.apply_to(np.array(from_port.anchor.pos, ndmin=2))
    dist_between_anchors_up_up = norm(new_position[0] - to_port.anchor.pos)
    # Then matching a 'down' with an 'up' port.
    T2 = _create_equivalence_transform([(from_port['down'], to_port['up'])])
    new_position = T2.apply_to(np.array(from_port.anchor.pos, ndmin=2))
    # Determine which transform places the anchors further away from each other.
    dist_between_anchors_down_up = norm(new_position[0] - to_port.anchor.pos)
    difference_between_distances = dist_between_anchors_down_up - dist_between_anchors_up_up
    # Keep whichever orientation separates the anchors more (avoids overlap).
    if difference_between_distances > 0:
        correct_port = from_port['down']
        T = T2
    else:
        correct_port = from_port['up']
        T = T1
    return [(correct_port, to_port['up'])], T
def translate(compound, pos):
    """Translate a compound by a vector.

    Parameters
    ----------
    compound : mb.Compound
        The compound being translated.
    pos : np.ndarray, shape=(3,), dtype=float
        The vector to translate the compound by.
    """
    # Apply the translation to every particle and port position at once.
    compound.xyz_with_ports = Translation(pos).apply_to(compound.xyz_with_ports)
def translate_to(compound, pos):
    """Translate a compound to a coordinate.

    Parameters
    ----------
    compound : mb.Compound
        The compound being translated.
    pos : np.ndarray, shape=(3,), dtype=float
        The coordinate to translate the compound to.
    """
    coords = compound.xyz_with_ports
    # Re-center on the origin first so that ``pos`` becomes the new center.
    coords -= compound.center
    compound.xyz_with_ports = Translation(pos).apply_to(coords)
def _translate_to(coordinates, to):
    """Translate a set of coordinates to a location.

    Parameters
    ----------
    coordinates : np.ndarray, shape=(n,3), dtype=float
        The coordinates being translated.
    to : np.ndarray, shape=(3,), dtype=float
        The new average position of the coordinates.
    """
    # NOTE(review): the in-place subtraction mutates the caller's array
    # before the translated copy is returned — confirm callers expect this.
    coordinates -= np.mean(coordinates, axis=0)
    return Translation(to).apply_to(coordinates)
def _rotate(coordinates, theta, around):
    """Rotate a set of coordinates around an arbitrary vector.

    Parameters
    ----------
    coordinates : np.ndarray, shape=(n,3), dtype=float
        The coordinates being rotated.
    theta : float
        The angle by which to rotate the coordinates, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The vector about which to rotate the coordinates.

    Raises
    ------
    ValueError
        If ``around`` is the zero vector (no defined rotation axis).
    """
    around = np.asarray(around).reshape(3)
    if np.array_equal(around, np.zeros(3)):
        raise ValueError('Cannot rotate around a zero vector')
    return Rotation(theta, around).apply_to(coordinates)
def rotate(compound, theta, around):
    """Rotate a compound around an arbitrary vector.

    Parameters
    ----------
    compound : mb.Compound
        The compound being rotated.
    theta : float
        The angle by which to rotate the compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The vector about which to rotate the compound.

    Raises
    ------
    ValueError
        If ``around`` is the zero vector.
    """
    axis = np.asarray(around).reshape(3)
    # A zero axis defines no rotation at all; reject it explicitly.
    if np.array_equal(axis, np.zeros(3)):
        raise ValueError('Cannot rotate around a zero vector')
    compound.xyz_with_ports = Rotation(theta, axis).apply_to(
        compound.xyz_with_ports)
def spin(compound, theta, around):
    """Rotate a compound in place around an arbitrary vector.

    Parameters
    ----------
    compound : mb.Compound
        The compound being rotated.
    theta : float
        The angle by which to rotate the compound, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The axis about which to spin the compound.

    Raises
    ------
    ValueError
        If ``around`` is the zero vector.
    """
    axis = np.asarray(around).reshape(3)
    if np.array_equal(axis, np.zeros(3)):
        raise ValueError('Cannot spin around a zero vector')
    # Rotate about the compound's own center: move it to the origin,
    # rotate, then move it back.
    origin = compound.center
    translate(compound, -origin)
    rotate(compound, theta, axis)
    translate(compound, origin)
def _spin(coordinates, theta, around):
    """Rotate a set of coordinates in place around an arbitrary vector.

    Parameters
    ----------
    coordinates : np.ndarray, shape=(n,3), dtype=float
        The coordinates being spun.
    theta : float
        The angle by which to spin the coordinates, in radians.
    around : np.ndarray, shape=(3,), dtype=float
        The axis about which to spin the coordinates.

    Raises
    ------
    ValueError
        If ``around`` is the zero vector.
    """
    around = np.asarray(around).reshape(3)
    if np.array_equal(around, np.zeros(3)):
        raise ValueError('Cannot spin around a zero vector')
    # Rotate about the centroid: shift to the origin, rotate, shift back.
    center_pos = np.mean(coordinates, axis=0)
    coordinates -= center_pos
    coordinates = _rotate(coordinates, theta, around)
    coordinates += center_pos
    return coordinates
def x_axis_transform(compound, new_origin=None,
                     point_on_x_axis=None,
                     point_on_xy_plane=None):
    """Move a compound such that the x-axis lies on specified points.

    Parameters
    ----------
    compound : mb.Compound
        The compound to move.
    new_origin : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 0.0]
        Where to place the new origin of the coordinate system.
    point_on_x_axis : mb.Compound or list-like of size 3, optional, default=[1.0, 0.0, 0.0]
        A point on the new x-axis.
    point_on_xy_plane : mb.Compound, or list-like of size 3, optional, default=[1.0, 1.0, 0.0]
        A point on the new xy-plane.

    Raises
    ------
    TypeError
        If any of the three reference points is not an mb.Compound,
        list-like of length 3, or None.
    """
    import mbuild as mb
    # Each of the three reference points may be given as a Compound (its
    # position is used), an explicit coordinate, or None (default value).
    if new_origin is None:
        new_origin = np.array([0, 0, 0])
    elif isinstance(new_origin, mb.Compound):
        new_origin = new_origin.pos
    elif isinstance(new_origin, (tuple, list,np.ndarray)):
        new_origin = np.asarray(new_origin)
    else:
        raise TypeError('x_axis_transform, y_axis_transform, and z_axis_transform only accept'
                        ' mb.Compounds, list-like of length 3 or None for the new_origin'
                        ' parameter. User passed type: {}.'.format(type(new_origin)))
    if point_on_x_axis is None:
        point_on_x_axis = np.array([1.0, 0.0, 0.0])
    elif isinstance(point_on_x_axis, mb.Compound):
        point_on_x_axis = point_on_x_axis.pos
    elif isinstance(point_on_x_axis, (list, tuple, np.ndarray)):
        point_on_x_axis = np.asarray(point_on_x_axis)
    else:
        raise TypeError('x_axis_transform, y_axis_transform, and z_axis_transform only accept'
                        ' mb.Compounds, list-like of size 3, or None for the point_on_x_axis'
                        ' parameter. User passed type: {}.'.format(type(point_on_x_axis)))
    if point_on_xy_plane is None:
        point_on_xy_plane = np.array([1.0, 1.0, 0.0])
    elif isinstance(point_on_xy_plane, mb.Compound):
        point_on_xy_plane = point_on_xy_plane.pos
    elif isinstance(point_on_xy_plane, (list, tuple, np.ndarray)):
        point_on_xy_plane = np.asarray(point_on_xy_plane)
    else:
        raise TypeError('x_axis_transform, y_axis_transform, and z_axis_transform only accept'
                        ' mb.Compounds, list-like of size 3, or None for the point_on_xy_plane'
                        ' parameter. User passed type: {}.'.format(type(point_on_xy_plane)))
    # Build the axis transform from the validated points and apply it to
    # every particle and port position.
    atom_positions = compound.xyz_with_ports
    transform = AxisTransform(new_origin=new_origin,
                              point_on_x_axis=point_on_x_axis,
                              point_on_xy_plane=point_on_xy_plane)
    atom_positions = transform.apply_to(atom_positions)
    compound.xyz_with_ports = atom_positions
def y_axis_transform(compound, new_origin=None,
                     point_on_y_axis=None,
                     point_on_xy_plane=None):
    """Move a compound such that the y-axis lies on specified points.

    Implemented by aligning the requested points with the x-axis and then
    rotating the compound a quarter turn about z.

    Parameters
    ----------
    compound : mb.Compound
        The compound to move.
    new_origin : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 0.0]
        Where to place the new origin of the coordinate system.
    point_on_y_axis : mb.Compound or list-like of size 3, optional, default=[0.0, 1.0, 0.0]
        A point on the new y-axis.
    point_on_xy_plane : mb.Compound or list-like of size 3, optional, default=[0.0, 1.0, 0.0]
        A point on the new xy-plane.
    """
    x_axis_transform(compound, new_origin=new_origin,
                     point_on_x_axis=point_on_y_axis,
                     point_on_xy_plane=point_on_xy_plane)
    rotate_around_z(compound, np.pi / 2)
def z_axis_transform(compound, new_origin=None,
                     point_on_z_axis=None,
                     point_on_zx_plane=None):
    """Move a compound such that the z-axis lies on specified points.

    Implemented by aligning the requested points with the x-axis and then
    rotating the compound three quarter turns about y.

    Parameters
    ----------
    compound : mb.Compound
        The compound to move.
    new_origin : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 0.0]
        Where to place the new origin of the coordinate system.
    point_on_z_axis : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 1.0]
        A point on the new z-axis.
    point_on_zx_plane : mb.Compound or list-like of size 3, optional, default=[0.0, 0.0, 1.0]
        A point on the new xz-plane.
    """
    x_axis_transform(compound, new_origin=new_origin,
                     point_on_x_axis=point_on_z_axis,
                     point_on_xy_plane=point_on_zx_plane)
    rotate_around_y(compound, np.pi * 3 / 2)
def apply_to(self, A):
    """Apply the coordinate transformation to points in A. """
    points = A
    # Promote a single point to a (1, 3) array so the math below is uniform.
    if points.ndim == 1:
        points = np.expand_dims(points, axis=0)
    n_rows, n_cols = points.shape
    # Append a homogeneous coordinate, apply the 4x4 transform, then drop it.
    homogeneous = np.hstack([points, np.ones((n_rows, 1))])
    transformed = self.T.dot(homogeneous.T).T
    return transformed[:, :n_cols]
def _add_tile(self, new_tile, ijk):
    """Add a tile with a label indicating its tiling position.

    Parameters
    ----------
    new_tile : mb.Compound
        The replicated tile to add to this compound.
    ijk : iterable
        Tiling indices of the tile; joined with '-' to form the label.
    """
    # Label format: "<name>_i-j-k", e.g. "lattice_0-1-2".
    tile_label = "{0}_{1}".format(self.name, '-'.join(str(d) for d in ijk))
    self.add(new_tile, label=tile_label, inherit_periodicity=False)
def _find_particle_image(self, query, match, all_particles):
    """Find particle with the same index as match in a neighboring tile.

    Parameters
    ----------
    query : mb.Compound
        Particle whose spatial neighborhood is searched.
    match : mb.Compound
        Particle whose image (same ``index``) is being sought.
    all_particles : np.ndarray
        All particles, indexable by kd-tree result indices.

    Raises
    ------
    MBuildError
        If none of the nearest neighbors carries a matching index.
    """
    # Restrict the search to the 10 spatially nearest particles.
    _, idxs = self.particle_kdtree.query(query.pos, k=10)
    neighbors = all_particles[idxs]
    for particle in neighbors:
        if particle.index == match.index:
            return particle
    raise MBuildError('Unable to find matching particle image while'
                      ' stitching bonds.')
def RB_to_OPLS(c0, c1, c2, c3, c4, c5):
    """Converts Ryckaert-Bellemans type dihedrals to OPLS type.

    Parameters
    ----------
    c0, c1, c2, c3, c4, c5 : Ryckaert-Belleman coefficients (in kcal/mol)

    Returns
    -------
    opls_coeffs : np.array, shape=(4,)
        Array containing the OPLS dihedrals coeffs f1, f2, f3, and f4
        (in kcal/mol)
    """
    # Standard inversion of the RB <-> OPLS Fourier relations; c2 and c5
    # do not appear in the OPLS coefficients.
    return np.array([
        -2.0 * c1 - 1.5 * c3,   # f1
        c0 + c1 + c3,           # f2
        -0.5 * c3,              # f3
        -0.25 * c4,             # f4
    ])
def write_hoomdxml(structure, filename, ref_distance=1.0, ref_mass=1.0,
                   ref_energy=1.0, rigid_bodies=None, shift_coords=True,
                   auto_scale=False):
    """Output a HOOMD XML file.

    Parameters
    ----------
    structure : parmed.Structure
        ParmEd structure object
    filename : str
        Path of the output file.
    ref_distance : float, optional, default=1.0, units=nanometers
        Reference distance for conversion to reduced units
    ref_mass : float, optional, default=1.0, units=amu
        Reference mass for conversion to reduced units
    ref_energy : float, optional, default=1.0, units=kJ/mol
        Reference energy for conversion to reduced units
    rigid_bodies : list
        List of rigid body information. An integer value is required
        for each particle corresponding to the number of the rigid body with
        which the particle should be included. A value of None indicates the
        particle is not part of any rigid body.
    shift_coords : bool, optional, default=True
        Shift coordinates from (0, L) to (-L/2, L/2) if necessary.
    auto_scale : bool, optional, default=False
        Automatically use largest sigma value as ref_distance, largest mass value
        as ref_mass and largest epsilon value as ref_energy.

    Returns
    -------
    ReferenceValues : namedtuple
        Values used in scaling

    Example
    -------
    ref_values = ethane.save(filename='ethane-opls.hoomdxml', forcefield_name='oplsaa', auto_scale=True)
    print(ref_values.mass, ref_values.distance, ref_values.energy)

    Notes
    -----
    The following elements are always written:
    * **position** : particle positions
    * **type** : particle types
    * **mass** : particle masses (default 1.0)
    * **charge** : particle charges
    The following elements may be written if applicable:
    * **pair_coeffs** : Pair coefficients for each particle type (assumes a 12-6 LJ pair style). The following information is written for each particle type:
        * type : particle type
        * epsilon : LJ epsilon
        * sigma : LJ sigma
    * **bond_coeffs** : Coefficients for each bond type (assumes a harmonic bond style). The following information is written for each bond type:
        * type : bond type
        * k : force constant (units of energy/distance^2)
        * r0 : bond rest length (units of distance)
    * **bond** : system bonds
    * **angle_coeffs** : Coefficients for each angle type (assumes a harmonic angle style). The following information is written for each angle type:
        * type : angle type
        * k : force constant (units of energy/radians^2)
        * theta : rest angle (units of radians)
    * **angle** : system angles
    * **dihedral_coeffs** : Coefficients for each dihedral type (assumes an OPLS dihedral style). The following information is written for each dihedral type:
        * type : dihedral type
        * k1, k2, k3, k4 : force coefficients (units of energy)
    * **dihedral** : system dihedrals
    * **body** : ID of the rigid body to which each particle belongs
    """
    # Convert to ParmEd's internal units (Angstrom, kcal/mol) up front.
    ref_distance *= 10  # Parmed unit hack
    ref_energy /= 4.184  # Parmed unit hack
    # An empty type string on the first atom is taken to mean the structure
    # was never atom-typed, i.e. no force field was applied.
    forcefield = True
    if structure[0].type == '':
        forcefield = False
    if auto_scale and forcefield:
        # Derive reference values from the largest mass / epsilon / sigma
        # present in the system so all reduced quantities are <= 1.
        ref_mass = max([atom.mass for atom in structure.atoms])
        pair_coeffs = list(set((atom.type,
                                atom.epsilon,
                                atom.sigma) for atom in structure.atoms))
        ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
        ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
    xyz = np.array([[atom.xx, atom.xy, atom.xz] for atom in structure.atoms])
    if shift_coords:
        xyz = coord_shift(xyz, structure.box[:3])
    with open(filename, 'w') as xml_file:
        xml_file.write('<?xml version="1.2" encoding="UTF-8"?>\n')
        xml_file.write('<hoomd_xml version="1.2">\n')
        # Record the reference values as a comment so the file is
        # self-describing when read back.
        xml_file.write('<!-- ref_distance (nm) ref_mass (amu) ref_energy (kJ/mol) -->\n')
        xml_file.write('<!-- {} {} {} -->\n'.format(ref_distance, ref_mass, ref_energy))
        xml_file.write('<configuration time_step="0">\n')
        # Delegate each XML section to a dedicated helper.
        _write_box_information(xml_file, structure, ref_distance)
        _write_particle_information(xml_file, structure, xyz, forcefield,
                                    ref_distance, ref_mass, ref_energy)
        _write_bond_information(xml_file, structure, ref_distance, ref_energy)
        _write_angle_information(xml_file, structure, ref_energy)
        _write_dihedral_information(xml_file, structure, ref_energy)
        _write_rigid_information(xml_file, rigid_bodies)
        xml_file.write('</configuration>\n')
        xml_file.write('</hoomd_xml>')
    ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
    return ReferenceValues(ref_distance, ref_mass, ref_energy)
def _write_dihedral_information(xml_file, structure, ref_energy):
    """Write dihedrals in the system.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    structure : parmed.Structure
        Parmed structure object
    ref_energy : float, default=1.0
        Reference energy for conversion to reduced units
    """
    unique_dihedral_types = set()
    xml_file.write('<dihedral>\n')
    for dihedral in structure.rb_torsions:
        t1, t2 = dihedral.atom1.type, dihedral.atom2.type,
        t3, t4 = dihedral.atom3.type, dihedral.atom4.type
        # Canonicalize the type name by ordering on the two central atoms so
        # that a-b-c-d and d-c-b-a map to the same dihedral type.
        if [t2, t3] == sorted([t2, t3]):
            types_in_dihedral = '-'.join((t1, t2, t3, t4))
        else:
            types_in_dihedral = '-'.join((t4, t3, t2, t1))
        dihedral_type = (types_in_dihedral, dihedral.type.c0,
                         dihedral.type.c1, dihedral.type.c2, dihedral.type.c3, dihedral.type.c4,
                         dihedral.type.c5, dihedral.type.scee, dihedral.type.scnb)
        unique_dihedral_types.add(dihedral_type)
        xml_file.write('{} {} {} {} {}\n'.format(
            dihedral_type[0], dihedral.atom1.idx, dihedral.atom2.idx,
            dihedral.atom3.idx, dihedral.atom4.idx))
    xml_file.write('</dihedral>\n')
    xml_file.write('<dihedral_coeffs>\n')
    xml_file.write('<!-- type k1 k2 k3 k4 -->\n')
    # Convert each unique Ryckaert-Bellemans type to OPLS form and reduce by
    # the reference energy before writing the coefficient table.
    for dihedral_type, c0, c1, c2, c3, c4, c5, scee, scnb in unique_dihedral_types:
        opls_coeffs = RB_to_OPLS(c0, c1, c2, c3, c4, c5)
        opls_coeffs /= ref_energy
        xml_file.write('{} {:.5f} {:.5f} {:.5f} {:.5f}\n'.format(
            dihedral_type, *opls_coeffs))
    xml_file.write('</dihedral_coeffs>\n')
def _write_rigid_information(xml_file, rigid_bodies):
    """Write rigid body information.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    rigid_bodies : list or None, len=n_particles
        The rigid body that each particle belongs to (-1 for none)
    """
    # ``rigid_bodies`` defaults to None in write_hoomdxml; the original
    # code iterated it unconditionally and raised TypeError in that case.
    # Treat None the same as "no particle belongs to a rigid body".
    if rigid_bodies is None:
        return
    if not all(body is None for body in rigid_bodies):
        xml_file.write('<body>\n')
        for body in rigid_bodies:
            # A per-particle value of None means "not rigid" -> -1 in HOOMD.
            if body is None:
                body = -1
            xml_file.write('{}\n'.format(int(body)))
        xml_file.write('</body>\n')
def _write_box_information(xml_file, structure, ref_distance):
    """Write box information.

    Parameters
    ----------
    xml_file : file object
        The file object of the hoomdxml file being written
    structure : parmed.Structure
        Parmed structure object
    ref_distance : float, default=1.0
        Reference distance for conversion to reduced units
    """
    # Orthorhombic box: all angles are 90 degrees, only edge lengths needed.
    if np.allclose(structure.box[3:6], np.array([90, 90, 90])):
        box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}"/>\n'
        xml_file.write(box_str.format(*structure.box[:3] / ref_distance))
    else:
        # Triclinic box: convert (a, b, c, alpha, beta, gamma) to HOOMD's
        # (Lx, Ly, Lz, xy, xz, yz) tilt-factor representation.
        a, b, c = structure.box[0:3] / ref_distance
        alpha, beta, gamma = np.radians(structure.box[3:6])
        lx = a
        xy = b * np.cos(gamma)
        xz = c * np.cos(beta)
        ly = np.sqrt(b**2 - xy**2)
        yz = (b*c*np.cos(alpha) - xy*xz) / ly
        lz = np.sqrt(c**2 - xz**2 - yz**2)
        box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}" xy="{}" xz="{}" yz="{}"/>\n'
        xml_file.write(box_str.format(lx, ly, lz, xy, xz, yz))
def access_labels(self):
    """List of labels used to access the Port

    Returns
    -------
    list of str
        Strings that can be used to access this Port relative to self.root
    """
    access_labels = []
    for referrer in self.referrers:
        # Labels under which the referrer is known to the root compound.
        referrer_labels = [key for key, val in self.root.labels.items()
                           if val == referrer]
        # Labels under which this port is known to the referrer.
        port_labels = [key for key, val in referrer.labels.items()
                       if val == self]
        if referrer is self.root:
            # Port is labeled directly on the root: single-bracket access.
            for label in port_labels:
                access_labels.append("['{}']".format(label))
        # Chained access: root -> referrer label -> port label.
        for label in itertools.product(referrer_labels, port_labels):
            access_labels.append("['{}']".format("']['".join(label)))
    return access_labels
def signed_area(coords):
    """Return the signed area enclosed by a ring using the linear time
    algorithm. A value >= 0 indicates a counter-clockwise oriented ring.
    """
    # The ring is closed (first point repeated at the end), so the point
    # following the last vertex is coords[1].
    y_wrap = coords[1][1]
    n = len(coords)
    total = 0.0
    # Shoelace formula: sum x_i * (y_{i+1} - y_{i-1}) over the ring.
    for i in range(1, n):
        y_next = coords[i + 1][1] if i + 1 < n else y_wrap
        total += coords[i][0] * (y_next - coords[i - 1][1])
    return total / 2.0
def load(self, shapefile=None):
    """Opens a shapefile from a filename or file-like
    object. Normally this method would be called by the
    constructor with the file name as an argument."""
    if shapefile:
        (shapeName, ext) = os.path.splitext(shapefile)
        self.shapeName = shapeName
        # Try all three member files; the loaders silently skip missing ones.
        self.load_shp(shapeName)
        self.load_shx(shapeName)
        self.load_dbf(shapeName)
        # A usable shapefile must provide at least geometry (.shp) or
        # attributes (.dbf).
        if not (self.shp or self.dbf):
            raise ShapefileException("Unable to open %s.dbf or %s.shp." % (shapeName, shapeName))
    # Parse headers for whichever member files were successfully opened.
    if self.shp:
        self.__shpHeader()
    if self.dbf:
        self.__dbfHeader()
def load_shp(self, shapefile_name):
    """
    Attempts to load file with .shp extension as both lower and upper case
    """
    # Try the lowercase extension first, then the uppercase variant;
    # leave self.shp untouched if neither file can be opened.
    for extension in ('shp', 'SHP'):
        try:
            self.shp = open("%s.%s" % (shapefile_name, extension), "rb")
        except IOError:
            continue
        else:
            break
def load_shx(self, shapefile_name):
    """
    Attempts to load file with .shx extension as both lower and upper case
    """
    # Try the lowercase extension first, then the uppercase variant;
    # leave self.shx untouched if neither file can be opened.
    for extension in ('shx', 'SHX'):
        try:
            self.shx = open("%s.%s" % (shapefile_name, extension), "rb")
        except IOError:
            continue
        else:
            break
def load_dbf(self, shapefile_name):
    """
    Attempts to load file with .dbf extension as both lower and upper case
    """
    # Try the lowercase extension first, then the uppercase variant;
    # leave self.dbf untouched if neither file can be opened.
    for extension in ('dbf', 'DBF'):
        try:
            self.dbf = open("%s.%s" % (shapefile_name, extension), "rb")
        except IOError:
            continue
        else:
            break
def __getFileObj(self, f):
    """Checks to see if the requested shapefile file object is
    available. If not a ShapefileException is raised."""
    if not f:
        raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
    # Lazily (re)load headers if they have not been parsed yet.
    if self.shp and self.shpLength is None:
        self.load()
    if self.dbf and len(self.fields) == 0:
        self.load()
    return f
def __restrictIndex(self, i):
    """Provides list-like handling of a record index with a clearer
    error message if the index is out of bounds."""
    if self.numRecords:
        rmax = self.numRecords - 1
        if abs(i) > rmax:
            raise IndexError("Shape or Record index out of range.")
        # Map a negative index to its positive equivalent, mirroring
        # Python list semantics.
        if i < 0: i = range(self.numRecords)[i]
    return i
def iterShapes(self):
    """Serves up shapes in a shapefile as an iterator. Useful
    for handling large shapefiles."""
    shp = self.__getFileObj(self.shp)
    # Seek to the end to determine total file length, then jump past the
    # fixed 100-byte shapefile header to the first record.
    shp.seek(0,2)
    self.shpLength = shp.tell()
    shp.seek(100)
    # Yield one shape at a time until the measured end of file is reached.
    while shp.tell() < self.shpLength:
        yield self.__shape()
def iterRecords(self):
    """Serves up records in a dbf file as an iterator.
    Useful for large shapefiles or dbf files."""
    # Lazily parse the dbf header the first time records are requested.
    if self.numRecords is None:
        self.__dbfHeader()
    f = self.__getFileObj(self.dbf)
    f.seek(self.__dbfHdrLength)
    for i in xrange(self.numRecords):
        r = self.__record()
        # Skip records that __record reports as falsy
        # (presumably deleted rows -- TODO confirm against __record).
        if r:
            yield r
def close(self):
    """
    Write final shp, shx, and dbf headers, close opened files.
    """
    # Check if any of the files have already been closed
    shp_open = self.shp and not (hasattr(self.shp, 'closed') and self.shp.closed)
    shx_open = self.shx and not (hasattr(self.shx, 'closed') and self.shx.closed)
    dbf_open = self.dbf and not (hasattr(self.dbf, 'closed') and self.dbf.closed)
    # Balance if already not balanced
    if self.shp and shp_open and self.dbf and dbf_open:
        if self.autoBalance:
            self.balance()
        if self.recNum != self.shpNum:
            raise ShapefileException("When saving both the dbf and shp file, "
                                     "the number of records (%s) must correspond "
                                     "with the number of shapes (%s)" % (self.recNum, self.shpNum))
    # Fill in the blank headers
    if self.shp and shp_open:
        self.__shapefileHeader(self.shp, headerType='shp')
    if self.shx and shx_open:
        self.__shapefileHeader(self.shx, headerType='shx')
    # Update the dbf header with final length etc
    if self.dbf and dbf_open:
        self.__dbfHeader()
    # Close files, if target is a filepath
    if self.target:
        for attribute in (self.shp, self.shx, self.dbf):
            if hasattr(attribute, 'close'):
                try:
                    attribute.close()
                except IOError:
                    # Best-effort close: ignore errors on already-broken handles.
                    pass
def __getFileObj(self, f):
    """Safety handler to verify file-like objects"""
    if not f:
        raise ShapefileException("No file-like object available.")
    elif hasattr(f, "write"):
        # Already an open, writable file-like object: use it as-is.
        return f
    else:
        # Otherwise treat f as a filesystem path, creating any missing
        # parent directories before opening for read/write.
        pth = os.path.split(f)[0]
        if pth and not os.path.exists(pth):
            os.makedirs(pth)
        return open(f, "wb+")
def balance(self):
    """Adds corresponding empty attributes or null geometry records depending
    on which type of record was created to make sure all three files
    are in synch."""
    # Pad whichever side is behind until the shape and record counts agree.
    while self.shpNum != self.recNum:
        if self.shpNum < self.recNum:
            self.null()
        else:
            self.record()
def point(self, x, y):
    """Creates a POINT shape."""
    # Build a one-point shape and register it with the writer.
    pt = Shape(POINT)
    pt.points.append([x, y])
    self.shape(pt)
def multipoint(self, points):
    """Creates a MULTIPOINT shape.
    Points is a list of xy values."""
    # Wrap the point set in a list so the generic shapeparts helper can
    # treat it as a single part.
    self._shapeparts(parts=[points], shapeType=MULTIPOINT)
def line(self, lines):
    """Creates a POLYLINE shape.
    Lines is a collection of lines, each made up of a list of xy values."""
    # Each line is one part of the polyline shape.
    self._shapeparts(parts=lines, shapeType=POLYLINE)
def poly(self, polys):
    """Creates a POLYGON shape.
    Polys is a collection of polygons, each made up of a list of xy values.
    Note that for ordinary polygons the coordinates must run in a clockwise direction.
    If some of the polygons are holes, these must run in a counterclockwise direction."""
    # Each polygon (ring) is one part of the polygon shape.
    self._shapeparts(parts=polys, shapeType=POLYGON)
q33437 | forecast | train | def forecast(stl, fc_func, steps=10, seasonal=False, **fc_func_kwargs):
"""Forecast the given decomposition ``stl`` forward by ``steps`` steps using the forecasting
function ``fc_func``, optionally including the calculated seasonality.
This is an additive model, Y[t] = T[t] + S[t] + e[t]
Args:
stl (a modified statsmodels.tsa.seasonal.DecomposeResult): STL decomposition of observed time
series created using the ``stldecompose.decompose()`` method.
fc_func (function): Function which takes an array of observations and returns a single
valued forecast for the next point.
steps (int, optional): Number of forward steps to include in the forecast
seasonal (bool, optional): Include seasonal component in forecast
fc_func_kwargs: keyword arguments
All remaining arguments are passed to the forecasting function ``fc_func``
Returns:
forecast_frame (pd.Dataframe): A ``pandas.Dataframe`` containing forecast values and a
DatetimeIndex matching the observed index.
"""
# container for forecast values
forecast_array = np.array([])
# forecast trend
# unpack precalculated trend array stl frame
trend_array = stl.trend
# iteratively forecast trend ("seasonally adjusted") component
# note: this loop can be slow
for step in range(steps):
# make this prediction on all available data
pred = fc_func(np.append(trend_array, forecast_array), **fc_func_kwargs)
# add this prediction to current array
forecast_array = np.append(forecast_array, pred)
col_name = fc_func.__name__
# forecast start and index are determined by observed data
observed_timedelta = stl.observed.index[-1] - stl.observed.index[-2]
forecast_idx_start = stl.observed.index[-1] + observed_timedelta
forecast_idx = pd.date_range(start=forecast_idx_start,
periods=steps,
freq=pd.tseries.frequencies.to_offset(observed_timedelta))
# (optionally) forecast seasonal & combine
if seasonal:
# track index and value of max correlation
seasonal_ix = 0
max_correlation = -np.inf
# loop over indexes=length of period avgs
detrended_array = np.asanyarray(stl.observed - stl.trend).squeeze()
for i, x in enumerate(stl.period_averages):
# work slices backward from end of detrended observations
if i == 0:
# slicing w/ [x:-0] doesn't work
detrended_slice = detrended_array[-len(stl.period_averages):]
else:
detrended_slice = detrended_array[-(len(stl.period_averages) + i):-i]
# calculate corr b/w period_avgs and detrend_slice
this_correlation = np.correlate(detrended_slice, stl.period_averages)[0]
if this_correlation > max_correlation:
# update ix and max correlation
max_correlation = this_correlation
seasonal_ix = i
# roll seasonal signal to matching phase
rolled_period_averages = np.roll(stl.period_averages, -seasonal_ix)
# tile as many time as needed to reach "steps", then truncate
tiled_averages = np.tile(rolled_period_averages,
(steps // len(stl.period_averages) + 1))[:steps]
# add seasonal values to previous forecast
forecast_array += tiled_averages
col_name += '+seasonal'
# combine data array with index into named dataframe
forecast_frame = pd.DataFrame(data=forecast_array, index=forecast_idx)
forecast_frame.columns = [col_name]
return forecast_frame | python | {
"resource": ""
} |
q33438 | mean | train | def mean(data, n=3, **kwargs):
"""The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series.
"""
# don't start averaging until we've seen n points
if len(data[-n:]) < n:
forecast = np.nan
else:
# nb: we'll keep the forecast as a float
forecast = np.mean(data[-n:])
return forecast | python | {
"resource": ""
} |
q33439 | drift | train | def drift(data, n=3, **kwargs):
"""The drift forecast for the next point is a linear extrapolation from the previous ``n``
points in the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate linear model for extrapolation
Returns:
float: a single-valued forecast for the next value in the series.
"""
yi = data[-n]
yf = data[-1]
slope = (yf - yi) / (n - 1)
forecast = yf + slope
return forecast | python | {
"resource": ""
} |
q33440 | Client.exchange_token | train | def exchange_token(self, code):
"""Given the value of the code parameter, request an access token."""
url = '%s%s/oauth2/token' % (self.scheme, self.host)
options = {
'grant_type': 'authorization_code',
'redirect_uri': self._redirect_uri(),
'client_id': self.options.get('client_id'),
'client_secret': self.options.get('client_secret'),
'code': code,
}
options.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
self.token = wrapped_resource(
make_request('post', url, options))
self.access_token = self.token.access_token
return self.token | python | {
"resource": ""
} |
q33441 | Client._authorization_code_flow | train | def _authorization_code_flow(self):
"""Build the the auth URL so the user can authorize the app."""
options = {
'scope': getattr(self, 'scope', 'non-expiring'),
'client_id': self.options.get('client_id'),
'response_type': 'code',
'redirect_uri': self._redirect_uri()
}
url = '%s%s/connect' % (self.scheme, self.host)
self._authorize_url = '%s?%s' % (url, urlencode(options)) | python | {
"resource": ""
} |
q33442 | Client._refresh_token_flow | train | def _refresh_token_flow(self):
"""Given a refresh token, obtain a new access token."""
url = '%s%s/oauth2/token' % (self.scheme, self.host)
options = {
'grant_type': 'refresh_token',
'client_id': self.options.get('client_id'),
'client_secret': self.options.get('client_secret'),
'refresh_token': self.options.get('refresh_token')
}
options.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
self.token = wrapped_resource(
make_request('post', url, options))
self.access_token = self.token.access_token | python | {
"resource": ""
} |
q33443 | Client._request | train | def _request(self, method, resource, **kwargs):
"""Given an HTTP method, a resource name and kwargs, construct a
request and return the response.
"""
url = self._resolve_resource_name(resource)
if hasattr(self, 'access_token'):
kwargs.update(dict(oauth_token=self.access_token))
if hasattr(self, 'client_id'):
kwargs.update(dict(client_id=self.client_id))
kwargs.update({
'verify_ssl': self.options.get('verify_ssl', True),
'proxies': self.options.get('proxies', None)
})
return wrapped_resource(make_request(method, url, kwargs)) | python | {
"resource": ""
} |
q33444 | extract_files_from_dict | train | def extract_files_from_dict(d):
"""Return any file objects from the provided dict.
>>> extract_files_from_dict({
... 'oauth_token': 'foo',
... 'track': {
... 'title': 'bar',
... 'asset_data': open('setup.py', 'rb')
... }}) # doctest:+ELLIPSIS
{'track': {'asset_data': <...}}
"""
files = {}
for key, value in six.iteritems(d):
if isinstance(value, dict):
files[key] = extract_files_from_dict(value)
elif is_file_like(value):
files[key] = value
return files | python | {
"resource": ""
} |
q33445 | remove_files_from_dict | train | def remove_files_from_dict(d):
"""Return the provided dict with any file objects removed.
>>> remove_files_from_dict({
... 'oauth_token': 'foo',
... 'track': {
... 'title': 'bar',
... 'asset_data': open('setup.py', 'rb')
... }
... }) == {'track': {'title': 'bar'}, 'oauth_token': 'foo'}
... # doctest:+ELLIPSIS
True
"""
file_free = {}
for key, value in six.iteritems(d):
if isinstance(value, dict):
file_free[key] = remove_files_from_dict(value)
elif not is_file_like(value):
if hasattr(value, '__iter__'):
file_free[key] = value
else:
if hasattr(value, 'encode'):
file_free[key] = value.encode('utf-8')
else:
file_free[key] = str(value)
return file_free | python | {
"resource": ""
} |
q33446 | namespaced_query_string | train | def namespaced_query_string(d, prefix=""):
"""Transform a nested dict into a string with namespaced query params.
>>> namespaced_query_string({
... 'oauth_token': 'foo',
... 'track': {'title': 'bar', 'sharing': 'private'}}) == {
... 'track[sharing]': 'private',
... 'oauth_token': 'foo',
... 'track[title]': 'bar'} # doctest:+ELLIPSIS
True
"""
qs = {}
prefixed = lambda k: prefix and "%s[%s]" % (prefix, k) or k
for key, value in six.iteritems(d):
if isinstance(value, dict):
qs.update(namespaced_query_string(value, prefix=key))
else:
qs[prefixed(key)] = value
return qs | python | {
"resource": ""
} |
q33447 | make_request | train | def make_request(method, url, params):
"""Make an HTTP request, formatting params as required."""
empty = []
# TODO
# del params[key]
# without list
for key, value in six.iteritems(params):
if value is None:
empty.append(key)
for key in empty:
del params[key]
# allow caller to disable automatic following of redirects
allow_redirects = params.get('allow_redirects', True)
kwargs = {
'allow_redirects': allow_redirects,
'headers': {
'User-Agent': soundcloud.USER_AGENT
}
}
# options, not params
if 'verify_ssl' in params:
if params['verify_ssl'] is False:
kwargs['verify'] = params['verify_ssl']
del params['verify_ssl']
if 'proxies' in params:
kwargs['proxies'] = params['proxies']
del params['proxies']
if 'allow_redirects' in params:
del params['allow_redirects']
params = hashconversions.to_params(params)
files = namespaced_query_string(extract_files_from_dict(params))
data = namespaced_query_string(remove_files_from_dict(params))
request_func = getattr(requests, method, None)
if request_func is None:
raise TypeError('Unknown method: %s' % (method,))
if method == 'get':
kwargs['headers']['Accept'] = 'application/json'
qs = urlencode(data)
if '?' in url:
url_qs = '%s&%s' % (url, qs)
else:
url_qs = '%s?%s' % (url, qs)
result = request_func(url_qs, **kwargs)
else:
kwargs['data'] = data
if files:
kwargs['files'] = files
result = request_func(url, **kwargs)
# if redirects are disabled, don't raise for 301 / 302
if result.status_code in (301, 302):
if allow_redirects:
result.raise_for_status()
else:
result.raise_for_status()
return result | python | {
"resource": ""
} |
q33448 | wrapped_resource | train | def wrapped_resource(response):
"""Return a response wrapped in the appropriate wrapper type.
Lists will be returned as a ```ResourceList``` instance,
dicts will be returned as a ```Resource``` instance.
"""
# decode response text, assuming utf-8 if unset
response_content = response.content.decode(response.encoding or 'utf-8')
try:
content = json.loads(response_content)
except ValueError:
# not JSON
content = response_content
if isinstance(content, list):
result = ResourceList(content)
else:
result = Resource(content)
if hasattr(result, 'collection'):
result.collection = ResourceList(result.collection)
result.raw_data = response_content
for attr in ('encoding', 'url', 'status_code', 'reason'):
setattr(result, attr, getattr(response, attr))
return result | python | {
"resource": ""
} |
q33449 | normalize_param | train | def normalize_param(key, value):
"""Convert a set of key, value parameters into a dictionary suitable for
passing into requests. This will convert lists into the syntax required
by SoundCloud. Heavily lifted from HTTParty.
>>> normalize_param('playlist', {
... 'title': 'foo',
... 'sharing': 'private',
... 'tracks': [
... {id: 1234}, {id: 4567}
... ]}) == {
... u'playlist[tracks][][<built-in function id>]': [1234, 4567],
... u'playlist[sharing]': 'private',
... u'playlist[title]': 'foo'} # doctest:+ELLIPSIS
True
>>> normalize_param('oauth_token', 'foo')
{'oauth_token': 'foo'}
>>> normalize_param('playlist[tracks]', [1234, 4567]) == {
... u'playlist[tracks][]': [1234, 4567]}
True
"""
params = {}
stack = []
if isinstance(value, list):
normalized = [normalize_param(u"{0[key]}[]".format(dict(key=key)), e) for e in value]
keys = [item for sublist in tuple(h.keys() for h in normalized) for item in sublist]
lists = {}
if len(keys) != len(set(keys)):
duplicates = [x for x, y in collections.Counter(keys).items() if y > 1]
for dup in duplicates:
lists[dup] = [h[dup] for h in normalized]
for h in normalized:
del h[dup]
params.update(dict((k, v) for d in normalized for (k, v) in d.items()))
params.update(lists)
elif isinstance(value, dict):
stack.append([key, value])
else:
params.update({key: value})
for (parent, hash) in stack:
for (key, value) in six.iteritems(hash):
if isinstance(value, dict):
stack.append([u"{0[parent]}[{0[key]}]".format(dict(parent=parent, key=key)), value])
else:
params.update(normalize_param(u"{0[parent]}[{0[key]}]".format(dict(parent=parent, key=key)), value))
return params | python | {
"resource": ""
} |
q33450 | SmartObject.open | train | def open(self, external_dir=None):
"""
Open the smart object as binary IO.
:param external_dir: Path to the directory of the external file.
Example::
with layer.smart_object.open() as f:
data = f.read()
"""
if self.kind == 'data':
with io.BytesIO(self._data.data) as f:
yield f
elif self.kind == 'external':
filepath = self._data.linked_file[b'fullPath'].value
filepath = filepath.replace('\x00', '').replace('file://', '')
if not os.path.exists(filepath):
filepath = self._data.linked_file[b'relPath'].value
filepath = filepath.replace('\x00', '')
if external_dir is not None:
filepath = os.path.join(external_dir, filepath)
if not os.path.exists(filepath):
raise FileNotFoundError(filepath)
with open(filepath, 'rb') as f:
yield f
else:
raise NotImplementedError('alias is not supported.') | python | {
"resource": ""
} |
q33451 | SmartObject.data | train | def data(self):
"""Embedded file content, or empty if kind is `external` or `alias`"""
if self.kind == 'data':
return self._data.data
else:
with self.open() as f:
return f.read() | python | {
"resource": ""
} |
q33452 | SmartObject.filesize | train | def filesize(self):
"""File size of the object."""
if self.kind == 'data':
return len(self._data.data)
return self._data.filesize | python | {
"resource": ""
} |
q33453 | SmartObject.save | train | def save(self, filename=None):
"""
Save the smart object to a file.
:param filename: File name to export. If None, use the embedded name.
"""
if filename is None:
filename = self.filename
with open(filename, 'wb') as f:
f.write(self.data) | python | {
"resource": ""
} |
q33454 | BaseElement._traverse | train | def _traverse(element, condition=None):
"""
Traversal API intended for debugging.
"""
if condition is None or condition(element):
yield element
if isinstance(element, DictElement):
for child in element.values():
for _ in BaseElement._traverse(child, condition):
yield _
elif isinstance(element, ListElement):
for child in element:
for _ in BaseElement._traverse(child, condition):
yield _
elif attr.has(element.__class__):
for field in attr.fields(element.__class__):
child = getattr(element, field.name)
for _ in BaseElement._traverse(child, condition):
yield _ | python | {
"resource": ""
} |
q33455 | read_length_and_key | train | def read_length_and_key(fp):
"""
Helper to read descriptor key.
"""
length = read_fmt('I', fp)[0]
key = fp.read(length or 4)
if length == 0 and key not in _TERMS:
logger.debug('Unknown term: %r' % (key))
_TERMS.add(key)
return key | python | {
"resource": ""
} |
q33456 | write_length_and_key | train | def write_length_and_key(fp, value):
"""
Helper to write descriptor key.
"""
written = write_fmt(fp, 'I', 0 if value in _TERMS else len(value))
written += write_bytes(fp, value)
return written | python | {
"resource": ""
} |
q33457 | main | train | def main(argv=None):
"""
psd-tools command line utility.
Usage:
psd-tools export <input_file> <output_file> [options]
psd-tools show <input_file> [options]
psd-tools debug <input_file> [options]
psd-tools -h | --help
psd-tools --version
Options:
-v --verbose Be more verbose.
Example:
psd-tools show example.psd # Show the file content
psd-tools export example.psd example.png # Export as PNG
psd-tools export example.psd[0] example-0.png # Export layer as PNG
"""
args = docopt.docopt(main.__doc__, version=__version__, argv=argv)
if args['--verbose']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if args['export']:
input_parts = args['<input_file>'].split('[')
input_file = input_parts[0]
if len(input_parts) > 1:
indices = [int(x.rstrip(']')) for x in input_parts[1:]]
else:
indices = []
layer = PSDImage.open(input_file)
for index in indices:
layer = layer[index]
if isinstance(layer, PSDImage) and layer.has_preview():
image = layer.topil()
else:
image = layer.compose()
image.save(args['<output_file>'])
elif args['show']:
psd = PSDImage.open(args['<input_file>'])
pprint(psd)
elif args['debug']:
psd = PSDImage.open(args['<input_file>'])
pprint(psd._record) | python | {
"resource": ""
} |
q33458 | PSDImage.new | train | def new(cls, mode, size, color=0, depth=8, **kwargs):
"""
Create a new PSD document.
:param mode: The color mode to use for the new image.
:param size: A tuple containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
:return: A :py:class:`~psd_tools.api.psd_image.PSDImage` object.
"""
header = cls._make_header(mode, size, depth)
image_data = ImageData.new(header, color=color, **kwargs)
# TODO: Add default metadata.
return cls(PSD(
header=header,
image_data=image_data,
image_resources=ImageResources.new(),
)) | python | {
"resource": ""
} |
q33459 | PSDImage.frompil | train | def frompil(cls, image, compression=Compression.PACK_BITS):
"""
Create a new PSD document from PIL Image.
:param image: PIL Image object.
:param compression: ImageData compression option. See
:py:class:`~psd_tools.constants.Compression`.
:return: A :py:class:`~psd_tools.api.psd_image.PSDImage` object.
"""
header = cls._make_header(image.mode, image.size)
# TODO: Add default metadata.
# TODO: Perhaps make this smart object.
image_data = ImageData(compression=compression)
image_data.set_data([channel.tobytes() for channel in image.split()],
header)
return cls(PSD(
header=header,
image_data=image_data,
image_resources=ImageResources.new(),
)) | python | {
"resource": ""
} |
q33460 | PSDImage.open | train | def open(cls, fp):
"""
Open a PSD document.
:param fp: filename or file-like object.
:return: A :py:class:`~psd_tools.api.psd_image.PSDImage` object.
"""
if hasattr(fp, 'read'):
self = cls(PSD.read(fp))
else:
with open(fp, 'rb') as f:
self = cls(PSD.read(f))
return self | python | {
"resource": ""
} |
q33461 | PSDImage.save | train | def save(self, fp, mode='wb'):
"""
Save the PSD file.
:param fp: filename or file-like object.
:param mode: file open mode, default 'wb'.
"""
if hasattr(fp, 'write'):
self._record.write(fp)
else:
with open(fp, mode) as f:
self._record.write(f) | python | {
"resource": ""
} |
q33462 | PSDImage.topil | train | def topil(self, **kwargs):
"""
Get PIL Image.
:return: :py:class:`PIL.Image`, or `None` if the composed image is not
available.
"""
if self.has_preview():
return pil_io.convert_image_data_to_pil(self._record, **kwargs)
return None | python | {
"resource": ""
} |
q33463 | PSDImage.compose | train | def compose(self, force=False, bbox=None, **kwargs):
"""
Compose the PSD image.
See :py:func:`~psd_tools.compose` for available extra arguments.
:param bbox: Viewport tuple (left, top, right, bottom).
:return: :py:class:`PIL.Image`, or `None` if there is no pixel.
"""
from psd_tools.api.composer import compose
image = None
if not force or len(self) == 0:
image = self.topil(**kwargs)
if image is None:
image = compose(
self, bbox=bbox or self.viewbox, force=force, **kwargs
)
return image | python | {
"resource": ""
} |
q33464 | PSDImage.bbox | train | def bbox(self):
"""
Minimal bounding box that contains all the visible layers.
Use :py:attr:`~psd_tools.api.psd_image.PSDImage.viewbox` to get
viewport bounding box. When the psd is empty, bbox is equal to the
canvas bounding box.
:return: (left, top, right, bottom) `tuple`.
"""
bbox = super(PSDImage, self).bbox
if bbox == (0, 0, 0, 0):
bbox = self.viewbox
return bbox | python | {
"resource": ""
} |
q33465 | PSDImage.viewbox | train | def viewbox(self):
"""
Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`.
"""
return self.left, self.top, self.right, self.bottom | python | {
"resource": ""
} |
q33466 | PSDImage.thumbnail | train | def thumbnail(self):
"""
Returns a thumbnail image in PIL.Image. When the file does not
contain an embedded thumbnail image, returns None.
"""
if 'THUMBNAIL_RESOURCE' in self.image_resources:
return pil_io.convert_thumbnail_to_pil(
self.image_resources.get_data('THUMBNAIL_RESOURCE')
)
elif 'THUMBNAIL_RESOURCE_PS4' in self.image_resources:
return pil_io.convert_thumbnail_to_pil(
self.image_resources.get_data('THUMBNAIL_RESOURCE_PS4'), 'BGR'
)
return None | python | {
"resource": ""
} |
q33467 | PSDImage._get_pattern | train | def _get_pattern(self, pattern_id):
"""Get pattern item by id."""
for key in ('PATTERNS1', 'PATTERNS2', 'PATTERNS3'):
if key in self.tagged_blocks:
data = self.tagged_blocks.get_data(key)
for pattern in data:
if pattern.pattern_id == pattern_id:
return pattern
return None | python | {
"resource": ""
} |
q33468 | PSDImage._init | train | def _init(self):
"""Initialize layer structure."""
group_stack = [self]
clip_stack = []
last_layer = None
for record, channels in self._record._iter_layers():
current_group = group_stack[-1]
blocks = record.tagged_blocks
end_of_group = False
divider = blocks.get_data('SECTION_DIVIDER_SETTING', None)
divider = blocks.get_data('NESTED_SECTION_DIVIDER_SETTING',
divider)
if divider is not None:
if divider.kind == SectionDivider.BOUNDING_SECTION_DIVIDER:
layer = Group(self, None, None, current_group)
group_stack.append(layer)
elif divider.kind in (SectionDivider.OPEN_FOLDER,
SectionDivider.CLOSED_FOLDER):
layer = group_stack.pop()
assert layer is not self
layer._record = record
layer._channels = channels
for key in (
'ARTBOARD_DATA1', 'ARTBOARD_DATA2', 'ARTBOARD_DATA3'
):
if key in blocks:
layer = Artboard._move(layer)
end_of_group = True
elif (
'TYPE_TOOL_OBJECT_SETTING' in blocks or
'TYPE_TOOL_INFO' in blocks
):
layer = TypeLayer(self, record, channels, current_group)
elif (
record.flags.pixel_data_irrelevant and (
'VECTOR_ORIGINATION_DATA' in blocks or
'VECTOR_MASK_SETTING1' in blocks or
'VECTOR_MASK_SETTING2' in blocks or
'VECTOR_STROKE_DATA' in blocks or
'VECTOR_STROKE_CONTENT_DATA' in blocks
)
):
layer = ShapeLayer(self, record, channels, current_group)
elif (
'SMART_OBJECT_LAYER_DATA1' in blocks or
'SMART_OBJECT_LAYER_DATA2' in blocks or
'PLACED_LAYER1' in blocks or
'PLACED_LAYER2' in blocks
):
layer = SmartObjectLayer(self, record, channels,
current_group)
else:
layer = None
for key in adjustments.TYPES.keys():
if key in blocks:
layer = adjustments.TYPES[key](
self, record, channels, current_group
)
break
# If nothing applies, this is a pixel layer.
if layer is None:
layer = PixelLayer(
self, record, channels, current_group
)
if record.clipping == Clipping.NON_BASE:
clip_stack.append(layer)
else:
if clip_stack:
last_layer._clip_layers = clip_stack
clip_stack = []
if not end_of_group:
current_group._layers.append(layer)
last_layer = layer
if clip_stack and last_layer:
last_layer._clip_layers = clip_stack | python | {
"resource": ""
} |
q33469 | _AngleMixin.angle | train | def angle(self):
"""Angle value."""
if self.use_global_light:
return self._image_resources.get_data('global_angle', 30.0)
return self.value.get(Key.LocalLightingAngle).value | python | {
"resource": ""
} |
q33470 | get_color_mode | train | def get_color_mode(mode):
"""Convert PIL mode to ColorMode."""
name = mode.upper()
name = name.rstrip('A') # Trim alpha.
name = {'1': 'BITMAP', 'L': 'GRAYSCALE'}.get(name, name)
return getattr(ColorMode, name) | python | {
"resource": ""
} |
q33471 | get_pil_mode | train | def get_pil_mode(value, alpha=False):
"""Get PIL mode from ColorMode."""
name = {
'GRAYSCALE': 'L',
'BITMAP': '1',
'DUOTONE': 'L',
'INDEXED': 'P',
}.get(value, value)
if alpha and name in ('L', 'RGB'):
name += 'A'
return name | python | {
"resource": ""
} |
q33472 | convert_image_data_to_pil | train | def convert_image_data_to_pil(psd, apply_icc=True, **kwargs):
"""Convert ImageData to PIL Image.
.. note:: Image resources contain extra alpha channels in these keys:
`ALPHA_NAMES_UNICODE`, `ALPHA_NAMES_PASCAL`, `ALPHA_IDENTIFIERS`.
"""
from PIL import Image, ImageOps
header = psd.header
size = (header.width, header.height)
channels = []
for channel_data in psd.image_data.get_data(header):
channels.append(_create_channel(size, channel_data, header.depth))
alpha = _get_alpha_use(psd)
mode = get_pil_mode(header.color_mode.name)
if mode == 'P':
image = Image.merge('L', channels[:get_pil_channels(mode)])
image.putpalette(psd.color_mode_data.interleave())
elif mode == 'MULTICHANNEL':
image = channels[0] # Multi-channel mode is a collection of alpha.
else:
image = Image.merge(mode, channels[:get_pil_channels(mode)])
if mode == 'CMYK':
image = image.point(lambda x: 255 - x)
if apply_icc and 'ICC_PROFILE' in psd.image_resources:
image = _apply_icc(image, psd.image_resources.get_data('ICC_PROFILE'))
if alpha and mode in ('L', 'RGB'):
image.putalpha(channels[-1])
return _remove_white_background(image) | python | {
"resource": ""
} |
q33473 | convert_layer_to_pil | train | def convert_layer_to_pil(layer, apply_icc=True, **kwargs):
"""Convert Layer to PIL Image."""
from PIL import Image
header = layer._psd._record.header
if header.color_mode == ColorMode.BITMAP:
raise NotImplementedError
width, height = layer.width, layer.height
channels, alpha = [], None
for ci, cd in zip(layer._record.channel_info, layer._channels):
if ci.id in (ChannelID.USER_LAYER_MASK,
ChannelID.REAL_USER_LAYER_MASK):
continue
channel = cd.get_data(width, height, header.depth, header.version)
channel_image = _create_channel(
(width, height), channel, header.depth
)
if ci.id == ChannelID.TRANSPARENCY_MASK:
alpha = channel_image
else:
channels.append(channel_image)
mode = get_pil_mode(header.color_mode.name)
channels = _check_channels(channels, header.color_mode)
image = Image.merge(mode, channels)
if mode == 'CMYK':
image = image.point(lambda x: 255 - x)
if alpha is not None:
if mode in ('RGB', 'L'):
image.putalpha(alpha)
else:
logger.debug('Alpha channel is not supported in %s' % (mode))
if apply_icc and 'ICC_PROFILE' in layer._psd.image_resources:
image = _apply_icc(
image, layer._psd.image_resources.get_data('ICC_PROFILE')
)
return image | python | {
"resource": ""
} |
q33474 | convert_mask_to_pil | train | def convert_mask_to_pil(mask, real=True):
"""Convert Mask to PIL Image."""
from PIL import Image
header = mask._layer._psd._record.header
channel_ids = [ci.id for ci in mask._layer._record.channel_info]
if real and mask._has_real():
width = mask._data.real_right - mask._data.real_left
height = mask._data.real_bottom - mask._data.real_top
channel = mask._layer._channels[
channel_ids.index(ChannelID.REAL_USER_LAYER_MASK)
]
else:
width = mask._data.right - mask._data.left
height = mask._data.bottom - mask._data.top
channel = mask._layer._channels[
channel_ids.index(ChannelID.USER_LAYER_MASK)
]
data = channel.get_data(width, height, header.depth, header.version)
return _create_channel((width, height), data, header.depth) | python | {
"resource": ""
} |
q33475 | convert_pattern_to_pil | train | def convert_pattern_to_pil(pattern, version=1):
"""Convert Pattern to PIL Image."""
from PIL import Image
mode = get_pil_mode(pattern.image_mode.name, False)
# The order is different here.
size = pattern.data.rectangle[3], pattern.data.rectangle[2]
channels = [
_create_channel(size, c.get_data(version), c.pixel_depth).convert('L')
for c in pattern.data.channels if c.is_written
]
if len(channels) == len(mode) + 1:
mode += 'A' # TODO: Perhaps doesn't work for some modes.
if mode == 'P':
image = channels[0]
image.putpalette([x for rgb in pattern.color_table for x in rgb])
else:
image = Image.merge(mode, channels)
if mode == 'CMYK':
image = image.point(lambda x: 255 - x)
return image | python | {
"resource": ""
} |
q33476 | convert_thumbnail_to_pil | train | def convert_thumbnail_to_pil(thumbnail, mode='RGB'):
"""Convert thumbnail resource."""
from PIL import Image
if thumbnail.fmt == 0:
size = (thumbnail.width, thumbnail.height)
stride = thumbnail.widthbytes
return Image.frombytes('RGBX', size, thumbnail.data, 'raw', mode,
stride)
elif thumbnail.fmt == 1:
return Image.open(io.BytesIO(thumbnail.data))
else:
raise ValueError('Unknown thumbnail format %d' % (thumbnail.fmt)) | python | {
"resource": ""
} |
q33477 | _apply_icc | train | def _apply_icc(image, icc_profile):
"""Apply ICC Color profile."""
from io import BytesIO
try:
from PIL import ImageCms
except ImportError:
logger.debug(
'ICC profile found but not supported. Install little-cms.'
)
return image
if image.mode not in ('RGB',):
logger.debug('%s ICC profile is not supported.' % image.mode)
return image
try:
in_profile = ImageCms.ImageCmsProfile(BytesIO(icc_profile))
out_profile = ImageCms.createProfile('sRGB')
return ImageCms.profileToProfile(image, in_profile, out_profile)
except ImageCms.PyCMSError as e:
logger.warning('PyCMSError: %s' % (e))
return image | python | {
"resource": ""
} |
q33478 | _remove_white_background | train | def _remove_white_background(image):
"""Remove white background in the preview image."""
from PIL import ImageMath, Image
if image.mode == "RGBA":
bands = image.split()
a = bands[3]
rgb = [
ImageMath.eval(
'convert('
'float(x + a - 255) * 255.0 / float(max(a, 1)) * '
'float(min(a, 1)) + float(x) * float(1 - min(a, 1))'
', "L")',
x=x, a=a
)
for x in bands[:3]
]
return Image.merge(bands=rgb + [a], mode="RGBA")
return image | python | {
"resource": ""
} |
q33479 | Mask.background_color | train | def background_color(self):
"""Background color."""
if self._has_real():
return self._data.real_background_color
return self._data.background_color | python | {
"resource": ""
} |
q33480 | Mask.left | train | def left(self):
"""Left coordinate."""
if self._has_real():
return self._data.real_left
return self._data.left | python | {
"resource": ""
} |
q33481 | Mask.right | train | def right(self):
"""Right coordinate."""
if self._has_real():
return self._data.real_right
return self._data.right | python | {
"resource": ""
} |
q33482 | Mask.top | train | def top(self):
"""Top coordinate."""
if self._has_real():
return self._data.real_top
return self._data.top | python | {
"resource": ""
} |
q33483 | Mask.bottom | train | def bottom(self):
"""Bottom coordinate."""
if self._has_real():
return self._data.real_bottom
return self._data.bottom | python | {
"resource": ""
} |
q33484 | compress | train | def compress(data, compression, width, height, depth, version=1):
"""Compress raw data.
:param data: raw data bytes to write.
:param compression: compression type, see :py:class:`.Compression`.
:param width: width.
:param height: height.
:param depth: bit depth of the pixel.
:param version: psd file version.
:return: compressed data bytes.
"""
if compression == Compression.RAW:
result = data
elif compression == Compression.PACK_BITS:
result = encode_packbits(data, width, height, depth, version)
elif compression == Compression.ZIP:
result = zlib.compress(data)
else:
encoded = encode_prediction(data, width, height, depth)
result = zlib.compress(encoded)
return result | python | {
"resource": ""
} |
q33485 | decompress | train | def decompress(data, compression, width, height, depth, version=1):
"""Decompress raw data.
:param data: compressed data bytes.
:param compression: compression type,
see :py:class:`~psd_tools.constants.Compression`.
:param width: width.
:param height: height.
:param depth: bit depth of the pixel.
:param version: psd file version.
:return: decompressed data bytes.
"""
length = width * height * depth // 8
result = None
if compression == Compression.RAW:
result = data[:length]
elif compression == Compression.PACK_BITS:
result = decode_packbits(data, height, version)
elif compression == Compression.ZIP:
result = zlib.decompress(data)
else:
decompressed = zlib.decompress(data)
result = decode_prediction(decompressed, width, height, depth)
assert len(result) == length, 'len=%d, expected=%d' % (
len(result), length
)
return result | python | {
"resource": ""
} |
q33486 | _shuffled_order | train | def _shuffled_order(w, h):
"""
Generator for the order of 4-byte values.
32bit channels are also encoded using delta encoding,
but it make no sense to apply delta compression to bytes.
It is possible to apply delta compression to 2-byte or 4-byte
words, but it seems it is not the best way either.
In PSD, each 4-byte item is split into 4 bytes and these
bytes are packed together: "123412341234" becomes "111222333444";
delta compression is applied to the packed data.
So we have to (a) decompress data from the delta compression
and (b) recombine data back to 4-byte values.
"""
rowsize = 4 * w
for row in range(0, rowsize * h, rowsize):
for offset in range(row, row + w):
for x in range(offset, offset + rowsize, w):
yield x | python | {
"resource": ""
} |
def compose_layer(layer, force=False, **kwargs):
    """Compose a single layer with pixels.

    Rendering steps, in this order: source pixels (or a generated fill),
    raster or vector mask, fill effects, clip layers, and layer opacity.

    :param layer: layer to compose; its ``bbox`` must be non-empty.
    :param force: when True, prefer the generated fill / vector-mask
        rendering over existing raster pixels.
    :param kwargs: forwarded to ``layer.topil``.
    :return: `PIL.Image`, or `None` when there is nothing to render.
    """
    from PIL import Image, ImageChops
    assert layer.bbox != (0, 0, 0, 0), 'Layer bbox is (0, 0, 0, 0)'
    image = layer.topil(**kwargs)
    # No raster pixels (or caller forced re-rendering): try a fill texture.
    if image is None or force:
        texture = create_fill(layer)
        if texture is not None:
            image = texture
    if image is None:
        return image
    # TODO: Group should have the following too.
    # Apply mask.
    if layer.has_mask() and not layer.mask.disabled:
        mask_bbox = layer.mask.bbox
        # Skip degenerate (zero-area) masks.
        if (
            (mask_bbox[2] - mask_bbox[0]) > 0 and
            (mask_bbox[3] - mask_bbox[1]) > 0
        ):
            # Paint the mask onto a full-size canvas pre-filled with the
            # mask's background color, then install it as the alpha channel.
            color = layer.mask.background_color
            offset = (mask_bbox[0] - layer.left, mask_bbox[1] - layer.top)
            mask = Image.new('L', image.size, color=color)
            mask.paste(layer.mask.topil(), offset)
            if image.mode.endswith('A'):
                # What should we do here? There are two alpha channels.
                pass
            image.putalpha(mask)
    elif layer.has_vector_mask() and (force or not layer.has_pixels()):
        mask = draw_vector_mask(layer)
        # TODO: Stroke drawing.
        # Paste the rendered texture onto a white canvas through the mask.
        texture = image
        image = Image.new(image.mode, image.size, 'white')
        image.paste(texture, mask=mask)
    # Apply layer fill effects.
    apply_effect(layer, image)
    # Clip layers.
    if layer.has_clip_layers():
        clip_box = extract_bbox(layer.clip_layers)
        inter_box = intersect(layer.bbox, clip_box)
        if inter_box != (0, 0, 0, 0):
            # Restrict the composed clip stack to this layer's alpha before
            # blending it on top.
            clip_image = compose(layer.clip_layers, bbox=layer.bbox)
            mask = image.getchannel('A')
            if clip_image.mode.endswith('A'):
                mask = ImageChops.multiply(clip_image.getchannel('A'), mask)
            clip_image.putalpha(mask)
            image = _blend(image, clip_image, (0, 0))
    # Apply opacity.
    if layer.opacity < 255:
        opacity = layer.opacity
        if image.mode.endswith('A'):
            # Scale the existing alpha channel instead of replacing it.
            opacity = opacity / 255.
            channels = list(image.split())
            channels[-1] = channels[-1].point(lambda x: int(x * opacity))
            image = Image.merge(image.mode, channels)
        else:
            image.putalpha(opacity)
    return image
"resource": ""
} |
def apply_effect(layer, image):
    """Apply effect to the image.

    ..note: Correct effect order is the following. All the effects are first
        applied to the original image then blended together.

        * dropshadow
        * outerglow
        * (original)
        * patternoverlay
        * gradientoverlay
        * coloroverlay
        * innershadow
        * innerglow
        * bevelemboss
        * satin
        * stroke
    """
    # Fill overlays are drawn in fixed passes (pattern, then gradient, then
    # color); each pass scans the effect list in its original order.
    passes = (
        ('PatternOverlay',
         lambda value: draw_pattern_fill(image, layer._psd, value)),
        ('GradientOverlay',
         lambda value: draw_gradient_fill(image, value)),
        ('ColorOverlay',
         lambda value: draw_solid_color_fill(image, value)),
    )
    for class_name, draw in passes:
        for effect in layer.effects:
            if effect.__class__.__name__ == class_name:
                draw(effect.value)
"resource": ""
} |
q33489 | _generate_symbol | train | def _generate_symbol(path, width, height, command='C'):
"""Sequence generator for SVG path."""
if len(path) == 0:
return
# Initial point.
yield 'M'
yield path[0].anchor[1] * width
yield path[0].anchor[0] * height
yield command
# Closed path or open path
points = (zip(path, path[1:] + path[0:1]) if path.is_closed()
else zip(path, path[1:]))
# Rest of the points.
for p1, p2 in points:
yield p1.leaving[1] * width
yield p1.leaving[0] * height
yield p2.preceding[1] * width
yield p2.preceding[0] * height
yield p2.anchor[1] * width
yield p2.anchor[0] * height
if path.is_closed():
yield 'Z' | python | {
"resource": ""
} |
def draw_pattern_fill(image, psd, setting, blend=True):
    """
    Draw pattern fill on the image.

    :param image: Image to be filled.
    :param psd: :py:class:`PSDImage`.
    :param setting: Descriptor containing pattern fill.
    :param blend: Blend the fill or ignore. Effects blend.
    """
    from PIL import Image
    pattern_id = setting[b'Ptrn'][b'Idnt'].value.rstrip('\x00')
    pattern = psd._get_pattern(pattern_id)
    if not pattern:
        logger.error('Pattern not found: %s' % (pattern_id))
        return None
    tile = convert_pattern_to_pil(pattern, psd._record.header.version)
    # Optional scaling of the pattern tile (percentage).
    scale = setting.get(b'Scl ', 100) / 100.
    if scale != 1.:
        tile = tile.resize(
            (int(tile.width * scale), int(tile.height * scale))
        )
    # Optional opacity baked into the tile's alpha (percentage).
    opacity = int(setting.get(b'Opct', 100) / 100. * 255)
    if opacity != 255:
        tile.putalpha(opacity)
    filled = Image.new(image.mode, image.size)
    if blend:
        mask = image.getchannel('A')
    else:
        mask = Image.new('L', image.size, 255)
    # Tile the pattern over the whole canvas, masked per tile.
    for left in range(0, filled.width, tile.width):
        for top in range(0, filled.height, tile.height):
            tile_mask = mask.crop(
                (left, top, left + tile.width, top + tile.height)
            )
            filled.paste(tile, (left, top), tile_mask)
    if blend:
        image.paste(_blend(image, filled, (0, 0)))
    else:
        image.paste(filled)
"resource": ""
} |
q33491 | _make_linear_gradient | train | def _make_linear_gradient(width, height, angle=90.):
"""Generates index map for linear gradients."""
import numpy as np
X, Y = np.meshgrid(np.linspace(0, 1, width), np.linspace(0, 1, height))
theta = np.radians(angle % 360)
c, s = np.cos(theta), np.sin(theta)
if 0 <= theta and theta < 0.5 * np.pi:
Z = np.abs(c * X + s * Y)
elif 0.5 * np.pi <= theta and theta < np.pi:
Z = np.abs(c * (X - width) + s * Y)
elif np.pi <= theta and theta < 1.5 * np.pi:
Z = np.abs(c * (X - width) + s * (Y - height))
elif 1.5 * np.pi <= theta and theta < 2.0 * np.pi:
Z = np.abs(c * X + s * (Y - height))
return (Z - Z.min()) / (Z.max() - Z.min()) | python | {
"resource": ""
} |
def line_cap_type(self):
    """Cap type, one of `butt`, `round`, `square`."""
    mapping = self.STROKE_STYLE_LINE_CAP_TYPES
    raw = self._data.get(b'strokeStyleLineCapType').enum
    # Fall back to the stringified raw enum for unknown values.
    return mapping.get(raw, str(raw))
"resource": ""
} |
def line_join_type(self):
    """Join type, one of `miter`, `round`, `bevel`."""
    mapping = self.STROKE_STYLE_LINE_JOIN_TYPES
    raw = self._data.get(b'strokeStyleLineJoinType').enum
    # Fall back to the stringified raw enum for unknown values.
    return mapping.get(raw, str(raw))
"resource": ""
} |
def line_alignment(self):
    """Alignment, one of `inner`, `outer`, `center`."""
    mapping = self.STROKE_STYLE_LINE_ALIGNMENTS
    raw = self._data.get(b'strokeStyleLineAlignment').enum
    # Fall back to the stringified raw enum for unknown values.
    return mapping.get(raw, str(raw))
"resource": ""
} |
def bbox(self):
    """
    Bounding box of the live shape.

    :return: :py:class:`~psd_tools.psd.descriptor.Descriptor`
    """
    box = self._data.get(b'keyOriginShapeBBox')
    if not box:
        # No stored bounding box: report an empty extent.
        return (0, 0, 0, 0)
    return (
        box.get(b'Left').value,
        box.get(b'Top ').value,
        box.get(b'Rght').value,
        box.get(b'Btom').value,
    )
"resource": ""
} |
def mask(self):
    """
    Returns mask associated with this layer.

    :return: :py:class:`~psd_tools.api.mask.Mask` or `None`
    """
    # Return the cached value when it exists; build it lazily otherwise.
    if hasattr(self, '_mask'):
        return self._mask
    self._mask = Mask(self) if self.has_mask() else None
    return self._mask
"resource": ""
} |
def vector_mask(self):
    """
    Returns vector mask associated with this layer.

    :return: :py:class:`~psd_tools.api.shape.VectorMask` or `None`
    """
    if hasattr(self, '_vector_mask'):
        return self._vector_mask
    blocks = self.tagged_blocks
    found = None
    # When both settings are present, the later key wins, matching the
    # original lookup order.
    for key in ('VECTOR_MASK_SETTING1', 'VECTOR_MASK_SETTING2'):
        if key in blocks:
            found = VectorMask(blocks.get_data(key))
    self._vector_mask = found
    return self._vector_mask
"resource": ""
} |
def origination(self):
    """
    Property for a list of live shapes or a line.

    Some of the vector masks have associated live shape properties, that
    are Photoshop feature to handle primitive shapes such as a rectangle,
    an ellipse, or a line. Vector masks without live shape properties are
    plain path objects.

    See :py:mod:`psd_tools.api.shape`.

    :return: List of :py:class:`~psd_tools.api.shape.Invalidated`,
        :py:class:`~psd_tools.api.shape.Rectangle`,
        :py:class:`~psd_tools.api.shape.RoundedRectangle`,
        :py:class:`~psd_tools.api.shape.Ellipse`, or
        :py:class:`~psd_tools.api.shape.Line`.
    """
    if hasattr(self, '_origination'):
        return self._origination
    data = self.tagged_blocks.get_data('VECTOR_ORIGINATION_DATA', {})
    if data.get(b'keyShapeInvalidated'):
        # Invalidated shape data yields no live shapes.
        self._origination = []
    else:
        self._origination = [
            Origination.create(item)
            for item in data.get(b'keyDescriptorList', [])
        ]
    return self._origination
"resource": ""
} |
def effects(self):
    """
    Layer effects.

    :return: :py:class:`~psd_tools.api.effects.Effects`
    """
    # Build lazily on first access, then return the cached object.
    if hasattr(self, '_effects'):
        return self._effects
    self._effects = Effects(self)
    return self._effects
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.