idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
60,000
def from_sequence ( cls , sequence , phos_3_prime = False ) : strand1 = NucleicAcidStrand ( sequence , phos_3_prime = phos_3_prime ) duplex = cls ( strand1 ) return duplex
Creates a DNA duplex from a nucleotide sequence .
60,001
def from_start_and_end(cls, start, end, sequence, phos_3_prime=False):
    """Create a DNA duplex running from ``start`` to ``end``."""
    sense_strand = NucleicAcidStrand.from_start_and_end(
        start, end, sequence, phos_3_prime=phos_3_prime)
    return cls(sense_strand)
Creates a DNA duplex from a start and end point .
60,002
def generate_complementary_strand(strand1):
    """Take a single-strand helix and create its antisense strand.

    The new strand is built antiparallel to ``strand1`` (from its helix end
    back to its helix start, offset by two nucleotide rises along the axis)
    and then rotated about its own axis so the C1' atoms pair up with the
    sense strand.
    """
    rise_adjust = (strand1.rise_per_nucleotide * strand1.axis.unit_tangent) * 2
    strand2 = NucleicAcidStrand.from_start_and_end(
        strand1.helix_end - rise_adjust,
        strand1.helix_start - rise_adjust,
        generate_antisense_sequence(strand1.base_sequence),
        phos_3_prime=strand1.phos_3_prime)
    # Dihedral between the two strands' C1' reference atoms fixes the
    # rotation needed to register the base pairing.
    ad_ang = dihedral(strand1[0]["C1'"]._vector,
                      strand1.axis.start,
                      strand2.axis.start + rise_adjust,
                      strand2[-1]["C1'"]._vector)
    strand2.rotate(225.0 + ad_ang, strand2.axis.unit_tangent,
                   point=strand2.helix_start)
    return strand2
Takes a SingleStrandHelix and creates the antisense strand .
60,003
def total_accessibility(in_rsa, path=True):
    """Parse an rsa file for the total surface accessibility summary.

    Parameters
    ----------
    in_rsa : str
        Path to an rsa file, or its contents if ``path`` is False.
    path : bool
        Whether ``in_rsa`` is a file path.

    Returns
    -------
    (all_atoms, side_chains, main_chain, non_polar, polar) : tuple of float
        Values taken from the final (TOTAL) line of the file.
    """
    if path:
        with open(in_rsa, 'r') as inf:
            rsa = inf.read()
    else:
        rsa = in_rsa[:]
    # The last line holds the totals; the first token is a label.
    totals = [float(x) for x in rsa.splitlines()[-1].split()[1:]]
    all_atoms, side_chains, main_chain, non_polar, polar = totals
    return all_atoms, side_chains, main_chain, non_polar, polar
Parses rsa file for the total surface accessibility data .
60,004
def get_aa_code(aa_letter):
    """Return the three-letter amino-acid code for ``aa_letter``.

    Parameters
    ----------
    aa_letter : str
        One-letter amino-acid code.

    Returns
    -------
    str or None
        The three-letter code, or None for 'X' (unknown) or for letters
        not present in ``standard_amino_acids``.
    """
    # Direct dict lookup replaces the original O(n) scan over items();
    # .get returns None for missing letters, matching the old behaviour.
    if aa_letter == 'X':
        return None
    return standard_amino_acids.get(aa_letter)
Get three - letter aa code if possible . If not return None .
60,005
def get_aa_letter(aa_code):
    """Return the one-letter code for ``aa_code``, or 'X' if unknown.

    If several letters map to the same code, the last match wins (as in
    the original linear scan).
    """
    matches = [letter for letter, code in standard_amino_acids.items()
               if code == aa_code]
    return matches[-1] if matches else 'X'
Get one - letter version of aa_code if possible . If not return X .
60,006
def get_aa_info(code):
    """Scrape PDBe for information on an amino-acid code not in the database.

    Parameters
    ----------
    code : str
        Three-letter chemical component code.

    Returns
    -------
    aa_dict : dict
        Keys 'code', 'description', 'modified' (standard parent code or
        None) and 'letter' (always 'X' for unknown codes).

    Raises
    ------
    IOError
        If the PDBe page cannot be fetched.
    """
    letter = 'X'
    url_string = ('http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/'
                  'show/{0}').format(code)
    r = requests.get(url_string)
    if not r.ok:
        raise IOError("Could not get to url {0}".format(url_string))
    # NOTE(review): brittle HTML scraping -- this breaks if PDBe changes
    # its page markup.
    description = r.text.split('<h3>Molecule name')[1].split('</tr>')[0]
    description = description.strip().split('\n')[3].strip()[:255]
    modified = r.text.split("<h3>Standard parent ")[1].split('</tr>')[0]
    modified = modified.replace(" ", "").replace('\n', '').split('<')[-3].split('>')[-1]
    if modified == "NotAssigned":
        modified = None
    return {'code': code, 'description': description,
            'modified': modified, 'letter': letter}
Get dictionary of information relating to a new amino acid code not currently in the database .
60,007
def add_amino_acid_to_json(code, description, letter='X', modified=None,
                           force_add=False):
    """Add an amino-acid entry to the amino_acids.json data file.

    Raises
    ------
    IOError
        If ``code`` already exists and ``force_add`` is False.
    """
    if (not force_add) and code in amino_acids_dict:
        raise IOError("{0} is already in the amino_acids dictionary, "
                      "with values: {1}".format(code, amino_acids_dict[code]))
    amino_acids_dict[code] = {'description': description,
                              'letter': letter,
                              'modified': modified}
    with open(_amino_acids_json_path, 'w') as foo:
        foo.write(json.dumps(amino_acids_dict))
    return
Add an amino acid to the amino_acids . json file used to populate the amino_acid table .
60,008
def from_polymers(cls, polymers):
    """Create a CoiledCoil from a list of HelicalHelices.

    Copies the super-helical parameters of each input helix onto a new
    instance and then builds it.
    """
    instance = cls(n=len(polymers), auto_build=False)
    instance.major_radii = [p.major_radius for p in polymers]
    instance.major_pitches = [p.major_pitch for p in polymers]
    instance.major_handedness = [p.major_handedness for p in polymers]
    instance.aas = [p.num_monomers for p in polymers]
    instance.minor_helix_types = [p.minor_helix_type for p in polymers]
    instance.orientations = [p.orientation for p in polymers]
    instance.phi_c_alphas = [p.phi_c_alpha for p in polymers]
    instance.minor_repeats = [p.minor_repeat for p in polymers]
    instance.build()
    return instance
Creates a CoiledCoil from a list of HelicalHelices .
60,009
def from_parameters(cls, n, aa=28, major_radius=None, major_pitch=None,
                    phi_c_alpha=26.42, minor_helix_type='alpha',
                    auto_build=True):
    """Create a CoiledCoil from defined super-helical parameters.

    Radius and pitch keep the class defaults when not supplied.
    """
    instance = cls(n=n, auto_build=False)
    instance.aas = [aa] * n
    instance.phi_c_alphas = [phi_c_alpha] * n
    instance.minor_helix_types = [minor_helix_type] * n
    if major_pitch is not None:
        instance.major_pitches = [major_pitch] * n
    if major_radius is not None:
        instance.major_radii = [major_radius] * n
    if auto_build:
        instance.build()
    return instance
Creates a CoiledCoil from defined super - helical parameters .
60,010
def tropocollagen(cls, aa=28, major_radius=5.0, major_pitch=85.0,
                  auto_build=True):
    """Create a model of a collagen triple helix."""
    instance = cls.from_parameters(n=3, aa=aa, major_radius=major_radius,
                                   major_pitch=major_pitch, phi_c_alpha=0.0,
                                   minor_helix_type='collagen',
                                   auto_build=False)
    instance.major_handedness = ['r'] * 3
    # Stagger the three strands by one collagen rise each along the axis.
    rpr_collagen = _helix_parameters['collagen'][1]
    instance.z_shifts = [-rpr_collagen * 2, -rpr_collagen, 0.0]
    instance.minor_repeats = [None] * 3
    if auto_build:
        instance.build()
    return instance
Creates a model of a collagen triple helix .
60,011
def build(self):
    """Build a model of a coiled-coil protein from the stored parameters.

    Creates one HelicalHelix per chain, rotates and translates each into
    place, then relabels the whole assembly.
    """
    monomers = [HelicalHelix(major_pitch=self.major_pitches[i],
                             major_radius=self.major_radii[i],
                             major_handedness=self.major_handedness[i],
                             aa=self.aas[i],
                             minor_helix_type=self.minor_helix_types[i],
                             orientation=self.orientations[i],
                             phi_c_alpha=self.phi_c_alphas[i],
                             minor_repeat=self.minor_repeats[i],
                             )
                for i in range(self.oligomeric_state)]
    axis_unit_vector = numpy.array([0, 0, 1])
    for i, monomer in enumerate(monomers):
        monomer.rotate(angle=self.rotational_offsets[i], axis=axis_unit_vector)
        monomer.translate(axis_unit_vector * self.z_shifts[i])
    self._molecules = monomers[:]
    self.relabel_all()
    for molecule in self._molecules:
        molecule.ampal_parent = self
    return
Builds a model of a coiled coil protein using input parameters .
60,012
def find_max_rad_npnp(self):
    """Find the maximum radius and npnp values in the force field.

    Scans every residue entry except the special 'KEY' entry; in each
    atom's parameter list, index 1 is the radius and index 4 the npnp
    value.

    Returns
    -------
    (max_rad, max_npnp) : tuple
        Both are 0 when the force field holds no residue entries.
    """
    max_rad = 0
    max_npnp = 0
    for res, atoms in self.items():
        if res == 'KEY':
            # 'KEY' describes the parameter layout, not a residue.
            continue
        # Use the `atoms` mapping already in hand instead of re-indexing
        # self[res]; the atom label itself is not needed.
        for ff_params in atoms.values():
            max_rad = max(max_rad, ff_params[1])
            max_npnp = max(max_npnp, ff_params[4])
    return max_rad, max_npnp
Finds the maximum radius and npnp in the force field .
60,013
def parameter_struct_dict(self):
    """Dictionary of PyAtomData structs for the force field (lazily built).

    The cached dict is rebuilt when ``auto_update_f_params`` is set and a
    hash over the underlying parameter values shows they have changed.
    """
    if self._parameter_struct_dict is None:
        self._parameter_struct_dict = self._make_ff_params_dict()
    elif self.auto_update_f_params:
        new_hash = hash(tuple([tuple(item)
                               for sublist in self.values()
                               for item in sublist.values()]))
        if self._old_hash != new_hash:
            self._parameter_struct_dict = self._make_ff_params_dict()
            self._old_hash = new_hash
    return self._parameter_struct_dict
Dictionary containing PyAtomData structs for the force field .
60,014
def run_reduce(input_file, path=True):
    """Run the Reduce program on a pdb or mmol file.

    Parameters
    ----------
    input_file : str
        Path to the input file if ``path`` is True, otherwise the file
        contents themselves.
    path : bool
        Whether ``input_file`` is a path.

    Returns
    -------
    (reduced_mmol, reduce_message) : tuple
        Reduce's stdout (the protonated structure) and stderr as strings,
        or (None, None) if the input file does not exist.

    Raises
    ------
    FileNotFoundError
        If the Reduce executable cannot be found.
    """
    if path:
        input_path = Path(input_file)
        if not input_path.exists():
            # BUG FIX: previously printed the boolean `path` flag instead
            # of the missing file's location.
            print('No file found at', input_path)
            return None, None
    else:
        # Keep a reference to the NamedTemporaryFile so it is not deleted
        # before Reduce reads it.
        pathf = tempfile.NamedTemporaryFile()
        encoded_input = input_file.encode()
        pathf.write(encoded_input)
        pathf.seek(0)
        file_path = pathf.name
        input_path = Path(file_path)
    reduce_folder = Path(global_settings['reduce']['folder'])
    reduce_exe = reduce_folder / global_settings['reduce']['path']
    reduce_dict = reduce_folder / 'reduce_wwPDB_het_dict.txt'
    try:
        reduce_output = subprocess.run(
            [str(reduce_exe), '-build', '-DB', str(reduce_dict),
             str(input_path)],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError as e:
        # Chain the original error so the real cause is not lost.
        raise FileNotFoundError(
            'The Reduce executable cannot be found. Ensure the '
            'location and filename are specified in settings.') from e
    try:
        reduced_mmol = reduce_output.stdout.decode()
    except UnicodeDecodeError:
        print("Reduce could not detect any missing protons in the protein. "
              "Using the original structure.")
        if path:
            reduced_mmol = input_path.read_text()
        else:
            reduced_mmol = input_file
    reduce_message = reduce_output.stderr.decode()
    if 'could not open' in reduce_message:
        print('Caution: the Reduce connectivity dictionary could not be '
              'found. Some protons may be missing. See notes.')
    return reduced_mmol, reduce_message
Runs reduce on a pdb or mmol file at the specified path .
60,015
def reduce_output_path(path=None, pdb_name=None):
    """Work out where Reduce output should be saved.

    With ``path``, the output goes in a 'reduce' folder two levels up when
    possible, otherwise alongside the input. Without ``path``, a
    ``pdb_name`` is required and the structural-database layout is used.

    Raises
    ------
    NameError
        If neither ``path`` nor ``pdb_name`` is supplied.
    """
    if path:
        input_path = Path(path)
        reduced_name = input_path.stem + '_reduced' + input_path.suffix
        if len(input_path.parents) > 1:
            return input_path.parents[1] / 'reduce' / reduced_name
        return input_path.parent / reduced_name
    if not pdb_name:
        raise NameError(
            "Cannot save an output for a temporary file without a PDB"
            "code specified")
    pdb_name = pdb_name.lower()
    return Path(global_settings['structural_database']['path'],
                pdb_name[1:3].lower(), pdb_name[:4].lower(),
                'reduce', pdb_name + '_reduced.mmol')
Defines location of Reduce output files relative to input files .
60,016
def output_reduce(input_file, path=True, pdb_name=None, force=False):
    """Run Reduce on a structure and write the output to a new file.

    Returns the output Path (an existing output is reused unless
    ``force``), or None if Reduce produced nothing.
    """
    if path:
        output_path = reduce_output_path(path=input_file)
    else:
        output_path = reduce_output_path(pdb_name=pdb_name)
    if output_path.exists() and not force:
        return output_path
    reduce_mmol, reduce_message = run_reduce(input_file, path=path)
    if not reduce_mmol:
        return None
    output_path.parent.mkdir(exist_ok=True)
    output_path.write_text(reduce_mmol)
    return output_path
Runs Reduce on a pdb or mmol file and creates a new file with the output .
60,017
def output_reduce_list(path_list, force=False):
    """Run Reduce over several structure files, returning output paths."""
    results = []
    for structure_path in path_list:
        reduced = output_reduce(structure_path, force=force)
        if reduced:
            results.append(reduced)
    return results
Generates structure file with protons from a list of structure files .
60,018
def assembly_plus_protons(input_file, path=True, pdb_name=None,
                          save_output=False, force_save=False):
    """Return an Assembly with protons added by Reduce.

    An existing reduced file is reused when possible; otherwise Reduce is
    run and its output optionally saved before parsing to an Assembly.
    Returns None if Reduce produced no output.
    """
    from ampal.pdb_parser import convert_pdb_to_ampal
    if path:
        input_path = Path(input_file)
        if not pdb_name:
            pdb_name = input_path.stem[:4]
        reduced_path = reduce_output_path(path=input_path)
        if reduced_path.exists() and not save_output and not force_save:
            return convert_pdb_to_ampal(str(reduced_path), pdb_id=pdb_name)
    if save_output:
        reduced_path = output_reduce(
            input_file, path=path, pdb_name=pdb_name, force=force_save)
        reduced_assembly = convert_pdb_to_ampal(str(reduced_path), path=True)
    else:
        reduce_mmol, reduce_message = run_reduce(input_file, path=path)
        if not reduce_mmol:
            return None
        reduced_assembly = convert_pdb_to_ampal(
            reduce_mmol, path=False, pdb_id=pdb_name)
    return reduced_assembly
Returns an Assembly with protons added by Reduce .
60,019
def from_start_and_end(cls, start, end, aa=None, helix_type='alpha'):
    """Create a Helix between ``start`` and ``end``.

    If ``aa`` is None, the residue count is estimated from the span length
    and the helix type's rise per residue.
    """
    start = numpy.array(start)
    end = numpy.array(end)
    if aa is None:
        rise_per_residue = _helix_parameters[helix_type][1]
        aa = int((numpy.linalg.norm(end - start) / rise_per_residue) + 1)
    instance = cls(aa=aa, helix_type=helix_type)
    instance.move_to(start=start, end=end)
    return instance
Creates a Helix between start and end .
60,020
def build(self):
    """Build a straight helix along the z-axis, CA1 starting on the x-axis."""
    ang_per_res = (2 * numpy.pi) / self.residues_per_turn
    atom_offsets = _atom_offsets[self.helix_type]
    handedness = -1 if self.handedness == 'l' else 1
    atom_labels = ['N', 'CA', 'C', 'O']
    # Use glycine when the full backbone is parameterised, UNK otherwise.
    if all([x in atom_offsets.keys() for x in atom_labels]):
        res_label = 'GLY'
    else:
        res_label = 'UNK'
    monomers = []
    for i in range(self.num_monomers):
        residue = Residue(mol_code=res_label, ampal_parent=self)
        atoms_dict = OrderedDict()
        for atom_label in atom_labels:
            r, zeta, z_shift = atom_offsets[atom_label]
            rot_ang = ((i * ang_per_res) + zeta) * handedness
            z = (self.rise_per_residue * i) + z_shift
            coords = cylindrical_to_cartesian(
                radius=r, azimuth=rot_ang, z=z, radians=True)
            atoms_dict[atom_label] = Atom(
                coordinates=coords, element=atom_label[0],
                ampal_parent=residue, res_label=atom_label)
        residue.atoms = atoms_dict
        monomers.append(residue)
    self._monomers = monomers
    self.relabel_monomers()
    self.relabel_atoms()
    return
Build straight helix along z - axis starting with CA1 on x - axis
60,021
def from_start_and_end(cls, start, end, aa=None, major_pitch=225.8,
                       major_radius=5.07, major_handedness='l',
                       minor_helix_type='alpha', orientation=1,
                       phi_c_alpha=0.0, minor_repeat=None):
    """Create a HelicalHelix between a start and end point.

    If ``aa`` is not given, the number of residues is estimated from the
    start-end distance and the minor helix's rise per residue.
    """
    start = numpy.array(start)
    end = numpy.array(end)
    if aa is None:
        minor_rise_per_residue = _helix_parameters[minor_helix_type][1]
        aa = int((numpy.linalg.norm(end - start) / minor_rise_per_residue) + 1)
    instance = cls(aa=aa, major_pitch=major_pitch, major_radius=major_radius,
                   major_handedness=major_handedness,
                   minor_helix_type=minor_helix_type,
                   orientation=orientation, phi_c_alpha=phi_c_alpha,
                   minor_repeat=minor_repeat)
    instance.move_to(start=start, end=end)
    return instance
Creates a HelicalHelix between a start and end point .
60,022
def curve(self):
    """HelicalCurve describing the super helix."""
    return HelicalCurve.pitch_and_radius(self.major_pitch,
                                         self.major_radius,
                                         handedness=self.major_handedness)
Curve of the super helix .
60,023
def curve_primitive(self):
    """Primitive of the super-helical curve, honouring orientation."""
    curve = self.curve
    curve.axis_start = self.helix_start
    curve.axis_end = self.helix_end
    coords = curve.get_coords(n_points=(self.num_monomers + 1),
                              spacing=self.minor_rise_per_residue)
    # An antiparallel helix walks the curve in the opposite direction.
    if self.orientation == -1:
        coords.reverse()
    return Primitive.from_coordinates(coords)
Primitive of the super - helical curve .
60,024
def major_rise_per_monomer(self):
    """Rise along the super-helical axis contributed by each monomer."""
    alpha_rad = numpy.deg2rad(self.curve.alpha)
    return numpy.cos(alpha_rad) * self.minor_rise_per_residue
Rise along super - helical axis per monomer .
60,025
def minor_residues_per_turn(self, minor_repeat=None):
    """Number of residues per turn of the minor helix.

    With no ``minor_repeat``, the canonical value for the minor helix type
    is returned. Otherwise the super-helical precession over one repeat is
    folded in, with its sign flipped for antiparallel orientation and for
    opposite major/minor handedness.
    """
    if minor_repeat is None:
        return _helix_parameters[self.minor_helix_type][0]
    precession = self.curve.t_from_arc_length(
        minor_repeat * self.minor_rise_per_residue)
    if self.orientation == -1:
        precession = -precession
    if self.major_handedness != self.minor_handedness:
        precession = -precession
    return (minor_repeat * numpy.pi * 2) / ((2 * numpy.pi) + precession)
Calculates the number of residues per turn of the minor helix .
60,026
def build(self):
    """Build the HelicalHelix as single-residue helices along the curve."""
    helical_helix = Polypeptide()
    primitive_coords = self.curve_primitive.coordinates
    helices = [Helix.from_start_and_end(start=primitive_coords[i],
                                        end=primitive_coords[i + 1],
                                        helix_type=self.minor_helix_type,
                                        aa=1)
               for i in range(len(primitive_coords) - 1)]
    residues_per_turn = self.minor_residues_per_turn(
        minor_repeat=self.minor_repeat)
    if residues_per_turn == 0:
        residues_per_turn = _helix_parameters[self.minor_helix_type][0]
    if self.minor_handedness == 'l':
        residues_per_turn *= -1
    # Reference dihedral used to zero the phase of the C-alpha positions.
    if self.orientation != -1:
        initial_angle = dihedral(numpy.array([0, 0, 0]),
                                 primitive_coords[0],
                                 primitive_coords[1],
                                 helices[0][0]['CA'])
    else:
        initial_angle = dihedral(
            numpy.array([0, 0, primitive_coords[0][2]]),
            primitive_coords[0],
            numpy.array([primitive_coords[0][0], primitive_coords[0][1],
                         primitive_coords[1][2]]),
            helices[0][0]['CA'])
    addition_angle = self.phi_c_alpha - initial_angle
    for i, h in enumerate(helices):
        angle = (i * (360.0 / residues_per_turn)) + addition_angle
        h.rotate(angle=angle, axis=h.axis.unit_tangent, point=h.helix_start)
        helical_helix.extend(h)
    helical_helix.relabel_all()
    self._monomers = helical_helix._monomers[:]
    for monomer in self._monomers:
        monomer.ampal_parent = self
    return
Builds the HelicalHelix .
60,027
def rotate_monomers(self, angle, radians=False):
    """Rotate each Residue about the local primitive axis by ``angle``."""
    if radians:
        angle = numpy.rad2deg(angle)
    for idx in range(len(self.primitive) - 1):
        segment_axis = self.primitive[idx + 1]['CA'] - self.primitive[idx]['CA']
        pivot = self.primitive[idx]['CA']._vector
        self[idx].rotate(angle=angle, axis=segment_axis, point=pivot)
    return
Rotates each Residue in the Polypeptide .
60,028
def side_chain_centres(assembly, masses=False):
    """PseudoGroup of side-chain centres for every Residue in the Assembly.

    Residues with no side-chain atoms (glycine) fall back to the CA
    position. With ``masses`` True, centres are mass-weighted using each
    element's atomic mass.
    """
    if masses:
        elts = set([x.element for x in assembly.get_atoms()])
        masses_dict = {e: element_data[e]['atomic mass'] for e in elts}
    pseudo_monomers = []
    for chain in assembly:
        if not isinstance(chain, Polypeptide):
            continue
        centres = OrderedDict()
        for r in chain.get_monomers(ligands=False):
            side_chain = r.side_chain
            if masses:
                masses_list = [masses_dict[x.element] for x in side_chain]
            else:
                masses_list = None
            if side_chain:
                centre = centre_of_mass(points=[x._vector for x in side_chain],
                                        masses=masses_list)
            else:
                # No side-chain atoms: use the CA position instead.
                centre = r['CA']._vector
            centres[r.unique_id] = PseudoAtom(
                coordinates=centre, name=r.unique_id, ampal_parent=r)
        pseudo_monomers.append(PseudoMonomer(
            pseudo_atoms=centres, monomer_id=' ', ampal_parent=chain))
    return PseudoGroup(monomers=pseudo_monomers, ampal_parent=assembly)
PseudoGroup containing side_chain centres of each Residue in each Polypeptide in Assembly .
60,029
def cluster_helices(helices, cluster_distance=12.0):
    """Cluster helices by minimum distance between their backbone segments.

    Single-linkage hierarchical clustering is run on the pairwise minimal
    distances between the CA-CA end-point segments of each helix.

    Returns
    -------
    cluster_dict : dict
        Maps cluster id to the list of helices in that cluster.
    """
    condensed_distance_matrix = [
        minimal_distance_between_lines(h1[0]['CA']._vector,
                                       h1[-1]['CA']._vector,
                                       h2[0]['CA']._vector,
                                       h2[-1]['CA']._vector,
                                       segments=True)
        for h1, h2 in itertools.combinations(helices, 2)]
    z = linkage(condensed_distance_matrix, method='single')
    cluster_ids = fcluster(z, t=cluster_distance, criterion='distance')
    cluster_dict = {}
    for helix, cluster_id in zip(helices, cluster_ids):
        cluster_dict.setdefault(cluster_id, []).append(helix)
    return cluster_dict
Clusters helices according to the minimum distance between the line segments representing their backbone .
60,030
def find_kihs(assembly, hole_size=4, cutoff=7.0):
    """Find knob-into-hole interactions between chains of an assembly.

    A knob is a side-chain centre with at least ``hole_size`` side-chain
    centres from another chain within ``cutoff``; the ``hole_size``
    nearest of them form the hole.
    """
    pseudo_group = side_chain_centres(assembly=assembly, masses=False)
    kihs = []
    for pp_1, pp_2 in itertools.permutations(pseudo_group, 2):
        for knob in pp_1:
            close_atoms = pp_2.is_within(cutoff, knob)
            if len(close_atoms) < hole_size:
                continue
            if len(close_atoms) > hole_size:
                # Keep only the hole_size nearest centres.
                close_atoms = sorted(
                    close_atoms, key=lambda x: distance(x, knob))[:hole_size]
            kih = OrderedDict()
            kih['k'] = knob
            for i, hole_atom in enumerate(close_atoms):
                kih['h{0}'.format(i)] = hole_atom
            kihs.append(KnobIntoHole(pseudo_atoms=kih))
    return kihs
KnobIntoHoles between residues of different chains in assembly .
60,031
def find_contiguous_packing_segments(polypeptide, residues, max_dist=10.0):
    """Split a polypeptide into segments of contiguous packing residues.

    Residues from ``residues`` belonging to ``polypeptide`` are sorted by
    id and grouped whenever consecutive CA atoms are within ``max_dist``.
    Each group becomes a slice of the polypeptide in the returned Assembly.
    """
    segments = Assembly(assembly_id=polypeptide.ampal_parent.id)
    residues_in_polypeptide = list(
        sorted(residues.intersection(set(polypeptide.get_monomers())),
               key=lambda x: int(x.id)))
    if not residues_in_polypeptide:
        return segments
    residue_pots = []
    pot = [residues_in_polypeptide[0]]
    for r1, r2 in zip(residues_in_polypeptide, residues_in_polypeptide[1:]):
        d = distance(r1['CA'], r2['CA'])
        if d <= max_dist:
            pot.append(r2)
            # Close the final pot once every residue is accounted for.
            if sum([len(x) for x in residue_pots] + [len(pot)]) == len(residues_in_polypeptide):
                residue_pots.append(pot)
        else:
            residue_pots.append(pot)
            pot = [r2]
    for pot in residue_pots:
        segment = polypeptide.get_slice_from_res_id(pot[0].id, pot[-1].id)
        segment.ampal_parent = polypeptide.ampal_parent
        segments.append(segment)
    return segments
Assembly containing segments of polypeptide divided according to separation of contiguous residues .
60,032
def gen_reference_primitive(polypeptide, start, end):
    """Generate a reference Primitive for a polypeptide between two points.

    The axis runs from the foot of the perpendicular (dropped from the
    first primitive point onto the start-end line) to ``end``, flipped if
    it opposes the polypeptide's own direction; points are spaced by the
    polypeptide's rise per residue.
    """
    prim = polypeptide.primitive
    q = find_foot(a=start, b=end, p=prim.coordinates[0])
    ax = Axis(start=q, end=end)
    # Flip the axis if it points against the polypeptide direction.
    if not is_acute(polypeptide_vector(polypeptide), ax.unit_tangent):
        ax = Axis(start=end, end=q)
    arc_length = 0
    points = [ax.start]
    for rise in prim.rise_per_residue()[:-1]:
        arc_length += rise
        t = ax.t_from_arc_length(arc_length=arc_length)
        points.append(ax.point(t))
    return Primitive.from_coordinates(points)
Generates a reference Primitive for a Polypeptide given start and end coordinates .
60,033
def from_helices(cls, assembly, cutoff=7.0, min_helix_length=8):
    """Generate a KnobGroup from the helices in an assembly (classic Socket).

    Returns None when fewer than two helices of at least
    ``min_helix_length`` residues are present.
    """
    cutoff = float(cutoff)
    helices = Assembly([x for x in assembly.helices
                        if len(x) >= min_helix_length])
    if len(helices) <= 1:
        return None
    helices.relabel_polymers([x.ampal_parent.id for x in helices])
    for i, h in enumerate(helices):
        h.number = i
        h.ampal_parent = h[0].ampal_parent
        for r in h.get_monomers():
            r.tags['helix'] = h
    all_kihs = []
    # Clustering with a padded distance keeps the KIH search local.
    cluster_dict = cluster_helices(helices, cluster_distance=(cutoff + 10))
    for k, cluster_members in cluster_dict.items():
        if len(cluster_members) > 1:
            kihs = find_kihs(cluster_members, cutoff=cutoff, hole_size=4)
            if len(kihs) == 0:
                continue
            for kih in kihs:
                all_kihs.append(kih)
    instance = cls(ampal_parent=helices, cutoff=cutoff)
    for kih in all_kihs:
        kih.ampal_parent = instance
    instance._monomers = all_kihs
    instance.relabel_monomers()
    return instance
Generate KnobGroup from the helices in the assembly - classic socket functionality .
60,034
def knob_subgroup(self, cutoff=7.0):
    """KnobGroup of KnobsIntoHoles with max_kh_distance <= cutoff."""
    if cutoff > self.cutoff:
        raise ValueError(
            "cutoff supplied ({0}) cannot be greater than self.cutoff ({1})".format(
                cutoff, self.cutoff))
    subset = [x for x in self.get_monomers() if x.max_kh_distance <= cutoff]
    return KnobGroup(monomers=subset, ampal_parent=self.ampal_parent)
KnobGroup where all KnobsIntoHoles have max_kh_distance < = cutoff .
60,035
def graph(self):
    """MultiDiGraph with helices as nodes and KIH interactions as edges."""
    g = networkx.MultiDiGraph()
    g.add_edges_from((kih.knob_helix, kih.hole_helix, kih.id, {'kih': kih})
                     for kih in self.get_monomers())
    return g
Returns MultiDiGraph from kihs . Nodes are helices and edges are kihs .
60,036
def filter_graph(g, cutoff=7.0, min_kihs=2):
    """Subgraph of ``g`` restricted to KIH edges within ``cutoff``.

    When ``min_kihs`` > 0, only helices belonging to pairs connected by
    more than ``min_kihs`` qualifying edges are kept.
    """
    edge_list = [e for e in g.edges(keys=True, data=True)
                 if e[3]['kih'].max_kh_distance <= cutoff]
    if min_kihs > 0:
        pair_counts = Counter([(e[0], e[1]) for e in edge_list])
        node_list = set(list(itertools.chain.from_iterable(
            [k for k, v in pair_counts.items() if v > min_kihs])))
        edge_list = [e for e in edge_list
                     if (e[0] in node_list) and (e[1] in node_list)]
    return networkx.MultiDiGraph(edge_list)
Get the subgraph formed from edges that have max_kh_distance <= cutoff.
60,037
def get_coiledcoil_region(self, cc_number=0, cutoff=7.0, min_kihs=2):
    """Assembly of assigned regions (contiguous KnobsIntoHoles) only.

    The knob graph is filtered, its connected components sorted by size,
    and component ``cc_number`` sliced out of the parent helices.
    """
    g = self.filter_graph(self.graph, cutoff=cutoff, min_kihs=min_kihs)
    # NOTE(review): connected_component_subgraphs was removed in
    # networkx >= 2.4 -- confirm the pinned networkx version supports it.
    ccs = sorted(networkx.connected_component_subgraphs(g, copy=True),
                 key=lambda x: len(x.nodes()), reverse=True)
    cc = ccs[cc_number]
    helices = [x for x in g.nodes() if x.number in cc.nodes()]
    assigned_regions = self.get_assigned_regions(
        helices=helices, include_alt_states=False, complementary_only=True)
    coiledcoil_monomers = [h.get_slice_from_res_id(*assigned_regions[h.number])
                           for h in helices]
    return Assembly(coiledcoil_monomers)
Assembly containing only assigned regions (i.e. regions with contiguous KnobsIntoHoles).
60,038
def daisy_chain_graph(self):
    """DiGraph with an edge from each knob residue to each hole residue."""
    g = networkx.DiGraph()
    for kih in self.get_monomers():
        for hole_residue in kih.hole:
            g.add_edge(kih.knob, hole_residue)
    return g
Directed graph with edges from knob residue to each hole residue for each KnobIntoHole in self .
60,039
def knob_end(self):
    """Coordinates of the tip of the knob residue.

    The tip is the side-chain atom furthest from CB; when several atoms
    tie, their mean position is used. For glycine (no side chain) the CA
    Atom itself is returned.
    NOTE(review): the glycine branch returns an Atom while the other
    branches return a coordinate vector -- confirm callers handle both.
    """
    side_chain_atoms = self.knob_residue.side_chain
    if not side_chain_atoms:
        return self.knob_residue['CA']
    distances = [distance(self.knob_residue['CB'], atom)
                 for atom in side_chain_atoms]
    max_d = max(distances)
    furthest = [atom for atom, d in zip(side_chain_atoms, distances)
                if d == max_d]
    if len(furthest) == 1:
        return furthest[0]._vector
    return numpy.mean([atom._vector for atom in furthest], axis=0)
Coordinates of the end of the knob residue (the side-chain atom furthest from the CB atom). Returns CA coordinates for GLY.
60,040
def max_knob_end_distance(self):
    """Greatest distance from knob_end to any hole side-chain centre."""
    return max(distance(self.knob_end, hole_atom) for hole_atom in self.hole)
Maximum distance between knob_end and each of the hole side - chain centres .
60,041
def base_install():
    """Interactively generate required ISAMBARD configuration settings.

    Prompts for SCWRL, DSSP and BUFF options and stores the results in the
    module-level ``settings`` dict.
    """
    scwrl = {}
    print('{BOLD}{HEADER}Generating configuration files for ISAMBARD.{END_C}\n'
          'All required input can use tab completion for paths.\n'
          '{BOLD}Setting up SCWRL 4.0 (Recommended){END_C}'.format(**text_colours))
    scwrl_path = get_user_path('Please provide a path to your SCWRL executable',
                               required=False)
    scwrl['path'] = str(scwrl_path)
    pack_mode = get_user_option(
        'Please choose your packing mode (flexible is significantly slower but is more accurate).',
        ['flexible', 'rigid'])
    if pack_mode == 'rigid':
        scwrl['rigid_rotamer_model'] = True
    else:
        scwrl['rigid_rotamer_model'] = False
    settings['scwrl'] = scwrl
    print('{BOLD}Setting up DSSP (Recommended){END_C}'.format(**text_colours))
    dssp = {}
    dssp_path = get_user_path('Please provide a path to your DSSP executable.',
                              required=False)
    dssp['path'] = str(dssp_path)
    settings['dssp'] = dssp
    print('{BOLD}Setting up BUFF (Required){END_C}'.format(**text_colours))
    buff = {}
    ffs = []
    ff_dir = isambard_path / 'buff' / 'force_fields'
    for ff_file in os.listdir(str(ff_dir)):
        ffs.append(pathlib.Path(ff_file).stem)
    force_field_choice = get_user_option(
        'Please choose the default BUFF force field, this can be modified during runtime.',
        ffs)
    buff['default_force_field'] = force_field_choice
    settings['buff'] = buff
    return
Generates configuration setting for required functionality of ISAMBARD .
60,042
def optional_install():
    """Interactively generate settings for optional ISAMBARD tools.

    Prompts for Reduce, naccess and ProFit paths and stores them in the
    module-level ``settings`` dict.
    """
    print('{BOLD}Setting up Reduce (optional){END_C}'.format(**text_colours))
    reduce = {}
    reduce_path = get_user_path('Please provide a path to your reduce executable.',
                                required=False)
    reduce['path'] = str(reduce_path)
    reduce['folder'] = str(reduce_path.parent) if reduce_path else ''
    settings['reduce'] = reduce
    print('{BOLD}Setting up naccess (optional){END_C}'.format(**text_colours))
    naccess = {}
    naccess_path = get_user_path('Please provide a path to your naccess executable.',
                                 required=False)
    naccess['path'] = str(naccess_path)
    settings['naccess'] = naccess
    print('{BOLD}Setting up ProFit (optional){END_C}'.format(**text_colours))
    profit = {}
    profit_path = get_user_path('Please provide a path to your ProFit executable.',
                                required=False)
    profit['path'] = str(profit_path)
    settings['profit'] = profit
    return
Generates configuration settings for optional functionality of ISAMBARD .
60,043
def pdb(self):
    """Generate a PDB string for the PseudoMonomer."""
    chain_id = self.tags['chain_id'] if self.tags['chain_id'] else ' '
    return write_pdb([self], chain_id)
Generates a PDB string for the PseudoMonomer .
60,044
def from_coordinates(cls, coordinates):
    """Create a Primitive from a list of coordinates (one 'CA' per point)."""
    prim = cls()
    for coord in coordinates:
        pm = PseudoMonomer(ampal_parent=prim)
        pm.atoms = OrderedDict([('CA', PseudoAtom(coord, ampal_parent=pm))])
        prim.append(pm)
    prim.relabel_all()
    return prim
Creates a Primitive from a list of coordinates .
60,045
def rise_per_residue(self):
    """Distance between successive primitive CA positions.

    The final entry is None since the last point has no successor.
    """
    spacings = [distance(self[i]['CA'], self[i + 1]['CA'])
                for i in range(len(self) - 1)]
    spacings.append(None)
    return spacings
The rise per residue at each point on the Primitive .
60,046
def sequence(self):
    """Return the sequence of the Polynucleotide as a space-separated string."""
    return ' '.join(monomer.mol_code for monomer in self._monomers)
Returns the sequence of the Polynucleotide as a string .
60,047
def run_dssp(pdb, path=True, outfile=None):
    """Run DSSP on a pdb file or string and return its output.

    Parameters
    ----------
    pdb : str
        Path to a pdb file if ``path`` is True, otherwise the pdb contents.
    path : bool
        Whether ``pdb`` is a path.
    outfile : str or None
        If given, the DSSP output is also written to this file.

    Returns
    -------
    dssp_out : str
        The raw DSSP output.
    """
    if not path:
        if isinstance(pdb, str):  # was `type(pdb) == str`
            pdb = pdb.encode()
        # Create the temp file BEFORE entering the try block: the original
        # code could hit a NameError in `finally` if creation failed.
        temp_pdb = tempfile.NamedTemporaryFile(delete=False)
        try:
            temp_pdb.write(pdb)
            temp_pdb.seek(0)
            dssp_out = subprocess.check_output(
                [global_settings['dssp']['path'], temp_pdb.name])
            temp_pdb.close()
        finally:
            os.remove(temp_pdb.name)
    else:
        dssp_out = subprocess.check_output(
            [global_settings['dssp']['path'], pdb])
    dssp_out = dssp_out.decode()
    if outfile:
        with open(outfile, 'w') as outf:
            outf.write(dssp_out)
    return dssp_out
Uses DSSP to find helices and extracts helices from a pdb file or string .
60,048
def extract_solvent_accessibility_dssp(in_dssp, path=True):
    """Extract per-residue solvent accessibility from DSSP output.

    Parameters
    ----------
    in_dssp : str
        Path to a DSSP file, or its contents if ``path`` is False.
    path : bool
        Whether ``in_dssp`` is a path.

    Returns
    -------
    dssp_residues : list of [res_num, chain, residue, acc]
        One entry per parseable residue line.
    """
    if path:
        with open(in_dssp, 'r') as inf:
            dssp_out = inf.read()
    else:
        dssp_out = in_dssp[:]
    dssp_residues = []
    in_data_section = False
    for line in dssp_out.splitlines():
        if not in_data_section:
            # The residue table starts after the '  #  RESIDUE ...' header.
            if line[2] == '#':
                in_data_section = True
            continue
        try:
            res_num = int(line[5:10].strip())
            chain = line[10:12].strip()
            residue = line[13]
            acc = int(line[35:38].strip())
            dssp_residues.append([res_num, chain, residue, acc])
        except ValueError:
            # Lines such as chain breaks have no parseable residue number.
            pass
    return dssp_residues
Uses DSSP to extract solvent accessibilty information on every residue .
60,049
def extract_helices_dssp(in_pdb):
    """Use DSSP to find alpha-helices in a pdb file.

    Returns
    -------
    helices : list of [helix_number, chain_id, {res_num: (x, y, z)}]
        Coordinates are taken from the CA atoms of the pdb file.
    """
    from ampal.pdb_parser import split_pdb_lines
    # BUG FIX: check_output returns bytes; without decoding, string
    # comparisons such as dssp_line[4] == 'H' never match on Python 3,
    # so no helix was ever found.
    dssp_out = subprocess.check_output(
        [global_settings['dssp']['path'], in_pdb]).decode()
    helix = 0
    helices = []
    h_on = False
    for line in dssp_out.splitlines():
        dssp_line = line.split()
        try:
            if dssp_line[4] == 'H':
                if helix not in [x[0] for x in helices]:
                    helices.append(
                        [helix, dssp_line[2], {int(dssp_line[1]): None}])
                else:
                    helices[helix][2][int(dssp_line[1])] = None
                h_on = True
            else:
                if h_on:
                    helix += 1
                    h_on = False
        except IndexError:
            # Header/blank lines have too few columns; skip them.
            pass
    with open(in_pdb, 'r') as pdb:
        pdb_atoms = split_pdb_lines(pdb.read())
    for atom in pdb_atoms:
        for hel in helices:
            if (atom[2] == "CA") and (atom[5] == hel[1]) and (atom[6] in hel[2].keys()):
                hel[2][atom[6]] = tuple(atom[8:11])
    return helices
Uses DSSP to find alpha - helices and extracts helices from a pdb file .
60,050
def extract_pp_helices(in_pdb):
    """Use DSSP to find polyproline-II helices in a PDB file.

    A residue is counted as PPII when it is unassigned by DSSP (' ') and
    its phi/psi angles fall within a tolerance window around (-75, 145).
    Returns a list of helices, each a list of CA-atom (x, y, z) tuples.

    NOTE(review): ``check_output`` returns bytes on Python 3, so the
    column slicing presumably assumes Python 2 text output — confirm.
    """
    # Fix: split_pdb_lines was used below but never imported in this
    # function's scope (it was only imported locally inside a sibling
    # function), which raised NameError at runtime.
    from ampal.pdb_parser import split_pdb_lines
    # Target phi/psi for a polyproline-II helix, plus allowed deviation.
    t_phi = -75.0
    t_phi_d = 29.0
    t_psi = 145.0
    t_psi_d = 29.0
    pph_dssp = subprocess.check_output(
        [global_settings['dssp']['path'], in_pdb])
    dssp_residues = []
    go = False
    for line in pph_dssp.splitlines():
        if go:
            res_num = int(line[:5].strip())
            chain = line[11:13].strip()
            ss_type = line[16]
            phi = float(line[103:109].strip())
            psi = float(line[109:116].strip())
            dssp_residues.append((res_num, ss_type, chain, phi, psi))
        else:
            # Data section begins after the header line ('#' in column 3).
            if line[2] == '#':
                go = True
    # Group consecutive unassigned residues whose angles sit inside the
    # PPII window into candidate chains.
    pp_chains = []
    chain = []
    ch_on = False
    for item in dssp_residues:
        if (item[1] == ' ') and (
                t_phi - t_phi_d < item[3] < t_phi + t_phi_d) and (
                t_psi - t_psi_d < item[4] < t_psi + t_psi_d):
            chain.append(item)
            ch_on = True
        else:
            if ch_on:
                pp_chains.append(chain)
                chain = []
                ch_on = False
    # A single residue does not make a helix.
    pp_chains = [x for x in pp_chains if len(x) > 1]
    pp_helices = []
    with open(in_pdb, 'r') as pdb:
        pdb_atoms = split_pdb_lines(pdb.read())
    for pp_helix in pp_chains:
        chain_id = pp_helix[0][2]
        res_range = [x[0] for x in pp_helix]
        helix = []
        for atom in pdb_atoms:
            if (atom[2] == "CA") and (atom[5] == chain_id) and (
                    atom[6] in res_range):
                helix.append(tuple(atom[8:11]))
        pp_helices.append(helix)
    return pp_helices
Uses DSSP to find polyproline helices in a pdb file .
60,051
def main():
    """Step-by-step example: create a TestRail run, push a random result to
    every test in it, then close the run."""
    args = get_args()
    tr = TestRail(project_dict[args.project])
    project = tr.project(project_dict[args.project])
    new_run = tr.run()
    new_run.name = "Creating a new Run through the API"
    new_run.project = project
    new_run.include_all = True
    run = tr.add(new_run)
    print("Created new run: {0}".format(run.name))
    PASSED = tr.status('passed')
    FAILED = tr.status('failed')
    BLOCKED = tr.status('blocked')
    tests = list(tr.tests(run))
    print("Found {0} tests".format(len(tests)))
    for test_num, test in enumerate(tests):
        print("Executing test #{0}".format(test_num))
        # Demo only: pick a random outcome for each test.
        test_status = random.choice([PASSED, FAILED, BLOCKED])
        print("Updating test #{0} with a status of {1}".format(
            test_num, test_status.name))
        result = tr.result()
        result.test = test
        result.status = test_status
        # Fix: corrected the typo "udpated" in the stored comment text.
        result.comment = "The test case was updated via a script"
        tr.add(result)
    print("Finished, closing the run")
    tr.close(run)
This will offer a step by step guide to create a new run in TestRail update tests in the run with results and close the run
60,052
def memory():
    """Determine the machine's memory specifications.

    Returns
    -------
    dict
        Keys 'total', 'free' and 'used'.  On Linux the values are in
        kilobytes (as reported by /proc/meminfo); on macOS they are in
        bytes — callers should be aware of the unit mismatch.

    Raises
    ------
    RuntimeError
        On operating systems other than Linux and macOS.
    """
    mem_info = {}
    # Fix: platform.linux_distribution() was removed in Python 3.8;
    # detect the OS with platform.system() instead.
    system = platform.system()
    if system == 'Linux':
        free = 0
        with open('/proc/meminfo') as file:
            for line in file:
                fields = line.split()
                if fields[0] == 'MemTotal:':
                    mem_info['total'] = int(fields[1])
                elif fields[0] in ('MemFree:', 'Buffers:', 'Cached:'):
                    # Buffers and cache are reclaimable: count them as free.
                    free += int(fields[1])
        mem_info['free'] = free
        mem_info['used'] = mem_info['total'] - free
    elif system == 'Darwin':
        # Fix: decode subprocess output (bytes on Python 3) before
        # splitting — the original's str split crashed with TypeError.
        ps = subprocess.Popen(
            ['ps', '-caxm', '-orss,comm'],
            stdout=subprocess.PIPE).communicate()[0].decode()
        vm = subprocess.Popen(
            ['vm_stat'], stdout=subprocess.PIPE).communicate()[0].decode()
        sep = re.compile(r'[\s]+')
        rss_total = 0
        for row_text in ps.split('\n')[1:]:
            row_elements = sep.split(row_text.strip())
            try:
                rss = float(row_elements[0]) * 1024
            except (IndexError, ValueError):
                rss = 0
            rss_total += rss
        vm_lines = vm.split('\n')
        sep = re.compile(r':[\s]+')
        vm_stats = {}
        for row_text in vm_lines[1:-2]:
            row_elements = sep.split(row_text.strip())
            # vm_stat reports page counts; convert to bytes (4096 B pages).
            vm_stats[row_elements[0]] = int(
                row_elements[1].strip('.')) * 4096
        mem_info['total'] = rss_total
        mem_info['used'] = vm_stats["Pages active"]
        mem_info['free'] = vm_stats["Pages free"]
    else:
        # Fix: the original raised a plain string (TypeError on Python 3)
        # followed by an unreachable exit(1).
        raise RuntimeError('Unsupported Operating System.')
    return mem_info
Determine the machine s memory specifications .
60,053
def get_chunk_size(N, n):
    """Work out how many rows/columns of an N-wide float array fit in the
    machine's free memory.

    Parameters
    ----------
    N : int
        Size of one dimension of the data.
    n : int
        Per-entry memory multiplier.

    Returns
    -------
    int
        Number of rows or columns that can be processed per chunk.

    Raises
    ------
    MemoryError
        If free memory is below the smallest supported tier.
    """
    mem_free = memory()['free']
    # (free-memory threshold, safety margin to leave unused), largest first.
    tiers = [
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    ]
    for threshold, reserve in tiers:
        if mem_free > threshold:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))
    raise MemoryError("\nERROR: DBSCAN_multiplex @ get_chunk_size:\n"
                      "this machine does not have enough free memory "
                      "to perform the remaining computations.\n")
Given a dimension of size N determine the number of rows or columns that can fit into memory .
60,054
def all_floating_ips(self):
    """List all Floating IPs available on the account (APIv2 only)."""
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    return self.request('/floating_ips')['floating_ips']
Lists all of the Floating IPs available on the account .
60,055
def new_floating_ip(self, **kwargs):
    """Create a Floating IP assigned to a Droplet or reserved to a region.

    Exactly one of the keyword arguments ``droplet_id`` and ``region``
    must be supplied (APIv2 only).
    """
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    droplet_id = kwargs.get('droplet_id')
    region = kwargs.get('region')
    if droplet_id is not None and region is not None:
        raise DoError(
            'Only one of droplet_id and region is required to create a Floating IP. '
            'Set one of the variables and try again.')
    if droplet_id is None and region is None:
        raise DoError(
            'droplet_id or region is required to create a Floating IP. '
            'Set one of the variables and try again.')
    if droplet_id is not None:
        params = {'droplet_id': droplet_id}
    else:
        params = {'region': region}
    json = self.request('/floating_ips', params=params, method='POST')
    return json['floating_ip']
Creates a Floating IP and assigns it to a Droplet or reserves it to a region .
60,056
def destroy_floating_ip(self, ip_addr):
    """Delete a Floating IP and remove it from the account (APIv2 only)."""
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    self.request('/floating_ips/' + ip_addr, method='DELETE')
Deletes a Floating IP and removes it from the account .
60,057
def assign_floating_ip(self, ip_addr, droplet_id):
    """Assign a Floating IP to a Droplet (APIv2 only)."""
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    response = self.request('/floating_ips/' + ip_addr + '/actions',
                            params={'type': 'assign',
                                    'droplet_id': droplet_id},
                            method='POST')
    return response['action']
Assigns a Floating IP to a Droplet .
60,058
def unassign_floating_ip(self, ip_addr):
    """Unassign a Floating IP from its Droplet; the IP stays reserved in
    the region (APIv2 only)."""
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    response = self.request('/floating_ips/' + ip_addr + '/actions',
                            params={'type': 'unassign'},
                            method='POST')
    return response['action']
Unassign a Floating IP from a Droplet . The Floating IP will be reserved in the region but not assigned to a Droplet .
60,059
def list_floating_ip_actions(self, ip_addr):
    """List all actions that have been executed on a Floating IP
    (APIv2 only)."""
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    return self.request('/floating_ips/' + ip_addr + '/actions')['actions']
Retrieve a list of all actions that have been executed on a Floating IP .
60,060
def get_floating_ip_action(self, ip_addr, action_id):
    """Retrieve the status of one Floating IP action (APIv2 only)."""
    if self.api_version != 2:
        raise DoError(v2_api_required_str)
    endpoint = '/floating_ips/' + ip_addr + '/actions/' + action_id
    return self.request(endpoint)['action']
Retrieve the status of a Floating IP action .
60,061
def raw_sign(message, secret):
    """Return the base64-encoded HMAC-SHA256 of *message* under *secret*.

    Both arguments must be bytes; the result is base64-encoded bytes.
    """
    mac = hmac.new(secret, message, hashlib.sha256)
    return base64.b64encode(mac.digest())
Sign a message .
60,062
def get_signature_from_signature_string(self, signature):
    """Extract the signature value from the Authorization header string.

    Returns None when the header does not match the expected pattern.
    """
    match = self.SIGNATURE_RE.search(signature)
    return match.group(1) if match else None
Return the signature from the signature header or None .
60,063
def get_headers_from_signature(self, signature):
    """Return the list of header names covered by the signature.

    Falls back to ['date'] when no headers clause is present.
    """
    match = self.SIGNATURE_HEADERS_RE.search(signature)
    if match is None:
        return ['date']
    return match.group(1).split()
Returns a list of headers fields to sign .
60,064
def header_canonical(self, header_name):
    """Translate an HTTP header name into Django's request.META key form."""
    name = header_name.lower()
    # Content-Type and Content-Length are special-cased by Django and do
    # not receive the HTTP_ prefix.
    special = {'content-type': 'CONTENT-TYPE',
               'content-length': 'CONTENT-LENGTH'}
    if name in special:
        return special[name]
    return 'HTTP_%s' % name.replace('-', '_').upper()
Translate HTTP headers to Django header names .
60,065
def build_dict_to_sign(self, request, signature_headers):
    """Map each signed header (except the '(request-target)' pseudo-header)
    to its value from the Django request's META."""
    return {header: request.META.get(self.header_canonical(header))
            for header in signature_headers
            if header != '(request-target)'}
Build a dict with headers and values used in the signature .
60,066
def build_signature(self, user_api_key, user_secret, request):
    """Recompute the request's signature with the user's secret.

    The set of headers to cover is taken from the Authorization header the
    client sent, so the server signs exactly what the client claims to
    have signed.
    """
    sent_signature = request.META.get(self.header_canonical('Authorization'))
    signature_headers = self.get_headers_from_signature(sent_signature)
    unsigned = self.build_dict_to_sign(request, signature_headers)
    signer = HeaderSigner(key_id=user_api_key, secret=user_secret,
                          headers=signature_headers,
                          algorithm=self.ALGORITHM)
    signed = signer.sign(unsigned, method=request.method,
                         path=request.get_full_path())
    return signed['authorization']
Return the signature for the request .
60,067
def camel_to_snake_case(string):
    """Convert a camelCase/PascalCase string to snake_case."""
    # Two regex passes (module-level _1 and _2) each insert an underscore
    # between the matched groups before lower-casing everything.
    partially_split = _1.sub(r'\1_\2', string)
    return _2.sub(r'\1_\2', partially_split).lower()
Converts string presented in camel case to snake case .
60,068
def url_assembler(query_string, no_redirect=0, no_html=0, skip_disambig=0):
    """Build the query path for a DuckDuckGo API request.

    Optional integer flags are appended only when truthy, preserving the
    parameter order q, format, no_redirect, no_html, skip_disambig.
    """
    params = [('q', query_string.encode("utf-8")), ('format', 'json')]
    flags = [('no_redirect', no_redirect),
             ('no_html', no_html),
             ('skip_disambig', skip_disambig)]
    params.extend((name, 1) for name, value in flags if value)
    return '/?' + urlencode(params)
Assembler of parameters for building request query .
60,069
def query(query_string, secure=False, container='namedtuple', verbose=False,
          user_agent=api.USER_AGENT, no_redirect=False, no_html=False,
          skip_disambig=False):
    """Send a query to the DuckDuckGo API and deserialize the response.

    Parameters
    ----------
    query_string : str
        The search query.
    secure : bool
        Use HTTPS instead of HTTP.
    container : str
        Deserialization target; must be one of Hook.containers.
    verbose : bool
        Passed through to the Hook deserializer.
    user_agent : str
        User-Agent header to send.
    no_redirect, no_html, skip_disambig : bool
        DuckDuckGo API flags forwarded to the URL.

    Raises
    ------
    exc.DuckDuckArgumentError
        If `container` is not a supported value.
    exc.DuckDuckConnectionError
        If the API host cannot be resolved.
    exc.DuckDuckDeserializeError
        If the response body is not valid JSON.
    """
    if container not in Hook.containers:
        raise exc.DuckDuckArgumentError(
            "Argument 'container' must be one of the values: "
            "{0}".format(', '.join(Hook.containers)))
    headers = {"User-Agent": user_agent}
    url = url_assembler(query_string,
                        no_redirect=no_redirect,
                        no_html=no_html,
                        skip_disambig=skip_disambig)
    if secure:
        conn = http_client.HTTPSConnection(api.SERVER_HOST)
    else:
        conn = http_client.HTTPConnection(api.SERVER_HOST)
    try:
        conn.request("GET", url, "", headers)
        resp = conn.getresponse()
        data = decoder(resp.read())
    except socket.gaierror as e:
        # Name-resolution failure; re-raise as the package's own error.
        raise exc.DuckDuckConnectionError(e.strerror)
    finally:
        conn.close()
    hook = Hook(container, verbose=verbose)
    try:
        obj = json.loads(data, object_hook=hook)
    except ValueError:
        raise exc.DuckDuckDeserializeError(
            "Unable to deserialize response to an object")
    return obj
Generates and sends a query to DuckDuckGo API .
60,070
def create(type_dict, *type_parameters):
    """Construct a List container type holding the reified element type.

    `type_parameters` must contain exactly one tuple of parameters
    describing the element type.
    """
    assert len(type_parameters) == 1
    element_type = TypeFactory.new(type_dict, *type_parameters[0])
    assert isclass(element_type)
    assert issubclass(element_type, Object)
    return TypeMetaclass('%sList' % element_type.__name__,
                         (ListContainer,),
                         {'TYPE': element_type})
Construct a List containing type klazz .
60,071
def load_file(filename):
    """Load (or reload) the given scent.py file as a module.

    Returns a ScentModule wrapping the freshly imported module.
    """
    mod_name = '.'.join(os.path.basename(filename).split('.')[:-1])
    mod_path = os.path.dirname(filename)
    # Drop any previously loaded version so changes on disk are picked up.
    if mod_name in sys.modules:
        del sys.modules[mod_name]
    # Fix: the original tested mod_path against sys.modules' *module
    # names*, which never matches a directory path; the intent is to
    # avoid adding a duplicate entry to the import search path.
    if mod_path not in sys.path:
        sys.path.insert(0, mod_path)
    return ScentModule(__import__(mod_name, g, g), filename)
Runs the given scent . py file .
60,072
def new(type_dict, type_factory, *type_parameters):
    """Create (and memoize) a fully reified type from a type schema.

    Reified types are cached in `type_dict` keyed by the full schema
    tuple, so repeated requests return the same class object.
    """
    type_tuple = (type_factory,) + type_parameters
    if type_tuple not in type_dict:
        factory = TypeFactory.get_factory(type_factory)
        type_dict[type_tuple] = factory.create(type_dict, *type_parameters)
    return type_dict[type_tuple]
Create a fully reified type from a type schema .
60,073
def wrap(sig):
    """Coerce a Python class or TypeSignature into a TypeSignature.

    Returns None (implicitly) for inputs of any other kind.
    """
    if isclass(sig) and issubclass(sig, Object):
        return TypeSignature(sig)
    if isinstance(sig, TypeSignature):
        return sig
Convert a Python class into a type signature .
60,074
def trigger_modified(self, filepath):
    """Fire the 'modified' event if the file's mtime is newer than the one
    recorded in the watchlist, updating the recorded time."""
    mod_time = self._get_modified_time(filepath)
    last_seen = self._watched_files.get(filepath, 0)
    if mod_time > last_seen:
        self._trigger('modified', filepath)
        self._watched_files[filepath] = mod_time
Triggers modified event if the given filepath mod time is newer .
60,075
def trigger_created(self, filepath):
    """Fire the 'created' event, but only if the file actually exists."""
    if not os.path.exists(filepath):
        return
    self._trigger('created', filepath)
Triggers created event if file exists .
60,076
def trigger_deleted(self, filepath):
    """Fire the 'deleted' event, but only if the file is really gone."""
    if os.path.exists(filepath):
        return
    self._trigger('deleted', filepath)
Triggers deleted event if the file doesn't exist .
60,077
def log(self, *message):
    """Write a space-joined message to the logger stream, if one is set."""
    if self._logger is None:
        return
    text = " ".join(str(part) for part in message)
    self._logger.write(text + '\n')
    self._logger.flush()
Logs a message to a defined IO stream if available .
60,078
def in_repo(self, filepath):
    """Return True if the path contains a version-control metadata
    directory (git, mercurial, svn, cvs or bazaar).

    These directories are excluded from watching because scanning them
    occasionally raises exceptions.
    """
    parts = set(filepath.replace('\\', '/').split('/'))
    repo_dirs = ('.git', '.hg', '.svn', '.cvs', '.bzr')
    return any(d in parts for d in repo_dirs)
This excludes repository directories because they occasionally cause exceptions .
60,079
def _modify_event(self, event_name, method, func):
    """Invoke a list method ('append'/'remove'/...) on the observer list
    registered for the given event.

    Raises
    ------
    TypeError
        If event_name is unknown or func is not callable.
    """
    if event_name not in self.ALL_EVENTS:
        raise TypeError(('event_name ("%s") can only be one of the '
                         'following: %s') % (event_name,
                                             repr(self.ALL_EVENTS)))
    # Fix: collections.Callable was removed in Python 3.10; the builtin
    # callable() is equivalent and works on all supported versions.
    if not callable(func):
        raise TypeError(('func must be callable to be added as an '
                         'observer.'))
    getattr(self._events[event_name], method)(func)
Wrapper to call a list s method from one of the events
60,080
def _watch_file(self, filepath, trigger_event=True):
    """Record the file's modified time in the internal watchlist,
    optionally firing a created/modified event first."""
    is_new = filepath not in self._watched_files
    if trigger_event:
        event = self.trigger_created if is_new else self.trigger_modified
        event(filepath)
    try:
        self._watched_files[filepath] = self._get_modified_time(filepath)
    except OSError:
        # File vanished between the event and the stat; nothing to record.
        return
Adds the file s modified time into its internal watchlist .
60,081
def _unwatch_file(self, filepath, trigger_event=True):
    """Drop the file from the internal watchlist if present, optionally
    firing a 'deleted' event first."""
    if filepath not in self._watched_files:
        return
    if trigger_event:
        self.trigger_deleted(filepath)
    del self._watched_files[filepath]
Removes the file from the internal watchlist if exists .
60,082
def _is_modified(self, filepath):
    """Return True if the file changed since last seen; files never seen
    before report False."""
    if self._is_new(filepath):
        return False
    return self._watched_files[filepath] < self._get_modified_time(filepath)
Returns True if the file has been modified since last seen . Will return False if the file has not been seen before .
60,083
def loop(self, sleep_time=1, callback=None):
    """Blocking polling loop: rescan the watched paths every sleep_time
    seconds, invoking callback (when callable) after each scan.

    Runs until self._running is cleared elsewhere.
    """
    self.log("No supported libraries found: using polling-method.")
    self._running = True
    self.trigger_init()
    # Prime the watchlist without firing events for pre-existing files.
    self._scan(trigger=False)
    if self._warn:
        print()
    while self._running:
        self._scan()
        # Fix: collections.Callable was removed in Python 3.10; the
        # builtin callable() is equivalent on all supported versions.
        if callable(callback):
            callback()
        time.sleep(sleep_time)
Goes into a blocking IO loop . If polling is used the sleep_time is the interval in seconds between polls .
60,084
def run(sniffer_instance=None, wait_time=0.5, clear=True, args=(),
        debug=False):
    """Run the auto-test loop: wire a Scanner to the sniffer and block.

    A default ScentSniffer is created when none is supplied; in debug
    mode the scanner logs to stdout.
    """
    if sniffer_instance is None:
        sniffer_instance = ScentSniffer()
    scanner_kwargs = {'scent': sniffer_instance.scent}
    if debug:
        scanner_kwargs['logger'] = sys.stdout
    scanner = Scanner(sniffer_instance.watch_paths, **scanner_kwargs)
    sniffer_instance.set_up(tuple(args), clear, debug)
    sniffer_instance.observe_scanner(scanner)
    scanner.loop(wait_time)
Runs the auto tester loop . Internally the runner instanciates the sniffer_cls and scanner class .
60,085
def main(sniffer_instance=None, test_args=(), progname=sys.argv[0],
         args=sys.argv[1:]):
    """Command-line entry point: parse options and start the watch loop.

    Parameters
    ----------
    sniffer_instance : optional
        Pre-built sniffer to use; run() creates a default when None.
    test_args : tuple
        Arguments always passed to the test runner, in addition to any
        -x/--test-arg options.
    progname : str
        Program name (unused in the body; kept for interface
        compatibility).
    args : list
        Command-line arguments to parse.

    Exits with status 1 on unexpected errors, 0 otherwise.
    """
    parser = OptionParser(version="%prog " + __version__)
    parser.add_option('-w', '--wait', dest="wait_time", metavar="TIME",
                      default=0.5, type="float",
                      help="Wait time, in seconds, before possibly rerunning"
                      "tests. (default: %default)")
    parser.add_option('--no-clear', dest="clear_on_run", default=True,
                      action="store_false",
                      help="Disable the clearing of screen")
    parser.add_option('--debug', dest="debug", default=False,
                      action="store_true",
                      help="Enabled debugging output. (default: %default)")
    parser.add_option('-x', '--test-arg', dest="test_args", default=[],
                      action="append",
                      help="Arguments to pass to nose (use multiple times to "
                      "pass multiple arguments.)")
    (options, args) = parser.parse_args(args)
    # Combine baseline test args with any supplied on the command line.
    test_args = test_args + tuple(options.test_args)
    if options.debug:
        print("Options:", options)
        print("Test Args:", test_args)
    try:
        print("Starting watch...")
        run(sniffer_instance, options.wait_time, options.clear_on_run,
            test_args, options.debug)
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the watcher.
        print("Good bye.")
    except Exception:
        import traceback
        traceback.print_exc()
        return sys.exit(1)
    return sys.exit(0)
Runs the program . This is used when you want to run this program standalone .
60,086
def set_up(self, test_args=(), clear=True, debug=False):
    """Store run configuration just before the watch loop starts."""
    self.test_args = test_args
    self.debug = debug
    self.clear = clear
Sets properties right before calling run .
60,087
def observe_scanner(self, scanner):
    """Subscribe this sniffer's callbacks to a scanner's events and keep
    a reference to the scanner."""
    scanner.observe(scanner.ALL_EVENTS,
                    self.absorb_args(self.modules.restore))
    if self.clear:
        scanner.observe(scanner.ALL_EVENTS,
                        self.absorb_args(self.clear_on_run))
    scanner.observe(scanner.ALL_EVENTS, self.absorb_args(self._run))
    if self.debug:
        # Echo raw filesystem events when debugging.
        scanner.observe('created', echo("callback - created %(file)s"))
        scanner.observe('modified', echo("callback - changed %(file)s"))
        scanner.observe('deleted', echo("callback - deleted %(file)s"))
    self._scanners.append(scanner)
Hooks into multiple events of a scanner .
60,088
def clear_on_run(self, prefix="Running Tests:"):
    """Clear the console (cls on Windows, clear elsewhere), then print an
    optional heading."""
    command = 'cls' if platform.system() == 'Windows' else 'clear'
    os.system(command)
    if prefix:
        print(prefix)
Clears console before running the tests .
60,089
def run(self):
    """Run the nose test framework with the configured arguments.

    Returns True when the tests pass, False otherwise; re-raises
    ImportError (after printing a hint) when nose is not installed.
    """
    try:
        import nose
    except ImportError:
        print()
        print("*** Nose library missing. Please install it. ***")
        print()
        raise
    return nose.run(argv=[sys.argv[0]] + list(self.test_args))
Runs the unit test framework . Can be overridden to run anything . Returns True on passing and False on failure .
60,090
def run(self):
    """Run the scent file's runners, falling back to nose when no scent
    file (or no runners) is available.

    Returns the chosen runner's success flag.
    """
    # Fix: removed an unreachable `return True` that followed this
    # if/else — both branches already return.
    if not self.scent or len(self.scent.runners) == 0:
        print("Did not find 'scent.py', running nose:")
        return super(ScentSniffer, self).run()
    print("Using scent:")
    arguments = [sys.argv[0]] + list(self.test_args)
    return self.scent.run(arguments)
Runs the CWD s scent file .
60,091
def copy(self):
    """Return a duplicate of this object with an independently copied
    scope tuple."""
    duplicate = self.dup()
    duplicate._scopes = copy.copy(self._scopes)
    return duplicate
Return a copy of this object .
60,092
def bind(self, *args, **kw):
    """Return a copy of this object with the given environment bindings
    prepended (innermost first) to its scope chain."""
    bound = self.copy()
    new_scopes = Object.translate_to_scopes(*args, **kw)
    bound._scopes = tuple(reversed(new_scopes)) + bound._scopes
    return bound
Bind environment variables into this object s scope .
60,093
def check(self):
    """Type-check this object, interpolating its template first.

    Returns a failed TypeCheck when interpolation itself fails.
    """
    try:
        interpolated, _unbound = self.interpolate()
    except (Object.CoercionError, MustacheParser.Uninterpolatable) as e:
        return TypeCheck(False, "Unable to interpolate: %s" % e)
    return self.checker(interpolated)
Type check this object .
60,094
def restore(self):
    """Unload every module that was not loaded when save_modules ran.

    Mutates the tracked sys.modules mapping in place so stale modules are
    re-imported fresh on the next run.
    """
    # Fix: the original named this local `sys`, shadowing the sys module.
    current_modules = set(self._sys_modules.keys())
    for mod_name in current_modules.difference(self._saved_modules):
        del self._sys_modules[mod_name]
Unloads all modules that weren t loaded when save_modules was called .
60,095
def join(cls, splits, *namables):
    """Interpolate a sequence of string fragments and Ref objects.

    Each Ref is resolved against the supplied namables in order; the
    first namable that can find it wins.  Unresolvable Refs are kept
    as-is in the output and also collected separately.

    Returns
    -------
    tuple
        (joined_string, list_of_unbound_refs)
    """
    isplits = []
    unbound = []
    for ref in splits:
        if isinstance(ref, Ref):
            resolved = False
            for namable in namables:
                try:
                    value = namable.find(ref)
                    resolved = True
                    break
                except Namable.Error:
                    # This namable cannot resolve the ref; try the next.
                    continue
            if resolved:
                isplits.append(value)
            else:
                isplits.append(ref)
                unbound.append(ref)
        else:
            isplits.append(ref)
    # Stringify with unicode on Python 2 to avoid mangling non-ASCII
    # values; str is correct on Python 3.
    return (''.join(map(str if Compatibility.PY3 else unicode, isplits)),
            unbound)
Interpolate strings .
60,096
def outitem(title, elems, indent=4):
    """Print a titled list of key/value pairs with aligned values.

    Keys are padded so all values start in the same column; a trailing
    blank line is emitted after the list.
    """
    out(title)
    # Width of the longest key plus its trailing colon.
    pad = max(len(key) for key, _ in elems) + 1
    for key, val in elems:
        out('%s%s %s' % (indent * ' ', ('%s:' % key).ljust(pad), val))
    out()
Output formatted as list item .
60,097
def profile_dir(name):
    """Resolve a Firefox profile name (or direct path) to its directory.

    A `name` that is an existing filesystem path is returned directly.
    Otherwise the named profile — or the default profile when `name` is
    falsy — is looked up in the profiles registry.

    Raises
    ------
    ProfileNotFoundError
        If no matching profile exists.
    """
    if name:
        candidate = Path(name)
        if candidate.exists():
            return candidate
    profiles = list(read_profiles())
    if name:
        matches = (p for p in profiles if p.name == name)
    else:
        matches = (p for p in profiles if p.default)
    try:
        profile = next(matches)
    except StopIteration:
        raise ProfileNotFoundError(name)
    return profile.path
Return path to FF profile for a given profile name or path .
60,098
def formatter(name, default=False):
    """Decorator factory: tag a function as an output formatter.

    The wrapped function is returned unchanged, with an ``_output_format``
    attribute carrying the formatter's name and default flag.
    """
    def decorator(func):
        func._output_format = {'name': name, 'default': default}
        return func
    return decorator
Decorate a Feature method to register it as an output formatter .
60,099
def load_sqlite(self, db, query=None, table=None, cls=None,
                column_map=None):
    """Load rows from one of the profile's sqlite databases, yielding
    them as instances of `cls`.

    Parameters
    ----------
    db : str
        Database file name, resolved inside the profile directory.
    query : str, optional
        SQL to run; when omitted, a SELECT over `table` is generated
        from the attrs fields of `cls` (mapped back through
        `column_map`).
    table : str, optional
        Table name, used only when `query` is not given.
    cls : type
        attrs-decorated class instantiated once per row.
    column_map : dict, optional
        Mapping of database column name -> attribute name on `cls`.

    Yields
    ------
    cls
        One instance per result row.

    NOTE(review): this is a generator, so the connection is only closed
    once it is fully exhausted — abandoning it early leaks the
    connection; confirm callers always drain it.
    """
    if column_map is None:
        column_map = {}
    db_path = self.profile_path(db, must_exist=True)

    def obj_factory(cursor, row):
        # Build a kwargs dict for cls, renaming columns via column_map.
        dict_ = {}
        for idx, col in enumerate(cursor.description):
            new_name = column_map.get(col[0], col[0])
            dict_[new_name] = row[idx]
        return cls(**dict_)

    con = sqlite3.connect(str(db_path))
    con.row_factory = obj_factory
    cursor = con.cursor()
    if not query:
        # Derive the column list from cls, translating attribute names
        # back to their database column names.
        columns = [f.name for f in attr.fields(cls)]
        for k, v in column_map.items():
            columns[columns.index(v)] = k
        query = 'SELECT %s FROM %s' % (','.join(columns), table)
    cursor.execute(query)
    while True:
        item = cursor.fetchone()
        if item is None:
            break
        yield item
    con.close()
Load data from sqlite db and return as list of specified objects .