idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
14,200
def add_0x(string):
    """Return *string* prefixed with '0x', decoding bytes as UTF-8 first."""
    text = string.decode('utf-8') if isinstance(string, bytes) else string
    return '0x' + str(text)
Add the '0x' prefix to the start of the string.
14,201
def guess_depth(packages):
    """Guess the optimal depth to use for the given list of packages.

    A single package gets its dot-count plus two; otherwise use the
    minimum dot-count across packages plus one.
    """
    dot_counts = [pkg.count('.') for pkg in packages]
    if len(dot_counts) == 1:
        return dot_counts[0] + 2
    return min(dot_counts) + 1
Guess the optimal depth to use for the given list of arguments .
14,202
def print(self, format=TEXT, output=sys.stdout, **kwargs):
    """Print the object to *output* (standard output by default).

    *format* selects the renderer (TEXT, CSV or JSON); None falls back
    to TEXT. Unknown formats print nothing.
    """
    # NOTE(review): `output=sys.stdout` is bound at definition time, so a
    # later reassignment of sys.stdout is not picked up by the default.
    fmt = TEXT if format is None else format
    if fmt == TEXT:
        rendered = self._to_text(**kwargs)
    elif fmt == CSV:
        rendered = self._to_csv(**kwargs)
    elif fmt == JSON:
        rendered = self._to_json(**kwargs)
    else:
        return
    # `print` here resolves to the builtin, not this method.
    print(rendered, file=output)
Print the object in a file or on standard output by default .
14,203
def import_training_data(
        self,
        positive_corpus_file=os.path.join(os.path.dirname(__file__), "positive.txt"),
        negative_corpus_file=os.path.join(os.path.dirname(__file__), "negative.txt")):
    """Load the positive and negative corpus files into self.training_data.

    Each line of the positive corpus becomes ``(line, True)`` and each
    line of the negative corpus becomes ``(line, False)``; the combined
    list is stored on ``self.training_data``.
    """
    # Fix: the original left both file handles open; use context managers
    # so they are closed even if reading raises.
    with open(positive_corpus_file) as positive_corpus:
        positive_training_data = [(line, True) for line in positive_corpus]
    with open(negative_corpus_file) as negative_corpus:
        negative_training_data = [(line, False) for line in negative_corpus]
    self.training_data = positive_training_data + negative_training_data
This method imports the positive and negative training data from the two corpus files and creates the training data list .
14,204
def train(self):
    """Build the Naive Bayes classifier from self.training_data.

    Imports the training data first if it has not been loaded yet.
    """
    if not self.training_data:
        self.import_training_data()
    labelled_features = [
        (self.extract_features(line), label)
        for line, label in self.training_data
    ]
    self.classifier = nltk.NaiveBayesClassifier.train(labelled_features)
This method generates the classifier . This method assumes that the training data has been loaded
14,205
def extract_features(self, phrase):
    """Extract unigram presence features from *phrase*.

    Returns a dict mapping 'contains(<word>)' to True for every token of
    the phrase. The original computed ``word in words`` for the value,
    which is always True because *word* is drawn from *words* — the
    constant makes that explicit.
    """
    words = nltk.word_tokenize(phrase)
    return {'contains(%s)' % word: True for word in words}
This function will extract features from the phrase being used . Currently the feature we are extracting are unigrams of the text corpus .
14,206
def is_twss(self, phrase):
    """Return the classifier's label for *phrase* (entendre or not)."""
    return self.classifier.classify(self.extract_features(phrase))
The magic function - this accepts a phrase and tells you if it classifies as an entendre
14,207
def save(self, filename='classifier.dump'):
    """Pickle self.classifier into *filename*.

    Fix: pickle writes bytes, so the file must be opened in binary mode
    ('wb'); the original's text mode 'w+' raises TypeError on Python 3.
    The context manager guarantees the handle is closed.
    """
    with open(filename, 'wb') as ofile:
        pickle.dump(self.classifier, ofile)
Pickles the classifier and dumps it into a file
14,208
def load(self, filename='classifier.dump'):
    """Unpickle the classifier from *filename* into self.classifier.

    Fix: pickled data is binary, so open with 'rb' (the original's text
    mode 'r+' fails on Python 3). The context manager closes the handle.
    """
    with open(filename, 'rb') as ifile:
        self.classifier = pickle.load(ifile)
Unpickles the classifier used
14,209
def pw(ctx, key_pattern, user_pattern, mode, strict_flag, user_flag, file,
       edit_subcommand, gen_subcommand):
    """Search for USER and KEY in a GPG-encrypted password file."""

    # Exit cleanly on Ctrl-C instead of dumping a traceback.
    def handle_sigint(*_):
        click.echo()
        ctx.exit(1)

    signal.signal(signal.SIGINT, handle_sigint)

    # Subcommand dispatch: generate a password, or edit the store.
    if gen_subcommand:
        length = int(key_pattern) if key_pattern else None
        generate_password(mode, length)
        return
    elif edit_subcommand:
        launch_editor(ctx, file)
        return

    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)
    store = Store.load(file)

    # "user@key" syntax: split the user part off the key pattern.
    if not user_pattern:
        user_pattern, _, key_pattern = key_pattern.rpartition("@")

    results = list(store.search(key_pattern, user_pattern))

    if strict_flag and len(results) != 1:
        click.echo("error: multiple or no records found (but using --strict flag)",
                   err=True)
        ctx.exit(2)

    if mode == Mode.RAW:
        for entry in results:
            click.echo(entry.user if user_flag else entry.password)
        return

    for idx, entry in enumerate(results):
        line = highlight_match(key_pattern, entry.key)
        if entry.user:
            line += ": " + highlight_match(user_pattern, entry.user)
        if mode == Mode.ECHO and not user_flag:
            line += " | " + style_password(entry.password)
        elif mode == Mode.COPY and idx == 0:
            # Copy only the first match; pyperclip is an optional dependency.
            try:
                import pyperclip
                pyperclip.copy(entry.user if user_flag else entry.password)
                result = style_success(
                    "*** %s COPIED TO CLIPBOARD ***"
                    % ("USERNAME" if user_flag else "PASSWORD"))
            except ImportError:
                result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
            line += " | " + result
        if entry.notes:
            if idx == 0:
                # First match: show the notes in full, indented.
                line += "\n"
                line += "\n".join(" " + line for line in entry.notes.splitlines())
            else:
                # Other matches: show only the first note line.
                lines = entry.notes.splitlines()
                line += " | " + lines[0]
                if len(lines) > 1:
                    line += " (...)"
        click.echo(line)
Search for USER and KEY in GPG - encrypted password file .
14,210
def launch_editor(ctx, file):
    """Launch an external editor on the (possibly GPG-encrypted) password store."""
    editor = os.environ.get("PW_EDITOR")
    if not editor:
        click.echo("error: no editor set in PW_EDITOR environment variables")
        ctx.exit(1)
    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)

    is_encrypted = _gpg.is_encrypted(file)
    if is_encrypted:
        original = _gpg.decrypt(file)
    else:
        original = open(file, "rb").read()

    # For an encrypted store we must know whom to re-encrypt for before editing.
    if is_encrypted:
        recipient = os.environ.get("PW_GPG_RECIPIENT")
        if not recipient:
            click.echo("error: no recipient set in PW_GPG_RECIPIENT environment variables")
            ctx.exit(1)

    ext = _gpg.unencrypted_ext(file)
    modified = click.edit(original.decode("utf-8"), editor=editor,
                          require_save=True, extension=ext)
    if modified is None:
        click.echo("not modified")
        return
    modified = modified.encode("utf-8")

    if not is_encrypted:
        with open(file, "wb") as fp:
            fp.write(modified)
        return
    _gpg.encrypt(recipient=recipient, dest_path=file, content=modified)
launch editor with decrypted password database
14,211
def generate_password(mode, length):
    """Generate a random password and echo/copy it according to *mode*."""
    rng = random.SystemRandom()
    length = length or RANDOM_PASSWORD_DEFAULT_LENGTH
    password = "".join(rng.choice(RANDOM_PASSWORD_ALPHABET) for _ in range(length))
    if mode == Mode.ECHO:
        click.echo(style_password(password))
    elif mode == Mode.COPY:
        # pyperclip is optional; fall back to an error banner if missing.
        try:
            import pyperclip
            pyperclip.copy(password)
            result = style_success("*** PASSWORD COPIED TO CLIPBOARD ***")
        except ImportError:
            result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
        click.echo(result)
    elif mode == Mode.RAW:
        click.echo(password)
generate a random password
14,212
def _load_data(self):
    """Query the GM API for currently running levels; return parsed JSON or None."""
    url = urljoin(self.base_url, 'levels')
    resp = requests.get(url, headers=self.headers)
    # An empty body means nothing to parse; resp.json() would raise on it.
    return resp.json() if resp.content else None
Internal method for querying the GM api for currently running levels and storing that state .
14,213
def dump2sqlite(records, output_file):
    """Dump test results into a new SQLite database at *output_file*.

    Creates a `testcases` table (one TEXT column per result key plus a
    `sqltime` timestamp) and, when a testrun id is present, a one-row
    `testrun` table.
    """
    results_keys = list(records.results[0].keys())
    # Pad every row with empty strings for required keys the results lack.
    pad_data = []
    for key in REQUIRED_KEYS:
        if key not in results_keys:
            results_keys.append(key)
            pad_data.append("")

    conn = sqlite3.connect(os.path.expanduser(output_file),
                           detect_types=sqlite3.PARSE_DECLTYPES)

    # Every row gets the same import timestamp appended as `sqltime`.
    pad_data.append(datetime.datetime.utcnow())
    # NOTE(review): assumes each result dict's value order matches
    # results_keys — confirm the rows share one key order.
    to_db = [list(row.values()) + pad_data for row in records.results]

    cur = conn.cursor()
    cur.execute("CREATE TABLE testcases ({},sqltime TIMESTAMP)".format(
        ",".join("{} TEXT".format(key) for key in results_keys)))
    cur.executemany("INSERT INTO testcases VALUES ({},?)".format(
        ",".join(["?"] * len(results_keys))), to_db)
    if records.testrun:
        cur.execute("CREATE TABLE testrun (testrun TEXT)")
        cur.execute("INSERT INTO testrun VALUES (?)", (records.testrun,))
    conn.commit()
    conn.close()
    logger.info("Data written to '%s'", output_file)
Dumps tests results to database .
14,214
# fit(sim_mat, D_len, cidx): medoid-style clustering over a sparse
# similarity structure (dict of dicts; missing entries read as 0 via .get).
# Runs 3 refinement passes: assign each row to its "closest" current
# center (np.argmin over similarities to cidx), then pick a new medoid
# per cluster minimising the summed similarity, and keep the assignment
# with the lowest total energy. Energies are <= 0 (min_value starts at 0
# and only decreases), so the first pass always sets inds_min/cidx_min.
# NOTE(review): `np.where(inds == i)` works because `i` is a numpy scalar
# from np.unique, which broadcasts the comparison over the list `inds`;
# with a plain int the comparison would be a single False — confirm.
def fit ( sim_mat , D_len , cidx ) : min_energy = np . inf for j in range ( 3 ) : inds = [ np . argmin ( [ sim_mat [ idy ] . get ( idx , 0 ) for idx in cidx ] ) for idy in range ( D_len ) if idy in sim_mat ] cidx = [ ] energy = 0 for i in np . unique ( inds ) : indsi = np . where ( inds == i ) [ 0 ] minind , min_value = 0 , 0 for index , idy in enumerate ( indsi ) : if idy in sim_mat : value = 0 for idx in indsi : value += sim_mat [ idy ] . get ( idx , 0 ) if value < min_value : minind , min_value = index , value energy += min_value cidx . append ( indsi [ minind ] ) if energy < min_energy : min_energy , inds_min , cidx_min = energy , inds , cidx return inds_min , cidx_min
Algorithm maximizes energy between clusters which is distinction in this algorithm . Distance matrix contains mostly 0 which are overlooked due to search of maximal distances . Algorithm does not try to retain k clusters .
14,215
def _calc_theta ( self ) : if self . decaying_prior : n_sampled = np . clip ( self . alpha_ + self . beta_ , 1 , np . inf ) prior_weight = 1 / n_sampled alpha = self . alpha_ + prior_weight * self . alpha_0 beta = self . beta_ + prior_weight * self . beta_0 else : alpha = self . alpha_ + self . alpha_0 beta = self . beta_ + self . beta_0 self . theta_ = alpha / ( alpha + beta ) if self . store_wp : alpha = self . alpha_ + self . _wp_weight * self . alpha_0 beta = self . beta_ + self . _wp_weight * self . beta_0 self . theta_wp_ = alpha / ( alpha + beta )
Calculate an estimate of theta
14,216
def update(self, ell, k):
    """Fold sampled label *ell* for stratum *k* into the posterior, then refresh estimates."""
    # ell is 0/1: success increments alpha, failure increments beta.
    self.alpha_[k] += ell
    self.beta_[k] += 1 - ell
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
Update the posterior and estimates after a label is sampled
14,217
def reset(self):
    """Reset posterior counts and estimates to their initial state."""
    size = self._size
    self.alpha_ = np.zeros(size, dtype=int)
    self.beta_ = np.zeros(size, dtype=int)
    self.theta_ = np.empty(size, dtype=float)
    if self.store_variance:
        self.var_theta_ = np.empty(size, dtype=float)
    if self.store_wp:
        self.theta_wp_ = np.empty(size, dtype=float)
    # Recompute the estimates from the (now zeroed) counts.
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
Reset the instance to its initial state
14,218
def _calc_BB_prior ( self , theta_0 ) : prior_strength = self . prior_strength n_strata = len ( theta_0 ) weighted_strength = prior_strength / n_strata alpha_0 = theta_0 * weighted_strength beta_0 = ( 1 - theta_0 ) * weighted_strength return alpha_0 , beta_0
Generate a prior for the BB model
14,219
# authenticate(self, request, remote_user=None): trusted remote-user
# authentication backend (Django-style). The username is considered
# trusted; the method returns the matching User (creating it when
# create_unknown_user is set), or None.
# NOTE(review): when remote_user is falsy, `request` itself is used as
# the remote user — callers apparently pass a session dict here, since
# the dict branch reads 'full_name'/'email'/first/last keys as defaults.
# On DatabaseError (user table missing) it falls back to the in-memory
# self.users cache, and as a last resort fabricates a User with a random
# 32-bit id and caches it.
def authenticate ( self , request , remote_user = None ) : if not remote_user : remote_user = request if not remote_user : return None user = None username = self . clean_username ( remote_user ) try : if self . create_unknown_user : defaults = { } if isinstance ( request , dict ) : session_data = request if 'full_name' in session_data : first_name , _ , last_name = full_name_natural_split ( session_data [ 'full_name' ] ) defaults . update ( { 'first_name' : first_name , 'last_name' : last_name } ) for key in ( 'email' , 'first_name' , 'last_name' ) : if key in session_data : defaults . update ( { key : session_data [ key ] } ) user , created = UserModel . _default_manager . get_or_create ( ** { UserModel . USERNAME_FIELD : username , 'defaults' : defaults , } ) if created : LOGGER . debug ( "created user '%s' in database." , username ) user = self . configure_user ( user ) else : try : user = UserModel . _default_manager . get_by_natural_key ( username ) except UserModel . DoesNotExist : pass except DatabaseError as err : LOGGER . debug ( "User table missing from database? (err:%s)" , err ) for user in six . itervalues ( self . users ) : LOGGER . debug ( "match %s with User(id=%d, username=%s)" , username , user . id , user . username ) if user . username == username : LOGGER . debug ( "found %d %s" , user . id , user . username ) return user user = UserModel ( id = random . randint ( 1 , ( 1 << 32 ) - 1 ) , username = username ) LOGGER . debug ( "add User(id=%d, username=%s) to cache." , user . id , user . username ) self . users [ user . id ] = user return user if self . user_can_authenticate ( user ) else None
The username passed here is considered trusted . This method simply returns the User object with the given username .
14,220
def numval(token):
    """Return token.value converted to int/float for numeric tokens, else as-is."""
    kind = token.type
    if kind == 'INTEGER':
        return int(token.value)
    if kind == 'FLOAT':
        return float(token.value)
    return token.value
Return the numerical value of token . value if it is a number
14,221
def tokenize(code):
    """Yield Token(kind, value, line, column) for the string *code*."""
    pattern = re.compile(
        '|'.join('(?P<{}>{})'.format(*pair) for pair in _tokens),
        re.IGNORECASE | re.M)
    line_num = 1
    line_start = 0
    for mo in pattern.finditer(code):
        kind = mo.lastgroup
        value = mo.group(kind)
        if kind == 'NEWLINE':
            # Track line starts so columns can be computed per line.
            line_start = mo.end()
            line_num += 1
        elif kind == 'SKIP' or value == '':
            continue
        else:
            yield Token(kind, value, line_num, mo.start() - line_start)
Tokenize the string code
14,222
def parse(tokens):
    """Parse the token stream into an ordered, hierarchical dict of blocks."""
    d = collections.OrderedDict()
    prev_line = 0
    blockname = None
    blockline = None
    for token in tokens:
        if token.type == 'COMMENT':
            continue
        elif token.type == 'BLOCK':
            # Start of a new block header line.
            blockline = token.line
            blocktype = token.value.upper()
            blockname = None
            if blocktype not in d:
                d[blocktype] = collections.OrderedDict()
        elif token.line == blockline:
            # Still on the header line: first token names the block,
            # later tokens are auxiliary 'info' values.
            if blockname is None:
                blockname = token.value
                d[blocktype][blockname] = collections.defaultdict(list)
            else:
                d[blocktype][blockname]['info'].append(numval(token))
        elif token.line != prev_line:
            # New line starts a new row of values.
            if blockname is None:
                raise ParseError("Found value outside block!")
            d[blocktype][blockname]['values'].append([numval(token)])
        else:
            # Same line: extend the current row.
            if blockname is None:
                raise ParseError("Found value outside block!")
            d[blocktype][blockname]['values'][-1].append(numval(token))
        prev_line = token.line
    return d
Parse the token list into a hierarchical data structure
14,223
def load(stream):
    """Parse an LHA document from a string or a file-like object."""
    string = stream if isinstance(stream, str) else stream.read()
    return parse(tokenize(string))
Parse the LHA document and produce the corresponding Python object . Accepts a string or a file - like object .
14,224
def get_checksum(self):
    """Return a checksum of the parsed IDL that ignores comments and ordering.

    Element checksums are sorted so reordering declarations does not
    change the result, while changes to types, parameter order or enum
    values do.
    """
    sums = [s for s in (elem_checksum(elem) for elem in self.parsed) if s]
    sums.sort()
    # Fix: hashlib.md5 requires bytes; json.dumps returns str on Python 3,
    # so the serialized list must be encoded before hashing.
    return md5(json.dumps(sums).encode("utf-8"))
Returns a checksum based on the IDL that ignores comments and ordering but detects changes to types parameter order and enum values .
14,225
def _update_estimate_and_sampler ( self , ell , ell_hat , weight , extra_info , ** kwargs ) : stratum_idx = extra_info [ 'stratum' ] self . _BB_TP . update ( ell * ell_hat , stratum_idx ) self . _BB_PP . update ( ell_hat , stratum_idx ) self . _BB_P . update ( ell , stratum_idx ) self . _update_cov_model ( strata_to_update = [ stratum_idx ] ) self . _update_estimates ( )
Update the BB models and the estimates
14,226
def get_path(num):
    """Build a bucketed directory path for a work-item number.

    e.g. 12345 -> "10000-19999/12000-12999/12300-12399".
    Numbers with fewer than three digits yield an empty path.
    """
    num = int(num)
    digits = len(str(num))
    segments = []
    for i in range(digits - 2):
        divisor = 10 ** (digits - i - 1)
        low = (num // divisor) * divisor
        high = ((num // divisor) + 1) * divisor - 1
        segments.append("{}-{}".format(low, high))
    return "/".join(segments)
Gets a path from the workitem number .
14,227
def get_tree(self, work_item_id):
    """Return the parsed XML tree of a work item, or None (caching failures)."""
    try:
        __, tcid = work_item_id.split("-")
    except ValueError:
        logger.warning("Couldn't load workitem %s, bad format", work_item_id)
        self._cache[work_item_id] = InvalidObject()
        return None

    path = os.path.join(self.test_case_dir, self.get_path(tcid),
                        work_item_id, "workitem.xml")
    try:
        return etree.parse(path)
    except Exception:
        logger.warning("Couldn't load workitem %s", work_item_id)
        self._cache[work_item_id] = InvalidObject()
        return None
Gets XML tree of the workitem .
14,228
def get_all_items(self):
    """Walk the repository and yield cached work items that have a title."""
    for root, __, file_names in os.walk(self.test_case_dir):
        if "workitem.xml" not in file_names:
            continue
        case_id = os.path.split(root)[-1]
        # Skip empty or wildcard-looking directory names.
        if not case_id or "*" in case_id:
            continue
        item_cache = self[case_id]
        if not item_cache:
            continue
        if not item_cache.get("title"):
            continue
        yield item_cache
Walks the repo and returns work items .
14,229
def _remove_files ( self , directory , pattern ) : for root , dirnames , file_names in os . walk ( directory ) : for file_name in fnmatch . filter ( file_names , pattern ) : os . remove ( os . path . join ( root , file_name ) )
Removes all files matching the search path
14,230
def post(self, request, *args, **kwargs):
    """Validate an incoming event and hand it to process_event."""
    serializer = EventSerializer(data=request.data)
    if not serializer.is_valid():
        return Response({"accepted": False, "reason": serializer.errors}, status=400)
    data = serializer.validated_data
    # Map external event names onto internal event types; unknown -> None.
    event_type = {
        "ack": "ack",
        "nack": "nack",
        "delivery_report": "delivery_succeeded",
    }.get(data["event_type"])
    accepted, reason = process_event(
        data["user_message_id"], event_type, data["nack_reason"], data["timestamp"])
    return Response({"accepted": accepted, "reason": reason},
                    status=200 if accepted else 400)
Checks for expected event types before continuing.
14,231
def create(self, group, grouptype):
    """Create an LDAP group, reporting known LDAP failures on stderr."""
    try:
        self.client.add(self.__distinguished_name(group),
                        API.__object_class(),
                        self.__ldap_attr(group, grouptype))
    except ldap3.core.exceptions.LDAPNoSuchObjectResult:
        print("Error creating LDAP Group.\nRequest: ",
              self.__ldap_attr(group, grouptype),
              "\nDistinguished Name: ",
              self.__distinguished_name(group),
              file=sys.stderr)
    except ldap3.core.exceptions.LDAPEntryAlreadyExistsResult:
        print("Error creating LDAP Group. Group already exists. \nRequest: ",
              self.__ldap_attr(group, grouptype),
              "\nDistinguished Name: ",
              self.__distinguished_name(group),
              file=sys.stderr)
Create an LDAP Group .
14,232
def add_user(self, group, username):
    """Add *username* to the given LDAP group (the group must resolve uniquely)."""
    try:
        # Raises if the group is missing or ambiguous.
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        # Re-raise without chaining the original traceback context.
        raise err from None
    operation = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
    self.client.modify(self.__distinguished_name(group), operation)
Add a user to the specified LDAP group .
14,233
def remove_user(self, group, username):
    """Remove *username* from the given LDAP group (the group must resolve uniquely)."""
    try:
        # Raises if the group is missing or ambiguous.
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        # Re-raise without chaining the original traceback context.
        raise err from None
    operation = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
    self.client.modify(self.__distinguished_name(group), operation)
Remove a user from the specified LDAP group .
14,234
def lookup_id(self, group):
    """Return the gidNumber of *group*; raise if zero or multiple matches."""
    # Renamed the local so it no longer shadows the builtin `filter`.
    search_filter = ["(cn={})".format(group), "(objectclass=posixGroup)"]
    results = self.client.search(search_filter, ['gidNumber'])
    if len(results) < 1:
        raise ldap_tools.exceptions.NoGroupsFound('No Groups Returned by LDAP')
    if len(results) > 1:
        raise ldap_tools.exceptions.TooManyResults(
            'Multiple groups found. Please narrow your search.')
    return results[0].gidNumber.value
Lookup GID for the given group .
14,235
def create(config, group, type):
    """CLI: create an LDAP group of the given type ('user' or 'service')."""
    if type not in ('user', 'service'):
        raise click.BadOptionUsage("--grouptype must be 'user' or 'service'")
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    group_api.create(group, type)
Create an LDAP group .
14,236
def delete(config, group, force):
    """CLI: delete an LDAP group, asking for confirmation unless forced."""
    if not force and not click.confirm(
            'Confirm that you want to delete group {}'.format(group)):
        sys.exit("Deletion of {} aborted".format(group))
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    group_api.delete(group)
Delete an LDAP group .
14,237
def add_user(config, group, username):
    """CLI: add *username* to *group*, printing a friendly message on failure."""
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.add_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:
        print("Query for group ({}) returned multiple results.".format(group))
    # NOTE(review): `ldap3.TYPE_OR_VALUE_EXISTS` looks like a result-code
    # constant rather than an exception class (ldap3 exceptions live in
    # ldap3.core.exceptions) — confirm this except clause can ever match.
    except ldap3.TYPE_OR_VALUE_EXISTS:
        print("{} already exists in {}".format(username, group))
Add specified user to specified group .
14,238
def remove_user(config, group, username):
    """CLI: remove *username* from *group*, printing a friendly message on failure."""
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.remove_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:
        print("Query for group ({}) returned multiple results.".format(group))
    # NOTE(review): `ldap3.NO_SUCH_ATTRIBUTE` looks like a result-code
    # constant rather than an exception class — confirm against the ldap3 API.
    except ldap3.NO_SUCH_ATTRIBUTE:
        print("{} does not exist in {}".format(username, group))
Remove specified user from specified group .
14,239
def index(config):
    """CLI: display group info in raw format."""
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    print(group_api.index())
Display group info in raw format .
14,240
def _get_importer(input_file):
    """Pick a results importer based on the input file's name and extension."""
    __, ext = os.path.splitext(input_file)
    ext = ext.lower()
    # Importers are imported lazily so only the needed backend is loaded.
    if "ostriz" in input_file:
        from dump2polarion.results import ostriztools
        return ostriztools.import_ostriz
    if ext == ".xml":
        from dump2polarion.results import junittools
        return junittools.import_junit
    if ext == ".csv":
        from dump2polarion.results import csvtools
        return csvtools.import_csv
    if ext in dbtools.SQLITE_EXT:
        return dbtools.import_sqlite
    if ext == ".json":
        from dump2polarion.results import jsontools
        return jsontools.import_json
    raise Dump2PolarionException("Cannot recognize type of input data, add file extension.")
Selects importer based on input file type .
14,241
def parse_db_url(db_url):
    """Parse a database URL into a dict of connection properties."""
    parts = urlparse(db_url)
    return {
        "database": parts.path[1:],  # strip the leading '/'
        "user": parts.username,
        "password": parts.password,
        "host": parts.hostname,
        "port": parts.port,
    }
provided a db url return a dict with connection properties
14,242
def bounds_handler(ctx, param, value):
    """click callback: accept bounds from a --like context or parse 'w,s,e,n' text."""
    retval = from_like_context(ctx, param, value)
    if retval is not None or value is None:
        return retval
    try:
        cleaned = value.strip(", []")
        parsed = tuple(float(x) for x in re.split(r"[,\s]+", cleaned))
        # A bounding box is exactly four numbers (AssertionError is
        # caught below and reported as a bad parameter).
        assert len(parsed) == 4
        return parsed
    except Exception:
        raise click.BadParameter(
            "{0!r} is not a valid bounding box representation".format(value))
Handle different forms of bounds .
14,243
def info(dataset, indent, meta_member):
    """Print basic metadata about a DataBC WFS layer as JSON."""
    table = bcdata.validate_name(dataset)
    wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    info = {
        "name": table,
        "count": bcdata.get_count(table),
        "schema": wfs.get_schema("pub:" + table),
    }
    if meta_member:
        # A single requested member is echoed raw, not as JSON.
        click.echo(info[meta_member])
    else:
        click.echo(json.dumps(info, indent=indent))
Print basic metadata about a DataBC WFS layer as JSON .
14,244
def dem(bounds, src_crs, dst_crs, out_file, resolution):
    """Dump the BC DEM for *bounds* to a TIFF (destination CRS defaults to EPSG:3005)."""
    dst_crs = dst_crs or "EPSG:3005"
    bcdata.get_dem(bounds, out_file=out_file, src_crs=src_crs,
                   dst_crs=dst_crs, resolution=resolution)
Dump BC DEM to TIFF
14,245
def dump(dataset, query, out_file, bounds):
    """Write DataBC features as a GeoJSON feature collection to a file or stdout."""
    table = bcdata.validate_name(dataset)
    data = bcdata.get_data(table, query=query, bounds=bounds)
    if out_file:
        # NOTE(review): the file path serializes `data.json()` while the
        # stdout path serializes `data` directly — confirm which is intended.
        with open(out_file, "w") as f:
            json.dump(data.json(), f)
    else:
        sink = click.get_text_stream("stdout")
        sink.write(json.dumps(data))
Write DataBC features to stdout as GeoJSON feature collection .
14,246
def cat(dataset, query, bounds, indent, compact, dst_crs, pagesize, sortby):
    """Stream DataBC features to stdout, one GeoJSON feature object per line."""
    dump_kwds = {"sort_keys": True}
    if indent:
        dump_kwds["indent"] = indent
    if compact:
        dump_kwds["separators"] = (",", ":")
    table = bcdata.validate_name(dataset)
    features = bcdata.get_features(table, query=query, bounds=bounds,
                                   sortby=sortby, crs=dst_crs)
    for feat in features:
        click.echo(json.dumps(feat, **dump_kwds))
Write DataBC features to stdout as GeoJSON feature objects .
14,247
# bc2pg: download a DataBC WFS layer into PostgreSQL by shelling out to
# ogr2ogr. Flow: resolve source schema/table names, create the target
# schema if missing, build paged WFS request URLs via bcdata.define_request,
# then (unless appending) run one ogr2ogr to create the table and fan the
# remaining paged requests out over a multiprocessing Pool with -append.
# Any exception is reported as "Data load failed" and converted to
# click.Abort().
# NOTE(review): the DB connection string is assembled from parse_db_url
# output; a db_url without host/user/password yields "None" placeholders —
# confirm callers always pass full URLs.
def bc2pg ( dataset , db_url , table , schema , query , append , pagesize , sortby , max_workers ) : src = bcdata . validate_name ( dataset ) src_schema , src_table = [ i . lower ( ) for i in src . split ( "." ) ] if not schema : schema = src_schema if not table : table = src_table conn = pgdata . connect ( db_url ) if schema not in conn . schemas : click . echo ( "Schema {} does not exist, creating it" . format ( schema ) ) conn . create_schema ( schema ) param_dicts = bcdata . define_request ( dataset , query = query , sortby = sortby , pagesize = pagesize ) try : payload = urlencode ( param_dicts [ 0 ] , doseq = True ) url = bcdata . WFS_URL + "?" + payload db = parse_db_url ( db_url ) db_string = "PG:host={h} user={u} dbname={db} password={pwd}" . format ( h = db [ "host" ] , u = db [ "user" ] , db = db [ "database" ] , pwd = db [ "password" ] ) if not append : command = [ "ogr2ogr" , "-lco" , "OVERWRITE=YES" , "-lco" , "SCHEMA={}" . format ( schema ) , "-lco" , "GEOMETRY_NAME=geom" , "-f" , "PostgreSQL" , db_string , "-t_srs" , "EPSG:3005" , "-nln" , table , url , ] click . echo ( " " . join ( command ) ) subprocess . run ( command ) if len ( param_dicts ) > 1 or append : if append : idx = 0 else : idx = 1 commands = [ ] for chunk , paramdict in enumerate ( param_dicts [ idx : ] ) : payload = urlencode ( paramdict , doseq = True ) url = bcdata . WFS_URL + "?" + payload command = [ "ogr2ogr" , "-update" , "-append" , "-f" , "PostgreSQL" , db_string + " active_schema=" + schema , "-t_srs" , "EPSG:3005" , "-nln" , table , url , ] commands . append ( command ) pool = Pool ( max_workers ) with click . progressbar ( pool . imap ( partial ( call ) , commands ) , length = len ( param_dicts ) ) as bar : for returncode in bar : if returncode != 0 : click . echo ( "Command failed: {}" . format ( returncode ) ) click . echo ( "Load of {} to {} in {} complete" . format ( src , schema + "." + table , db_url ) ) except Exception : click . 
echo ( "Data load failed" ) raise click . Abort ( )
Download a DataBC WFS layer to postgres - an ogr2ogr wrapper .
14,248
# __parseFormat: the actual subtitle parser. Feeds *content* line by line
# into format handler *fmt*, accumulating lines into `subSection` until the
# format reports a subtitle boundary, then asks fmt.createSubtitle(fps, ...)
# for a parsed subtitle and appends it to self._subtitles.
# Header-bearing formats get up to _maxHeaderLen lines to produce a header;
# headerless formats give up after _maxFmtSearch lines without a match.
# time_to is not required, since not all subtitle formats provide it.
# NOTE(review): a None subtitle for a non-blank section raises
# SubParsingError only when some subtitles were already parsed; otherwise
# the method silently returns (format mismatch assumed).
def __parseFormat ( self , fmt , content , fps = 25 ) : headerFound = False subSection = '' for lineNo , line in enumerate ( content ) : line = self . _initialLinePrepare ( line , lineNo ) if not fmt . WITH_HEADER and not self . _formatFound and lineNo > self . _maxFmtSearch : return subSection = '' . join ( [ subSection , line ] ) if fmt . WITH_HEADER and not headerFound : if lineNo > self . _maxHeaderLen : return headerFound = fmt . addHeaderInfo ( subSection , self . _subtitles . header ( ) ) if headerFound : self . _formatFound = True subSection = '' elif fmt . subtitleEnds ( line ) or ( lineNo + 1 ) == len ( content ) : subtitle = fmt . createSubtitle ( fps , subSection ) if subtitle is None : if subSection in ( '\n' , '\r\n' , '\r' ) : subSection = '' continue elif self . _subtitles . size ( ) > 0 : raise SubParsingError ( _ ( "Parsing error" ) , lineNo ) else : return if subtitle . start and subtitle . text : self . _formatFound = True try : self . _subtitles . append ( subtitle ) except SubException as msg : raise SubParsingError ( msg , lineNo ) elif subtitle . start and not subtitle . text : pass else : return subSection = ''
Actual parser . Please note that time_to is not required to process as not all subtitles provide it .
14,249
def get_args(args=None):
    """Parse dump2polarion command line arguments (from *args* or sys.argv)."""
    parser = argparse.ArgumentParser(description="dump2polarion")
    add = parser.add_argument
    add("-i", "--input_file", required=True,
        help="Path to CSV, SQLite or JUnit reports file or importers XML file")
    add("-o", "--output_file",
        help="Where to save the XML output file (default: not saved)")
    add("-t", "--testrun-id", help="Polarion test run id")
    add("-c", "--config-file", help="Path to config YAML")
    add("-n", "--no-submit", action="store_true",
        help="Don't submit results to Polarion")
    add("--user", help="Username to use to submit results to Polarion")
    add("--password", help="Password to use to submit results to Polarion")
    add("--polarion-url", help="Base Polarion URL")
    add("-f", "--force", action="store_true", help="Don't validate test run id")
    add("--dry-run", action="store_true", help="Dry run, don't update anything")
    add("--no-verify", action="store_true", help="Don't verify import success")
    add("--verify-timeout", type=int, default=300, metavar="SEC",
        help="How long to wait (in seconds) for verification of results submission"
             " (default: %(default)s)")
    add("--job-log",
        help="Where to save the log file produced by the Importer (default: not saved)")
    add("--log-level", help="Set logging to specified level")
    return parser.parse_args(args)
Get command line arguments .
14,250
def get_submit_args(args):
    """Collect the non-None arguments for submit_and_verify into a frozen Box."""
    candidates = {
        "testrun_id": args.testrun_id,
        "user": args.user,
        "password": args.password,
        "no_verify": args.no_verify,
        "verify_timeout": args.verify_timeout,
        "log_file": args.job_log,
        "dry_run": args.dry_run,
    }
    # Drop unset options so downstream defaults still apply.
    submit_args = {key: val for key, val in candidates.items() if val is not None}
    return Box(submit_args, frozen_box=True, default_box=True)
Gets arguments for the submit_and_verify method .
14,251
def process_args(args):
    """Normalize *args* (Namespace, Box-like, or dict) into a frozen Box."""
    if isinstance(args, argparse.Namespace):
        passed_args = vars(args)
    elif hasattr(args, "to_dict"):
        passed_args = args.to_dict()
    else:
        passed_args = args
    return Box(passed_args, frozen_box=True, default_box=True)
Processes passed arguments .
14,252
def submit_if_ready(args, submit_args, config):
    """Submit the input file directly when it is already importer-ready XML.

    Returns None when the file is not ready, 0 on success or
    nothing-to-do, and 2 on a failed submission.
    """
    __, ext = os.path.splitext(args.input_file)
    if ext.lower() != ".xml":
        return None
    # Peek at the head of the file to detect importer XML.
    with io.open(args.input_file, encoding="utf-8") as input_file:
        head = input_file.read(1024)
    if not any(tag in head for tag in ("<testsuites", "<testcases", "<requirements")):
        return None
    if args.no_submit:
        logger.info("Nothing to do")
        return 0
    response = dump2polarion.submit_and_verify(
        xml_file=args.input_file, config=config, **submit_args)
    return 0 if response else 2
Submits the input XML file if it is already in the expected format.
14,253
def dumper(args, config, transform_func=None):
    """Dumper main function.

    Imports test results, exports them to the Polarion importer XML format
    and (unless disabled) submits them to Polarion.

    :param args: parsed arguments (Namespace, Box or mapping)
    :param config: dump2polarion configuration mapping
    :param transform_func: optional per-record transformation callback
    :return: shell-style exit code — 0 success/nothing-to-do, 1 import or
        export failure, 2 submission failure
    """
    args = process_args(args)
    submit_args = get_submit_args(args)

    # If the input is already an importer XML file, submit it as-is and stop.
    submit_outcome = submit_if_ready(args, submit_args, config)
    if submit_outcome is not None:
        return submit_outcome

    # Remember when the import started so only records older than this moment
    # are later marked as exported in the SQLite database.
    import_time = datetime.datetime.utcnow()
    try:
        records = dump2polarion.import_results(args.input_file, older_than=import_time)
        testrun_id = get_testrun_id(args, config, records.testrun)
        exporter = dump2polarion.XunitExport(
            testrun_id, records, config, transform_func=transform_func
        )
        output = exporter.export()
    except NothingToDoException as info:
        logger.info(info)
        return 0
    except (EnvironmentError, Dump2PolarionException) as err:
        logger.fatal(err)
        return 1

    if args.output_file or args.no_submit:
        # Save the generated XML locally when requested or when not submitting.
        exporter.write_xml(output, args.output_file)

    if not args.no_submit:
        response = dump2polarion.submit_and_verify(output, config=config, **submit_args)

        # For SQLite input, mark the successfully submitted rows as exported.
        __, ext = os.path.splitext(args.input_file)
        if ext.lower() in dbtools.SQLITE_EXT and response:
            dbtools.mark_exported_sqlite(args.input_file, import_time)

        return 0 if response else 2

    return 0
Dumper main function .
14,254
def load_ldap_config(self):
    """Configure LDAP client settings from ``<config_dir>/ldap_info.yaml``.

    Populates ``host``, ``user_dn``, ``port``, ``basedn``, ``mail_domain``
    and ``service_ou`` on the instance.  Prints a message instead of raising
    when the config file cannot be opened.
    """
    try:
        with open('{}/ldap_info.yaml'.format(self.config_dir), 'r') as FILE:
            # safe_load avoids executing arbitrary YAML tags from the config
            # file (yaml.load without an explicit Loader is unsafe and
            # deprecated since PyYAML 5.1).
            config = yaml.safe_load(FILE)
        self.host = config['server']
        self.user_dn = config['user_dn']
        self.port = config['port']
        self.basedn = config['basedn']
        self.mail_domain = config['mail_domain']
        self.service_ou = config['service_ou']
    except OSError as err:
        print('{}: Config file ({}/ldap_info.yaml) not found'.format(
            type(err), self.config_dir))
Configure LDAP Client settings .
14,255
def load_ldap_password(self):
    """Read and base64-decode the LDAP bind password from ``<config_dir>/ldap.secret``."""
    secret_path = '{}/ldap.secret'.format(self.config_dir)
    with open(secret_path, 'r') as secret_file:
        encoded = secret_file.read()
    self.user_pw = base64.b64decode(encoded.encode())
Import LDAP password from file .
14,256
def connection(self):
    """Establish a lazy LDAP connection using the loaded settings."""
    server = ldap3.Server(self.host, port=self.port, get_info=ldap3.ALL)
    self.server = server
    self.conn = ldap3.Connection(
        server,
        user=self.user_dn,
        password=self.user_pw,
        auto_bind=True,
        lazy=True,
        receive_timeout=1,
    )
Establish LDAP connection .
14,257
def add(self, distinguished_name, object_class, attributes):
    """Add an object to LDAP under *distinguished_name*.

    Thin delegation to the underlying ldap3 connection.
    """
    connection = self.conn
    connection.add(distinguished_name, object_class, attributes)
Add object to LDAP .
14,258
def search(self, filter, attributes=None):
    """Search LDAP under the configured base DN.

    :param filter: list of filter components that are AND-ed together
        (``None`` matches every object)
    :param attributes: attributes to fetch (defaults to all)
    :return: the matching LDAP entries
    """
    attributes = ['*'] if attributes is None else attributes
    filter = ["(objectclass=*)"] if filter is None else filter
    combined = "(&{})".format(''.join(filter))
    self.conn.search(
        search_base=self.basedn,
        search_filter=combined,
        search_scope=ldap3.SUBTREE,
        attributes=attributes,
    )
    return self.conn.entries
Search LDAP for records .
14,259
def get_max_id(self, object_type, role):
    """Return the next free UID/GID number for *object_type* within *role*'s ID range.

    :raises ldap_tools.exceptions.InvalidResult: for an unknown object type
    """
    if object_type == 'user':
        objectclass, ldap_attr = 'posixAccount', 'uidNumber'
    elif object_type == 'group':
        objectclass, ldap_attr = 'posixGroup', 'gidNumber'
    else:
        raise ldap_tools.exceptions.InvalidResult('Unknown object type')

    minID, maxID = Client.__set_id_boundary(role)

    # Restrict the search to the role's ID window.
    filter = ["(objectclass={})".format(objectclass),
              "({}>={})".format(ldap_attr, minID)]
    if maxID is not None:
        filter.append("({}<={})".format(ldap_attr, maxID))

    id_list = self.search(filter, [ldap_attr])
    if id_list == []:
        # Nothing allocated yet: start at the bottom of the range.
        return minID
    if object_type == 'user':
        return max([entry.uidNumber.value for entry in id_list]) + 1
    elif object_type == 'group':
        return max([entry.gidNumber.value for entry in id_list]) + 1
    raise ldap_tools.exceptions.InvalidResult('Unknown object')
Get the highest used ID .
14,260
def get_dem(bounds, out_file="dem.tif", src_crs="EPSG:3005", dst_crs="EPSG:3005", resolution=25):
    """Get 25m DEM for provided bounds via a WCS request, write to GeoTIFF.

    :return: *out_file* on success
    :raises RuntimeError: on any non-200 HTTP response
    """
    payload = {
        "service": "WCS",
        "version": "1.0.0",
        "request": "GetCoverage",
        "coverage": "pub:bc_elevation_25m_bcalb",
        "Format": "GeoTIFF",
        "bbox": ",".join(str(b) for b in bounds),
        "CRS": src_crs,
        "RESPONSE_CRS": dst_crs,
        "resx": str(resolution),
        "resy": str(resolution),
    }
    r = requests.get(bcdata.WCS_URL, params=payload)
    if r.status_code != 200:
        raise RuntimeError(
            "WCS request failed with status code {}".format(str(r.status_code))
        )
    with open(out_file, "wb") as file:
        file.write(r.content)
    return out_file
Get 25m DEM for provided bounds write to GeoTIFF
14,261
def main():
    """Enter main function: register every sub-command and run the CLI."""
    subcommands = (CLI.version, UserCLI.user, GroupCLI.group,
                   AuditCLI.audit, KeyCLI.key)
    for command in subcommands:
        entry_point.add_command(command)
    entry_point()
Enter main function .
14,262
def print_args(output=sys.stdout):
    """Decorator factory: write a function's arguments to *output* before calling it."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            output.write("Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs)))
            return func(*args, **kwargs)
        return wrapper
    return decorator
Decorate a function so that print arguments before calling it .
14,263
def constant(func):
    """Decorate a function so it is evaluated once and the result reused.

    Fix: the original used ``if not _.res`` as the "not yet computed" test,
    which re-evaluated the function every call whenever it returned a falsy
    value (``0``, ``''``, ``None``, ...).  A dedicated sentinel distinguishes
    "never computed" from a legitimately falsy result.
    """
    _missing = object()  # sentinel: "not computed yet"

    @wraps(func)
    def wrapper(*args, **kwargs):
        if wrapper.res is _missing:
            wrapper.res = func(*args, **kwargs)
        return wrapper.res

    # Keep the cached value exposed as `.res`, as in the original.
    wrapper.res = _missing
    return wrapper
Decorate a function so that the result is a constant value .
14,264
def memoized(func):
    """Decorate a function to memoize results keyed by positional arguments.

    On MemoryError while storing a result, the whole cache is dropped and
    garbage-collected; the freshly computed value is still returned.
    """
    cache = {}

    @wraps(func)
    def memoized_function(*args):
        if args in cache:
            return cache[args]
        value = func(*args)
        try:
            cache[args] = value
        except MemoryError:
            cache.clear()
            gc.collect()
        return value

    return memoized_function
Decorate a function to memoize results .
14,265
def _open_sqlite(db_file):
    """Open a connection to the SQLite database at *db_file*.

    :raises Dump2PolarionException: when the file is missing or unreadable
    """
    db_file = os.path.expanduser(db_file)
    try:
        # Fail early with a clear error when the file does not exist;
        # sqlite3.connect would otherwise silently create a new database.
        with open(db_file):
            pass
        return sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES)
    except (IOError, sqlite3.Error) as err:
        raise Dump2PolarionException("{}".format(err))
Opens database connection .
14,266
def import_sqlite(db_file, older_than=None, **kwargs):
    """Read not-yet-exported testcases from the database and return imported data.

    :param older_than: when set, only rows with ``sqltime`` before it are read
    :return: ``xunit_exporter.ImportedData`` with results and testrun id
    """
    conn = _open_sqlite(db_file)
    cur = conn.cursor()

    select = "SELECT * FROM testcases WHERE exported != 'yes'"
    if older_than:
        cur.execute(" ".join((select, "AND sqltime < ?")), (older_than,))
    else:
        cur.execute(select)

    # Preserve column order in each record.
    columns = [description[0] for description in cur.description]
    results = [OrderedDict(list(zip(columns, row))) for row in cur.fetchall()]

    testrun = _get_testrun_from_sqlite(conn)
    conn.close()
    return xunit_exporter.ImportedData(results=results, testrun=testrun)
Reads the content of the database file and returns imported data .
14,267
def mark_exported_sqlite(db_file, older_than=None):
    """Mark rows that already have a verdict as exported.

    :param older_than: when set, only rows with ``sqltime`` before it are marked
    """
    logger.debug("Marking rows in database as exported")
    conn = _open_sqlite(db_file)
    cur = conn.cursor()

    update = "UPDATE testcases SET exported = 'yes' WHERE verdict IS NOT null AND verdict != ''"
    if older_than:
        cur.execute(" ".join((update, "AND sqltime < ?")), (older_than,))
    else:
        cur.execute(update)

    conn.commit()
    conn.close()
Marks rows with verdict as exported .
14,268
def createSubtitle(self, fps, section):
    """Parse *section* into a Subtitle, or return None when it does not match.

    The section is matched against the parser's compiled pattern; the named
    groups ``time_from``, ``time_to`` and ``text`` are converted via
    ``frametime``/``formatSub``.
    """
    matched = self._pattern.search(section)
    if matched is None:
        return None
    groups = matched.groupdict()
    return Subtitle(
        self.frametime(fps, groups.get("time_from")),
        self.frametime(fps, groups.get("time_to")),
        self.formatSub(groups.get("text")),
    )
Returns a correct Subtitle object from a text given in section . If section cannot be parsed None is returned . By default section is checked against subPattern regular expression .
14,269
def convertTime(self, frametime, which):
    """Convert a FrameTime to the frame value used for subtitle start/end time.

    *which* (start/end marker) is unused by this frame-based format.
    """
    frame = frametime.frame
    SubAssert(frame >= 0, _("Negative time present."))
    return frame
Convert FrameTime object to properly formatted string that describes subtitle start or end time .
14,270
def _set_location(instance, location):
    """Set a Location response header.

    A location not starting with a slash is resolved relative to the current
    request path (treated as a directory).
    """
    location = str(location)
    if not location.startswith('/'):
        base = instance.request_path.rstrip('/') + '/'
        location = urljoin(base, location)
    instance.response.location = location
Sets a Location response header . If the location does not start with a slash the path of the current request is prepended .
14,271
def no_cache(asset_url):
    """Remove query parameters from *asset_url*.

    Fix: the query string starts at the *first* ``'?'``, so use ``find``
    instead of ``rfind`` (the old code kept everything up to the last
    ``'?'`` when the URL happened to contain more than one).
    """
    pos = asset_url.find('?')
    if pos > 0:
        asset_url = asset_url[:pos]
    return asset_url
Removes query parameters
14,272
def __ngrams(s, n=3):
    """Raw n-grams (as tuples) from a sequence, built from n staggered slices."""
    staggered = (s[offset:] for offset in range(n))
    return list(zip(*staggered))
Raw n - grams from a sequence
14,273
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """Word-level n-grams in a string; n is capped at the token count."""
    words = token_fn(s)
    return __ngrams(words, n=min(len(words), n))
Word - level n - grams in a string
14,274
def char_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """Character-level n-grams from within each word of a string."""
    words = token_fn(s)
    per_word = [__ngrams(w, n=min(len(w), n)) for w in words]
    # Flatten the per-word lists and join each character tuple into a string.
    return [''.join(gram) for grams in per_word for gram in grams]
Character - level n - grams from within the words in a string .
14,275
def __matches(s1, s2, ngrams_fn, n=3):
    """The set of n-grams shared by both sequences."""
    return set(ngrams_fn(s1, n=n)) & set(ngrams_fn(s2, n=n))
Returns the n - grams that match between two sequences
14,276
def char_matches(s1, s2, n=3):
    """Character-level n-grams that match between two strings."""
    return __matches(s1, s2, ngrams_fn=char_ngrams, n=n)
Character - level n - grams that match between two strings
14,277
def word_matches(s1, s2, n=3):
    """Word-level n-grams that match between two strings."""
    return __matches(s1, s2, ngrams_fn=word_ngrams, n=n)
Word - level n - grams that match between two strings
14,278
def __similarity(s1, s2, ngrams_fn, n=3):
    """Dice coefficient: the fraction of n-grams matching between two sequences."""
    ngrams1 = set(ngrams_fn(s1, n=n))
    ngrams2 = set(ngrams_fn(s2, n=n))
    overlap = len(ngrams1 & ngrams2)
    return 2 * overlap / (len(ngrams1) + len(ngrams2))
The fraction of n - grams matching between two sequences
14,279
def _get_json(self, model, space=None, rel_path=None, extra_params=None, get_all=None):
    """Base level method for fetching data from the API.

    :param model: the model class to instantiate from each JSON object
    :param space: the Space to bind onto each instance (required for all
        models except Space and Event)
    :param rel_path: override for the model's default relative path
    :param extra_params: extra query-string parameters (``page`` defaults to 1)
    :param get_all: when truthy (and ``per_page`` is set), keep fetching
        further pages until a short page is returned
    :return: list of *model* instances; ``[]`` on HTTP 204
    :raises Exception: on any other HTTP status
    """
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._get_json` should always '
            'be called with a `space` argument.'
        )
    if not extra_params:
        extra_params = {}
    # Default to the first page when the caller did not specify one.
    extra_params['page'] = extra_params.get('page', 1)

    url = '{0}/{1}/{2}.json?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )

    # Serve from the in-memory cache when response caching is enabled.
    if self.cache_responses and url in self.cache:
        response = self.cache[url]
    else:
        headers = {
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
        }
        response = self.session.get(url=url, headers=headers)
        if self.cache_responses:
            self.cache[url] = response

    if response.status_code == 200:
        results = []
        json_response = response.json()
        for obj in json_response:
            instance = model(data=obj)
            instance.api = self
            if space:
                instance.space = space
            results.append(instance)
        per_page = extra_params.get('per_page', None)
        # A completely full page suggests more results may follow, so
        # recursively fetch the next page and append its results.
        if (get_all and per_page and len(json_response) and per_page == len(json_response)):
            extra_params['page'] += 1
            results = results + self._get_json(model, space, rel_path, extra_params, get_all=get_all)
        return results
    elif response.status_code == 204:
        # 204 No Content: the query matched nothing.
        return []
    else:
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
Base level method for fetching data from the API
14,280
def _post_json(self, instance, space=None, rel_path=None, extra_params=None):
    """Base level method for creating data via the API (HTTP POST).

    :param instance: the model instance whose ``data`` is posted
    :param space: the Space to bind onto the returned instance
    :return: a fresh instance rebuilt from the server response (HTTP 201)
    :raises AttributeError: when the instance already has a ``number``
    :raises Exception: on any non-201 HTTP status
    """
    model = type(instance)
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._post_json` should always '
            'be called with a `space` argument.'
        )
    # A ticket that already has a number exists on the server side; refuse
    # to re-create it via POST.
    if 'number' in instance.data:
        raise AttributeError('You cannot create a ticket which already has a number')
    if not extra_params:
        extra_params = {}
    url = '{0}/{1}/{2}?{3}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        urllib.urlencode(extra_params),
    )
    response = requests.post(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )
    if response.status_code == 201:
        # 201 Created: rebuild the instance from the server's representation.
        instance = model(data=response.json())
        instance.api = self
        if space:
            instance.space = space
        return instance
    else:
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
Base level method for updating data via the API
14,281
def _put_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None):
    """Base level method for updating existing data via the API (HTTP PUT).

    :param instance: the model instance whose ``data`` is sent
    :param id_field: the field used to address the object (default ``number``)
    :return: *instance* unchanged on success (HTTP 204)
    :raises Exception: on any non-204 HTTP status
    """
    model = type(instance)
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._put_json` should always '
            'be called with a `space` argument.'
        )
    if not extra_params:
        extra_params = {}
    # Objects are addressed by their `number` field unless told otherwise.
    if not id_field:
        id_field = 'number'
    url = '{0}/{1}/{2}/{3}.json?{4}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        urllib.urlencode(extra_params),
    )
    response = requests.put(
        url=url,
        data=json.dumps(instance.data),
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )
    if response.status_code == 204:
        # 204 No Content: update succeeded; the server returns no body.
        return instance
    else:
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
Base level method for adding new data to the API
14,282
def _delete_json(self, instance, space=None, rel_path=None, extra_params=None, id_field=None, append_to_path=None):
    """Base level method for removing data from the API (HTTP DELETE).

    :param instance: the model instance to delete
    :param id_field: the field used to address the object (default ``number``)
    :param append_to_path: optional extra path segment before ``.json``
    :return: ``True`` on success (HTTP 204)
    :raises AttributeError: when the instance has no value for *id_field*
    :raises Exception: on any non-204 HTTP status
    """
    model = type(instance)
    if space is None and model not in (Space, Event):
        raise Exception(
            'In general, `API._delete_json` should always '
            'be called with a `space` argument.'
        )
    if not extra_params:
        extra_params = {}
    # Objects are addressed by their `number` field unless told otherwise.
    if not id_field:
        id_field = 'number'
    if not instance.get(id_field, None):
        raise AttributeError(
            '%s does not have a value for the id field \'%s\'' % (
                instance.__class__.__name__, id_field
            )
        )
    url = '{0}/{1}/{2}/{3}{4}.json?{5}'.format(
        settings.API_ROOT_PATH,
        settings.API_VERSION,
        rel_path or model.rel_path,
        instance[id_field],
        append_to_path or '',
        urllib.urlencode(extra_params),
    )
    response = requests.delete(
        url=url,
        headers={
            'X-Api-Key': self.key,
            'X-Api-Secret': self.secret,
            'Content-type': "application/json",
        },
    )
    if response.status_code == 204:
        return True
    else:
        raise Exception(
            'Code {0} returned from `{1}`. Response text: "{2}".'.format(
                response.status_code, url, response.text
            )
        )
Base level method for removing data from the API
14,283
def _bind_variables(self, instance, space):
    """Bind the API (and, when given, the space) onto *instance* and return it."""
    instance.api = self
    if space:
        instance.space = space
    return instance
Bind related variables to the instance
14,284
def tickets(self, extra_params=None):
    """All Tickets in this Space (``report=0``), following pagination."""
    params = {'per_page': settings.MAX_PER_PAGE, 'report': 0}
    params.update(extra_params or {})
    return self.api._get_json(
        Ticket,
        space=self,
        rel_path=self._build_rel_path('tickets'),
        extra_params=params,
        get_all=True,  # paginate until a short page is returned
    )
All Tickets in this Space
14,285
def milestones(self, extra_params=None):
    """All Milestones in this Space, following pagination."""
    params = {'per_page': settings.MAX_PER_PAGE}
    params.update(extra_params or {})
    return self.api._get_json(
        Milestone,
        space=self,
        rel_path=self._build_rel_path('milestones/all'),
        extra_params=params,
        get_all=True,
    )
All Milestones in this Space
14,286
def tools(self, extra_params=None):
    """All Tools in this Space."""
    rel_path = self._build_rel_path('space_tools')
    return self.api._get_json(SpaceTool, space=self, rel_path=rel_path,
                              extra_params=extra_params)
All Tools in this Space
14,287
def components(self, extra_params=None):
    """All ticket components in this Space."""
    rel_path = self._build_rel_path('ticket_components')
    return self.api._get_json(Component, space=self, rel_path=rel_path,
                              extra_params=extra_params)
All components in this Space
14,288
def users(self, extra_params=None):
    """All Users with access to this Space."""
    rel_path = self._build_rel_path('users')
    return self.api._get_json(User, space=self, rel_path=rel_path,
                              extra_params=extra_params)
All Users with access to this Space
14,289
def tags(self, extra_params=None):
    """All Tags in this Space."""
    rel_path = self._build_rel_path('tags')
    return self.api._get_json(Tag, space=self, rel_path=rel_path,
                              extra_params=extra_params)
All Tags in this Space
14,290
def wiki_pages(self, extra_params=None):
    """All Wiki Pages in this Space."""
    rel_path = self._build_rel_path('wiki_pages')
    return self.api._get_json(WikiPage, space=self, rel_path=rel_path,
                              extra_params=extra_params)
All Wiki Pages with access to this Space
14,291
def tickets(self, extra_params=None):
    """All Tickets which are a part of this Milestone."""
    milestone_id = self['id']

    def in_milestone(ticket):
        return ticket.get('milestone_id', None) == milestone_id

    # Keep `filter` so the return type matches the original exactly.
    return filter(in_milestone, self.space.tickets(extra_params=extra_params))
All Tickets which are a part of this Milestone
14,292
def tags(self, extra_params=None):
    """All Tags on this Ticket, following pagination."""
    params = {'per_page': settings.MAX_PER_PAGE}
    params.update(extra_params or {})
    rel_path = self.space._build_rel_path('tickets/%s/tags' % self['number'])
    return self.api._get_json(Tag, space=self, rel_path=rel_path,
                              extra_params=params, get_all=True)
All Tags in this Ticket
14,293
def milestone(self, extra_params=None):
    """The Milestone that the Ticket is a part of, or None."""
    milestone_id = self.get('milestone_id', None)
    if milestone_id:
        # NOTE(review): verify that the space's milestones() accepts an
        # ``id`` keyword in this codebase.
        matches = self.space.milestones(id=milestone_id, extra_params=extra_params)
        if matches:
            return matches[0]
The Milestone that the Ticket is a part of
14,294
def user(self, extra_params=None):
    """The User currently assigned to the Ticket, or None."""
    assignee_id = self.get('assigned_to_id', None)
    if assignee_id:
        # NOTE(review): verify that the space's users() accepts an ``id``
        # keyword in this codebase.
        matches = self.space.users(id=assignee_id, extra_params=extra_params)
        if matches:
            return matches[0]
The User currently assigned to the Ticket
14,295
def component(self, extra_params=None):
    """The Component currently assigned to the Ticket, or None."""
    component_id = self.get('component_id', None)
    if component_id:
        # NOTE(review): verify that the space's components() accepts an
        # ``id`` keyword in this codebase.
        matches = self.space.components(id=component_id, extra_params=extra_params)
        if matches:
            return matches[0]
The Component currently assigned to the Ticket
14,296
def comments(self, extra_params=None):
    """All Comments on this Ticket, following pagination."""
    params = {'per_page': settings.MAX_PER_PAGE}
    params.update(extra_params or {})
    rel_path = self.space._build_rel_path(
        'tickets/%s/ticket_comments' % self['number'])
    return self.api._get_json(TicketComment, space=self, rel_path=rel_path,
                              extra_params=params, get_all=True)
All Comments in this Ticket
14,297
def write(self):
    """Create or update the Ticket on Assembla.

    POSTs a new ticket when it has no number yet, PUTs an update otherwise.
    """
    if not hasattr(self, 'space'):
        raise AttributeError(
            "A ticket must have a 'space' attribute before you can write it to Assembla."
        )
    api = self.space.api
    method = api._put_json if self.get('number') else api._post_json
    return method(
        self,
        space=self.space,
        rel_path=self.space._build_rel_path('tickets'),
    )
Create or update the Ticket on Assembla
14,298
def delete(self):
    """Remove the Ticket from Assembla."""
    if not hasattr(self, 'space'):
        raise AttributeError(
            "A ticket must have a 'space' attribute before you can remove it from Assembla."
        )
    rel_path = self.space._build_rel_path('tickets')
    return self.space.api._delete_json(self, space=self.space, rel_path=rel_path)
Remove the Ticket from Assembla
14,299
def tickets(self, extra_params=None):
    """A User's tickets across all available spaces."""
    user_id = self['id']

    def assigned_to_me(ticket):
        return ticket.get('assigned_to_id', None) == user_id

    tickets = []
    for space in self.api.spaces():
        tickets += filter(assigned_to_me, space.tickets(extra_params=extra_params))
    return tickets
A User s tickets across all available spaces