idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
14,200
def add_0x(string):
    """Return *string* prefixed with '0x', decoding bytes as UTF-8 first."""
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    return '0x{}'.format(string)
Add 0x to string at start .
14,201
def guess_depth(packages):
    """Guess the optimal depth to use for the given list of packages."""
    dot_counts = [pkg.count('.') for pkg in packages]
    if len(packages) == 1:
        # A single package: go one level deeper than the multi-package case.
        return dot_counts[0] + 2
    return min(dot_counts) + 1
Guess the optimal depth to use for the given list of arguments .
14,202
def print ( self , format = TEXT , output = sys . stdout , ** kwargs ) : if format is None : format = TEXT if format == TEXT : print ( self . _to_text ( ** kwargs ) , file = output ) elif format == CSV : print ( self . _to_csv ( ** kwargs ) , file = output ) elif format == JSON : print ( self . _to_json ( ** kwargs ) ,...
Print the object in a file or on standard output by default .
14,203
def import_training_data ( self , positive_corpus_file = os . path . join ( os . path . dirname ( __file__ ) , "positive.txt" ) , negative_corpus_file = os . path . join ( os . path . dirname ( __file__ ) , "negative.txt" ) ) : positive_corpus = open ( positive_corpus_file ) negative_corpus = open ( negative_corpus_fil...
This method imports the positive and negative training data from the two corpus files and creates the training data list .
14,204
def train(self):
    """Build the NaiveBayes classifier from the training data.

    Lazily imports the training data first when it has not been loaded yet.
    """
    if not self.training_data:
        self.import_training_data()
    labelled_features = [(self.extract_features(text), label)
                         for text, label in self.training_data]
    self.classifier = nltk.NaiveBayesClassifier.train(labelled_features)
This method generates the classifier . This method assumes that the training data has been loaded
14,205
def extract_features(self, phrase):
    """Extract unigram presence features from *phrase*.

    Tokenizes with nltk and emits one boolean feature per word.  The
    original wrote ``features[...] = (word in words)``, a tautology —
    ``word`` is always drawn from ``words`` so the value was always True.
    Setting True directly produces an identical dict and states the intent.
    """
    words = nltk.word_tokenize(phrase)
    return {'contains(%s)' % word: True for word in words}
This function will extract features from the phrase being used . Currently the feature we are extracting are unigrams of the text corpus .
14,206
def is_twss(self, phrase):
    """Classify *phrase*: the classifier's verdict on whether it is an entendre."""
    featureset = self.extract_features(phrase)
    verdict = self.classifier.classify(featureset)
    return verdict
The magic function - this accepts a phrase and tells you if it classifies as an entendre
14,207
def save(self, filename='classifier.dump'):
    """Pickle the trained classifier and dump it into *filename*.

    Fix: open in binary mode ('wb') — pickle writes bytes, and the original
    text-mode 'w+' handle raises TypeError under Python 3.  The context
    manager guarantees the handle is closed even if dump fails.
    """
    with open(filename, 'wb') as ofile:
        pickle.dump(self.classifier, ofile)
Pickles the classifier and dumps it into a file
14,208
def load(self, filename='classifier.dump'):
    """Unpickle a previously saved classifier from *filename*.

    Fix: open in binary mode ('rb') — pickle data is bytes; the original
    'r+' text-mode handle fails under Python 3.  The context manager
    guarantees the handle is closed.
    """
    with open(filename, 'rb') as ifile:
        self.classifier = pickle.load(ifile)
Unpickles the classifier used
14,209
def pw ( ctx , key_pattern , user_pattern , mode , strict_flag , user_flag , file , edit_subcommand , gen_subcommand , ) : def handle_sigint ( * _ ) : click . echo ( ) ctx . exit ( 1 ) signal . signal ( signal . SIGINT , handle_sigint ) if gen_subcommand : length = int ( key_pattern ) if key_pattern else None generate_...
Search for USER and KEY in GPG - encrypted password file .
14,210
def launch_editor ( ctx , file ) : editor = os . environ . get ( "PW_EDITOR" ) if not editor : click . echo ( "error: no editor set in PW_EDITOR environment variables" ) ctx . exit ( 1 ) if not os . path . exists ( file ) : click . echo ( "error: password store not found at '%s'" % file , err = True ) ctx . exit ( 1 ) ...
launch editor with decrypted password database
14,211
def generate_password ( mode , length ) : r = random . SystemRandom ( ) length = length or RANDOM_PASSWORD_DEFAULT_LENGTH password = "" . join ( r . choice ( RANDOM_PASSWORD_ALPHABET ) for _ in range ( length ) ) if mode == Mode . ECHO : click . echo ( style_password ( password ) ) elif mode == Mode . COPY : try : impo...
generate a random password
14,212
def _load_data(self):
    """Query the GM API for currently running levels.

    Returns the decoded JSON payload, or None when the response body is empty.
    """
    levels_url = urljoin(self.base_url, 'levels')
    resp = requests.get(levels_url, headers=self.headers)
    if not resp.content:
        return None
    return resp.json()
Internal method for querying the GM api for currently running levels and storing that state .
14,213
def dump2sqlite ( records , output_file ) : results_keys = list ( records . results [ 0 ] . keys ( ) ) pad_data = [ ] for key in REQUIRED_KEYS : if key not in results_keys : results_keys . append ( key ) pad_data . append ( "" ) conn = sqlite3 . connect ( os . path . expanduser ( output_file ) , detect_types = sqlite3 ...
Dumps tests results to database .
14,214
def fit ( sim_mat , D_len , cidx ) : min_energy = np . inf for j in range ( 3 ) : inds = [ np . argmin ( [ sim_mat [ idy ] . get ( idx , 0 ) for idx in cidx ] ) for idy in range ( D_len ) if idy in sim_mat ] cidx = [ ] energy = 0 for i in np . unique ( inds ) : indsi = np . where ( inds == i ) [ 0 ] minind , min_value ...
The algorithm maximizes the energy (separation) between clusters, which is the notion of distinction used here. The distance matrix contains mostly zeros, which are overlooked because the search targets maximal distances. The algorithm does not try to retain k clusters.
14,215
def _calc_theta ( self ) : if self . decaying_prior : n_sampled = np . clip ( self . alpha_ + self . beta_ , 1 , np . inf ) prior_weight = 1 / n_sampled alpha = self . alpha_ + prior_weight * self . alpha_0 beta = self . beta_ + prior_weight * self . beta_0 else : alpha = self . alpha_ + self . alpha_0 beta = self . be...
Calculate an estimate of theta
14,216
def update(self, ell, k):
    """Update the posterior counts for stratum *k* after sampling label *ell*,
    then refresh the point estimate (and its variance when tracked)."""
    # The two count updates are independent of each other.
    self.beta_[k] += 1 - ell
    self.alpha_[k] += ell
    self._calc_theta()
    if self.store_variance:
        self._calc_var_theta()
Update the posterior and estimates after a label is sampled
14,217
def reset ( self ) : self . alpha_ = np . zeros ( self . _size , dtype = int ) self . beta_ = np . zeros ( self . _size , dtype = int ) self . theta_ = np . empty ( self . _size , dtype = float ) if self . store_variance : self . var_theta_ = np . empty ( self . _size , dtype = float ) if self . store_wp : self . theta...
Reset the instance to its initial state
14,218
def _calc_BB_prior(self, theta_0):
    """Generate Beta-Binomial prior parameters (alpha_0, beta_0).

    Splits the configured prior strength evenly across strata and scales it
    by each stratum's prior mean theta_0.
    """
    strength_per_stratum = self.prior_strength / len(theta_0)
    alpha_0 = theta_0 * strength_per_stratum
    beta_0 = (1 - theta_0) * strength_per_stratum
    return alpha_0, beta_0
Generate a prior for the BB model
14,219
def authenticate ( self , request , remote_user = None ) : if not remote_user : remote_user = request if not remote_user : return None user = None username = self . clean_username ( remote_user ) try : if self . create_unknown_user : defaults = { } if isinstance ( request , dict ) : session_data = request if 'full_name...
The username passed here is considered trusted . This method simply returns the User object with the given username .
14,220
def numval(token):
    """Return token.value converted to int/float when the token is numeric,
    otherwise the raw value unchanged."""
    converters = {'INTEGER': int, 'FLOAT': float}
    convert = converters.get(token.type)
    return convert(token.value) if convert else token.value
Return the numerical value of token . value if it is a number
14,221
def tokenize ( code ) : tok_regex = '|' . join ( '(?P<{}>{})' . format ( * pair ) for pair in _tokens ) tok_regex = re . compile ( tok_regex , re . IGNORECASE | re . M ) line_num = 1 line_start = 0 for mo in re . finditer ( tok_regex , code ) : kind = mo . lastgroup value = mo . group ( kind ) if kind == 'NEWLINE' : li...
Tokenize the string code
14,222
def parse ( tokens ) : d = collections . OrderedDict ( ) prev_line = 0 blockname = None blockline = None for token in tokens : if token . type == 'COMMENT' : continue elif token . type == 'BLOCK' : block = token blockline = token . line blocktype = token . value . upper ( ) blockname = None if blocktype not in d : d [ ...
Parse the token list into a hierarchical data structure
14,223
def load(stream):
    """Parse an LHA document into the corresponding Python object.

    Accepts either a string or a file-like object.
    """
    text = stream if isinstance(stream, str) else stream.read()
    return parse(tokenize(text))
Parse the LHA document and produce the corresponding Python object . Accepts a string or a file - like object .
14,224
def get_checksum(self):
    """Checksum of the parsed IDL, ignoring comments and element ordering.

    Sorting the per-element checksums makes the result order-independent
    while still detecting changes to types, parameter order and enum values.
    """
    parts = [chk for chk in (elem_checksum(elem) for elem in self.parsed) if chk]
    parts.sort()
    return md5(json.dumps(parts))
Returns a checksum based on the IDL that ignores comments and ordering but detects changes to types parameter order and enum values .
14,225
def _update_estimate_and_sampler ( self , ell , ell_hat , weight , extra_info , ** kwargs ) : stratum_idx = extra_info [ 'stratum' ] self . _BB_TP . update ( ell * ell_hat , stratum_idx ) self . _BB_PP . update ( ell_hat , stratum_idx ) self . _BB_P . update ( ell , stratum_idx ) self . _update_cov_model ( strata_to_up...
Update the BB models and the estimates
14,226
def get_path(num):
    """Map a workitem number to its nested range path,
    e.g. 14200 -> '10000-19999/14000-14999/14200-14299'."""
    num = int(num)
    digits = len(str(num))
    segments = []
    for level in range(digits - 2):
        step = 10 ** (digits - level - 1)
        low = (num // step) * step
        segments.append("{}-{}".format(low, low + step - 1))
    return "/".join(segments)
Gets a path from the workitem number .
14,227
def get_tree ( self , work_item_id ) : try : __ , tcid = work_item_id . split ( "-" ) except ValueError : logger . warning ( "Couldn't load workitem %s, bad format" , work_item_id ) self . _cache [ work_item_id ] = InvalidObject ( ) return None path = os . path . join ( self . test_case_dir , self . get_path ( tcid ) ,...
Gets XML tree of the workitem .
14,228
def get_all_items ( self ) : for item in os . walk ( self . test_case_dir ) : if "workitem.xml" not in item [ 2 ] : continue case_id = os . path . split ( item [ 0 ] ) [ - 1 ] if not ( case_id and "*" not in case_id ) : continue item_cache = self [ case_id ] if not item_cache : continue if not item_cache . get ( "title...
Walks the repo and returns work items .
14,229
def _remove_files(self, directory, pattern):
    """Delete every file under *directory* whose name matches the glob *pattern*."""
    for root, _dirs, names in os.walk(directory):
        for name in fnmatch.filter(names, pattern):
            target = os.path.join(root, name)
            os.remove(target)
Removes all files matching the search path
14,230
def post ( self , request , * args , ** kwargs ) : serializer = EventSerializer ( data = request . data ) if not serializer . is_valid ( ) : return Response ( { "accepted" : False , "reason" : serializer . errors } , status = 400 ) data = serializer . validated_data event_type = { "ack" : "ack" , "nack" : "nack" , "del...
Checks for expect event types before continuing
14,231
def create ( self , group , grouptype ) : try : self . client . add ( self . __distinguished_name ( group ) , API . __object_class ( ) , self . __ldap_attr ( group , grouptype ) ) except ldap3 . core . exceptions . LDAPNoSuchObjectResult : print ( "Error creating LDAP Group.\nRequest: " , self . __ldap_attr ( group , g...
Create an LDAP Group .
14,232
def add_user(self, group, username):
    """Add *username* to the specified LDAP *group* (memberUid MODIFY_ADD).

    Raises the lookup error unchained when the group cannot be resolved.
    """
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        raise err from None
    change = {'memberUid': [(ldap3.MODIFY_ADD, [username])]}
    self.client.modify(self.__distinguished_name(group), change)
Add a user to the specified LDAP group .
14,233
def remove_user(self, group, username):
    """Remove *username* from the specified LDAP *group* (memberUid MODIFY_DELETE).

    Raises the lookup error unchained when the group cannot be resolved.
    """
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        raise err from None
    change = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
    self.client.modify(self.__distinguished_name(group), change)
Remove a user from the specified LDAP group .
14,234
def lookup_id ( self , group ) : filter = [ "(cn={})" . format ( group ) , "(objectclass=posixGroup)" ] results = self . client . search ( filter , [ 'gidNumber' ] ) if len ( results ) < 1 : raise ldap_tools . exceptions . NoGroupsFound ( 'No Groups Returned by LDAP' ) elif len ( results ) > 1 : raise ldap_tools . exce...
Lookup GID for the given group .
14,235
def create(config, group, type):
    """CLI: create an LDAP group of the given type ('user' or 'service')."""
    if type not in ('user', 'service'):
        raise click.BadOptionUsage("--grouptype must be 'user' or 'service'")
    conn = Client()
    conn.prepare_connection()
    API(conn).create(group, type)
Create an LDAP group .
14,236
def delete(config, group, force):
    """CLI: delete an LDAP group, prompting for confirmation unless forced."""
    prompt = 'Confirm that you want to delete group {}'.format(group)
    if not force and not click.confirm(prompt):
        sys.exit("Deletion of {} aborted".format(group))
    conn = Client()
    conn.prepare_connection()
    API(conn).delete(group)
Delete an LDAP group .
14,237
def add_user ( config , group , username ) : client = Client ( ) client . prepare_connection ( ) group_api = API ( client ) try : group_api . add_user ( group , username ) except ldap_tools . exceptions . NoGroupsFound : print ( "Group ({}) not found" . format ( group ) ) except ldap_tools . exceptions . TooManyResults...
Add specified user to specified group .
14,238
def remove_user ( config , group , username ) : client = Client ( ) client . prepare_connection ( ) group_api = API ( client ) try : group_api . remove_user ( group , username ) except ldap_tools . exceptions . NoGroupsFound : print ( "Group ({}) not found" . format ( group ) ) except ldap_tools . exceptions . TooManyR...
Remove specified user from specified group .
14,239
def index(config):
    """CLI: display group info in raw format."""
    conn = Client()
    conn.prepare_connection()
    print(API(conn).index())
Display group info in raw format .
14,240
def _get_importer ( input_file ) : __ , ext = os . path . splitext ( input_file ) ext = ext . lower ( ) if "ostriz" in input_file : from dump2polarion . results import ostriztools importer = ostriztools . import_ostriz elif ext == ".xml" : from dump2polarion . results import junittools importer = junittools . import_ju...
Selects importer based on input file type .
14,241
def parse_db_url(db_url):
    """Given a database URL, return a dict of connection properties."""
    parts = urlparse(db_url)
    return {
        "database": parts.path[1:],  # drop the leading '/'
        "user": parts.username,
        "password": parts.password,
        "host": parts.hostname,
        "port": parts.port,
    }
provided a db url return a dict with connection properties
14,242
def bounds_handler ( ctx , param , value ) : retval = from_like_context ( ctx , param , value ) if retval is None and value is not None : try : value = value . strip ( ", []" ) retval = tuple ( float ( x ) for x in re . split ( r"[,\s]+" , value ) ) assert len ( retval ) == 4 return retval except Exception : raise clic...
Handle different forms of bounds .
14,243
def info ( dataset , indent , meta_member ) : table = bcdata . validate_name ( dataset ) wfs = WebFeatureService ( url = bcdata . OWS_URL , version = "2.0.0" ) info = { } info [ "name" ] = table info [ "count" ] = bcdata . get_count ( table ) info [ "schema" ] = wfs . get_schema ( "pub:" + table ) if meta_member : clic...
Print basic metadata about a DataBC WFS layer as JSON .
14,244
def dem(bounds, src_crs, dst_crs, out_file, resolution):
    """CLI: dump the BC DEM for *bounds* to a GeoTIFF."""
    # Default the destination CRS to BC Albers when not supplied.
    dst_crs = dst_crs or "EPSG:3005"
    bcdata.get_dem(
        bounds,
        out_file=out_file,
        src_crs=src_crs,
        dst_crs=dst_crs,
        resolution=resolution,
    )
Dump BC DEM to TIFF
14,245
def dump ( dataset , query , out_file , bounds ) : table = bcdata . validate_name ( dataset ) data = bcdata . get_data ( table , query = query , bounds = bounds ) if out_file : with open ( out_file , "w" ) as f : json . dump ( data . json ( ) , f ) else : sink = click . get_text_stream ( "stdout" ) sink . write ( json ...
Write DataBC features to stdout as GeoJSON feature collection .
14,246
def cat ( dataset , query , bounds , indent , compact , dst_crs , pagesize , sortby ) : dump_kwds = { "sort_keys" : True } if indent : dump_kwds [ "indent" ] = indent if compact : dump_kwds [ "separators" ] = ( "," , ":" ) table = bcdata . validate_name ( dataset ) for feat in bcdata . get_features ( table , query = qu...
Write DataBC features to stdout as GeoJSON feature objects .
14,247
def bc2pg ( dataset , db_url , table , schema , query , append , pagesize , sortby , max_workers ) : src = bcdata . validate_name ( dataset ) src_schema , src_table = [ i . lower ( ) for i in src . split ( "." ) ] if not schema : schema = src_schema if not table : table = src_table conn = pgdata . connect ( db_url ) if...
Download a DataBC WFS layer to postgres - an ogr2ogr wrapper .
14,248
def __parseFormat ( self , fmt , content , fps = 25 ) : headerFound = False subSection = '' for lineNo , line in enumerate ( content ) : line = self . _initialLinePrepare ( line , lineNo ) if not fmt . WITH_HEADER and not self . _formatFound and lineNo > self . _maxFmtSearch : return subSection = '' . join ( [ subSecti...
Actual parser . Please note that time_to is not required to process as not all subtitles provide it .
14,249
def get_args ( args = None ) : parser = argparse . ArgumentParser ( description = "dump2polarion" ) parser . add_argument ( "-i" , "--input_file" , required = True , help = "Path to CSV, SQLite or JUnit reports file or importers XML file" , ) parser . add_argument ( "-o" , "--output_file" , help = "Where to save the XM...
Get command line arguments .
14,250
def get_submit_args ( args ) : submit_args = dict ( testrun_id = args . testrun_id , user = args . user , password = args . password , no_verify = args . no_verify , verify_timeout = args . verify_timeout , log_file = args . job_log , dry_run = args . dry_run , ) submit_args = { k : v for k , v in submit_args . items (...
Gets arguments for the submit_and_verify method .
14,251
def process_args(args):
    """Normalize *args* (argparse.Namespace, object with to_dict, or mapping)
    into a frozen, default-valued Box."""
    if isinstance(args, argparse.Namespace):
        normalized = vars(args)
    elif hasattr(args, "to_dict"):
        normalized = args.to_dict()
    else:
        normalized = args
    return Box(normalized, frozen_box=True, default_box=True)
Processes passed arguments .
14,252
def submit_if_ready ( args , submit_args , config ) : __ , ext = os . path . splitext ( args . input_file ) if ext . lower ( ) != ".xml" : return None with io . open ( args . input_file , encoding = "utf-8" ) as input_file : xml = input_file . read ( 1024 ) if not ( "<testsuites" in xml or "<testcases" in xml or "<requ...
Submits the input XML file if it is already in the expected format.
14,253
def dumper ( args , config , transform_func = None ) : args = process_args ( args ) submit_args = get_submit_args ( args ) submit_outcome = submit_if_ready ( args , submit_args , config ) if submit_outcome is not None : return submit_outcome import_time = datetime . datetime . utcnow ( ) try : records = dump2polarion ....
Dumper main function .
14,254
def load_ldap_config ( self ) : try : with open ( '{}/ldap_info.yaml' . format ( self . config_dir ) , 'r' ) as FILE : config = yaml . load ( FILE ) self . host = config [ 'server' ] self . user_dn = config [ 'user_dn' ] self . port = config [ 'port' ] self . basedn = config [ 'basedn' ] self . mail_domain = config [ '...
Configure LDAP Client settings .
14,255
def load_ldap_password(self):
    """Read the base64-encoded LDAP password from <config_dir>/ldap.secret."""
    secret_path = '{}/ldap.secret'.format(self.config_dir)
    with open(secret_path, 'r') as secret_file:
        encoded = secret_file.read()
    self.user_pw = base64.b64decode(encoded.encode())
Import LDAP password from file .
14,256
def connection(self):
    """Establish a lazy, auto-bound LDAP connection to the configured server."""
    self.server = ldap3.Server(self.host, port=self.port, get_info=ldap3.ALL)
    self.conn = ldap3.Connection(
        self.server,
        user=self.user_dn,
        password=self.user_pw,
        auto_bind=True,
        lazy=True,
        receive_timeout=1,
    )
Establish LDAP connection .
14,257
def add(self, distinguished_name, object_class, attributes):
    """Add an object to LDAP.

    Thin delegate to the underlying ldap3 connection's ``add``.
    """
    self.conn.add(distinguished_name, object_class, attributes)
Add object to LDAP .
14,258
def search ( self , filter , attributes = None ) : if attributes is None : attributes = [ '*' ] if filter is None : filter = [ "(objectclass=*)" ] filterstr = "(&{})" . format ( '' . join ( filter ) ) self . conn . search ( search_base = self . basedn , search_filter = filterstr , search_scope = ldap3 . SUBTREE , attri...
Search LDAP for records .
14,259
def get_max_id ( self , object_type , role ) : if object_type == 'user' : objectclass = 'posixAccount' ldap_attr = 'uidNumber' elif object_type == 'group' : objectclass = 'posixGroup' ldap_attr = 'gidNumber' else : raise ldap_tools . exceptions . InvalidResult ( 'Unknown object type' ) minID , maxID = Client . __set_id...
Get the highest used ID .
14,260
def get_dem ( bounds , out_file = "dem.tif" , src_crs = "EPSG:3005" , dst_crs = "EPSG:3005" , resolution = 25 ) : bbox = "," . join ( [ str ( b ) for b in bounds ] ) payload = { "service" : "WCS" , "version" : "1.0.0" , "request" : "GetCoverage" , "coverage" : "pub:bc_elevation_25m_bcalb" , "Format" : "GeoTIFF" , "bbox...
Get 25m DEM for provided bounds write to GeoTIFF
14,261
def main():
    """CLI entry point: register all sub-commands, then dispatch."""
    subcommands = (
        CLI.version,
        UserCLI.user,
        GroupCLI.group,
        AuditCLI.audit,
        KeyCLI.key,
    )
    for command in subcommands:
        entry_point.add_command(command)
    entry_point()
Enter main function .
14,262
def print_args(output=sys.stdout):
    """Decorator factory: write a function's arguments to *output* before calling it."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            output.write("Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs)))
            return func(*args, **kwargs)
        return wrapper
    return decorator
Decorate a function so that print arguments before calling it .
14,263
def constant(func):
    """Decorate *func* so it is evaluated once and the result cached forever.

    Fix: the original guarded with ``if not _.res``, so any falsy result
    (0, '', None, False, empty list) was never cached and *func* was
    re-invoked on every call.  A private sentinel distinguishes "not yet
    computed" from a legitimately falsy result.
    """
    sentinel = object()

    @wraps(func)
    def wrapper(*args, **kwargs):
        if wrapper.res is sentinel:
            wrapper.res = func(*args, **kwargs)
        return wrapper.res

    # Exposed, as in the original, so callers can inspect/reset the cache.
    wrapper.res = sentinel
    return wrapper
Decorate a function so that the result is a constant value .
14,264
def memoized(func):
    """Decorate *func* to memoize results by positional arguments.

    If storing a result raises MemoryError, the cache is dropped and a GC
    pass is forced; the freshly computed value is still returned.
    """
    cache = {}

    @wraps(func)
    def wrapper(*args):
        if args in cache:
            return cache[args]
        value = func(*args)
        try:
            cache[args] = value
        except MemoryError:
            cache.clear()
            gc.collect()
        return value

    return wrapper
Decorate a function to memoize results .
14,265
def _open_sqlite(db_file):
    """Open a SQLite connection to *db_file*.

    The plain open() probe verifies the file exists and is readable before
    connecting; any I/O or SQLite error is re-raised as Dump2PolarionException.
    """
    db_file = os.path.expanduser(db_file)
    try:
        open(db_file).close()
        return sqlite3.connect(db_file, detect_types=sqlite3.PARSE_DECLTYPES)
    except (IOError, sqlite3.Error) as err:
        raise Dump2PolarionException("{}".format(err))
Opens database connection .
14,266
def import_sqlite ( db_file , older_than = None , ** kwargs ) : conn = _open_sqlite ( db_file ) cur = conn . cursor ( ) select = "SELECT * FROM testcases WHERE exported != 'yes'" if older_than : cur . execute ( " " . join ( ( select , "AND sqltime < ?" ) ) , ( older_than , ) ) else : cur . execute ( select ) columns = ...
Reads the content of the database file and returns imported data .
14,267
def mark_exported_sqlite ( db_file , older_than = None ) : logger . debug ( "Marking rows in database as exported" ) conn = _open_sqlite ( db_file ) cur = conn . cursor ( ) update = "UPDATE testcases SET exported = 'yes' WHERE verdict IS NOT null AND verdict != ''" if older_than : cur . execute ( " " . join ( ( update ...
Marks rows with verdict as exported .
14,268
def createSubtitle ( self , fps , section ) : matched = self . _pattern . search ( section ) if matched is not None : matchedDict = matched . groupdict ( ) return Subtitle ( self . frametime ( fps , matchedDict . get ( "time_from" ) ) , self . frametime ( fps , matchedDict . get ( "time_to" ) ) , self . formatSub ( mat...
Returns a correct Subtitle object from a text given in section . If section cannot be parsed None is returned . By default section is checked against subPattern regular expression .
14,269
def convertTime(self, frametime, which):
    """Convert a FrameTime object to the value describing subtitle start/end time.

    For this format the raw frame number is emitted directly.
    *which* is unused here; presumably kept for interface parity with other
    formats — TODO confirm against sibling converters.
    """
    SubAssert(frametime.frame >= 0, _("Negative time present."))
    return frametime.frame
Convert FrameTime object to properly formatted string that describes subtitle start or end time .
14,270
def _set_location(instance, location):
    """Set the Location response header.

    Relative locations are resolved against the current request path
    (with a trailing slash ensured); absolute paths are used as-is.
    """
    location = str(location)
    if not location.startswith('/'):
        base = instance.request_path.rstrip('/') + '/'
        location = urljoin(base, location)
    instance.response.location = location
Sets a Location response header . If the location does not start with a slash the path of the current request is prepended .
14,271
def no_cache(asset_url):
    """Strip the query string from *asset_url* (a '?' at position 0 is kept)."""
    sep = asset_url.rfind('?')
    return asset_url[:sep] if sep > 0 else asset_url
Removes query parameters
14,272
def __ngrams(s, n=3):
    """Raw n-grams from a sequence, as a list of n-tuples."""
    shifted_views = [s[offset:] for offset in range(n)]
    return list(zip(*shifted_views))
Raw n - grams from a sequence
14,273
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """Word-level n-grams in a string; n is capped at the token count."""
    words = token_fn(s)
    return __ngrams(words, n=min(len(words), n))
Word - level n - grams in a string
14,274
def char_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """Character-level n-grams from within the words of a string.

    Each word yields its own n-grams (n capped at word length); the tuples
    are flattened and joined back into strings.
    """
    words = token_fn(s)
    per_word = [__ngrams(word, n=min(len(word), n)) for word in words]
    flattened = sum(per_word, [])
    return [''.join(gram) for gram in flattened]
Character - level n - grams from within the words in a string .
14,275
def __matches(s1, s2, ngrams_fn, n=3):
    """Return the set of n-grams shared by the two sequences."""
    left = set(ngrams_fn(s1, n=n))
    right = set(ngrams_fn(s2, n=n))
    return left & right
Returns the n - grams that match between two sequences
14,276
def char_matches(s1, s2, n=3):
    """Character-level n-grams that match between two strings."""
    return __matches(s1, s2, ngrams_fn=char_ngrams, n=n)
Character - level n - grams that match between two strings
14,277
def word_matches(s1, s2, n=3):
    """Word-level n-grams that match between two strings."""
    return __matches(s1, s2, ngrams_fn=word_ngrams, n=n)
Word - level n - grams that match between two strings
14,278
def __similarity(s1, s2, ngrams_fn, n=3):
    """Dice coefficient: the fraction of n-grams matching between two sequences."""
    left = set(ngrams_fn(s1, n=n))
    right = set(ngrams_fn(s2, n=n))
    shared = left & right
    return 2 * len(shared) / (len(left) + len(right))
The fraction of n - grams matching between two sequences
14,279
def _get_json ( self , model , space = None , rel_path = None , extra_params = None , get_all = None ) : if space is None and model not in ( Space , Event ) : raise Exception ( 'In general, `API._get_json` should always ' 'be called with a `space` argument.' ) if not extra_params : extra_params = { } extra_params [ 'pa...
Base level method for fetching data from the API
14,280
def _post_json ( self , instance , space = None , rel_path = None , extra_params = None ) : model = type ( instance ) if space is None and model not in ( Space , Event ) : raise Exception ( 'In general, `API._post_json` should always ' 'be called with a `space` argument.' ) if 'number' in instance . data : raise Attrib...
Base level method for updating data via the API
14,281
def _put_json ( self , instance , space = None , rel_path = None , extra_params = None , id_field = None ) : model = type ( instance ) if space is None and model not in ( Space , Event ) : raise Exception ( 'In general, `API._put_json` should always ' 'be called with a `space` argument.' ) if not extra_params : extra_p...
Base level method for adding new data to the API
14,282
def _delete_json ( self , instance , space = None , rel_path = None , extra_params = None , id_field = None , append_to_path = None ) : model = type ( instance ) if space is None and model not in ( Space , Event ) : raise Exception ( 'In general, `API._delete_json` should always ' 'be called with a `space` argument.' )...
Base level method for removing data from the API
14,283
def _bind_variables(self, instance, space):
    """Attach this API (and, when truthy, the space) to *instance*; return it."""
    instance.api = self
    if space:
        instance.space = space
    return instance
Bind related variables to the instance
14,284
def tickets(self, extra_params=None):
    """All Tickets in this Space."""
    params = {'per_page': settings.MAX_PER_PAGE, 'report': 0}
    if extra_params:
        params.update(extra_params)
    rel = self._build_rel_path('tickets')
    return self.api._get_json(Ticket, space=self, rel_path=rel,
                              extra_params=params, get_all=True)
All Tickets in this Space
14,285
def milestones(self, extra_params=None):
    """All Milestones in this Space."""
    params = {'per_page': settings.MAX_PER_PAGE}
    if extra_params:
        params.update(extra_params)
    rel = self._build_rel_path('milestones/all')
    return self.api._get_json(Milestone, space=self, rel_path=rel,
                              extra_params=params, get_all=True)
All Milestones in this Space
14,286
def tools(self, extra_params=None):
    """All Tools in this Space."""
    rel = self._build_rel_path('space_tools')
    return self.api._get_json(SpaceTool, space=self, rel_path=rel,
                              extra_params=extra_params)
All Tools in this Space
14,287
def components(self, extra_params=None):
    """All Components in this Space."""
    rel = self._build_rel_path('ticket_components')
    return self.api._get_json(Component, space=self, rel_path=rel,
                              extra_params=extra_params)
All components in this Space
14,288
def users(self, extra_params=None):
    """All Users with access to this Space."""
    rel = self._build_rel_path('users')
    return self.api._get_json(User, space=self, rel_path=rel,
                              extra_params=extra_params)
All Users with access to this Space
14,289
def tags(self, extra_params=None):
    """All Tags in this Space."""
    rel = self._build_rel_path('tags')
    return self.api._get_json(Tag, space=self, rel_path=rel,
                              extra_params=extra_params)
All Tags in this Space
14,290
def wiki_pages(self, extra_params=None):
    """All Wiki Pages in this Space."""
    rel = self._build_rel_path('wiki_pages')
    return self.api._get_json(WikiPage, space=self, rel_path=rel,
                              extra_params=extra_params)
All Wiki Pages with access to this Space
14,291
def tickets(self, extra_params=None):
    """All Tickets which are a part of this Milestone.

    Returns a lazy filter object, matching the original behavior.
    """
    def belongs_here(ticket):
        return ticket.get('milestone_id', None) == self['id']
    return filter(belongs_here, self.space.tickets(extra_params=extra_params))
All Tickets which are a part of this Milestone
14,292
def tags ( self , extra_params = None ) : params = { 'per_page' : settings . MAX_PER_PAGE , } if extra_params : params . update ( extra_params ) return self . api . _get_json ( Tag , space = self , rel_path = self . space . _build_rel_path ( 'tickets/%s/tags' % self [ 'number' ] ) , extra_params = params , get_all = Tr...
All Tags in this Ticket
14,293
def milestone(self, extra_params=None):
    """The Milestone the Ticket belongs to, or None when unset/not found."""
    milestone_id = self.get('milestone_id', None)
    if not milestone_id:
        return None
    found = self.space.milestones(id=milestone_id, extra_params=extra_params)
    return found[0] if found else None
The Milestone that the Ticket is a part of
14,294
def user(self, extra_params=None):
    """The User currently assigned to the Ticket, or None when unset/not found."""
    assignee_id = self.get('assigned_to_id', None)
    if not assignee_id:
        return None
    found = self.space.users(id=assignee_id, extra_params=extra_params)
    return found[0] if found else None
The User currently assigned to the Ticket
14,295
def component(self, extra_params=None):
    """The Component currently assigned to the Ticket, or None when unset/not found."""
    component_id = self.get('component_id', None)
    if not component_id:
        return None
    found = self.space.components(id=component_id, extra_params=extra_params)
    return found[0] if found else None
The Component currently assigned to the Ticket
14,296
def comments ( self , extra_params = None ) : params = { 'per_page' : settings . MAX_PER_PAGE , } if extra_params : params . update ( extra_params ) return self . api . _get_json ( TicketComment , space = self , rel_path = self . space . _build_rel_path ( 'tickets/%s/ticket_comments' % self [ 'number' ] ) , extra_param...
All Comments in this Ticket
14,297
def write(self):
    """Create or update the Ticket on Assembla.

    PUT when the ticket already has a number (exists remotely), POST otherwise.
    """
    if not hasattr(self, 'space'):
        raise AttributeError("A ticket must have a 'space' attribute before you can write it to Assembla.")
    api = self.space.api
    method = api._put_json if self.get('number') else api._post_json
    return method(self, space=self.space,
                  rel_path=self.space._build_rel_path('tickets'))
Create or update the Ticket on Assembla
14,298
def delete(self):
    """Remove the Ticket from Assembla."""
    if not hasattr(self, 'space'):
        raise AttributeError("A ticket must have a 'space' attribute before you can remove it from Assembla.")
    rel = self.space._build_rel_path('tickets')
    return self.space.api._delete_json(self, space=self.space, rel_path=rel)
Remove the Ticket from Assembla
14,299
def tickets(self, extra_params=None):
    """A User's tickets across all available spaces (as a list)."""
    assigned = []
    for space in self.api.spaces():
        mine = [t for t in space.tickets(extra_params=extra_params)
                if t.get('assigned_to_id', None) == self['id']]
        assigned.extend(mine)
    return assigned
A User's tickets across all available spaces.