| idx (int64, 0-63k) | question (string, 61-4.03k chars) | target (string, 6-1.23k chars) |
|---|---|---|
| 62,000 | `def _is_requirement(line): line = line.strip() return line and not (line.startswith("-r") or line.startswith("#"))` | Returns whether the line is a valid package requirement. |
| 62,001 | `def render_to_response(self, context): if self.redirect_if_one_result: if self.object_list.count() == 1 and self.form.is_bound: return redirect(self.object_list.get().get_absolute_url()) return super(SearchMixin, self).render_to_response(context)` | When the user makes a search and there is only one result, redirect to the result's detail page rather than rendering the list. |
| 62,002 | `def clean_start_time(self): start = self.cleaned_data.get('start_time') if not start: return start active_entries = self.user.timepiece_entries.filter(start_time__gte=start, end_time__isnull=True) for entry in active_entries: output = ('The start time is on or before the current entry: ' '%s...` | Make sure that the start time doesn't come before the active entry. |
| 62,003 | `def clean(self): active = utils.get_active_entry(self.user) start_time = self.cleaned_data.get('start_time', None) end_time = self.cleaned_data.get('end_time', None) if active and active.pk != self.instance.pk: if (start_time and start_time > active.start_time) or (end_time and en...` | If we're not editing the active entry, ensure that this entry doesn't conflict with or come after the active entry. |
| 62,004 | `def clock_in(request): user = request.user active_entry = utils.get_active_entry(user, select_for_update=True) initial = dict([(k, v) for k, v in request.GET.items()]) data = request.POST or None form = ClockInForm(data, initial=initial, user=user, active=active_entry) if form...` | For clocking the user into a project. |
| 62,005 | `def toggle_pause(request): entry = utils.get_active_entry(request.user) if not entry: raise Http404 entry.toggle_paused() entry.save() action = 'paused' if entry.is_paused else 'resumed' message = 'Your entry, {0} on {1}, has been {2}.'.format(entry.activity.name, entry.project, action...` | Allow the user to pause and unpause the active entry. |
| 62,006 | `def reject_entry(request, entry_id): return_url = request.GET.get('next', reverse('dashboard')) try: entry = Entry.no_join.get(pk=entry_id) except: message = 'No such log entry.' messages.error(request, message) return redirect(return_url) if entry.status == Entry.UNVERIFIED or e...` | Admins can reject an entry that has been verified or approved but not invoiced, setting its status to unverified for the user to fix. |
| 62,007 | `def delete_entry(request, entry_id): try: entry = Entry.no_join.get(pk=entry_id, user=request.user) except Entry.DoesNotExist: message = 'No such entry found.' messages.info(request, message) url = request.GET.get('next', reverse('dashboard')) return HttpResponseRedirect(url) ...` | Give the user the ability to delete a log entry, with a confirmation beforehand. If this method is invoked via a GET request, a form asking for confirmation of intent is presented to the user. If it is invoked via a POST request, the entry is deleted. |
| 62,008 | `def get_hours_per_week(self, user=None): try: profile = UserProfile.objects.get(user=user or self.user) except UserProfile.DoesNotExist: profile = None return profile.hours_per_week if profile else Decimal('40.00')` | Retrieves the number of hours the user should work per week. |
| 62,009 | `def get_hours_for_week(self, week_start=None): week_start = week_start if week_start else self.week_start week_end = week_start + relativedelta(days=7) return ProjectHours.objects.filter(week_start__gte=week_start, week_start__lt=week_end)` | Gets all ProjectHours entries in the 7-day period beginning on week_start. |
| 62,010 | `def get_users_from_project_hours(self, project_hours): name = ('user__first_name', 'user__last_name') users = project_hours.values_list('user__id', *name).distinct().order_by(*name) return users` | Gets a list of the distinct users included in the project hours entries, ordered by name. |
| 62,011 | `def check_all(self, all_entries, *args, **kwargs): all_overlaps = 0 while True: try: user_entries = all_entries.next() except StopIteration: return all_overlaps else: user_total_overlaps = self.check_entry(user_entries, *args, **kwargs) all_overlaps += user_total_overlaps` | Go through lists of entries, find overlaps in each, and return the total. |
| 62,012 | `def check_entry(self, entries, *args, **kwargs): verbosity = kwargs.get('verbosity', 1) user_total_overlaps = 0 user = '' for index_a, entry_a in enumerate(entries): if index_a == 0: if args and verbosity >= 1 or verbosity >= 2: self.show_name(entry_a.user) user = entry_a.user for index_b...` | Given a list of entries, check each entry against every other. |
| 62,013 | `def find_start(self, **kwargs): week = kwargs.get('week', False) month = kwargs.get('month', False) year = kwargs.get('year', False) days = kwargs.get('days', 0) start = timezone.now() - relativedelta(months=1, day=1) if week: start = utils.get_week_start() if month: star...` | Determine the starting point of the query using CLI keyword arguments. |
| 62,014 | `def find_users(self, *args): if args: names = reduce(lambda query, arg: query \| (Q(first_name__icontains=arg) \| Q(last_name__icontains=arg)), args, Q()) users = User.objects.filter(names) else: users = User.objects.all() if not users.count() and args: if len(args) == 1 ...` | Returns the users to search, given names as args. Returns all users if no args are provided. |
| 62,015 | `def find_entries(self, users, start, *args, **kwargs): forever = kwargs.get('all', False) for user in users: if forever: entries = Entry.objects.filter(user=user).order_by('start_time') else: entries = Entry.objects.filter(user=user, start_time__gte=start).order_by('star...` | Find all entries for all users from a given starting point. If no starting point is provided, all entries are returned. |
| 62,016 | `def cbv_decorator(function_decorator): def class_decorator(View): View.dispatch = method_decorator(function_decorator)(View.dispatch) return View return class_decorator` | Allows a function-based decorator to be used on a CBV. |
| 62,017 | `def date_totals(entries, by): date_dict = {} for date, date_entries in groupby(entries, lambda x: x['date']): if isinstance(date, datetime.datetime): date = date.date() d_entries = list(date_entries) if by == 'user': name = ' '.join((d_entries[0]['user__first_name'], d_entries...` | Yield a user's name and a dictionary of their hours. |
| 62,018 | `def get_project_totals(entries, date_headers, hour_type=None, overtime=False, total_column=False, by='user'): totals = [0 for date in date_headers] rows = [] for thing, thing_entries in groupby(entries, lambda x: x[by]): name, thing_id, date_dict = date_totals(thing_entries, by) date...` | Yield hour totals grouped by user and date, optionally including overtime. |
| 62,019 | `def validate(self, validation_instances, metrics, iteration=None): if not validation_instances or not metrics: return {} split_id = 'val%s' % iteration if iteration is not None else 'val' train_results = evaluate.evaluate(self, validation_instances, metrics=metrics, split_id=split_id) output.ou...` | Evaluate this model on validation_instances during training and output a report. |
| 62,020 | `def predict_and_score(self, eval_instances, random=False, verbosity=0): if hasattr(self, '_using_default_separate') and self._using_default_separate: raise NotImplementedError self._using_default_combined = True return (self.predict(eval_instances, random=random, verbosity=verbosity), s...` | Return the most likely outputs and scores for the particular set of outputs given in eval_instances, as a tuple. The return value should be equivalent to the default implementation of |
| 62,021 | `def load(self, infile): model = pickle.load(infile) self.__dict__.update(model.__dict__)` | Deserialize a model from a stored file. |
| 62,022 | `def iter_batches(iterable, batch_size): sourceiter = iter(iterable) while True: batchiter = islice(sourceiter, batch_size) yield chain([batchiter.next()], batchiter)` | Given a sequence or iterable, yield batches from that iterable until it runs out. Note that this function returns a generator, and each batch is also a generator. |
| 62,023 | `def gen_batches(iterable, batch_size): def batches_thunk(): return iter_batches(iterable, batch_size) try: length = len(iterable) except TypeError: return batches_thunk() num_batches = (length - 1) // batch_size + 1 return SizedGenerator(batches_thunk, length=num_batches)` | Returns a generator object that yields batches from iterable. See iter_batches for more details and caveats. |
| 62,024 | `def inverted(self): return Instance(input=self.output, output=self.input, annotated_input=self.annotated_output, annotated_output=self.annotated_input, alt_inputs=self.alt_outputs, alt_outputs=self.alt_inputs, source=self.source)` | Return a version of this instance with inputs replaced by outputs and vice versa. |
| 62,025 | `def get_data_or_download(dir_name, file_name, url='', size='unknown'): dname = os.path.join(stanza.DATA_DIR, dir_name) fname = os.path.join(dname, file_name) if not os.path.isdir(dname): assert url, 'Could not locate data {}, and url was not specified. Cannot retrieve data.'.form...` | Returns the data. If the data hasn't been downloaded, downloads it first. |
| 62,026 | `def add(self, word, count=1): if word not in self: super(Vocab, self).__setitem__(word, len(self)) self._counts[word] += count return self[word]` | Add a word to the vocabulary and return its index. |
| 62,027 | `def subset(self, words): v = self.__class__(unk=self._unk) unique = lambda seq: len(set(seq)) == len(seq) assert unique(words) for w in words: if w in self: v.add(w, count=self.count(w)) return v` | Get a new Vocab containing only the specified subset of words. |
| 62,028 | `def _index2word(self): compute_index2word = lambda: self.keys() try: self._index2word_cache except AttributeError: self._index2word_cache = compute_index2word() if len(self._index2word_cache) != len(self): self._index2word_cache = compute_index2word() return self._index2word_cache` | Mapping from indices to words. |
| 62,029 | `def from_dict(cls, word2index, unk, counts=None): try: if word2index[unk] != 0: raise ValueError('unk must be assigned index 0') except KeyError: raise ValueError('word2index must have an entry for unk.') vals = set(word2index.values()) n = len(vals) bijection = (len(word2index) == n ...` | Create a Vocab from an existing string-to-integer dictionary. |
| 62,030 | `def to_file(self, f): for word in self._index2word: count = self._counts[word] f.write(u'{}\t{}\n'.format(word, count).encode('utf-8'))` | Write the vocab to a file. |
| 62,031 | `def backfill_unk_emb(self, E, filled_words): unk_emb = E[self[self._unk]] for i, word in enumerate(self): if word not in filled_words: E[i] = unk_emb` | Backfills an embedding matrix with the embedding for the unknown token. |
| 62,032 | `def best_gpu(max_usage=USAGE_THRESHOLD, verbose=False): try: proc = subprocess.Popen("nvidia-smi", stdout=subprocess.PIPE, stderr=subprocess.PIPE) output, error = proc.communicate() if error: raise Exception(error) except Exception, e: sys.stderr.write("Couldn't run nvidia-smi ...` | Return the name of a device to use: either cpu or gpu0, gpu1, etc. The least-used GPU with usage under the constant threshold will be chosen; ties are broken randomly. |
| 62,033 | `def evaluate(learner, eval_data, metrics, metric_names=None, split_id=None, write_data=False): if metric_names is None: metric_names = [(metric.__name__ if hasattr(metric, '__name__') else ('m%d' % i)) for i, metric in enumerate(metrics)] split_prefix = split_id + '.' if split_id else ''...` | Evaluate learner on the instances in eval_data according to each metric in metrics, and return a dictionary summarizing the values of the metrics. |
| 62,034 | `def json2pb(pb, js, useFieldNumber=False): for field in pb.DESCRIPTOR.fields: if useFieldNumber: key = field.number else: key = field.name if key not in js: continue if field.type == FD.TYPE_MESSAGE: pass elif field.type in _js2ftype: ftype = _js2ftype[field.type] else: raise ParseErro...` | Convert a JSON string to a google.protobuf.descriptor instance. |
| 62,035 | `def annotate_json(self, text, annotators=None): doc = self.annotate(text, annotators) return doc.json` | Return a JSON dict from the CoreNLP server containing annotations of the text. |
| 62,036 | `def annotate_proto(self, text, annotators=None): properties = {'annotators': ','.join(annotators or self.default_annotators), 'outputFormat': 'serialized', 'serializer': 'edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer'} r = self._request(text, properties) buffer = r.content size, ...` | Return a Document protocol buffer from the CoreNLP server containing annotations of the text. |
| 62,037 | `def annotate(self, text, annotators=None): doc_pb = self.annotate_proto(text, annotators) return AnnotatedDocument.from_pb(doc_pb)` | Return an AnnotatedDocument from the CoreNLP server. |
| 62,038 | `def from_pb(cls, pb): obj = cls._from_pb(pb) obj._pb = pb return obj` | Instantiate the object from a protocol buffer. |
| 62,039 | `def character_span(self): begin, end = self.token_span return (self.sentence[begin].character_span[0], self.sentence[end - 1].character_span[-1])` | Returns the character span of the token. |
| 62,040 | `def log_proto(self, proto, step_num): self.summ_writer.add_summary(proto, step_num) return proto` | Log a Summary protobuf to the event file. |
| 62,041 | `def log(self, key, val, step_num): try: ph, summ = self.summaries[key] except KeyError: with self.g.as_default(): ph = tf.placeholder(tf.float32, (), name=key) summ = tf.scalar_summary(key, ph) self.summaries[key] = (ph, summ) summary_str = self.sess.run(summ, {ph...` | Directly log a scalar value to the event file. |
| 62,042 | `def read_events(stream): header_size = struct.calcsize('<QI') len_size = struct.calcsize('<Q') footer_size = struct.calcsize('<I') while True: header = stream.read(header_size) if len(header) == 0: break elif len(header) < header_size: raise SummaryReaderException('unexpected EOF (exp...` | Read and return, as a generator, a sequence of Event protos from the file-like object stream. |
| 62,043 | `def write_events(stream, events): for event in events: data = event.SerializeToString() len_field = struct.pack('<Q', len(data)) len_crc = struct.pack('<I', masked_crc(len_field)) data_crc = struct.pack('<I', masked_crc(data)) stream.write(len_field) stream.write(len_crc) ...` | Write a sequence of Event protos to the file-like object stream. |
| 62,044 | `` def log_image(self, step, tag, val): if len(val.shape) != 3: raise ValueError('`log_image` value should be a 3-D tensor, instead got shape %s' % (val.shape,)) if val.shape[2] != 3: raise ValueError('Last dimension of `log_image` value should be 3 (RGB), ' 'instead got shape %s' % (val.sh... `` | Write an image event. |
| 62,045 | `def log_scalar(self, step, tag, val): summary = Summary(value=[Summary.Value(tag=tag, simple_value=float(np.float32(val)))]) self._add_event(step, summary)` | Write a scalar event. |
| 62,046 | `def log_histogram(self, step, tag, val): hist = Histogram() hist.add(val) summary = Summary(value=[Summary.Value(tag=tag, histo=hist.encode_to_proto())]) self._add_event(step, summary)` | Write a histogram event. |
| 62,047 | `def options(allow_partial=False, read=False): global _options if allow_partial: opts, extras = _options_parser.parse_known_args() if opts.run_dir: mkdirp(opts.run_dir) return opts if _options is None: _options_parser.add_argument('-h', '--help', action='help', default=argparse.SUPPR...` | Get the object containing the values of the parsed command-line options. |
| 62,048 | `def inner_products(self, vec): products = self.array.dot(vec) return self._word_to_score(np.arange(len(products)), products)` | Get the inner product of a vector with every embedding. |
| 62,049 | `def _word_to_score(self, ids, scores): assert len(ids.shape) == 1 assert ids.shape == scores.shape w2s = {} for i in range(len(ids)): w2s[self.vocab.index2word(ids[i])] = scores[i] return w2s` | Return a map from each word to its score. |
| 62,050 | `def _init_lsh_forest(self): import sklearn.neighbors lshf = sklearn.neighbors.LSHForest() lshf.fit(self.array) return lshf` | Construct an LSH forest for nearest-neighbor search. |
| 62,051 | `def to_dict(self): d = {} for word, idx in self.vocab.iteritems(): d[word] = self.array[idx].tolist() return d` | Convert to a dictionary. |
| 62,052 | `def to_files(self, array_file, vocab_file): logging.info('Writing array...') np.save(array_file, self.array) logging.info('Writing vocab...') self.vocab.to_file(vocab_file)` | Write the embedding matrix and the vocab to files. |
| 62,053 | `def from_files(cls, array_file, vocab_file): logging.info('Loading array...') array = np.load(array_file) logging.info('Loading vocab...') vocab = Vocab.from_file(vocab_file) return cls(array, vocab)` | Load the embedding matrix and the vocab from files. |
| 62,054 | `def get_uuids(): result = shell('cl ls -w {} -u'.format(worksheet)) uuids = result.split('\n') uuids = uuids[1:-1] return uuids` | List all bundle UUIDs in the worksheet. |
| 62,055 | `def open_file(uuid, path): f = tempfile.NamedTemporaryFile() f.close() fname = f.name cmd = 'cl down -o {} -w {} {}/{}'.format(fname, worksheet, uuid, path) try: shell(cmd) except RuntimeError: try: os.remove(fname) except OSError: pass raise IOError('Failed to open file {}/{}'.fo...` | Get the raw file content within a particular bundle at a particular path. |
| 62,056 | `def load_img(self, img_path): with open_file(self.uuid, img_path) as f: return mpimg.imread(f)` | Return an image object that can be immediately plotted with matplotlib. |
| 62,057 | `def output_results(results, split_id='results', output_stream=None): if output_stream is None: output_stream = sys.stdout output_stream.write('----- %s -----\n' % split_id) for name in sorted(results.keys()): output_stream.write('%s: %s\n' % (name, repr(results[name]))) output_st...` | Log results readably to output_stream, with a header containing split_id. |
| 62,058 | `def labels_to_onehots(labels, num_classes): batch_size = labels.get_shape().as_list()[0] with tf.name_scope("one_hot"): labels = tf.expand_dims(labels, 1) indices = tf.expand_dims(tf.range(0, batch_size, 1), 1) sparse_ptrs = tf.concat(1, [indices, labels], name="ptrs"...` | Convert a vector of integer class labels to a matrix of one-hot target vectors. |
| 62,059 | `def start_task(self, name, size): if len(self.task_stack) == 0: self.start_time = datetime.datetime.now() self.task_stack.append(Task(name, size, 0))` | Add a task to the stack. If, for example, name is Iteration and size is 10, progress on that task will be shown as |
| 62,060 | `def progress(self, p): self.task_stack[-1] = self.task_stack[-1]._replace(progress=p) self.progress_report()` | Update the current progress on the task at the top of the stack. |
| 62,061 | `def end_task(self): self.progress(self.task_stack[-1].size) self.task_stack.pop()` | Remove the current task from the stack. |
| 62,062 | `def progress_report(self, force=False): now = datetime.datetime.now() if (len(self.task_stack) > 1 or self.task_stack[0] > 0) and now - self.last_report < self.resolution and not force: return stack_printout = ', '.join('%s %s of %s' % (t.name, t.progress, t.size) for t in se...` | Print the current progress. |
| 62,063 | `def write_conll(self, fname): if 'label' not in self.fields: raise InvalidFieldsException("dataset is not in CONLL format: missing label field") def instance_to_conll(inst): tab = [v for k, v in inst.items() if k != 'label'] return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join...` | Serializes the dataset in CONLL format to fname. |
| 62,064 | `def convert(self, converters, in_place=False): dataset = self if in_place else self.__class__(OrderedDict([(name, data[:]) for name, data in self.fields.items()])) for name, convert in converters.items(): if name not in self.fields.keys(): raise InvalidFieldsException('Conv...` | Applies transformations to the dataset. |
| 62,065 | `def shuffle(self): order = range(len(self)) random.shuffle(order) for name, data in self.fields.items(): reindexed = [] for _, i in enumerate(order): reindexed.append(data[i]) self.fields[name] = reindexed return self` | Re-indexes the dataset in random order. |
| 62,066 | `def pad(cls, sequences, padding, pad_len=None): max_len = max([len(s) for s in sequences]) pad_len = pad_len or max_len assert pad_len >= max_len, 'pad_len {} must be greater or equal to the longest sequence {}'.format(pad_len, max_len) for i, s in enumerate(sequences): sequences[i] = [...` | Pads a list of sequences such that they form a matrix. |
| 62,067 | `def bleu(eval_data, predictions, scores='ignored', learner='ignored'): ref_groups = ([inst.output.split()] if isinstance(inst.output, basestring) else [_maybe_tokenize(r) for r in inst.output] for inst in eval_data) return [corpus_bleu(ref_groups, [p.split() for p in predictio...` | Return the corpus-level BLEU score of predictions, using the output field of the instances in eval_data as references. This is returned as a length-1 list of floats. |
| 62,068 | `def squared_error(eval_data, predictions, scores='ignored', learner='ignored'): return [np.sum((np.array(pred) - np.array(inst.output)) ** 2) for inst, pred in zip(eval_data, predictions)]` | Return the squared error of each prediction in predictions with respect to the correct output in eval_data. |
| 62,069 | `def encrypt_variable(variable, build_repo, *, tld='.org', public_key=None, travis_token=None, **login_kwargs): if not isinstance(variable, bytes): raise TypeError("variable should be bytes") if not b"=" in variable: raise ValueError("variable should be of the form 'VARIABLE=value'") if not ...` | Encrypt an environment variable for build_repo for Travis. |
| 62,070 | `def encrypt_to_file(contents, filename): if not filename.endswith('.enc'): raise ValueError("%s does not end with .enc" % filename) key = Fernet.generate_key() fer = Fernet(key) encrypted_file = fer.encrypt(contents) with open(filename, 'wb') as f: f.write(encrypted_file) return key` | Encrypts contents and writes it to filename. |
| 62,071 | `def GitHub_login(*, username=None, password=None, OTP=None, headers=None): if not username: username = input("What is your GitHub username? ") if not password: password = getpass("Enter the GitHub password for {username}: ".format(username=username)) headers = headers or {} if OTP: head...` | Log in to GitHub. |
| 62,072 | `def GitHub_post(data, url, *, auth, headers): r = requests.post(url, auth=auth, headers=headers, data=json.dumps(data)) GitHub_raise_for_status(r) return r.json()` | POST the given data to GitHub. |
| 62,073 | `def get_travis_token(*, GitHub_token=None, **login_kwargs): _headers = {'Content-Type': 'application/json', 'User-Agent': 'MyClient/1.0.0',} headersv2 = {**_headers, **Travis_APIv2} token_id = None try: if not GitHub_token: print(green("I need to generate a temporary token with GitHub to authe...` | Generate a temporary token for authenticating with Travis. |
| 62,074 | `def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs): if scopes is None: scopes = ['public_repo'] AUTH_URL = "https://api.github.com/authorizations" data = {"scopes": scopes, "note": note, "note_url": "https://github.com/drdoctr/doctr", ...` | Generate a GitHub token for pushing from Travis. |
| 62,075 | `def delete_GitHub_token(token_id, *, auth, headers): r = requests.delete('https://api.github.com/authorizations/{id}'.format(id=token_id), auth=auth, headers=headers) GitHub_raise_for_status(r)` | Delete a temporary GitHub token. |
| 62,076 | `def upload_GitHub_deploy_key(deploy_repo, ssh_key, *, read_only=False, title="Doctr deploy key for pushing to gh-pages from Travis", **login_kwargs): DEPLOY_KEY_URL = "https://api.github.com/repos/{deploy_repo}/keys".format(deploy_repo=deploy_repo) data = {"title": title, "key": ssh_key, "read...` | Uploads a GitHub deploy key to deploy_repo. |
| 62,077 | `def generate_ssh_key(): key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=4096) private_key = key.private_bytes(serialization.Encoding.PEM, serialization.PrivateFormat.PKCS8, serialization.NoEncryption()) public_key = key.public_key().pu...` | Generates an SSH deploy public and private key. |
| 62,078 | `def guess_github_repo(): p = subprocess.run(['git', 'ls-remote', '--get-url', 'origin'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) if p.stderr or p.returncode: return False url = p.stdout.decode('utf-8').strip() m = GIT_URL.fullmatch(url) if not m: retu...` | Guesses the GitHub repo for the current directory. |
| 62,079 | `def get_config(): p = Path('.travis.yml') if not p.exists(): return {} with p.open() as f: travis_config = yaml.safe_load(f.read()) config = travis_config.get('doctr', {}) if not isinstance(config, dict): raise ValueError('config is not a dict: {}'.format(config)) return co...` | Loads configuration from the doctr key of the .travis.yml file, if present. |
| 62,080 | `def decrypt_file(file, key): if not file.endswith('.enc'): raise ValueError("%s does not end with .enc" % file) fer = Fernet(key) with open(file, 'rb') as f: decrypted_file = fer.decrypt(f.read()) with open(file[:-4], 'wb') as f: f.write(decrypted_file) os.chmod(file[...` | Decrypts the given file. |
| 62,081 | `def setup_deploy_key(keypath='github_deploy_key', key_ext='.enc', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'): key = os.environ.get(env_name, os.environ.get("DOCTR_DEPLOY_ENCRYPTION_KEY", None)) if not key: raise RuntimeError("{env_name} or DOCTR_DEPLOY_ENCRYPTION_KEY environment variable is ...` | Decrypts the deploy key and configures it with ssh. |
| 62,082 | `def get_token(): token = os.environ.get("GH_TOKEN", None) if not token: token = "GH_TOKEN environment variable not set" token = token.encode('utf-8') return token` | Get the encrypted GitHub token in Travis. |
| 62,083 | `def run(args, shell=False, exit=True): if "GH_TOKEN" in os.environ: token = get_token() else: token = b'' if not shell: command = ' '.join(map(shlex.quote, args)) else: command = args command = command.replace(token.decode('utf-8'), '~' * len(token)) print(blue(command))...` | Run the command given by args. |
| 62,084 | `def get_current_repo(): remote_url = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']).decode('utf-8') _, org, git_repo = remote_url.rsplit('.git', 1)[0].rsplit('/', 2) return (org + '/' + git_repo)` | Get the GitHub repo name for the current directory. |
| 62,085 | `def get_travis_branch(): if os.environ.get("TRAVIS_PULL_REQUEST", "") == "true": return os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "") else: return os.environ.get("TRAVIS_BRANCH", "")` | Get the name of the branch that the PR is from. |
| 62,086 | `def set_git_user_email(): username = subprocess.run(shlex.split('git config user.name'), stdout=subprocess.PIPE).stdout.strip().decode('utf-8') if not username or username == "Travis CI User": run(['git', 'config', '--global', 'user.name', "Doctr (Travis CI)"]) else: print("Not...` | Set the global git user and email, if not already present on the system. |
| 62,087 | `def checkout_deploy_branch(deploy_branch, canpush=True): create_deploy_branch(deploy_branch, push=canpush) remote_branch = "doctr_remote/{}".format(deploy_branch) print("Checking out doctr working branch tracking", remote_branch) clear_working_branch() if run(['git', 'rev-parse', '--verify'...` | Check out the deploy branch, creating it if it doesn't exist. |
| 62,088 | `def deploy_branch_exists(deploy_branch): remote_name = 'doctr_remote' branch_names = subprocess.check_output(['git', 'branch', '-r']).decode('utf-8').split() return '{}/{}'.format(remote_name, deploy_branch) in branch_names` | Check if there is a remote branch with the name specified in deploy_branch. |
| 62,089 | `def create_deploy_branch(deploy_branch, push=True): if not deploy_branch_exists(deploy_branch): print("Creating {} branch on doctr_remote".format(deploy_branch)) clear_working_branch() run(['git', 'checkout', '--orphan', DOCTR_WORKING_BRANCH]) run(['git', 'rm', '-rf', '.']) print(...` | If there is no remote branch with the name specified in deploy_branch, create one. |
| 62,090 | `def find_sphinx_build_dir(): build = glob.glob('**/*build/html', recursive=True) if not build: raise RuntimeError("Could not find Sphinx build directory automatically") build_folder = build[0] return build_folder` | Find the build subfolder within the Sphinx docs directory. |
| 62,091 | `def copy_to_tmp(source): tmp_dir = tempfile.mkdtemp() p = pathlib.Path(source) dirname = p.name or 'temp' new_dir = os.path.join(tmp_dir, dirname) if os.path.isdir(source): shutil.copytree(source, new_dir) else: shutil.copy2(source, new_dir) return new_dir` | Copies source to a temporary directory and returns the copied location. |
| 62,092 | `def is_subdir(a, b): a, b = map(os.path.abspath, [a, b]) return os.path.commonpath([a, b]) == b` | Return true if a is a subdirectory of b. |
| 62,093 | `def sync_from_log(src, dst, log_file, exclude=()): from os.path import join, exists, isdir exclude = [os.path.normpath(i) for i in exclude] added, removed = [], [] if not exists(log_file): print("%s doesn't exist. Not removing any files." % log_file) else: with open(log_file) as f ...` | Sync the files in src to dst. |
| 62,094 | `def push_docs(deploy_branch='gh-pages', retries=5): code = 1 while code and retries: print("Pulling") code = run(['git', 'pull', '-s', 'recursive', '-X', 'ours', 'doctr_remote', deploy_branch], exit=False) print("Pushing commit") code = run(['git', 'push', '-q', 'doctr_remote', '{}...` | Push the changes to the branch named deploy_branch. |
| 62,095 | `def clean_path(p): p = os.path.expanduser(p) p = os.path.expandvars(p) p = os.path.abspath(p) return p` | Clean a path by expanding user and environment variables and making it absolute. |
| 62,096 | `def load_file_template(path): template = StringIO() if not os.path.exists(path): raise ValueError("path does not exist: %s" % path) with open(clean_path(path), "rb") as infile: for line in infile: template.write(line.decode("utf-8")) return template` | Load a template from the specified filesystem path. |
| 62,097 | `def load_package_template(license, header=False): content = StringIO() filename = 'template-%s-header.txt' if header else 'template-%s.txt' with resource_stream(__name__, filename % license) as licfile: for line in licfile: content.write(line.decode("utf-8")) return content` | Load a license template distributed with the package. |
| 62,098 | `def extract_vars(template): keys = set() for match in re.finditer(r"\{\{ (?P<key>\w+) \}\}", template.getvalue()): keys.add(match.groups()[0]) return sorted(list(keys))` | Extract variables from the template. Variables are enclosed in double curly braces. |
| 62,099 | `def generate_license(template, context): out = StringIO() content = template.getvalue() for key in extract_vars(template): if key not in context: raise ValueError("%s is missing from the template context" % key) content = content.replace("{{ %s }}" % key, context[key]) template.close() o...` | Generate a license by extracting variables from the template and replacing them with the corresponding values in the given context. |
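
Each row pairs a flattened Python function (`question`) with its reference docstring (`target`), which makes the split directly usable for code-summarization training or evaluation. Below is a minimal sketch of loading and iterating such a split with the Hugging Face `datasets` library; the identifier `"code-docstring-pairs"` is a hypothetical placeholder, since this preview does not name the dataset.

```python
from datasets import load_dataset

# "code-docstring-pairs" is a placeholder identifier (assumption);
# substitute the real dataset name, which this preview does not give.
ds = load_dataset("code-docstring-pairs", split="train")

# Each row has an integer "idx", a whitespace-tokenized function in
# "question", and the reference docstring in "target".
for row in ds.select(range(3)):
    code_tokens = row["question"].split()
    print(row["idx"], len(code_tokens), row["target"])
```

Note that because the `question` field is whitespace-tokenized, reassembling runnable source generally requires a detokenizer rather than a plain join.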