idx (int64, 0–63k) | question (string, lengths 53–5.28k) | target (string, lengths 5–805) |
|---|---|---|
58,500 | def gpg_sign ( path_to_sign , sender_key_info , config_dir = None , passphrase = None ) : if config_dir is None : config_dir = get_config_dir ( ) tmpdir = make_gpg_tmphome ( prefix = "sign" , config_dir = config_dir ) try : sender_privkey = gpg_export_key ( sender_key_info [ 'app_name' ] , sender_key_info [ 'key_id' ] ... | Sign a file on disk . |
58,501 | def gpg_verify ( path_to_verify , sigdata , sender_key_info , config_dir = None ) : if config_dir is None : config_dir = get_config_dir ( ) tmpdir = make_gpg_tmphome ( prefix = "verify" , config_dir = config_dir ) res = gpg_stash_key ( "verify" , sender_key_info [ 'key_data' ] , config_dir = config_dir , gpghome = tmpd... | Verify a file on disk was signed by the given sender . |
58,502 | def gpg_encrypt ( fd_in , path_out , sender_key_info , recipient_key_infos , passphrase = None , config_dir = None ) : if config_dir is None : config_dir = get_config_dir ( ) tmpdir = make_gpg_tmphome ( prefix = "encrypt" , config_dir = config_dir ) for key_info in recipient_key_infos : res = gpg_stash_key ( "encrypt" ... | Encrypt a stream of data for a set of keys . |
58,503 | def gpg_decrypt ( fd_in , path_out , sender_key_info , my_key_info , passphrase = None , config_dir = None ) : if config_dir is None : config_dir = get_config_dir ( ) tmpdir = make_gpg_tmphome ( prefix = "decrypt" , config_dir = config_dir ) res = gpg_stash_key ( "decrypt" , sender_key_info [ 'key_data' ] , config_dir ... | Decrypt a stream of data using key info for a private key we own . |
58,504 | def get_primary_command_usage ( message = '' ) : if not settings . merge_primary_command and None in settings . subcommands : return format_usage ( settings . subcommands [ None ] . __doc__ ) if not message : message = '\n{}\n' . format ( settings . message ) if settings . message else '' doc = _DEFAULT_DOC . format ( ... | Return the usage string for the primary command . |
58,505 | def get_help_usage ( command ) : if not command : doc = get_primary_command_usage ( ) elif command in ( '-a' , '--all' ) : subcommands = [ k for k in settings . subcommands if k is not None ] available_commands = subcommands + [ 'help' ] command_doc = '\nAvailable commands:\n{}\n' . format ( '\n' . join ( ' {}' . form... | Print out a help message and exit the program . |
58,506 | def format_usage ( doc , width = None ) : sections = doc . replace ( '\r' , '' ) . split ( '\n\n' ) width = width or get_terminal_size ( ) . columns or 80 return '\n\n' . join ( _wrap_section ( s . strip ( ) , width ) for s in sections ) | Format the docstring for display to the user . |
58,507 | def parse_commands ( docstring ) : try : docopt . docopt ( docstring , argv = ( ) ) except ( TypeError , docopt . DocoptLanguageError ) : return except docopt . DocoptExit : pass for command in _parse_section ( 'usage' , docstring ) : args = command . split ( ) commands = [ ] i = 0 for i , arg in enumerate ( args ) : i... | Parse a docopt - style string for commands and subcommands . |
58,508 | def _merge_doc ( original , to_merge ) : if not original : return to_merge or '' if not to_merge : return original or '' sections = [ ] for name in ( 'usage' , 'arguments' , 'options' ) : sections . append ( _merge_section ( _get_section ( name , original ) , _get_section ( name , to_merge ) ) ) return format_usage ( '... | Merge two usage strings together . |
58,509 | def _merge_section ( original , to_merge ) : if not original : return to_merge or '' if not to_merge : return original or '' try : index = original . index ( ':' ) + 1 except ValueError : index = original . index ( '\n' ) name = original [ : index ] . strip ( ) section = '\n ' . join ( ( original [ index + 1 : ] . lst... | Merge two sections together . |
58,510 | def _get_section ( name , source ) : pattern = re . compile ( '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)' . format ( name = name ) , re . IGNORECASE | re . MULTILINE ) usage = None for section in pattern . findall ( source ) : usage = _merge_section ( usage , section . strip ( ) ) return usage | Extract the named section from the source . |
58,511 | def _wrap_section ( source , width ) : if _get_section ( 'usage' , source ) : return _wrap_usage_section ( source , width ) if _is_definition_section ( source ) : return _wrap_definition_section ( source , width ) lines = inspect . cleandoc ( source ) . splitlines ( ) paragraphs = ( textwrap . wrap ( line , width , rep... | Wrap the given section string to the current terminal size . |
58,512 | def _is_definition_section ( source ) : try : definitions = textwrap . dedent ( source ) . split ( '\n' , 1 ) [ 1 ] . splitlines ( ) return all ( re . match ( r'\s\s+((?!\s\s).+)\s\s+.+' , s ) for s in definitions ) except IndexError : return False | Determine if the source is a definition section . |
58,513 | def _wrap_usage_section ( source , width ) : if not any ( len ( line ) > width for line in source . splitlines ( ) ) : return source section_header = source [ : source . index ( ':' ) + 1 ] . strip ( ) lines = [ section_header ] for commands , args in parse_commands ( source ) : command = ' {} ' . format ( ' ' . join ... | Wrap the given usage section string to the current terminal size . |
58,514 | def _wrap_definition_section ( source , width ) : index = source . index ( '\n' ) + 1 definitions , max_len = _get_definitions ( source [ index : ] ) sep = '\n' + ' ' * ( max_len + 4 ) lines = [ source [ : index ] . strip ( ) ] for arg , desc in six . iteritems ( definitions ) : wrapped_desc = sep . join ( textwrap . w... | Wrap the given definition section string to the current terminal size . |
58,515 | def _get_definitions ( source ) : max_len = 0 descs = collections . OrderedDict ( ) lines = ( s . strip ( ) for s in source . splitlines ( ) ) non_empty_lines = ( s for s in lines if s ) for line in non_empty_lines : if line : arg , desc = re . split ( r'\s\s+' , line . strip ( ) ) arg_len = len ( arg ) if arg_len > ma... | Extract a dictionary of arguments and definitions . |
58,516 | def _parse_section ( name , source ) : section = textwrap . dedent ( _get_section ( name , source ) [ 7 : ] ) commands = [ ] for line in section . splitlines ( ) : if not commands or line [ : 1 ] . isalpha ( ) and line [ : 1 ] . islower ( ) : commands . append ( line ) else : commands [ - 1 ] = '{} {}' . format ( comma... | Yield each section line . |
58,517 | def move ( self , particle , u , v , w , modelTimestep , ** kwargs ) : if not particle . settled and not particle . dead : particle . die ( ) temp = kwargs . get ( 'temperature' , None ) if temp is not None and math . isnan ( temp ) : temp = None particle . temp = temp salt = kwargs . get ( 'salinity' , None ) if salt ... | I'm dead, so no behaviors should act on me |
58,518 | def read_history_file ( self , filename = None ) : u if filename is None : filename = self . history_filename try : for line in open ( filename , u'r' ) : self . add_history ( lineobj . ReadLineTextBuffer ( ensure_unicode ( line . rstrip ( ) ) ) ) except IOError : self . history = [ ] self . history_cursor = 0 | Load a readline history file . |
58,519 | def write_history_file ( self , filename = None ) : u if filename is None : filename = self . history_filename fp = open ( filename , u'wb' ) for line in self . history [ - self . history_length : ] : fp . write ( ensure_str ( line . get_line_text ( ) ) ) fp . write ( u'\n' ) fp . close ( ) | Save a readline history file . |
58,520 | def add_history ( self , line ) : u if not hasattr ( line , "get_line_text" ) : line = lineobj . ReadLineTextBuffer ( line ) if not line . get_line_text ( ) : pass elif len ( self . history ) > 0 and self . history [ - 1 ] . get_line_text ( ) == line . get_line_text ( ) : pass else : self . history . append ( line ) se... | Append a line to the history buffer as if it was the last line typed . |
58,521 | def beginning_of_history ( self ) : u self . history_cursor = 0 if len ( self . history ) > 0 : self . l_buffer = self . history [ 0 ] | Move to the first line in the history . |
58,522 | def get_time_objects_from_model_timesteps ( cls , times , start ) : modelTimestep = [ ] newtimes = [ ] for i in xrange ( 0 , len ( times ) ) : try : modelTimestep . append ( times [ i + 1 ] - times [ i ] ) except StandardError : modelTimestep . append ( times [ i ] - times [ i - 1 ] ) newtimes . append ( start + timede... | Calculate the datetimes of the model timesteps |
58,523 | def fill_polygon_with_points ( cls , goal = None , polygon = None ) : if goal is None : raise ValueError ( "Must specify the number of points (goal) to fill the polygon with" ) if polygon is None or ( not isinstance ( polygon , Polygon ) and not isinstance ( polygon , MultiPolygon ) ) : raise ValueError ( "Must specify... | Fill a shapely polygon with X number of points |
58,524 | def distance_from_location_using_u_v_w ( cls , u = None , v = None , w = None , timestep = None , location = None ) : distance_horiz = 0 azimuth = 0 angle = 0 depth = location . depth if u is not 0 and v is not 0 : s_and_d = AsaMath . speed_direction_from_u_v ( u = u , v = v ) distance_horiz = s_and_d [ 'speed' ] * tim... | Calculate the great distance from a location using u , v and w . |
58,525 | def shutdown ( self ) : self . started = False try : for t in self . _threads : t . join ( ) finally : self . stopped = True | Wait for all threads to complete |
58,526 | def _unpack_bytes ( bytes ) : if bytes == b'' : return 0 int_length = 4 len_diff = int_length - len ( bytes ) bytes = bytes + len_diff * b'\x00' return struct . unpack ( "<L" , bytes ) [ 0 ] | Unpack a set of bytes into an integer . First pads to 4 bytes . Little endian . |
58,527 | def get_sprints ( ) : sprints = load_member_from_setting ( 'RAPID_PROTOTYPING_SPRINTS_MODULE' ) all_tasks = [ ] for importer , package_name , _ in pkgutil . walk_packages ( onerror = lambda p : p ) : if not package_name . endswith ( '_costs' ) : continue if not getattr ( settings , 'TEST_RUN' , None ) and ( '.test_app.... | Returns all sprints enriched with their assigned tasks . |
58,528 | def append_overhead_costs ( costs , new_id , overhead_percentage = 0.15 ) : total_time = 0 for item in costs : total_time += item [ 'time' ] costs . append ( { 'id' : new_id , 'task' : 'Overhead, Bugfixes & Iterations' , 'time' : total_time * overhead_percentage , } , ) return costs | Adds 15% overhead costs to the list of costs . |
58,529 | def arduino_default_path ( ) : if sys . platform == 'darwin' : s = path ( '/Applications/Arduino.app/Contents/Resources/Java' ) elif sys . platform == 'win32' : s = None else : s = path ( '/usr/share/arduino/' ) return s | Platform-specific default root path . |
58,530 | def checkForChanges ( f , sde , isTable ) : fCount = int ( arcpy . GetCount_management ( f ) . getOutput ( 0 ) ) sdeCount = int ( arcpy . GetCount_management ( sde ) . getOutput ( 0 ) ) if fCount != sdeCount : return True fields = [ fld . name for fld in arcpy . ListFields ( f ) ] if not isTable : fields = filter_field... | returns False if there are no changes |
58,531 | def install_metaboard ( replace_existing = False , ) : metaboard = AutoBunch ( ) metaboard . name = 'Metaboard' metaboard . upload . protocol = 'usbasp' metaboard . upload . maximum_size = '14336' metaboard . upload . speed = '19200' metaboard . build . mcu = 'atmega168' metaboard . build . f_cpu = '16000000L' metaboar... | install metaboard . |
58,532 | def __total_pages ( self ) -> int : row_count = self . model . query . count ( ) if isinstance ( row_count , int ) : return int ( row_count / self . limit ) return None | Return max pages created by limit |
58,533 | def links ( self , base_link , current_page ) -> dict : max_pages = self . max_pages - 1 if self . max_pages > 0 else self . max_pages base_link = '/%s' % ( base_link . strip ( "/" ) ) self_page = current_page prev = current_page - 1 if current_page is not 0 else None prev_link = '%s/page/%s/%s' % ( base_link , prev , ... | Return JSON paginate links |
58,534 | def json_paginate ( self , base_url , page_number ) : data = self . page ( page_number ) first_id = None last_id = None if data : first_id = data [ 0 ] . id last_id = data [ - 1 ] . id return { 'meta' : { 'total_pages' : self . max_pages , 'first_id' : first_id , 'last_id' : last_id , 'current_page' : page_number } , '... | Return a dict for a JSON paginate |
58,535 | def add_arguments ( parser , default_level = logging . INFO ) : adder = ( getattr ( parser , 'add_argument' , None ) or getattr ( parser , 'add_option' ) ) adder ( '-l' , '--log-level' , default = default_level , type = log_level , help = "Set log level (DEBUG, INFO, WARNING, ERROR)" ) | Add arguments to an ArgumentParser or OptionParser for purposes of grabbing a logging level . |
58,536 | def setup ( options , ** kwargs ) : params = dict ( kwargs ) params . update ( level = options . log_level ) logging . basicConfig ( ** params ) | Setup logging with options or arguments from an OptionParser or ArgumentParser . Also pass any keyword arguments to the basicConfig call . |
58,537 | def setup_requests_logging ( level ) : requests_log = logging . getLogger ( "requests.packages.urllib3" ) requests_log . setLevel ( level ) requests_log . propagate = True http_client . HTTPConnection . debuglevel = level <= logging . DEBUG | Setup logging for requests such that it logs details about the connection headers etc . |
58,538 | def _set_period ( self , period ) : self . _period = period if period : self . _period_seconds = tempora . get_period_seconds ( self . _period ) self . _date_format = tempora . get_date_format_string ( self . _period_seconds ) else : self . _period_seconds = 0 self . _date_format = '' | Set the period for the timestamp . If period is 0 or None no period will be used . |
58,539 | def get_filename ( self , t ) : root , ext = os . path . splitext ( self . base_filename ) if self . _period_seconds : t -= t % self . _period_seconds dt = datetime . datetime . utcfromtimestamp ( t ) appended_date = ( dt . strftime ( self . _date_format ) if self . _date_format != '' else '' ) if appended_date : resul... | Return the appropriate filename for the given time based on the defined period . |
58,540 | def emit ( self , record ) : now = time . time ( ) current_name = self . get_filename ( now ) try : if not self . stream . name == current_name : self . _use_file ( current_name ) except AttributeError : self . _use_file ( current_name ) logging . StreamHandler . emit ( self , record ) | Emit a record . Output the record to the file ensuring that the currently - opened file has the correct date . |
58,541 | def register ( app , uri , file_or_directory , pattern , use_modified_since , use_content_range ) : if not path . isfile ( file_or_directory ) : uri += '<file_uri:' + pattern + '>' async def _handler ( request , file_uri = None ) : if file_uri and '../' in file_uri : raise InvalidUsage ( "Invalid URL" ) root_path = fil... | Register a static directory handler with Mach9 by adding a route to the router and registering a handler . |
58,542 | def fix_imports ( script ) : with open ( script , 'r' ) as f_script : lines = f_script . read ( ) . splitlines ( ) new_lines = [ ] for l in lines : if l . startswith ( "import " ) : l = "from . " + l if "from PyQt5 import" in l : l = l . replace ( "from PyQt5 import" , "from pyqode.qt import" ) new_lines . append ( l )... | Replace from PyQt5 import with from pyqode . qt import . |
58,543 | def eval_py ( self , _globals , _locals ) : try : params = eval ( self . script , _globals , _locals ) except NameError as e : raise Exception ( 'Failed to evaluate parameters: {}' . format ( str ( e ) ) ) except ResolutionError as e : raise Exception ( 'GetOutput: {}' . format ( str ( e ) ) ) return params | Evaluates a file containing a Python params dictionary . |
58,544 | def new ( cls , arg ) : content = None if arg . kind == 'file' : if os . path . exists ( arg . value ) : with open ( arg . value , 'r' ) as f : content = f . read ( ) else : raise Exception ( 'File does not exist: {}' . format ( arg . value ) ) elif arg . kind == 'cli' : content = arg . value for source_cls in cls . so... | Creates a new Parameter object from the given ParameterArgument . |
58,545 | def minimum_pitch ( self ) : pitch = self . pitch minimal_pitch = [ ] for p in pitch : minimal_pitch . append ( min ( p ) ) return min ( minimal_pitch ) | Returns the minimal pitch between two neighboring nodes of the mesh in each direction . |
58,546 | def surrounding_nodes ( self , position ) : n_node_index , n_node_position , n_node_error = self . nearest_node ( position ) if n_node_error == 0.0 : index_mod = [ ] for i in range ( len ( n_node_index ) ) : new_point = np . asarray ( n_node_position ) new_point [ i ] += 1.e-5 * np . abs ( new_point [ i ] ) try : self ... | Returns nearest node indices and direction of opposite node . |
58,547 | def tokenize ( self , string ) : it = colorise . compat . ifilter ( None , self . _pattern . finditer ( string ) ) try : t = colorise . compat . next ( it ) except StopIteration : yield string , False return pos , buf , lm , escapeflag = - 1 , '' , - 1 , False if t . start ( ) > 0 : yield string [ : t . start ( ) ] , F... | Tokenize a string and return an iterator over its tokens . |
58,548 | def parse ( self , format_string ) : txt , state = '' , 0 colorstack = [ ( None , None ) ] itokens = self . tokenize ( format_string ) for token , escaped in itokens : if token == self . _START_TOKEN and not escaped : if txt : yield txt , colorstack [ - 1 ] txt = '' state += 1 colors = self . extract_syntax ( colorise ... | Parse color syntax from a formatted string . |
58,549 | def from_mapping ( cls , evidence_mapping ) : return cls ( metadata_map = MetadataMap . from_mapping ( evidence_mapping [ 'metadataMap' ] ) , copyright = evidence_mapping [ 'copyright' ] , id = evidence_mapping [ 'id' ] , terms_of_use = evidence_mapping [ 'termsOfUse' ] , document = evidence_mapping [ 'document' ] , ti... | Create an Evidence instance from the given mapping |
58,550 | def to_obj ( cls , obj_data = None , * fields , ** field_map ) : obj_dict = obj_data . __dict__ if hasattr ( obj_data , '__dict__' ) else obj_data if not fields : fields = obj_dict . keys ( ) obj = cls ( ) update_obj ( obj_dict , obj , * fields , ** field_map ) return obj | Prioritize obj_dict when there are conflicts |
58,551 | def with_ctx ( func = None ) : if not func : return functools . partial ( with_ctx ) @ functools . wraps ( func ) def func_with_context ( _obj , * args , ** kwargs ) : if 'ctx' not in kwargs or kwargs [ 'ctx' ] is None : with _obj . ctx ( ) as new_ctx : kwargs [ 'ctx' ] = new_ctx return func ( _obj , * args , ** kwargs... | Auto create a new context if not available |
58,552 | def open ( self , auto_commit = None , schema = None ) : if schema is None : schema = self . schema ac = auto_commit if auto_commit is not None else schema . auto_commit exe = ExecutionContext ( self . path , schema = schema , auto_commit = ac ) if not os . path . isfile ( self . path ) or os . path . getsize ( self . ... | Create a context to execute queries |
58,553 | def build_insert ( self , table , values , columns = None ) : if not columns : columns = table . columns if len ( values ) < len ( columns ) : column_names = ',' . join ( columns [ - len ( values ) : ] ) else : column_names = ',' . join ( columns ) query = "INSERT INTO %s (%s) VALUES (%s) " % ( table . name , column_na... | Insert an active record into DB and return lastrowid if available |
58,554 | def select_record ( self , table , where = None , values = None , orderby = None , limit = None , columns = None ) : query = self . schema . query_builder . build_select ( table , where , orderby , limit , columns ) return table . to_table ( self . execute ( query , values ) , columns = columns ) | Support these keywords : where , values , orderby , limit and columns |
58,555 | def should_be_excluded ( name , exclude_patterns ) : for pattern in exclude_patterns : if fnmatch . fnmatch ( name , pattern ) : return True return False | Check if a name should be excluded . |
58,556 | def filter_visited ( curr_dir , subdirs , already_visited , follow_dirlinks , on_error ) : filtered = [ ] to_visit = set ( ) _already_visited = already_visited . copy ( ) try : file_info = os . stat ( curr_dir ) if follow_dirlinks else os . lstat ( curr_dir ) _already_visited . add ( ( file_info . st_dev , file_info . ... | Filter subdirs that have already been visited . |
58,557 | def index_files_by_size ( root , files_by_size , exclude_dirs , exclude_files , follow_dirlinks ) : errors = [ ] already_visited = set ( ) def _print_error ( error ) : msg = "error listing '%s': %s" % ( error . filename , error . strerror ) sys . stderr . write ( "%s\n" % msg ) errors . append ( msg ) for curr_dir , su... | Recursively index files under a root directory . |
58,558 | def calculate_md5 ( filename , length ) : assert length >= 0 if length == 0 : return '\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04\xe9\x80\t\x98\xec\xf8\x42\x7e' md5_summer = hashlib . md5 ( ) f = open ( filename , 'rb' ) try : bytes_read = 0 while bytes_read < length : chunk_size = min ( MD5_CHUNK_SIZE , length - bytes_read ) chu... | Calculate the MD5 hash of a file up to length bytes . |
58,559 | def find_duplicates ( filenames , max_size ) : errors = [ ] if len ( filenames ) < 2 : return [ ] , errors if max_size == 0 : return [ filenames ] , errors files_by_md5 = { } for filename in filenames : try : md5 = calculate_md5 ( filename , max_size ) except EnvironmentError as e : msg = "unable to calculate MD5 for '... | Find duplicates in a list of files comparing up to max_size bytes . |
58,560 | def find_duplicates_in_dirs ( directories , exclude_dirs = None , exclude_files = None , follow_dirlinks = False ) : if exclude_dirs is None : exclude_dirs = [ ] if exclude_files is None : exclude_files = [ ] errors_in_total = [ ] files_by_size = { } for directory in directories : sub_errors = index_files_by_size ( dir... | Recursively scan a list of directories looking for duplicate files . |
58,561 | def semimajor ( P , M ) : if type ( P ) != Quantity : P = P * u . day if type ( M ) != Quantity : M = M * u . M_sun a = ( ( P / 2 / np . pi ) ** 2 * const . G * M ) ** ( 1. / 3 ) return a . to ( u . AU ) | P and M can be Quantity objects ; otherwise they default to day and M_sun |
58,562 | def random_spherepos ( n ) : signs = np . sign ( rand . uniform ( - 1 , 1 , size = n ) ) thetas = Angle ( np . arccos ( rand . uniform ( size = n ) * signs ) , unit = u . rad ) phis = Angle ( rand . uniform ( 0 , 2 * np . pi , size = n ) , unit = u . rad ) c = SkyCoord ( phis , thetas , 1 , representation = 'physicssph... | returns SkyCoord object with n positions randomly oriented on the unit sphere |
58,563 | def to_dict ( self ) : return { inflection . camelize ( k , False ) : v for k , v in self . __dict__ . items ( ) if v } | Return a dict of all instance variables with truthy values with key names camelized |
58,564 | def depth ( self ) : return len ( self . path . rstrip ( os . sep ) . split ( os . sep ) ) | Returns the number of ancestors of this directory . |
58,565 | def ancestors ( self , stop = None ) : folder = self while folder . parent != stop : if folder . parent == folder : return yield folder . parent folder = folder . parent | Generates the parents until stop or the absolute root directory is reached . |
58,566 | def is_descendant_of ( self , ancestor ) : stop = Folder ( ancestor ) for folder in self . ancestors ( ) : if folder == stop : return True if stop . depth > folder . depth : return False return False | Checks if this folder is inside the given ancestor . |
58,567 | def get_relative_path ( self , root ) : if self . path == root : return '' ancestors = self . ancestors ( stop = root ) return functools . reduce ( lambda f , p : Folder ( p . name ) . child ( f ) , ancestors , self . name ) | Gets the fragment of the current path starting at root . |
58,568 | def get_mirror ( self , target_root , source_root = None ) : fragment = self . get_relative_path ( source_root if source_root else self . parent ) return Folder ( target_root ) . child ( fragment ) | Returns a File or Folder object that represents what would result if the entire fragment of this directory starting with source_root were copied to target_root . |
58,569 | def file_or_folder ( path ) : target = unicode ( path ) return Folder ( target ) if os . path . isdir ( target ) else File ( target ) | Returns a File or Folder object that would represent the given path . |
58,570 | def is_binary ( self ) : with open ( self . path , 'rb' ) as fin : CHUNKSIZE = 1024 while 1 : chunk = fin . read ( CHUNKSIZE ) if b'\0' in chunk : return True if len ( chunk ) < CHUNKSIZE : break return False | Return true if this is a binary file . |
58,571 | def make_temp ( text ) : import tempfile ( handle , path ) = tempfile . mkstemp ( text = True ) os . close ( handle ) afile = File ( path ) afile . write ( text ) return afile | Creates a temporary file and writes the text into it |
58,572 | def read_all ( self , encoding = 'utf-8' ) : logger . info ( "Reading everything from %s" % self ) with codecs . open ( self . path , 'r' , encoding ) as fin : read_text = fin . read ( ) return read_text | Reads from the file and returns the content as a string . |
58,573 | def write ( self , text , encoding = "utf-8" ) : logger . info ( "Writing to %s" % self ) with codecs . open ( self . path , 'w' , encoding ) as fout : fout . write ( text ) | Writes the given text to the file using the given encoding . |
58,574 | def copy_to ( self , destination ) : target = self . __get_destination__ ( destination ) logger . info ( "Copying %s to %s" % ( self , target ) ) shutil . copy ( self . path , unicode ( destination ) ) return target | Copies the file to the given destination . Returns a File object that represents the target file . destination must be a File or Folder object . |
58,575 | def etag ( self ) : CHUNKSIZE = 1024 * 64 from hashlib import md5 hash = md5 ( ) with open ( self . path ) as fin : chunk = fin . read ( CHUNKSIZE ) while chunk : hash_update ( hash , chunk ) chunk = fin . read ( CHUNKSIZE ) return hash . hexdigest ( ) | Generates etag from file contents . |
58,576 | def child_folder ( self , fragment ) : return Folder ( os . path . join ( self . path , Folder ( fragment ) . path ) ) | Returns a folder object by combining the fragment with this folder's path |
58,577 | def child ( self , fragment ) : return os . path . join ( self . path , FS ( fragment ) . path ) | Returns a path of a child item represented by fragment . |
58,578 | def make ( self ) : try : if not self . exists : logger . info ( "Creating %s" % self . path ) os . makedirs ( self . path ) except os . error : pass return self | Creates this directory and any of the missing directories in the path . Any errors that may occur are eaten . |
58,579 | def delete ( self ) : if self . exists : logger . info ( "Deleting %s" % self . path ) shutil . rmtree ( self . path ) | Deletes the directory if it exists . |
58,580 | def _create_target_tree ( self , target ) : source = self with source . walker as walker : @ walker . folder_visitor def visit_folder ( folder ) : if folder != source : Folder ( folder . get_mirror ( target , source ) ) . make ( ) | There is a bug in dir_util that makes copy_tree crash if a folder in the tree has been deleted before and re-added now . To work around the bug , we first walk the tree and create the directories that are needed . |
58,581 | def copy_contents_to ( self , destination ) : logger . info ( "Copying contents of %s to %s" % ( self , destination ) ) target = Folder ( destination ) target . make ( ) self . _create_target_tree ( target ) dir_util . copy_tree ( self . path , unicode ( target ) ) return target | Copies the contents of this directory to the given destination . Returns a Folder object that represents the copied directory . |
58,582 | def __start ( self ) : thread = Thread ( target = self . __loop , args = ( ) ) thread . daemon = True thread . start ( ) self . __enabled = True | Start a new thread to process Cron |
58,583 | def __dict_to_BetterDict ( self , attr ) : if type ( self [ attr ] ) == dict : self [ attr ] = BetterDict ( self [ attr ] ) return self [ attr ] | Convert the passed attr to a BetterDict if the value is a dict |
58,584 | def _bd_ ( self ) : if not getattr ( self , '__bd__' , False ) : self . __bd = BetterDictLookUp ( self ) return self . __bd | Property that allows dot lookups of otherwise hidden attributes . |
58,585 | def create_or_update ( sender , ** kwargs ) : now = datetime . datetime . now ( ) from activity_monitor . models import Activity instance = kwargs [ 'instance' ] instance_content_type = ContentType . objects . get_for_model ( sender ) instance_model = sender content_object = instance_model . objects . get ( id = instan... | Create or update an Activity Monitor item from some instance . |
58,586 | def highlight_differences ( s1 , s2 , color ) : ls1 , ls2 = len ( s1 ) , len ( s2 ) diff_indices = [ i for i , ( a , b ) in enumerate ( zip ( s1 , s2 ) ) if a != b ] print ( s1 ) if ls2 > ls1 : colorise . cprint ( '_' * ( ls2 - ls1 ) , fg = color ) else : print ( ) colorise . highlight ( s2 , indices = diff_indices , f... | Highlight the characters in s2 that differ from those in s1 . |
58,587 | def create_jinja_env ( ) : template_dir = os . path . join ( os . path . dirname ( __file__ ) , 'templates' ) env = jinja2 . Environment ( loader = jinja2 . FileSystemLoader ( template_dir ) , autoescape = jinja2 . select_autoescape ( [ 'html' ] ) ) env . filters [ 'simple_date' ] = filter_simple_date env . filters [ '... | Create a Jinja2 ~jinja2 . Environment . |
58,588 | def render_homepage ( config , env ) : template = env . get_template ( 'homepage.jinja' ) rendered_page = template . render ( config = config ) return rendered_page | Render the homepage . jinja template . |
58,589 | def d_cal ( calibcurve , rcmean , w2 , cutoff = 0.0001 , normal_distr = False , t_a = 3 , t_b = 4 ) : assert t_b - 1 == t_a if normal_distr : std = np . sqrt ( calibcurve . error ** 2 + w2 ) dens = stats . norm ( loc = rcmean , scale = std ) . pdf ( calibcurve . c14age ) else : dens = ( t_b + ( ( rcmean - calibcurve . ... | Get calendar date probabilities |
58,590 | def calibrate_dates ( chron , calib_curve , d_r , d_std , cutoff = 0.0001 , normal_distr = False , t_a = [ 3 ] , t_b = [ 4 ] ) : n = len ( chron . depth ) calib_curve = np . array ( calib_curve ) t_a = np . array ( t_a ) t_b = np . array ( t_b ) assert t_b - 1 == t_a d_r = np . array ( d_r ) d_std = np . array ( d_std ... | Get density of calendar dates for chron date segment in core |
58,591 | def _init_browser ( self ) : self . browser = splinter . Browser ( 'phantomjs' ) self . browser . visit ( self . server_url + "/youraccount/login" ) try : self . browser . fill ( 'nickname' , self . user ) self . browser . fill ( 'password' , self . password ) except : self . browser . fill ( 'p_un' , self . user ) sel... | Override in an appropriate way to prepare a logged-in browser . |
58,592 | def upload_marcxml ( self , marcxml , mode ) : if mode not in [ "-i" , "-r" , "-c" , "-a" , "-ir" ] : raise NameError ( "Incorrect mode " + str ( mode ) ) return requests . post ( self . server_url + "/batchuploader/robotupload" , data = { 'file' : marcxml , 'mode' : mode } , headers = { 'User-Agent' : CFG_USER_AGENT }... | Upload a record to the server . |
58,593 | def url ( self ) : if self . server_url is not None and self . recid is not None : return '/' . join ( [ self . server_url , CFG_SITE_RECORD , str ( self . recid ) ] ) else : return None | Returns the URL to this record . Returns None if not known |
58,594 | def clean_list_of_twitter_list ( list_of_twitter_lists , sent_tokenize , _treebank_word_tokenize , tagger , lemmatizer , lemmatize , stopset , first_cap_re , all_cap_re , digits_punctuation_whitespace_re , pos_set ) : list_of_keyword_sets = list ( ) append_keyword_set = list_of_keyword_sets . append list_of_lemma_to_ke... | Extracts the sets of keywords for each Twitter list . |
58,595 | def user_twitter_list_bag_of_words ( twitter_list_corpus , sent_tokenize , _treebank_word_tokenize , tagger , lemmatizer , lemmatize , stopset , first_cap_re , all_cap_re , digits_punctuation_whitespace_re , pos_set ) : list_of_keyword_sets , list_of_lemma_to_keywordbags = clean_list_of_twitter_list ( twitter_list_corp... | Extract a bag - of - words for a corpus of Twitter lists pertaining to a Twitter user . |
58,596 | def grouper ( iterable , n , pad_value = None ) : chunk_gen = ( chunk for chunk in zip_longest ( * [ iter ( iterable ) ] * n , fillvalue = pad_value ) ) return chunk_gen | Returns a generator of n - length chunks of an input iterable with appropriate padding at the end . |
58,597 | def chunks ( iterable , n ) : for i in np . arange ( 0 , len ( iterable ) , n ) : yield iterable [ i : i + n ] | A python generator that yields 100 - length sub - list chunks . |
58,598 | def split_every ( iterable , n ) : i = iter ( iterable ) piece = list ( islice ( i , n ) ) while piece : yield piece piece = list ( islice ( i , n ) ) | A generator of n - length chunks of an input iterable |
58,599 | def merge_properties ( item_properties , prop_name , merge_value ) : existing_value = item_properties . get ( prop_name , None ) if not existing_value : item_properties [ prop_name ] = merge_value else : if type ( merge_value ) is int or type ( merge_value ) is str : item_properties [ prop_name ] = existing_value + mer... | Tries to figure out which type of property value that should be merged and invoke the right function . Returns new properties if the merge was successful otherwise False . |