idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
58,500
def gpg_sign(path_to_sign, sender_key_info, config_dir=None, passphrase=None):
    """
    Sign a file on disk with the sender's private GPG key.

    Returns {'status': True, 'sig': <detached signature bytes>} on success,
    or {'error': ...} on failure.
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # Work in a throwaway GPG home so we never touch the user's keyring.
    tmpdir = make_gpg_tmphome(prefix="sign", config_dir=config_dir)
    try:
        try:
            sender_privkey = gpg_export_key(sender_key_info['app_name'], sender_key_info['key_id'],
                                            include_private=True, config_dir=config_dir)
        except Exception as e:  # fixed Py2-only `except Exception, e` syntax
            log.exception(e)
            return {'error': 'No such private key'}

        res = gpg_stash_key("sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir)
        if res is None:
            return {'error': 'Failed to load sender private key'}

        gpg = gnupg.GPG(homedir=tmpdir)
        with open(path_to_sign, "r") as fd_in:
            res = gpg.sign_file(fd_in, keyid=sender_key_info['key_id'],
                                passphrase=passphrase, detach=True)
    finally:
        # Always remove the temporary keyring (original leaked it if signing raised).
        shutil.rmtree(tmpdir)

    if not res:
        log.debug("sign_file error: %s" % res.__dict__)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to sign data'}
    return {'status': True, 'sig': res.data}
Sign a file on disk .
58,501
def gpg_verify(path_to_verify, sigdata, sender_key_info, config_dir=None):
    """
    Verify that a file on disk was signed by the given sender.

    Returns {'status': True} on success, or {'error': ...} on failure.
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # Throwaway GPG home containing only the sender's public key.
    tmpdir = make_gpg_tmphome(prefix="verify", config_dir=config_dir)
    res = gpg_stash_key("verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir)
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    # Write the detached signature to a temp file for gpg to consume.
    fd, path = tempfile.mkstemp(prefix=".sig-verify-")
    with os.fdopen(fd, "w") as f:
        f.write(sigdata)
        f.flush()
        os.fsync(f.fileno())

    gpg = gnupg.GPG(homedir=tmpdir)
    with open(path, "r") as fd_in:
        res = gpg.verify_file(fd_in, data_filename=path_to_verify)

    shutil.rmtree(tmpdir)
    try:
        os.unlink(path)
    except OSError:  # best-effort cleanup; was a bare `except:`
        pass

    if not res:
        log.debug("verify_file error: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("verification succeeded from keys in %s" % config_dir)
    return {'status': True}
Verify a file on disk was signed by the given sender .
58,502
def gpg_encrypt(fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None):
    """
    Encrypt a stream of data to a set of recipient keys, signed by the sender.

    Returns {'status': True} on success, or {'error': ...} on failure.
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # Build a temporary keyring holding all recipients plus the sender's private key.
    tmpdir = make_gpg_tmphome(prefix="encrypt", config_dir=config_dir)
    for key_info in recipient_key_infos:
        res = gpg_stash_key("encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir)
        if res is None:
            shutil.rmtree(tmpdir)
            return {'error': 'Failed to stash key %s' % key_info['key_id']}

    try:
        sender_privkey = gpg_export_key(sender_key_info['app_name'], sender_key_info['key_id'],
                                        include_private=True, config_dir=config_dir)
    except Exception as e:  # fixed Py2-only `except Exception, e` syntax
        log.exception(e)
        shutil.rmtree(tmpdir)
        return {'error': 'No such private key'}

    res = gpg_stash_key("encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir)
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load sender private key'}

    recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
    gpg = gnupg.GPG(homedir=tmpdir)
    res = gpg.encrypt_file(fd_in, recipient_key_ids, sign=sender_key_info['key_id'],
                           passphrase=passphrase, output=path_out, always_trust=True)
    shutil.rmtree(tmpdir)

    if res.status != 'encryption ok':
        log.debug("encrypt_file error: %s" % res.__dict__)
        log.debug("recipients: %s" % recipient_key_ids)
        log.debug("signer: %s" % sender_key_info['key_id'])
        return {'error': 'Failed to encrypt data'}
    return {'status': True}
Encrypt a stream of data for a set of keys .
58,503
def gpg_decrypt(fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None):
    """
    Decrypt a stream of data using a private key we own, verifying the sender.

    Returns {'status': True} on success, or {'error': ...} on failure.
    """
    if config_dir is None:
        config_dir = get_config_dir()

    # Temporary keyring: sender's public key plus our private key.
    tmpdir = make_gpg_tmphome(prefix="decrypt", config_dir=config_dir)
    res = gpg_stash_key("decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir)
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to stash key %s' % sender_key_info['key_id']}

    try:
        my_privkey = gpg_export_key(my_key_info['app_name'], my_key_info['key_id'],
                                    include_private=True, config_dir=config_dir)
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']}

    res = gpg_stash_key("decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir)
    if res is None:
        shutil.rmtree(tmpdir)
        return {'error': 'Failed to load private key'}

    gpg = gnupg.GPG(homedir=tmpdir)
    res = gpg.decrypt_file(fd_in, passphrase=passphrase, output=path_out, always_trust=True)
    shutil.rmtree(tmpdir)

    if res.status != 'decryption ok':
        log.debug("decrypt_file: %s" % res.__dict__)
        return {'error': 'Failed to decrypt data'}
    log.debug("decryption succeeded from keys in %s" % config_dir)
    return {'status': True}
Decrypt a stream of data using key info for a private key we own .
58,504
def get_primary_command_usage(message=''):
    """Return the usage string for the primary command."""
    has_primary = None in settings.subcommands
    if not settings.merge_primary_command and has_primary:
        return format_usage(settings.subcommands[None].__doc__)
    banner = message or ('\n{}\n'.format(settings.message) if settings.message else '')
    doc = _DEFAULT_DOC.format(message=banner)
    if has_primary:
        return _merge_doc(doc, settings.subcommands[None].__doc__)
    return format_usage(doc)
Return the usage string for the primary command .
58,505
def get_help_usage(command):
    """Print a help message for *command* and exit via docopt's --help handling."""
    if not command:
        doc = get_primary_command_usage()
    elif command in ('-a', '--all'):
        named = [k for k in settings.subcommands if k is not None]
        listing = '\n'.join(' {}'.format(c) for c in sorted(named + ['help']))
        doc = get_primary_command_usage('\nAvailable commands:\n{}\n'.format(listing))
    elif command.startswith('-'):
        raise ValueError("Unrecognized option '{}'.".format(command))
    elif command in settings.subcommands:
        doc = format_usage(settings.subcommands[command].__doc__)
    # NOTE(review): an unknown non-option command leaves `doc` unbound (NameError);
    # behavior preserved from the original.
    docopt.docopt(doc, argv=('--help',))
Print out a help message and exit the program .
58,506
def format_usage(doc, width=None):
    """Wrap each blank-line-separated section of *doc* for terminal display."""
    cleaned = doc.replace('\r', '')
    columns = width or get_terminal_size().columns or 80
    return '\n\n'.join(_wrap_section(part.strip(), columns) for part in cleaned.split('\n\n'))
Format the docstring for display to the user .
58,507
def parse_commands(docstring):
    """Yield (commands, args) pairs parsed from a docopt-style usage docstring."""
    # Validate the docstring is parseable docopt; DocoptExit just means it ran.
    try:
        docopt.docopt(docstring, argv=())
    except (TypeError, docopt.DocoptLanguageError):
        return
    except docopt.DocoptExit:
        pass
    for usage_line in _parse_section('usage', docstring):
        tokens = usage_line.split()
        cmds = []
        idx = 0
        # Leading lowercase words are (sub)commands; the rest are arguments.
        for idx, tok in enumerate(tokens):
            if tok[0].isalpha() and not tok[0].isupper():
                cmds.append(tok)
            else:
                break
        yield cmds, tokens[idx:]
Parse a docopt - style string for commands and subcommands .
58,508
def _merge_doc(original, to_merge):
    """Merge two usage strings together, section by section."""
    if not original:
        return to_merge or ''
    if not to_merge:
        return original or ''
    merged = [
        _merge_section(_get_section(name, original), _get_section(name, to_merge))
        for name in ('usage', 'arguments', 'options')
    ]
    return format_usage('\n\n'.join(merged).rstrip())
Merge two usage strings together .
58,509
def _merge_section(original, to_merge):
    """Merge the bodies of two same-named sections, keeping one header."""
    if not original:
        return to_merge or ''
    if not to_merge:
        return original or ''
    # Header ends at the first ':' (or first newline when no colon exists).
    try:
        split_at = original.index(':') + 1
    except ValueError:
        split_at = original.index('\n')
    header = original[:split_at].strip()
    body = '\n '.join(
        (original[split_at + 1:].lstrip(), to_merge[split_at + 1:].lstrip())
    ).rstrip()
    return '{name}\n {section}'.format(name=header, section=body)
Merge two sections together .
58,510
def _get_section(name, source):
    """Extract and merge every occurrence of the named section from *source*."""
    # A section is its header line plus all following indented lines.
    pattern = re.compile(
        '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
        re.IGNORECASE | re.MULTILINE)
    found = None
    for chunk in pattern.findall(source):
        found = _merge_section(found, chunk.strip())
    return found
Extract the named section from the source .
58,511
def _wrap_section(source, width):
    """Wrap one section to *width*, dispatching on the section's shape."""
    if _get_section('usage', source):
        return _wrap_usage_section(source, width)
    if _is_definition_section(source):
        return _wrap_definition_section(source, width)
    # Plain prose: wrap each line independently.
    cleaned = inspect.cleandoc(source).splitlines()
    wrapped = (textwrap.wrap(line, width, replace_whitespace=False) for line in cleaned)
    return '\n'.join(piece for chunk in wrapped for piece in chunk)
Wrap the given section string to the current terminal size .
58,512
def _is_definition_section(source):
    """Return True when every body line looks like 'term  description'."""
    try:
        body = textwrap.dedent(source).split('\n', 1)[1].splitlines()
    except IndexError:
        # Single-line sections have no body, hence no definitions.
        return False
    return all(re.match(r'\s\s+((?!\s\s).+)\s\s+.+', line) for line in body)
Determine if the source is a definition section .
58,513
def _wrap_usage_section(source, width):
    """Wrap a usage section, aligning continuation args under the command."""
    if not any(len(line) > width for line in source.splitlines()):
        return source
    header = source[:source.index(':') + 1].strip()
    out = [header]
    for commands, args in parse_commands(source):
        prefix = ' {} '.format(' '.join(commands))
        available = width - len(prefix)
        continuation = '\n' + ' ' * len(prefix)
        body = continuation.join(textwrap.wrap(' '.join(args), available))
        out += (prefix + body).splitlines()
    return '\n'.join(out)
Wrap the given usage section string to the current terminal size .
58,514
def _wrap_definition_section(source, width):
    """Wrap a definition section, keeping descriptions in an aligned column."""
    split_at = source.index('\n') + 1
    definitions, max_len = _get_definitions(source[split_at:])
    continuation = '\n' + ' ' * (max_len + 4)
    out = [source[:split_at].strip()]
    for term, desc in six.iteritems(definitions):
        wrapped = continuation.join(textwrap.wrap(desc, width - max_len - 4))
        out.append(' {arg:{size}} {desc}'.format(arg=term, size=str(max_len), desc=wrapped))
    return '\n'.join(out)
Wrap the given definition section string to the current terminal size .
58,515
def _get_definitions(source):
    """Extract an OrderedDict of {term: description} plus the longest term length.

    Terms are separated from descriptions by runs of two or more spaces.
    """
    max_len = 0
    descs = collections.OrderedDict()
    for line in (s.strip() for s in source.splitlines()):
        if not line:
            continue
        # maxsplit=1 keeps multi-space runs inside the description intact;
        # without it, a description containing two+ spaces crashed the unpack.
        term, desc = re.split(r'\s\s+', line, maxsplit=1)
        max_len = max(max_len, len(term))
        descs[term] = desc
    return descs, max_len
Extract a dictionary of arguments and definitions .
58,516
def _parse_section(name, source):
    """Return the named section's usage lines, joining indented continuations."""
    # [7:] skips the section header prefix ('usage:\n') — presumably; TODO confirm.
    body = textwrap.dedent(_get_section(name, source)[7:])
    commands = []
    for line in body.splitlines():
        if not commands or (line[:1].isalpha() and line[:1].islower()):
            commands.append(line)
        else:
            commands[-1] = '{} {}'.format(commands[-1].strip(), line.strip())
    return commands
Yield each section line .
58,517
def move(self, particle, u, v, w, modelTimestep, **kwargs):
    """Dead-particle behavior: kill the particle and report zero displacement."""
    if not particle.settled and not particle.dead:
        particle.die()
    # Record environment readings, treating NaN as missing.
    for attr, key in (('temp', 'temperature'), ('salt', 'salinity')):
        reading = kwargs.get(key, None)
        if reading is not None and math.isnan(reading):
            reading = None
        setattr(particle, attr, reading)
    # A dead particle does not move.
    u = v = w = 0
    result = AsaTransport.distance_from_location_using_u_v_w(
        u=u, v=v, w=w, timestep=modelTimestep, location=particle.location)
    result['u'] = u
    result['v'] = v
    result['w'] = w
    return result
I'm dead, so no behaviors should act on me.
58,518
def read_history_file(self, filename=None):
    """Load a readline history file into self.history.

    On read failure the history is reset to empty.
    """
    if filename is None:
        filename = self.history_filename
    try:
        # Close the file deterministically (original left it to the GC).
        with open(filename, u'r') as f:
            for line in f:
                self.add_history(lineobj.ReadLineTextBuffer(ensure_unicode(line.rstrip())))
    except IOError:
        self.history = []
        self.history_cursor = 0
Load a readline history file.
58,519
def write_history_file(self, filename=None):
    """Save the last self.history_length lines to a readline history file."""
    if filename is None:
        filename = self.history_filename
    # `with` guarantees close/flush even if a write fails (original did not).
    with open(filename, u'wb') as fp:
        for line in self.history[-self.history_length:]:
            fp.write(ensure_str(line.get_line_text()))
            fp.write(u'\n')
Save a readline history file.
58,520
def add_history(self, line):
    """Append *line* to the history buffer as if it were the last line typed.

    Empty lines and immediate duplicates are ignored; the cursor is moved to
    the end only when a line is actually appended.
    """
    if not hasattr(line, "get_line_text"):
        line = lineobj.ReadLineTextBuffer(line)
    text = line.get_line_text()
    if not text:
        pass  # ignore empty input
    elif len(self.history) > 0 and self.history[-1].get_line_text() == text:
        pass  # ignore immediate duplicate
    else:
        self.history.append(line)
        self.history_cursor = len(self.history)
Append a line to the history buffer, as if it was the last line typed.
58,521
def beginning_of_history(self):
    """Move the cursor (and line buffer) to the first line in the history."""
    self.history_cursor = 0
    if self.history:
        self.l_buffer = self.history[0]
Move to the first line in the history.
58,522
def get_time_objects_from_model_timesteps(cls, times, start):
    """Return ([timestep deltas in seconds], [absolute datetimes]) for model times.

    Each delta is times[i+1] - times[i]; the final entry reuses the previous
    spacing because there is no following timestep.
    """
    modelTimestep = []
    newtimes = []
    for i in range(len(times)):  # xrange -> range (Py3)
        try:
            modelTimestep.append(times[i + 1] - times[i])
        except IndexError:  # StandardError no longer exists in Py3
            modelTimestep.append(times[i] - times[i - 1])
        newtimes.append(start + timedelta(seconds=times[i]))
    return (modelTimestep, newtimes)
Calculate the datetimes of the model timesteps
58,523
def fill_polygon_with_points(cls, goal=None, polygon=None):
    """Fill a shapely (Multi)Polygon with *goal* random interior points."""
    if goal is None:
        raise ValueError("Must specify the number of points (goal) to fill the polygon with")
    if polygon is None or (not isinstance(polygon, Polygon) and not isinstance(polygon, MultiPolygon)):
        raise ValueError("Must specify a polygon to fill points with")
    minx, miny, maxx, maxy = polygon.bounds
    points = []
    started = time.time()
    # Rejection sampling: draw from the bounding box, keep interior hits.
    while len(points) < goal:
        candidate = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
        if candidate.within(polygon):
            points.append(candidate)
    logger.info("Filling polygon with points took %f seconds" % (time.time() - started))
    return points
Fill a shapely polygon with X number of points
58,524
def distance_from_location_using_u_v_w(cls, u=None, v=None, w=None, timestep=None, location=None):
    """Compute the great-circle displacement from *location* given u/v/w velocities.

    Returns a dict with latitude/longitude plus azimuth, depth, distance,
    angle, vertical_distance and vertical_angle.
    """
    distance_horiz = 0
    azimuth = 0
    angle = 0
    depth = location.depth
    # Fixed: `u is not 0` compared identity, which is unreliable for ints and
    # always True for floats like 0.0; compare by value instead.
    if u != 0 and v != 0:
        s_and_d = AsaMath.speed_direction_from_u_v(u=u, v=v)
        distance_horiz = s_and_d['speed'] * timestep
        angle = s_and_d['direction']
        azimuth = AsaMath.math_angle_to_azimuth(angle=angle)
    distance_vert = 0.
    if w is not None:
        distance_vert = w * timestep
        depth += distance_vert
    if distance_horiz != 0:
        vertical_angle = math.degrees(math.atan(distance_vert / distance_horiz))
        gc_result = AsaGreatCircle.great_circle(distance=distance_horiz, azimuth=azimuth, start_point=location)
    else:
        # No horizontal motion: straight up (90) or down (270), or flat.
        vertical_angle = 0.
        if distance_vert < 0:
            vertical_angle = 270.
        elif distance_vert > 0:
            vertical_angle = 90.
        gc_result = {'latitude': location.latitude, 'longitude': location.longitude, 'reverse_azimuth': 0}
    gc_result['azimuth'] = azimuth
    gc_result['depth'] = depth
    gc_result['distance'] = distance_horiz
    gc_result['angle'] = angle
    gc_result['vertical_distance'] = distance_vert
    gc_result['vertical_angle'] = vertical_angle
    return gc_result
Calculate the great-circle distance from a location using u, v, and w.
58,525
def shutdown(self):
    """Block until all worker threads finish, then mark the pool stopped."""
    self.started = False
    try:
        for worker in self._threads:
            worker.join()
    finally:
        # Even if a join raises, record that shutdown completed.
        self.stopped = True
Wait for all threads to complete
58,526
def _unpack_bytes(data):
    """Unpack up to four little-endian bytes into an unsigned integer.

    Shorter inputs are zero-padded on the right; b'' maps to 0.
    """
    # Renamed the parameter: `bytes` shadowed the builtin type.
    if data == b'':
        return 0
    int_length = 4
    padded = data + (int_length - len(data)) * b'\x00'
    return struct.unpack("<L", padded)[0]
Unpack a set of bytes into an integer . First pads to 4 bytes . Little endian .
58,527
def get_sprints():
    """Returns all sprints enriched with their assigned tasks.

    Collects task costs from every importable ``*_costs`` package, assigns
    tasks to their sprint by id, and computes remaining time per sprint.
    """
    sprints = load_member_from_setting('RAPID_PROTOTYPING_SPRINTS_MODULE')
    all_tasks = []
    # Walk the import path for cost modules; import errors are ignored via onerror.
    for importer, package_name, _ in pkgutil.walk_packages(onerror=lambda p: p):
        if not package_name.endswith('_costs'):
            continue
        # Skip test-app fixtures unless this is a test run.
        if not getattr(settings, 'TEST_RUN', None) and ('.test_app.' in package_name):
            continue
        costs = load_member(package_name + '.costs')
        for task in costs:
            all_tasks.append(task)
    sorted_tasks = sorted(all_tasks, key=itemgetter('id'))
    for sprint in sprints:
        remaining_time = 0
        sprint['tasks'] = []
        for task in sorted_tasks:
            if task.get('sprint') == sprint.get('id'):
                # Only unfinished tasks (no actual_time yet) count as remaining.
                if not task.get('actual_time'):
                    remaining_time += task.get('developer_time') or task.get('time')
                sprint.get('tasks').append(task)
        sprint['remaining_time'] = remaining_time
        sprint['remaining_hours'] = round(float(remaining_time) / 60, 2)
    return sprints
Returns all sprints enriched with their assigned tasks .
58,528
def append_overhead_costs(costs, new_id, overhead_percentage=0.15):
    """Append an overhead task worth *overhead_percentage* of the total time.

    Mutates and returns the same ``costs`` list.
    """
    total_time = sum(item['time'] for item in costs)
    costs.append({
        'id': new_id,
        # fixed typo: was 'Bufixes'
        'task': 'Overhead, Bugfixes & Iterations',
        'time': total_time * overhead_percentage,
    })
    return costs
Adds 15% overhead costs to the list of costs .
58,529
def arduino_default_path():
    """Platform-specific default Arduino root path (None on Windows)."""
    if sys.platform == 'darwin':
        return path('/Applications/Arduino.app/Contents/Resources/Java')
    if sys.platform == 'win32':
        return None
    return path('/usr/share/arduino/')
platform specific default root path .
58,530
def checkForChanges(f, sde, isTable):
    """Returns False if there are no changes between *f* and its SDE copy."""
    # Quick reject: differing row counts means changed.
    fCount = int(arcpy.GetCount_management(f).getOutput(0))
    sdeCount = int(arcpy.GetCount_management(sde).getOutput(0))
    if fCount != sdeCount:
        return True
    fields = [fld.name for fld in arcpy.ListFields(f)]
    if not isTable:
        fields = filter_fields(fields)
        d = arcpy.Describe(f)
        shapeType = d.shapeType
        # Pick a cheap geometry token to compare per shape type.
        if shapeType == 'Polygon':
            shapeToken = 'SHAPE@AREA'
        elif shapeType == 'Polyline':
            shapeToken = 'SHAPE@LENGTH'
        elif shapeType == 'Point':
            shapeToken = 'SHAPE@XY'
        else:
            shapeToken = 'SHAPE@JSON'
        fields.append(shapeToken)

        def parseShape(shapeValue):
            # Reduce a shape token value to a single comparable scalar.
            if shapeValue is None:
                return 0
            elif shapeType in ['Polygon', 'Polyline']:
                return shapeValue
            elif shapeType == 'Point':
                if shapeValue[0] is not None and shapeValue[1] is not None:
                    return shapeValue[0] + shapeValue[1]
                else:
                    return 0
            else:
                return shapeValue
        outputSR = arcpy.Describe(f).spatialReference
    else:
        outputSR = None
    changed = False
    # Compare both datasets row by row in a stable OBJECTID order.
    with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \
            arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'),
                                  spatial_reference=outputSR) as sdeCursor:
        for fRow, sdeRow in izip(fCursor, sdeCursor):
            if fRow != sdeRow:
                # Geometry may differ only by float noise; compare loosely.
                if fRow[-1] != sdeRow[-1] and not isTable:
                    if shapeType not in ['Polygon', 'Polyline', 'Point']:
                        changed = True
                        break
                    fShape = parseShape(fRow[-1])
                    sdeShape = parseShape(sdeRow[-1])
                    try:
                        assert_almost_equal(fShape, sdeShape, -1)
                        # Shapes match loosely: drop them and compare attributes only.
                        fRow = list(fRow[:-1])
                        sdeRow = list(sdeRow[:-1])
                    except AssertionError:
                        changed = True
                        break
                # Datetimes are compared without microseconds.
                for i in range(len(fRow)):
                    if type(fRow[i]) is datetime:
                        fRow = list(fRow)
                        sdeRow = list(sdeRow)
                        fRow[i] = fRow[i].replace(microsecond=0)
                        try:
                            sdeRow[i] = sdeRow[i].replace(microsecond=0)
                        except:
                            pass
                # Skip the first field (presumably OBJECTID — TODO confirm).
                if fRow[1:] != sdeRow[1:]:
                    changed = True
                    break
    return changed
returns False if there are no changes
58,531
def install_metaboard(replace_existing=False):
    """Install the 'metaboard' board definition."""
    board = AutoBunch()
    board.name = 'Metaboard'
    board.upload.protocol = 'usbasp'
    board.upload.maximum_size = '14336'
    board.upload.speed = '19200'
    board.build.mcu = 'atmega168'
    board.build.f_cpu = '16000000L'
    board.build.core = 'arduino'
    board.upload.disable_flushing = 'true'
    install_board('metaboard', board, replace_existing=replace_existing)
install metaboard .
58,532
def __total_pages(self) -> int:
    """Number of full pages of ``self.limit`` rows.

    NOTE(review): annotated ``-> int`` but returns None when the row count is
    not an int; behavior preserved from the original.
    """
    total_rows = self.model.query.count()
    if not isinstance(total_rows, int):
        return None
    return int(total_rows / self.limit)
Return max pages created by limit
58,533
def links(self, base_link, current_page) -> dict:
    """Build JSON-API style pagination links for *current_page*.

    Returns a dict with self/prev/next/first/last URLs; prev/next are None
    at the respective boundaries.
    """
    last_index = self.max_pages - 1 if self.max_pages > 0 else self.max_pages
    base_link = '/%s' % (base_link.strip("/"))
    # Fixed: `current_page is not 0` tested identity, not value; renamed
    # locals that shadowed the builtins `next` and `prev`.
    prev_page = current_page - 1 if current_page != 0 else None
    prev_link = '%s/page/%s/%s' % (base_link, prev_page, self.limit) if prev_page is not None else None
    next_page = current_page + 1 if current_page < last_index else None
    next_link = '%s/page/%s/%s' % (base_link, next_page, self.limit) if next_page is not None else None
    return {
        'self': '%s/page/%s/%s' % (base_link, current_page, self.limit),
        'prev': prev_link,
        'next': next_link,
        'first': '%s/page/%s/%s' % (base_link, 0, self.limit),
        'last': '%s/page/%s/%s' % (base_link, last_index, self.limit),
    }
Return JSON paginate links
58,534
def json_paginate(self, base_url, page_number):
    """Return a dict for a JSON paginated response (meta, data, links)."""
    data = self.page(page_number)
    first_id = data[0].id if data else None
    last_id = data[-1].id if data else None
    return {
        'meta': {
            'total_pages': self.max_pages,
            'first_id': first_id,
            'last_id': last_id,
            'current_page': page_number,
        },
        # Reuse `data` instead of querying the page a second time.
        'data': data,
        'links': self.links(base_url, page_number),
    }
Return a dict for a JSON paginate
58,535
def add_arguments(parser, default_level=logging.INFO):
    """Add a -l/--log-level option to an ArgumentParser or OptionParser."""
    # Support both argparse (add_argument) and optparse (add_option).
    add = getattr(parser, 'add_argument', None) or getattr(parser, 'add_option')
    add('-l', '--log-level', default=default_level, type=log_level,
        help="Set log level (DEBUG, INFO, WARNING, ERROR)")
Add arguments to an ArgumentParser or OptionParser for purposes of grabbing a logging level .
58,536
def setup(options, **kwargs):
    """Configure logging from parsed options; extra kwargs go to basicConfig."""
    config = dict(kwargs, level=options.log_level)
    logging.basicConfig(**config)
Setup logging with options or arguments from an OptionParser or ArgumentParser . Also pass any keyword arguments to the basicConfig call .
58,537
def setup_requests_logging(level):
    """Enable verbose logging of requests' connections and headers."""
    urllib3_logger = logging.getLogger("requests.packages.urllib3")
    urllib3_logger.setLevel(level)
    urllib3_logger.propagate = True
    # http_client debug output is all-or-nothing: on for DEBUG and finer.
    http_client.HTTPConnection.debuglevel = level <= logging.DEBUG
Setup logging for requests such that it logs details about the connection headers etc .
58,538
def _set_period(self, period):
    """Set the timestamp period; a falsy period disables dated filenames."""
    self._period = period
    if period:
        self._period_seconds = tempora.get_period_seconds(self._period)
        self._date_format = tempora.get_date_format_string(self._period_seconds)
    else:
        self._period_seconds = 0
        self._date_format = ''
Set the period for the timestamp . If period is 0 or None no period will be used .
58,539
def get_filename(self, t):
    """Return the filename for timestamp *t*, with the period-aligned date appended."""
    root, ext = os.path.splitext(self.base_filename)
    if self._period_seconds:
        # Snap t down to the start of its period window.
        t -= t % self._period_seconds
    stamp = ''
    if self._date_format != '':
        stamp = datetime.datetime.utcfromtimestamp(t).strftime(self._date_format)
    if stamp:
        return root + ' ' + stamp + ext
    return self.base_filename
Return the appropriate filename for the given time based on the defined period .
58,540
def emit(self, record):
    """Emit a record, reopening the target file if the period rolled over."""
    target = self.get_filename(time.time())
    try:
        current_ok = self.stream.name == target
    except AttributeError:
        # No stream open yet (or it exposes no name).
        current_ok = False
    if not current_ok:
        self._use_file(target)
    logging.StreamHandler.emit(self, record)
Emit a record . Output the record to the file ensuring that the currently - opened file has the correct date .
58,541
def register(app, uri, file_or_directory, pattern, use_modified_since, use_content_range):
    """Register a static file/directory handler with the app's router.

    Supports If-Modified-Since (304) and byte-range requests when enabled.
    """
    if not path.isfile(file_or_directory):
        # Serving a directory: accept a trailing file path in the URL.
        uri += '<file_uri:' + pattern + '>'

    async def _handler(request, file_uri=None):
        # Reject obvious path traversal attempts up front.
        if file_uri and '../' in file_uri:
            raise InvalidUsage("Invalid URL")
        root_path = file_path = file_or_directory
        if file_uri:
            file_path = path.join(file_or_directory, sub('^[/]*', '', file_uri))
        # Canonicalize and ensure the resolved path stays inside the root.
        file_path = path.abspath(unquote(file_path))
        if not file_path.startswith(path.abspath(unquote(root_path))):
            raise FileNotFound('File not found', path=file_or_directory, relative_url=file_uri)
        try:
            headers = {}
            stats = None
            if use_modified_since:
                stats = await stat(file_path)
                modified_since = strftime('%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))
                if request.headers.get('If-Modified-Since') == modified_since:
                    return HTTPResponse(status=304)
                headers['Last-Modified'] = modified_since
            _range = None
            if use_content_range:
                _range = None
                if not stats:
                    stats = await stat(file_path)
                headers['Accept-Ranges'] = 'bytes'
                headers['Content-Length'] = str(stats.st_size)
                if request.method != 'HEAD':
                    try:
                        _range = ContentRangeHandler(request, stats)
                    except HeaderNotFound:
                        # No Range header: serve the whole file.
                        pass
                    else:
                        # Partial response: range headers replace Content-Length.
                        del headers['Content-Length']
                        for key, value in _range.headers.items():
                            headers[key] = value
            if request.method == 'HEAD':
                return HTTPResponse(headers=headers,
                                    content_type=guess_type(file_path)[0] or 'text/plain')
            else:
                return await file(file_path, headers=headers, _range=_range)
        except ContentRangeError:
            raise
        except Exception:
            raise FileNotFound('File not found', path=file_or_directory, relative_url=file_uri)

    app.route(uri, methods=['GET', 'HEAD'])(_handler)
Register a static directory handler with Mach9 by adding a route to the router and registering a handler .
58,542
def fix_imports(script):
    """Rewrite a script in place: plain imports become relative, PyQt5 becomes pyqode.qt."""
    with open(script, 'r') as fh:
        original_lines = fh.read().splitlines()
    rewritten = []
    for line in original_lines:
        if line.startswith("import "):
            line = "from . " + line
        if "from PyQt5 import" in line:
            line = line.replace("from PyQt5 import", "from pyqode.qt import")
        rewritten.append(line)
    with open(script, 'w') as fh:
        fh.write("\n".join(rewritten))
Replace from PyQt5 import by from pyqode . qt import .
58,543
def eval_py(self, _globals, _locals):
    """Evaluate ``self.script`` as a Python params expression and return it.

    NOTE(review): eval() executes arbitrary code — only safe when the script
    content is trusted.
    """
    try:
        params = eval(self.script, _globals, _locals)
    except NameError as e:
        raise Exception('Failed to evaluate parameters: {}'.format(str(e)))
    except ResolutionError as e:
        raise Exception('GetOutput: {}'.format(str(e)))
    return params
Evaluates a file containing a Python params dictionary .
58,544
def new(cls, arg):
    """Create a Parameter object from a ParameterArgument (file or CLI value)."""
    content = None
    if arg.kind == 'file':
        if os.path.exists(arg.value):
            with open(arg.value, 'r') as f:
                content = f.read()
        else:
            raise Exception('File does not exist: {}'.format(arg.value))
    elif arg.kind == 'cli':
        content = arg.value
    for source_cls in cls.sources:
        if source_cls.supports_source(arg):
            return source_cls(content)
    # Fixed NameError: the original raised the misspelled `Execption`.
    msg = 'Unsupported Parameter Source "{}"'
    raise Exception(msg.format(arg.value))
Creates a new Parameter object from the given ParameterArgument .
58,545
def minimum_pitch(self):
    """Return the smallest pitch between neighboring mesh nodes over all directions."""
    return min(min(axis_pitches) for axis_pitches in self.pitch)
Returns the minimal pitch between two neighboring nodes of the mesh in each direction .
58,546
def surrounding_nodes(self, position):
    """Returns the nearest node indices and, per axis, the direction (+/-1)
    toward the opposite surrounding node."""
    n_node_index, n_node_position, n_node_error = self.nearest_node(position)
    if n_node_error == 0.0:
        # Exactly on a node: probe a tiny positive offset on each axis to
        # find out which side still has a neighbor.
        index_mod = []
        for i in range(len(n_node_index)):
            new_point = np.asarray(n_node_position)
            new_point[i] += 1.e-5 * np.abs(new_point[i])
            try:
                self.nearest_node(tuple(new_point))
                index_mod.append(-1)
            except ValueError:
                # Probe fell outside the mesh: the neighbor is on the other side.
                index_mod.append(1)
    else:
        # Off-node: the opposite node lies toward the query position.
        index_mod = []
        for i in range(len(n_node_index)):
            if n_node_position[i] > position[i]:
                index_mod.append(-1)
            else:
                index_mod.append(1)
    return tuple(n_node_index), tuple(index_mod)
Returns nearest node indices and direction of opposite node .
58,547
def tokenize(self, string):
    """Tokenize *string*, yielding (token, was_escaped) pairs."""
    it = colorise.compat.ifilter(None, self._pattern.finditer(string))
    try:
        t = colorise.compat.next(it)
    except StopIteration:
        # No color tokens at all: the entire string is one literal token.
        yield string, False
        return
    pos, buf, lm, escapeflag = -1, '', -1, False
    if t.start() > 0:
        # Literal text before the first match.
        yield string[:t.start()], False
        pos = t.start()
    # Put the first match back in front of the iterator.
    it = itertools.chain([t], it)
    for m in it:
        start = m.start()
        e, s = m.group(2) or '', m.group(3)
        # An odd number of escape characters means the token is escaped.
        escaped = e.count(self._ESCAPE) % 2 != 0
        if escaped:
            # Keep the escaped token as literal text (dropping one escape char).
            buf += string[pos:m.end(2) - 1] + s
            escapeflag = True
        else:
            buf += string[pos:m.start(3)]
            if buf:
                yield buf, escapeflag
                buf = ''
                escapeflag = False
            if lm == start:
                # Two tokens back to back: emit an empty literal between them.
                yield '', False
            yield s, False
            lm = m.end()
        pos = m.end()
    if buf:
        yield buf, escapeflag
        escapeflag = False
    if pos < len(string):
        # Trailing literal text after the last match.
        yield string[pos:], False
Tokenize a string and return an iterator over its tokens .
58,548
def parse(self, format_string):
    """Parse color syntax from a formatted string, yielding (text, colors) pairs."""
    txt, state = '', 0
    # Stack of (foreground, background) pairs; the top is the active color.
    colorstack = [(None, None)]
    itokens = self.tokenize(format_string)
    for token, escaped in itokens:
        if token == self._START_TOKEN and not escaped:
            # Flush accumulated text under the current color before nesting.
            if txt:
                yield txt, colorstack[-1]
                txt = ''
            state += 1
            colors = self.extract_syntax(colorise.compat.next(itokens)[0])
            # Inherit any color component the new spec leaves unset.
            colorstack.append(tuple(b or a for a, b in zip(colorstack[-1], colors)))
        elif token == self._FMT_TOKEN and not escaped:
            # The format token only counts when it closes a start token.
            if state % 2 != 0:
                state += 1
            else:
                txt += token
        elif token == self._STOP_TOKEN and not escaped:
            if state < 2:
                raise ColorSyntaxError("Missing '{0}' or '{1}'"
                                       .format(self._STOP_TOKEN, self._FMT_TOKEN))
            if txt:
                yield txt, colorstack[-1]
                txt = ''
            state -= 2
            colorstack.pop()
        else:
            txt += token
    if state != 0:
        raise ColorSyntaxError("Invalid color format")
    if txt:
        yield txt, colorstack[-1]
Parse color syntax from a formatted string .
58,549
def from_mapping(cls, evidence_mapping):
    """Create an Evidence instance from the given raw mapping."""
    kwargs = {
        'metadata_map': MetadataMap.from_mapping(evidence_mapping['metadataMap']),
        'copyright': evidence_mapping['copyright'],
        'id': evidence_mapping['id'],
        'terms_of_use': evidence_mapping['termsOfUse'],
        'document': evidence_mapping['document'],
        'title': evidence_mapping['title'],
        'text': evidence_mapping['text'],
        'value': evidence_mapping['value'],
    }
    return cls(**kwargs)
Create an Evidence instance from the given mapping
58,550
def to_obj(cls, obj_data=None, *fields, **field_map):
    """Copy fields from *obj_data* (an object or a dict) into a new cls instance.

    obj_data values take priority when names conflict.
    """
    source = obj_data.__dict__ if hasattr(obj_data, '__dict__') else obj_data
    wanted = fields if fields else source.keys()
    instance = cls()
    update_obj(source, instance, *wanted, **field_map)
    return instance
Prioritize obj_dict when there are conflicts.
58,551
def with_ctx(func=None):
    """Decorator that guarantees a usable ``ctx`` keyword argument.

    When the wrapped method is called without a context (or with ctx=None),
    a fresh one is opened via ``_obj.ctx()`` for the duration of the call.
    Supports bare usage (``@with_ctx``) and call usage (``@with_ctx()``).
    """
    if not func:
        # Called as @with_ctx(): return a decorator awaiting the function.
        return functools.partial(with_ctx)

    @functools.wraps(func)
    def wrapper(_obj, *args, **kwargs):
        if kwargs.get('ctx') is None:
            # No context supplied: open one and run inside it.
            with _obj.ctx() as fresh_ctx:
                kwargs['ctx'] = fresh_ctx
                return func(_obj, *args, **kwargs)
        return func(_obj, *args, **kwargs)

    return wrapper
Auto create a new context if not available
58,552
def open(self, auto_commit=None, schema=None):
    """Create an ExecutionContext for running queries against this DB.

    Falls back to self.schema and the schema's auto_commit flag when the
    corresponding arguments are None.  If the database file is missing or
    empty, the schema's setup scripts are executed first.
    """
    if schema is None:
        schema = self.schema
    ac = auto_commit if auto_commit is not None else schema.auto_commit
    exe = ExecutionContext(self.path, schema=schema, auto_commit=ac)
    # A missing or zero-byte file means the schema has never been set up.
    if not os.path.isfile(self.path) or os.path.getsize(self.path) == 0:
        getLogger().warning("DB does not exist at {}. Setup is required.".format(self.path))
        if schema is not None and schema.setup_files:
            for file_path in schema.setup_files:
                getLogger().debug("Executing script file: {}".format(file_path))
                exe.cur.executescript(self.read_file(file_path))
        if schema.setup_scripts:
            for script in schema.setup_scripts:
                exe.cur.executescript(script)
    return exe
Create a context to execute queries
58,553
def build_insert(self, table, values, columns=None):
    """Build a parameterized INSERT statement for *table*.

    When fewer values than columns are given, the values are matched against
    the trailing columns (e.g. to skip an auto-increment primary key).
    """
    cols = columns if columns else table.columns
    if len(values) < len(cols):
        cols = cols[-len(values):]
    column_names = ','.join(cols)
    placeholders = ','.join(['?'] * len(values))
    return "INSERT INTO %s (%s) VALUES (%s) " % (table.name, column_names, placeholders)
Insert an active record into DB and return lastrowid if available
58,554
def select_record(self, table, where=None, values=None, orderby=None, limit=None, columns=None):
    """Run a SELECT on *table*; supports where, values, orderby, limit, columns."""
    query = self.schema.query_builder.build_select(table, where, orderby, limit, columns)
    rows = self.execute(query, values)
    return table.to_table(rows, columns=columns)
Support these keywords where values orderby limit and columns
58,555
def should_be_excluded(name, exclude_patterns):
    """Return True if *name* matches any of the fnmatch-style patterns."""
    return any(fnmatch.fnmatch(name, pattern) for pattern in exclude_patterns)
Check if a name should be excluded .
58,556
def filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, on_error):
    """Filter out subdirs whose (device, inode) pair was already visited.

    Returns (kept_subdirs, updated_visited_set).  A repeated pair indicates a
    directory loop and is reported through on_error as an ELOOP OSError.
    Symlinked directories are silently skipped when follow_dirlinks is False.
    """
    kept = []
    newly_seen = set()
    seen = already_visited.copy()
    # stat follows symlinks, lstat does not; pick once for the whole call.
    stat_fn = os.stat if follow_dirlinks else os.lstat
    try:
        info = stat_fn(curr_dir)
        seen.add((info.st_dev, info.st_ino))
    except OSError as err:
        on_error(err)
    for name in subdirs:
        full_path = os.path.join(curr_dir, name)
        try:
            info = stat_fn(full_path)
        except OSError as err:
            on_error(err)
            continue
        if not follow_dirlinks and stat.S_ISLNK(info.st_mode):
            continue
        dev_inode = (info.st_dev, info.st_ino)
        if dev_inode in seen:
            on_error(OSError(errno.ELOOP, "directory loop detected", full_path))
        else:
            kept.append(name)
            newly_seen.add(dev_inode)
    return kept, seen.union(newly_seen)
Filter subdirs that have already been visited .
58,557
def index_files_by_size(root, files_by_size, exclude_dirs, exclude_files, follow_dirlinks):
    """Recursively index regular files under *root* into files_by_size.

    files_by_size maps file size -> list of full paths and is mutated in
    place.  Excluded names are pruned, already-visited directories are
    filtered out to avoid loops, and a list of error messages is returned.
    """
    errors = []
    already_visited = set()

    def _print_error(error):
        # Report to stderr and also collect for the caller.
        msg = "error listing '%s': %s" % (error.filename, error.strerror)
        sys.stderr.write("%s\n" % msg)
        errors.append(msg)

    for curr_dir, subdirs, filenames in os.walk(root, topdown=True, onerror=_print_error, followlinks=follow_dirlinks):
        # In-place assignment so os.walk does not descend into pruned dirs.
        subdirs[:] = prune_names(subdirs, exclude_dirs)
        filenames = prune_names(filenames, exclude_files)
        subdirs[:], already_visited = filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, _print_error)
        for base_filename in filenames:
            full_path = os.path.join(curr_dir, base_filename)
            try:
                # lstat: never follow file symlinks here.
                file_info = os.lstat(full_path)
            except OSError as e:
                _print_error(e)
                continue
            # Only regular files are indexed (no sockets, fifos, symlinks).
            if stat.S_ISREG(file_info.st_mode):
                size = file_info.st_size
                if size in files_by_size:
                    files_by_size[size].append(full_path)
                else:
                    files_by_size[size] = [full_path]
    return errors
Recursively index files under a root directory .
58,558
def calculate_md5(filename, length):
    """Calculate the MD5 digest of the first *length* bytes of a file.

    Args:
        filename: path of the file to hash.
        length: non-negative number of bytes to hash (short files yield the
            digest of whatever is available).

    Returns:
        The raw 16-byte MD5 digest.

    Raises:
        EnvironmentError: if the file cannot be opened or read.

    Fixes over the original: the length == 0 case returned a hard-coded
    *text* literal ('\\xd4\\x1d...'), which under Python 3 is a str and not
    equal to the bytes digest returned for other lengths; we now always
    return a real digest.  The file handle is also closed via a context
    manager instead of try/finally.
    """
    assert length >= 0
    md5_summer = hashlib.md5()
    if length:
        with open(filename, 'rb') as f:
            bytes_read = 0
            while bytes_read < length:
                # Never read more than the remaining requested bytes.
                chunk = f.read(min(MD5_CHUNK_SIZE, length - bytes_read))
                if not chunk:
                    break  # EOF before reaching length
                md5_summer.update(chunk)
                bytes_read += len(chunk)
    # length == 0 skips the read entirely (matching the old fast path) and
    # yields the canonical empty-input digest.
    return md5_summer.digest()
Calculate the MD5 hash of a file up to length bytes .
58,559
def find_duplicates(filenames, max_size):
    """Group files with identical MD5 over their first max_size bytes.

    Returns (duplicate_groups, errors) where duplicate_groups is a list of
    lists, each holding >= 2 paths with matching partial hashes.  With
    max_size == 0 all files are trivially considered identical.
    """
    errors = []
    if len(filenames) < 2:
        # Nothing can be duplicated.
        return [], errors
    if max_size == 0:
        # Zero-byte comparison: every file matches every other.
        return [filenames], errors
    files_by_md5 = {}
    for filename in filenames:
        try:
            md5 = calculate_md5(filename, max_size)
        except EnvironmentError as e:
            # Report and skip unreadable files instead of aborting the scan.
            msg = "unable to calculate MD5 for '%s': %s" % (filename, e.strerror)
            sys.stderr.write("%s\n" % msg)
            errors.append(msg)
            continue
        if md5 not in files_by_md5:
            files_by_md5[md5] = [filename]
        else:
            files_by_md5[md5].append(filename)
    # Only groups with at least two members are actual duplicates.
    duplicates = [l for l in py3compat.itervalues(files_by_md5) if len(l) >= 2]
    return duplicates, errors
Find duplicates in a list of files comparing up to max_size bytes .
58,560
def find_duplicates_in_dirs(directories, exclude_dirs=None, exclude_files=None, follow_dirlinks=False):
    """Recursively scan *directories* for duplicate files.

    Files are first bucketed by size; large buckets are narrowed with a
    partial MD5 over a size-proportional prefix before the full-length
    comparison.  Returns (duplicate_groups, error_messages).
    """
    if exclude_dirs is None:
        exclude_dirs = []
    if exclude_files is None:
        exclude_files = []
    errors_in_total = []
    files_by_size = {}
    for directory in directories:
        # index_files_by_size mutates files_by_size in place.
        sub_errors = index_files_by_size(directory, files_by_size, exclude_dirs, exclude_files, follow_dirlinks)
        errors_in_total += sub_errors
    all_duplicates = []
    for size in iter(files_by_size):
        if size >= PARTIAL_MD5_THRESHOLD:
            # Cheap pre-filter: hash only a prefix proportional to file size,
            # capped at PARTIAL_MD5_MAX_READ and rounded to a read multiple.
            partial_size = min(round_up_to_mult(size // PARTIAL_MD5_READ_RATIO, PARTIAL_MD5_READ_MULT), PARTIAL_MD5_MAX_READ)
            possible_duplicates_list, sub_errors = find_duplicates(files_by_size[size], partial_size)
            errors_in_total += sub_errors
        else:
            # Small files: go straight to the full comparison.
            possible_duplicates_list = [files_by_size[size]]
        for possible_duplicates in possible_duplicates_list:
            # Confirm candidates with a full-length hash.
            duplicates, sub_errors = find_duplicates(possible_duplicates, size)
            all_duplicates += duplicates
            errors_in_total += sub_errors
    return all_duplicates, errors_in_total
Recursively scan a list of directories looking for duplicate files .
58,561
def semimajor(P, M):
    """Return the orbital semimajor axis for period P and total mass M.

    Uses Kepler's third law: a^3 = (P / 2*pi)^2 * G * M.

    Args:
        P: orbital period; a bare number is interpreted as days.
        M: total mass; a bare number is interpreted as solar masses.

    Returns:
        Semimajor axis as an astropy Quantity in AU.

    Fix: use isinstance() instead of ``type(x) != Quantity`` so Quantity
    subclasses are recognized and not re-multiplied by a unit.
    """
    if not isinstance(P, Quantity):
        P = P * u.day
    if not isinstance(M, Quantity):
        M = M * u.M_sun
    a = ((P / 2 / np.pi) ** 2 * const.G * M) ** (1. / 3)
    return a.to(u.AU)
P and M can be Quantity objects ; otherwise they default to units of day and M_sun respectively
58,562
def random_spherepos(n):
    """Return a SkyCoord of n positions uniformly distributed on the unit sphere.

    cos(theta) is drawn uniformly (with a random hemisphere sign) so the
    polar angle is area-uniform; phi is uniform on [0, 2*pi).
    """
    # Random hemisphere for each point.
    signs = np.sign(rand.uniform(-1, 1, size=n))
    # arccos of a uniform variate gives an area-uniform polar angle.
    thetas = Angle(np.arccos(rand.uniform(size=n) * signs), unit=u.rad)
    phis = Angle(rand.uniform(0, 2 * np.pi, size=n), unit=u.rad)
    # Unit radius; 'physicsspherical' uses (phi, theta, r) ordering.
    c = SkyCoord(phis, thetas, 1, representation='physicsspherical')
    return c
returns SkyCoord object with n positions randomly oriented on the unit sphere
58,563
def to_dict(self):
    """Return a dict of all instance variables with truthy values,
    with each key name camelized (lowerCamelCase)."""
    result = {}
    for name, value in self.__dict__.items():
        if value:
            result[inflection.camelize(name, False)] = value
    return result
Return a dict of all instance variables with truthy values with key names camelized
58,564
def depth(self):
    """Return the number of path components (ancestor count) of this directory."""
    trimmed = self.path.rstrip(os.sep)
    return len(trimmed.split(os.sep))
Returns the number of ancestors of this directory .
58,565
def ancestors(self, stop=None):
    """Yield successive parents until *stop* (or a self-parenting root) is reached."""
    node = self
    while True:
        parent = node.parent
        # Stop silently at the requested boundary or at an absolute root
        # (a folder that is its own parent).
        if parent == stop or parent == node:
            return
        yield parent
        node = parent
Generates the parents until stop or the absolute root directory is reached .
58,566
def is_descendant_of(self, ancestor):
    """Return True if this folder lies inside *ancestor* anywhere up the tree."""
    stop = Folder(ancestor)
    for folder in self.ancestors():
        if folder == stop:
            return True
        # Walked above the candidate's depth: it cannot be an ancestor.
        if stop.depth > folder.depth:
            return False
    return False
Checks if this folder is inside the given ancestor .
58,567
def get_relative_path(self, root):
    """Return the path fragment of this folder relative to *root*.

    Builds the fragment by folding each ancestor's name (from the inside
    out) onto this folder's own name; '' when self is root itself.
    """
    if self.path == root:
        return ''
    ancestors = self.ancestors(stop=root)
    # reduce prepends each ancestor directory name in front of the
    # accumulated fragment, innermost first.
    return functools.reduce(lambda f, p: Folder(p.name).child(f), ancestors, self.name)
Gets the fragment of the current path starting at root .
58,568
def get_mirror(self, target_root, source_root=None):
    """Return the File/Folder this directory would map to if the tree
    fragment starting at source_root (default: the parent) were copied
    under target_root."""
    base = source_root if source_root else self.parent
    fragment = self.get_relative_path(base)
    return Folder(target_root).child(fragment)
Returns a File or Folder object that represents what the fragment of this directory , starting with source_root , would become if it were copied to target_root .
58,569
def file_or_folder(path):
    """Return a Folder for a directory path, otherwise a File."""
    target = unicode(path)
    if os.path.isdir(target):
        return Folder(target)
    return File(target)
Returns a File or Folder object that would represent the given path .
58,570
def is_binary(self):
    """Return True if this file looks binary (contains a NUL byte)."""
    chunk_size = 1024
    with open(self.path, 'rb') as stream:
        while True:
            data = stream.read(chunk_size)
            if b'\0' in data:
                return True
            # A short (or empty) read means we reached EOF without a NUL.
            if len(data) < chunk_size:
                return False
Return true if this is a binary file .
58,571
def make_temp(text):
    """Create a temporary file, write *text* into it and return it as a File."""
    import tempfile
    handle, path = tempfile.mkstemp(text=True)
    # We only need the path; close the low-level descriptor immediately.
    os.close(handle)
    temp_file = File(path)
    temp_file.write(text)
    return temp_file
Creates a temporary file and writes the text into it
58,572
def read_all(self, encoding='utf-8'):
    """Read the whole file and return its content as a decoded string."""
    logger.info("Reading everything from %s" % self)
    with codecs.open(self.path, 'r', encoding) as source:
        return source.read()
Reads from the file and returns the content as a string .
58,573
def write(self, text, encoding="utf-8"):
    """Write *text* to the file using the given encoding (overwrites)."""
    logger.info("Writing to %s" % self)
    with codecs.open(self.path, 'w', encoding) as sink:
        sink.write(text)
Writes the given text to the file using the given encoding .
58,574
def copy_to(self, destination):
    """Copy this file to *destination* (a File or Folder).

    Returns a File object representing the resulting target path.
    """
    # Resolve what the final file path will be (file kept as-is, folder
    # resolved to folder/<this name>).
    target = self.__get_destination__(destination)
    logger.info("Copying %s to %s" % (self, target))
    # NOTE(review): copies to the raw destination rather than the resolved
    # target; shutil.copy handles directory destinations itself, so the
    # result should match `target` — confirm for exotic destination types.
    shutil.copy(self.path, unicode(destination))
    return target
Copies the file to the given destination . Returns a File object that represents the target file . destination must be a File or Folder object .
58,575
def etag(self):
    """Generate an ETag (hex MD5) from the file contents, read in 64 KiB chunks."""
    CHUNKSIZE = 1024 * 64
    from hashlib import md5
    # NOTE: `hash` shadows the builtin of the same name within this method.
    hash = md5()
    # NOTE(review): the file is opened in *text* mode; presumably
    # hash_update() encodes each chunk before feeding the digest — confirm,
    # and confirm behavior on binary files.
    with open(self.path) as fin:
        chunk = fin.read(CHUNKSIZE)
        while chunk:
            hash_update(hash, chunk)
            chunk = fin.read(CHUNKSIZE)
    return hash.hexdigest()
Generates etag from file contents .
58,576
def child_folder(self, fragment):
    """Return a Folder formed by joining *fragment* onto this folder's path."""
    combined = os.path.join(self.path, Folder(fragment).path)
    return Folder(combined)
Returns a folder object by combining the fragment to this folder s path
58,577
def child(self, fragment):
    """Return the path of a child item represented by *fragment*."""
    normalized = FS(fragment).path
    return os.path.join(self.path, normalized)
Returns a path of a child item represented by fragment .
58,578
def make(self):
    """Create this directory and any missing parents.

    Errors (races, permissions) are deliberately swallowed, per the
    documented contract.  Returns self for chaining.
    """
    try:
        if not self.exists:
            logger.info("Creating %s" % self.path)
            os.makedirs(self.path)
    except os.error:
        # Intentional: creation failures are silently ignored.
        pass
    return self
Creates this directory and any of the missing directories in the path . Any errors that may occur are eaten .
58,579
def delete(self):
    """Delete the directory tree if it exists; no-op otherwise."""
    if not self.exists:
        return
    logger.info("Deleting %s" % self.path)
    shutil.rmtree(self.path)
Deletes the directory if it exists .
58,580
def _create_target_tree(self, target):
    """Pre-create the directory skeleton of this tree under *target*.

    Works around a dir_util bug: copy_tree crashes if a folder in the tree
    was deleted and re-added, so the directories are created up front by
    walking the source tree.
    """
    source = self
    with source.walker as walker:
        @walker.folder_visitor
        def visit_folder(folder):
            # Skip the root itself; mirror every descendant folder.
            if folder != source:
                Folder(folder.get_mirror(target, source)).make()
There is a bug in dir_util that makes copy_tree crash if a folder in the tree has been deleted before and readded now . To workaround the bug we first walk the tree and create directories that are needed .
58,581
def copy_contents_to(self, destination):
    """Copy the contents of this directory into *destination*.

    Returns a Folder object for the destination.
    """
    logger.info("Copying contents of %s to %s" % (self, destination))
    target = Folder(destination)
    target.make()
    # Pre-create the directory skeleton to dodge a dir_util re-add bug.
    self._create_target_tree(target)
    dir_util.copy_tree(self.path, unicode(target))
    return target
Copies the contents of this directory to the given destination . Returns a Folder object that represents the moved directory .
58,582
def __start(self):
    """Launch the cron-processing loop on a background daemon thread."""
    worker = Thread(target=self.__loop, args=())
    # Daemon: don't keep the interpreter alive for the scheduler.
    worker.daemon = True
    worker.start()
    self.__enabled = True
Start a new thread to process Cron
58,583
def __dict_to_BetterDict(self, attr):
    """Wrap self[attr] in a BetterDict when it is a plain dict; return the value."""
    value = self[attr]
    # Exact type check on purpose: already-wrapped BetterDicts pass through.
    if type(value) == dict:
        self[attr] = BetterDict(value)
    return self[attr]
Convert the passed attr to a BetterDict if the value is a dict
58,584
def _bd_(self):
    """Property that allows dot lookups of otherwise hidden attributes.

    Returns a cached BetterDictLookUp over this dict.

    Fix: the original guard read ``getattr(self, '__bd__', False)`` while
    the assignment ``self.__bd = ...`` stored a *name-mangled* attribute, so
    the guard never matched and a fresh BetterDictLookUp was rebuilt on
    every access.  The helper is now created once and cached under an
    unambiguous attribute name.
    """
    cached = getattr(self, '_bd_lookup_cache', None)
    if cached is None:
        cached = BetterDictLookUp(self)
        self._bd_lookup_cache = cached
    return cached
Property that allows dot lookups of otherwise hidden attributes .
58,585
def create_or_update(sender, **kwargs):
    """Create or update an Activity Monitor item from a saved model instance.

    Intended as a post_save signal handler: *sender* is the model class and
    kwargs['instance'] the saved instance.  Returns the Activity on success,
    otherwise None (implicitly) when the instance is filtered out.
    """
    now = datetime.datetime.now()
    # Imported lazily to avoid an import cycle at module load time.
    from activity_monitor.models import Activity
    instance = kwargs['instance']
    instance_content_type = ContentType.objects.get_for_model(sender)
    instance_model = sender
    content_object = instance_model.objects.get(id=instance.id)
    # Look for an existing Activity for this object; NOTE(review): the bare
    # except hides DB errors as well as DoesNotExist.
    try:
        activity = Activity.objects.get(content_type=instance_content_type, object_id=content_object.id)
    except:
        activity = None
    for activity_setting in settings.ACTIVITY_MONITOR_MODELS:
        this_app_label = activity_setting['model'].split('.')[0]
        this_model_label = activity_setting['model'].split('.')[1]
        this_content_type = ContentType.objects.get(app_label=this_app_label, model=this_model_label)
        if this_content_type == instance_content_type:
            # Optional boolean gate: if the configured attribute is False,
            # remove any stale activity and bail out.
            if 'check' in activity_setting:
                if getattr(instance, activity_setting['check']) is False:
                    if activity:
                        activity.delete()
                    return
            # Manager name defaults to 'objects'.
            try:
                manager = activity_setting['manager']
            except:
                manager = 'objects'
            # Timestamp field defaults to 'created'.
            try:
                timestamp = getattr(instance, activity_setting['date_field'])
            except:
                timestamp = getattr(instance, 'created')
            # Normalize date -> datetime at midnight.
            if type(timestamp) == type(now):
                clean_timestamp = timestamp
            else:
                clean_timestamp = datetime.datetime.combine(timestamp, datetime.time())
            # Resolve the acting user: explicit field, the instance itself
            # for user/profile models, else instance.user.
            if 'user_field' in activity_setting:
                user = getattr(instance, activity_setting['user_field'])
            elif this_model_label == 'user' or this_model_label == 'profile':
                user = instance
            else:
                user = instance.user
            # Skip future-dated items, items older than three days, missing
            # users, and (optionally) superuser/staff activity.
            if clean_timestamp > now:
                return
            if clean_timestamp < (now - datetime.timedelta(days=3)):
                return
            if not user:
                return
            if user.is_superuser and 'filter_superuser' in activity_setting:
                return
            if user.is_staff and 'filter_staff' in activity_setting:
                return
            verb = activity_setting.get('verb', None)
            override_string = activity_setting.get('override_string', None)
            # If the source object vanished, drop any dangling activity.
            try:
                getattr(instance_model, manager).get(pk=instance.pk)
            except instance_model.DoesNotExist:
                try:
                    activity.delete()
                    return
                except Activity.DoesNotExist:
                    return
            if user and clean_timestamp and instance:
                if not activity:
                    activity = Activity(
                        actor=user,
                        content_type=instance_content_type,
                        object_id=content_object.id,
                        content_object=content_object,
                        timestamp=clean_timestamp,
                        verb=verb,
                        override_string=override_string,
                    )
                activity.save()
                return activity
Create or update an Activity Monitor item from some instance .
58,586
def highlight_differences(s1, s2, color):
    """Print s1 and s2, highlighting the characters of s2 that differ from s1.

    Length differences are marked by a colored run of underscores after the
    shorter string.
    """
    len1, len2 = len(s1), len(s2)
    mismatches = [i for i, pair in enumerate(zip(s1, s2)) if pair[0] != pair[1]]
    print(s1)
    if len2 > len1:
        # s2 is longer: pad s1's line with colored underscores.
        colorise.cprint('_' * (len2 - len1), fg=color)
    else:
        print()
    colorise.highlight(s2, indices=mismatches, fg=color, end='')
    if len1 > len2:
        # s1 is longer: mark the missing tail of s2.
        colorise.cprint('_' * (len1 - len2), fg=color)
    else:
        print()
Highlight the characters in s2 that differ from those in s1 .
58,587
def create_jinja_env():
    """Create a Jinja2 Environment for the package's bundled templates.

    HTML autoescaping is enabled and the custom simple_date/paragraphify
    filters are registered.
    """
    template_dir = os.path.join(os.path.dirname(__file__), 'templates')
    loader = jinja2.FileSystemLoader(template_dir)
    env = jinja2.Environment(
        loader=loader,
        autoescape=jinja2.select_autoescape(['html']),
    )
    env.filters['simple_date'] = filter_simple_date
    env.filters['paragraphify'] = filter_paragraphify
    return env
Create a Jinja2 ~jinja2 . Environment .
58,588
def render_homepage(config, env):
    """Render the homepage.jinja template with the given config."""
    homepage_template = env.get_template('homepage.jinja')
    return homepage_template.render(config=config)
Render the homepage . jinja template .
58,589
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
    """Return calendar-date probabilities for one radiocarbon determination.

    calibcurve must expose calbp, c14age and error arrays.  Densities are
    either normal (normal_distr=True) or Student-t-like with parameters
    t_a/t_b (t_b must equal t_a + 1).  Rows with probability <= cutoff are
    dropped; if fewer than six survive, the curve is re-interpolated onto a
    50-point grid instead.  Returns an (m, 2) array of [cal BP, probability].
    """
    assert t_b - 1 == t_a
    if normal_distr:
        combined_std = np.sqrt(calibcurve.error ** 2 + w2)
        dens = stats.norm(loc=rcmean, scale=combined_std).pdf(calibcurve.c14age)
    else:
        # Heavy-tailed t-type density in the radiocarbon-age domain.
        dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))
    cal = np.column_stack((calibcurve.calbp.copy(), dens))
    # Normalize to a discrete probability distribution.
    cal[:, 1] = cal[:, 1] / cal[:, 1].sum()
    above = cal[:, 1] > cutoff
    if above.sum() > 5:
        return cal[above, :]
    # Too few supported points: interpolate onto a fixed 50-point grid.
    grid = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)
    interp_dens = np.interp(grid, cal[:, 0], cal[:, 1])
    return np.column_stack((grid, interp_dens / interp_dens.sum()))
Get calendar date probabilities
58,590
def calibrate_dates(chron, calib_curve, d_r, d_std, cutoff=0.0001, normal_distr=False, t_a=(3,), t_b=(4,)):
    """Get calendar-date densities for each dated depth in a core.

    Args:
        chron: object exposing depth, age and error arrays of equal length n.
        calib_curve: one calibration curve, or one per date.
        d_r, d_std: reservoir correction mean(s) and standard deviation(s).
        cutoff, normal_distr, t_a, t_b: forwarded to d_cal(); t_a/t_b may be
            length-1 or length-n sequences with t_b[i] == t_a[i] + 1.

    Returns:
        (depths, calib_probs): depths as an ndarray and a list of per-date
        probability arrays from d_cal().

    Fixes: default arguments are now immutable tuples instead of the mutable
    lists [3]/[4]; the t_a/t_b consistency assert is evaluated elementwise
    with .all() so per-date arrays longer than 1 no longer raise a spurious
    "truth value is ambiguous" ValueError.
    """
    n = len(chron.depth)
    calib_curve = np.array(calib_curve)
    t_a = np.array(t_a)
    t_b = np.array(t_b)
    assert (t_b - 1 == t_a).all()
    d_r = np.array(d_r)
    d_std = np.array(d_std)
    # Broadcast scalar-like inputs to one entry per dated depth.
    if len(t_a) == 1:
        t_a = np.repeat(t_a, n)
    if len(t_b) == 1:
        t_b = np.repeat(t_b, n)
    if len(d_r) == 1:
        d_r = np.repeat(d_r, n)
    if len(d_std) == 1:
        d_std = np.repeat(d_std, n)
    if len(calib_curve) == 1:
        calib_curve = np.repeat(calib_curve, n)
    calib_probs = []
    # Apply the reservoir correction and fold its uncertainty into the
    # measurement variance.
    rcmean = chron.age - d_r
    w2 = chron.error ** 2 + d_std ** 2
    for i in range(n):
        age_realizations = d_cal(calib_curve[i], rcmean=rcmean[i], w2=w2[i],
                                 t_a=t_a[i], t_b=t_b[i], cutoff=cutoff,
                                 normal_distr=normal_distr)
        calib_probs.append(age_realizations)
    return np.array(chron.depth), calib_probs
Get density of calendar dates for chron date segment in core
58,591
def _init_browser(self):
    """Prepare a logged-in PhantomJS browser session (override as needed).

    Tries the newer login form field names first and falls back to the
    legacy p_un/p_pw names when they are absent.
    """
    self.browser = splinter.Browser('phantomjs')
    self.browser.visit(self.server_url + "/youraccount/login")
    # NOTE(review): bare except is a deliberate fallback for older login
    # form field names, but it also hides unrelated errors.
    try:
        self.browser.fill('nickname', self.user)
        self.browser.fill('password', self.password)
    except:
        self.browser.fill('p_un', self.user)
        self.browser.fill('p_pw', self.password)
    self.browser.fill('login_method', self.login_method)
    self.browser.find_by_css('input[type=submit]').click()
Overide in appropriate way to prepare a logged in browser .
58,592
def upload_marcxml(self, marcxml, mode):
    """Upload a MARCXML record to the server's robotupload endpoint.

    mode must be one of -i, -r, -c, -a, -ir; otherwise a NameError is
    raised (kept for backward compatibility with existing callers).
    """
    valid_modes = ["-i", "-r", "-c", "-a", "-ir"]
    if mode not in valid_modes:
        raise NameError("Incorrect mode " + str(mode))
    return requests.post(
        self.server_url + "/batchuploader/robotupload",
        data={'file': marcxml, 'mode': mode},
        headers={'User-Agent': CFG_USER_AGENT},
    )
Upload a record to the server .
58,593
def url(self):
    """Return the absolute URL of this record, or None if server/recid unknown."""
    if self.server_url is None or self.recid is None:
        return None
    return '/'.join([self.server_url, CFG_SITE_RECORD, str(self.recid)])
Returns the URL to this record . Returns None if not known
58,594
def clean_list_of_twitter_list(list_of_twitter_lists, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """Extract a keyword set and a lemma->keyword-bag map for each Twitter list.

    None inputs (the whole collection or individual lists) are skipped.
    Returns (list_of_keyword_sets, list_of_lemma_to_keywordbags).
    """
    keyword_sets = []
    lemma_maps = []
    if list_of_twitter_lists is not None:
        for twitter_list in list_of_twitter_lists:
            if twitter_list is None:
                continue
            keyword_set, lemma_to_keywordbag = clean_twitter_list(
                twitter_list, sent_tokenize, _treebank_word_tokenize, tagger,
                lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re,
                digits_punctuation_whitespace_re, pos_set)
            keyword_sets.append(keyword_set)
            lemma_maps.append(lemma_to_keywordbag)
    return keyword_sets, lemma_maps
Extracts the sets of keywords for each Twitter list .
58,595
def user_twitter_list_bag_of_words(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """Build a bag-of-words for a user's whole corpus of Twitter lists.

    Returns (bag_of_words, lemma_to_keywordbag_total) where the second item
    maps each lemma to a keyword -> multiplicity counter aggregated over all
    lists.
    """
    list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set)
    bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets)
    # Nested counter: lemma -> {original keyword form -> multiplicity}.
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_to_keywordbag in list_of_lemma_to_keywordbags:
        for lemma, keywordbag in lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity
    return bag_of_words, lemma_to_keywordbag_total
Extract a bag - of - words for a corpus of Twitter lists pertaining to a Twitter user .
58,596
def grouper(iterable, n, pad_value=None):
    """Return an iterator of n-length chunks of *iterable*, padding the
    final chunk with *pad_value*.

    >>> list(grouper([1, 2, 3], 2, 0))
    [(1, 2), (3, 0)]

    Fix: the original wrapped zip_longest in a redundant generator
    expression (``(chunk for chunk in ...)``); zip_longest over n references
    to a single iterator already yields the successive n-tuples.
    """
    return zip_longest(*[iter(iterable)] * n, fillvalue=pad_value)
Returns a generator of n - length chunks of an input iterable with appropriate padding at the end .
58,597
def chunks(iterable, n):
    """Yield successive n-length slices of *iterable* (last may be shorter).

    Fix: use the builtin range() instead of np.arange — no NumPy dependency
    is needed for simple index stepping, and plain int slice indices avoid
    numpy integer types leaking into the slices.
    """
    for start in range(0, len(iterable), n):
        yield iterable[start:start + n]
A python generator that yields 100 - length sub - list chunks .
58,598
def split_every(iterable, n):
    """Yield successive lists of up to n items from *iterable*."""
    source = iter(iterable)
    # iter() with a sentinel keeps slicing until islice returns an empty list.
    for piece in iter(lambda: list(islice(source, n)), []):
        yield piece
A generator of n - length chunks of an input iterable
58,599
def merge_properties(item_properties, prop_name, merge_value):
    """Merge *merge_value* into item_properties[prop_name] by value type.

    Missing/falsy existing values are simply replaced; ints and strs are
    added/concatenated; lists go through merge_list.  Returns the updated
    mapping, or False when the value type cannot be merged.
    """
    existing_value = item_properties.get(prop_name, None)
    if not existing_value:
        item_properties[prop_name] = merge_value
        return item_properties
    # Exact type checks on purpose, mirroring the historical behavior
    # (e.g. bools are not treated as ints).
    value_type = type(merge_value)
    if value_type is int or value_type is str:
        item_properties[prop_name] = existing_value + merge_value
    elif value_type is list:
        item_properties[prop_name] = merge_list(existing_value, merge_value)
    else:
        return False
    return item_properties
Tries to figure out which type of property value that should be merged and invoke the right function . Returns new properties if the merge was successful otherwise False .