idx
int64 0
63k
| question
stringlengths 61
4.03k
| target
stringlengths 6
1.23k
|
|---|---|---|
3,400
|
def spawn_new ( self , key ) : if not os . path . exists ( self . queue [ key ] [ 'path' ] ) : self . queue [ key ] [ 'status' ] = 'failed' error_msg = "The directory for this command doesn't exist anymore: {}" . format ( self . queue [ key ] [ 'path' ] ) self . logger . error ( error_msg ) self . queue [ key ] [ 'stdout' ] = '' self . queue [ key ] [ 'stderr' ] = error_msg else : stdout , stderr = self . get_descriptor ( key ) if self . custom_shell != 'default' : self . processes [ key ] = subprocess . Popen ( [ self . custom_shell , '-i' , '-c' , self . queue [ key ] [ 'command' ] , ] , stdout = stdout , stderr = stderr , stdin = subprocess . PIPE , universal_newlines = True , preexec_fn = os . setsid , cwd = self . queue [ key ] [ 'path' ] ) else : self . processes [ key ] = subprocess . Popen ( self . queue [ key ] [ 'command' ] , shell = True , stdout = stdout , stderr = stderr , stdin = subprocess . PIPE , universal_newlines = True , preexec_fn = os . setsid , cwd = self . queue [ key ] [ 'path' ] ) self . queue [ key ] [ 'status' ] = 'running' self . queue [ key ] [ 'start' ] = str ( datetime . now ( ) . strftime ( "%H:%M" ) ) self . queue . write ( )
|
Spawn a new task and save it to the queue .
|
3,401
|
def kill_all ( self , kill_signal , kill_shell = False ) : for key in self . processes . keys ( ) : self . kill_process ( key , kill_signal , kill_shell )
|
Kill all running processes .
|
3,402
|
def start_process ( self , key ) : if key in self . processes and key in self . paused : os . killpg ( os . getpgid ( self . processes [ key ] . pid ) , signal . SIGCONT ) self . queue [ key ] [ 'status' ] = 'running' self . paused . remove ( key ) return True elif key not in self . processes : if self . queue [ key ] [ 'status' ] in [ 'queued' , 'stashed' ] : self . spawn_new ( key ) return True return False
|
Start a specific process .
|
3,403
|
def pause_process ( self , key ) : if key in self . processes and key not in self . paused : os . killpg ( os . getpgid ( self . processes [ key ] . pid ) , signal . SIGSTOP ) self . queue [ key ] [ 'status' ] = 'paused' self . paused . append ( key ) return True return False
|
Pause a specific process .
|
3,404
|
def daemon_factory ( path ) : def start_daemon ( ) : root_dir = path config_dir = os . path . join ( root_dir , '.config/pueue' ) try : daemon = Daemon ( root_dir = root_dir ) daemon . main ( ) except KeyboardInterrupt : print ( 'Keyboard interrupt. Shutting down' ) daemon . stop_daemon ( ) except Exception : try : daemon . stop_daemon ( ) except Exception : pass cleanup ( config_dir ) raise return start_daemon
|
Create a closure which creates a running daemon .
|
3,405
|
def main ( ) : args = parser . parse_args ( ) args_dict = vars ( args ) root_dir = args_dict [ 'root' ] if 'root' in args else None if root_dir : root_dir = os . path . abspath ( root_dir ) if not os . path . exists ( root_dir ) : print ( "The specified directory doesn't exist!" ) sys . exit ( 1 ) else : root_dir = os . path . expanduser ( '~' ) if args . stopdaemon : print_command_factory ( 'STOPDAEMON' ) ( vars ( args ) , root_dir ) elif args . nodaemon : daemon_factory ( root_dir ) ( ) elif args . daemon : config_dir = os . path . join ( root_dir , '.config/pueue' ) os . makedirs ( config_dir , exist_ok = True ) daemon = Daemonize ( app = 'pueue' , pid = os . path . join ( config_dir , 'pueue.pid' ) , action = daemon_factory ( root_dir ) , chdir = root_dir ) daemon . start ( ) elif hasattr ( args , 'func' ) : try : args . func ( args_dict , root_dir ) except EOFError : print ( 'Apparently the daemon just died. Sorry for that :/' ) else : print ( 'Invalid Command. Please check -h' )
|
Execute entry function .
|
3,406
|
def register ( host = DFLT_ADDRESS [ 0 ] , port = DFLT_ADDRESS [ 1 ] , signum = signal . SIGUSR1 ) : _pdbhandler . _register ( host , port , signum )
|
Register a pdb handler for signal signum .
|
3,407
|
def get_handler ( ) : host , port , signum = _pdbhandler . _registered ( ) if signum : return Handler ( host if host else DFLT_ADDRESS [ 0 ] . encode ( ) , port if port else DFLT_ADDRESS [ 1 ] , signum )
|
Return the handler as a named tuple .
|
3,408
|
def wait ( self , timeout ) : logger . debug ( 'Waiting for %fs' , timeout ) return self . _event . wait ( timeout )
|
Wait for the provided time to elapse
|
3,409
|
def delay ( self ) : if self . _last_checked : return self . _interval - ( time . time ( ) - self . _last_checked ) return self . _interval
|
How long to wait before the next check
|
3,410
|
def callback ( self ) : self . _callback ( * self . _args , ** self . _kwargs ) self . _last_checked = time . time ( )
|
Run the callback
|
3,411
|
def run ( self ) : while not self . wait ( self . delay ( ) ) : try : logger . info ( 'Invoking callback %s' , self . callback ) self . callback ( ) except StandardError : logger . exception ( 'Callback failed' )
|
Run the callback periodically
|
3,412
|
def login ( self , email = None , password = None , app_id = None , api_key = None ) : session_token = self . api . user_get_session_token ( app_id = app_id , email = email , password = password , api_key = api_key ) self . api . session = session_token
|
Login to MediaFire account .
|
3,413
|
def get_resource_by_uri ( self , uri ) : location = self . _parse_uri ( uri ) if location . startswith ( "/" ) : result = self . get_resource_by_path ( location ) elif "/" in location : resource_key , path = location . split ( '/' , 2 ) parent_folder = self . get_resource_by_key ( resource_key ) if not isinstance ( parent_folder , Folder ) : raise NotAFolderError ( resource_key ) result = self . get_resource_by_path ( path , folder_key = parent_folder [ 'folderkey' ] ) else : result = self . get_resource_by_key ( location ) return result
|
Return resource described by MediaFire URI .
|
3,414
|
def get_resource_by_path ( self , path , folder_key = None ) : logger . debug ( "resolving %s" , path ) path = posixpath . normpath ( path ) components = [ t for t in path . split ( posixpath . sep ) if t != '' ] if not components : return Folder ( self . api . folder_get_info ( folder_key ) [ 'folder_info' ] ) resource = None for component in components : exists = False for item in self . _folder_get_content_iter ( folder_key ) : name = item [ 'name' ] if 'name' in item else item [ 'filename' ] if name == component : exists = True if components [ - 1 ] != component : if 'filename' in item : raise NotAFolderError ( item [ 'filename' ] ) folder_key = item [ 'folderkey' ] else : resource = item break if resource is not None : break if not exists : break if resource is None : raise ResourceNotFoundError ( path ) if "quickkey" in resource : file_info = self . api . file_get_info ( resource [ 'quickkey' ] ) [ 'file_info' ] result = File ( file_info ) elif "folderkey" in resource : folder_info = self . api . folder_get_info ( resource [ 'folderkey' ] ) [ 'folder_info' ] result = Folder ( folder_info ) return result
|
Return resource by remote path .
|
3,415
|
def _folder_get_content_iter ( self , folder_key = None ) : lookup_params = [ { 'content_type' : 'folders' , 'node' : 'folders' } , { 'content_type' : 'files' , 'node' : 'files' } ] for param in lookup_params : more_chunks = True chunk = 0 while more_chunks : chunk += 1 content = self . api . folder_get_content ( content_type = param [ 'content_type' ] , chunk = chunk , folder_key = folder_key ) [ 'folder_content' ] if not content [ param [ 'node' ] ] : break if content [ 'more_chunks' ] == 'no' : more_chunks = False for resource_info in content [ param [ 'node' ] ] : yield resource_info
|
Iterator for api . folder_get_content
|
3,416
|
def get_folder_contents_iter ( self , uri ) : resource = self . get_resource_by_uri ( uri ) if not isinstance ( resource , Folder ) : raise NotAFolderError ( uri ) folder_key = resource [ 'folderkey' ] for item in self . _folder_get_content_iter ( folder_key ) : if 'filename' in item : if ".patch." in item [ 'filename' ] : continue yield File ( item ) elif 'name' in item : yield Folder ( item )
|
Return iterator for directory contents .
|
3,417
|
def create_folder ( self , uri , recursive = False ) : logger . info ( "Creating %s" , uri ) try : resource = self . get_resource_by_uri ( uri ) if isinstance ( resource , Folder ) : return resource else : raise NotAFolderError ( uri ) except ResourceNotFoundError : pass location = self . _parse_uri ( uri ) folder_name = posixpath . basename ( location ) parent_uri = 'mf://' + posixpath . dirname ( location ) try : parent_node = self . get_resource_by_uri ( parent_uri ) if not isinstance ( parent_node , Folder ) : raise NotAFolderError ( parent_uri ) parent_key = parent_node [ 'folderkey' ] except ResourceNotFoundError : if recursive : result = self . create_folder ( parent_uri , recursive = True ) parent_key = result [ 'folderkey' ] else : raise result = self . api . folder_create ( folder_name , parent_key = parent_key , action_on_duplicate = 'skip' ) logger . info ( "Created folder '%s' [mf:%s]" , result [ 'name' ] , result [ 'folder_key' ] ) return self . get_resource_by_key ( result [ 'folder_key' ] )
|
Create folder .
|
3,418
|
def delete_folder ( self , uri , purge = False ) : try : resource = self . get_resource_by_uri ( uri ) except ResourceNotFoundError : return None if not isinstance ( resource , Folder ) : raise ValueError ( "Folder expected, got {}" . format ( type ( resource ) ) ) if purge : func = self . api . folder_purge else : func = self . api . folder_delete try : result = func ( resource [ 'folderkey' ] ) except MediaFireApiError as err : if err . code == 100 : logger . debug ( "Delete folder returns error 900 but folder is deleted: " "http://forum.mediafiredev.com/showthread.php?129" ) result = { } else : raise return result
|
Delete folder .
|
3,419
|
def delete_file ( self , uri , purge = False ) : try : resource = self . get_resource_by_uri ( uri ) except ResourceNotFoundError : return None if not isinstance ( resource , File ) : raise ValueError ( "File expected, got {}" . format ( type ( resource ) ) ) if purge : func = self . api . file_purge else : func = self . api . file_delete return func ( resource [ 'quickkey' ] )
|
Delete file .
|
3,420
|
def delete_resource ( self , uri , purge = False ) : try : resource = self . get_resource_by_uri ( uri ) except ResourceNotFoundError : return None if isinstance ( resource , File ) : result = self . delete_file ( uri , purge ) elif isinstance ( resource , Folder ) : result = self . delete_folder ( uri , purge ) else : raise ValueError ( 'Unsupported resource: {}' . format ( type ( resource ) ) ) return result
|
Delete file or folder
|
3,421
|
def _prepare_upload_info ( self , source , dest_uri ) : try : dest_resource = self . get_resource_by_uri ( dest_uri ) except ResourceNotFoundError : dest_resource = None is_fh = hasattr ( source , 'read' ) folder_key = None name = None if dest_resource : if isinstance ( dest_resource , File ) : folder_key = dest_resource [ 'parent_folderkey' ] name = dest_resource [ 'filename' ] elif isinstance ( dest_resource , Folder ) : if is_fh : raise ValueError ( "Cannot determine target file name" ) basename = posixpath . basename ( source ) dest_uri = posixpath . join ( dest_uri , basename ) try : result = self . get_resource_by_uri ( dest_uri ) if isinstance ( result , Folder ) : raise ValueError ( "Target is a folder (file expected)" ) folder_key = result . get ( 'parent_folderkey' , None ) name = result [ 'filename' ] except ResourceNotFoundError : folder_key = dest_resource [ 'folderkey' ] name = basename else : raise Exception ( "Unknown resource type" ) else : parent_uri = '/' . join ( dest_uri . split ( '/' ) [ 0 : - 1 ] ) result = self . get_resource_by_uri ( parent_uri ) if not isinstance ( result , Folder ) : raise NotAFolderError ( "Parent component is not a folder" ) folder_key = result [ 'folderkey' ] name = posixpath . basename ( dest_uri ) return folder_key , name
|
Prepare Upload object and resolve paths .
|
3,422
|
def upload_file ( self , source , dest_uri ) : folder_key , name = self . _prepare_upload_info ( source , dest_uri ) is_fh = hasattr ( source , 'read' ) fd = None try : if is_fh : fd = source else : fd = open ( source , 'rb' ) return MediaFireUploader ( self . api ) . upload ( fd , name , folder_key = folder_key , action_on_duplicate = 'replace' ) finally : if fd and not is_fh : fd . close ( )
|
Upload file to MediaFire .
|
3,423
|
def download_file ( self , src_uri , target ) : resource = self . get_resource_by_uri ( src_uri ) if not isinstance ( resource , File ) : raise MediaFireError ( "Only files can be downloaded" ) quick_key = resource [ 'quickkey' ] result = self . api . file_get_links ( quick_key = quick_key , link_type = 'direct_download' ) direct_download = result [ 'links' ] [ 0 ] [ 'direct_download' ] direct_download = direct_download . replace ( 'http:' , 'https:' ) name = resource [ 'filename' ] target_is_filehandle = True if hasattr ( target , 'write' ) else False if not target_is_filehandle : if ( os . path . exists ( target ) and os . path . isdir ( target ) ) or target . endswith ( "/" ) : target = os . path . join ( target , name ) if not os . path . isdir ( os . path . dirname ( target ) ) : os . makedirs ( os . path . dirname ( target ) ) logger . info ( "Downloading %s to %s" , src_uri , target ) response = requests . get ( direct_download , stream = True ) try : if target_is_filehandle : out_fd = target else : out_fd = open ( target , 'wb' ) checksum = hashlib . sha256 ( ) for chunk in response . iter_content ( chunk_size = 4096 ) : if chunk : out_fd . write ( chunk ) checksum . update ( chunk ) checksum_hex = checksum . hexdigest ( ) . lower ( ) if checksum_hex != resource [ 'hash' ] : raise DownloadError ( "Hash mismatch ({} != {})" . format ( resource [ 'hash' ] , checksum_hex ) ) logger . info ( "Download completed successfully" ) finally : if not target_is_filehandle : out_fd . close ( )
|
Download file from MediaFire .
|
3,424
|
def update_file_metadata ( self , uri , filename = None , description = None , mtime = None , privacy = None ) : resource = self . get_resource_by_uri ( uri ) if not isinstance ( resource , File ) : raise ValueError ( 'Expected File, got {}' . format ( type ( resource ) ) ) result = self . api . file_update ( resource [ 'quickkey' ] , filename = filename , description = description , mtime = mtime , privacy = privacy ) return result
|
Update file metadata .
|
3,425
|
def update_folder_metadata ( self , uri , foldername = None , description = None , mtime = None , privacy = None , privacy_recursive = None ) : resource = self . get_resource_by_uri ( uri ) if not isinstance ( resource , Folder ) : raise ValueError ( 'Expected Folder, got {}' . format ( type ( resource ) ) ) result = self . api . folder_update ( resource [ 'folderkey' ] , foldername = foldername , description = description , mtime = mtime , privacy = privacy , privacy_recursive = privacy_recursive ) return result
|
Update folder metadata .
|
3,426
|
def _parse_uri ( uri ) : tokens = urlparse ( uri ) if tokens . netloc != '' : logger . error ( "Invalid URI: %s" , uri ) raise ValueError ( "MediaFire URI format error: " "host should be empty - mf:///path" ) if tokens . scheme != '' and tokens . scheme != URI_SCHEME : raise ValueError ( "MediaFire URI format error: " "must start with 'mf:' or '/'" ) return posixpath . normpath ( tokens . path )
|
Parse and validate MediaFire URI .
|
3,427
|
def merged ( self ) : stats = { } for topic in self . client . topics ( ) [ 'topics' ] : for producer in self . client . lookup ( topic ) [ 'producers' ] : hostname = producer [ 'broadcast_address' ] port = producer [ 'http_port' ] host = '%s_%s' % ( hostname , port ) stats [ host ] = nsqd . Client ( 'http://%s:%s/' % ( hostname , port ) ) . clean_stats ( ) return stats
|
The clean stats from all the hosts reporting to this host .
|
3,428
|
def stats ( self ) : data = Counter ( ) for name , value , aggregated in self . raw : if aggregated : data [ '%s.max' % name ] = max ( data [ '%s.max' % name ] , value ) data [ '%s.total' % name ] += value else : data [ name ] = value return sorted ( data . items ( ) )
|
Stats that have been aggregated appropriately .
|
3,429
|
def get_curline ( ) : if Frame : frame = Frame . get_selected_python_frame ( ) if frame : line = '' f = frame . get_pyop ( ) if f and not f . is_optimized_out ( ) : cwd = os . path . join ( os . getcwd ( ) , '' ) fname = f . filename ( ) if cwd in fname : fname = fname [ len ( cwd ) : ] try : line = f . current_line ( ) except IOError : pass if line : line = repr ( line ) . strip ( "'" ) line = line [ : - 2 ] if line . endswith ( r'\n' ) else line return ( '-> %s(%s): %s' % ( fname , f . current_line_num ( ) , line ) ) return ''
|
Return the current python source line .
|
3,430
|
def reconnected ( self , conn ) : conn . sub ( self . _topic , self . _channel ) conn . rdy ( 1 )
|
Subscribe connection and manipulate its RDY state
|
3,431
|
def distribute_ready ( self ) : connections = [ c for c in self . connections ( ) if c . alive ( ) ] if len ( connections ) > self . _max_in_flight : raise NotImplementedError ( 'Max in flight must be greater than number of connections' ) else : for count , conn in distribute ( self . _max_in_flight , connections ) : if count > conn . max_rdy_count : logger . info ( 'Using max_rdy_count (%i) instead of %i for %s RDY' , conn . max_rdy_count , count , conn ) count = conn . max_rdy_count logger . info ( 'Sending RDY %i to %s' , count , conn ) conn . rdy ( count )
|
Distribute the ready state across all of the connections
|
3,432
|
def needs_distribute_ready ( self ) : alive = [ c for c in self . connections ( ) if c . alive ( ) ] if any ( c . ready <= ( c . last_ready_sent * 0.25 ) for c in alive ) : return True
|
Determine whether or not we need to redistribute the ready state
|
3,433
|
def read ( self ) : found = Client . read ( self ) if self . needs_distribute_ready ( ) : self . distribute_ready ( ) return found
|
Read some number of messages
|
3,434
|
def profiler ( ) : import cProfile import pstats pr = cProfile . Profile ( ) pr . enable ( ) yield pr . disable ( ) ps = pstats . Stats ( pr ) . sort_stats ( 'tottime' ) ps . print_stats ( )
|
Profile the block
|
3,435
|
def messages ( count , size ) : import string letters = islice ( cycle ( chain ( string . lowercase , string . uppercase ) ) , size ) return islice ( cycle ( '' . join ( l ) for l in permutations ( letters , size ) ) , count )
|
Generator for count messages of the provided size
|
3,436
|
def stats ( ) : import re import sys import math values = [ ] for line in sys . stdin : values . extend ( map ( float , re . findall ( r'\d+\.?\d+' , line ) ) ) mean = sum ( values ) / len ( values ) variance = sum ( ( val - mean ) ** 2 for val in values ) / len ( values ) print '%3i items; mean: %10.5f; std-dev: %10.5f' % ( len ( values ) , mean , math . sqrt ( variance ) )
|
Read a stream of floats and give summary statistics
|
3,437
|
def ready ( self ) : if self . _last_failed : delta = time . time ( ) - self . _last_failed return delta >= self . backoff ( ) return True
|
Whether or not enough time has passed since the last failure
|
3,438
|
def execute_log ( args , root_dir ) : if args . get ( 'keys' ) : config_dir = os . path . join ( root_dir , '.config/pueue' ) queue_path = os . path . join ( config_dir , 'queue' ) if os . path . exists ( queue_path ) : queue_file = open ( queue_path , 'rb' ) try : queue = pickle . load ( queue_file ) except Exception : print ( 'Queue log file seems to be corrupted. Aborting.' ) return queue_file . close ( ) else : print ( 'There is no queue log file. Aborting.' ) return for key in args . get ( 'keys' ) : if queue . get ( key ) and queue [ key ] [ 'status' ] in [ 'failed' , 'done' ] : entry = queue [ key ] print ( 'Log of entry: {}' . format ( key ) ) print ( 'Returncode: {}' . format ( entry [ 'returncode' ] ) ) print ( 'Command: {}' . format ( entry [ 'command' ] ) ) print ( 'Path: {}' . format ( entry [ 'path' ] ) ) print ( 'Start: {}, End: {} \n' . format ( entry [ 'start' ] , entry [ 'end' ] ) ) if len ( entry [ 'stderr' ] ) > 0 : print ( Color ( '{autored}Stderr output: {/autored}\n ' ) + entry [ 'stderr' ] ) if len ( entry [ 'stdout' ] ) > 0 : print ( Color ( '{autogreen}Stdout output: {/autogreen}\n ' ) + entry [ 'stdout' ] ) else : print ( 'No finished process with key {}.' . format ( key ) ) else : log_path = os . path . join ( root_dir , '.local/share/pueue/queue.log' ) log_file = open ( log_path , 'r' ) print ( log_file . read ( ) )
|
Print the current log file .
|
3,439
|
def execute_show ( args , root_dir ) : key = None if args . get ( 'key' ) : key = args [ 'key' ] status = command_factory ( 'status' ) ( { } , root_dir = root_dir ) if key not in status [ 'data' ] or status [ 'data' ] [ key ] [ 'status' ] != 'running' : print ( 'No running process with this key, use `log` to show finished processes.' ) return else : status = command_factory ( 'status' ) ( { } , root_dir = root_dir ) if isinstance ( status [ 'data' ] , str ) : print ( status [ 'data' ] ) return for k in sorted ( status [ 'data' ] . keys ( ) ) : if status [ 'data' ] [ k ] [ 'status' ] == 'running' : key = k break if key is None : print ( 'No running process, use `log` to show finished processes.' ) return config_dir = os . path . join ( root_dir , '.config/pueue' ) stdoutFile = os . path . join ( config_dir , 'pueue_process_{}.stdout' . format ( key ) ) stderrFile = os . path . join ( config_dir , 'pueue_process_{}.stderr' . format ( key ) ) stdoutDescriptor = open ( stdoutFile , 'r' ) stderrDescriptor = open ( stderrFile , 'r' ) running = True if args [ 'watch' ] : stdscr = curses . initscr ( ) curses . noecho ( ) curses . cbreak ( ) curses . curs_set ( 2 ) stdscr . keypad ( True ) stdscr . refresh ( ) try : while running : stdscr . clear ( ) stdoutDescriptor . seek ( 0 ) message = stdoutDescriptor . read ( ) stdscr . addstr ( 0 , 0 , message ) stdscr . refresh ( ) time . sleep ( 2 ) except Exception : curses . nocbreak ( ) stdscr . keypad ( False ) curses . echo ( ) curses . endwin ( ) else : print ( 'Stdout output:\n' ) stdoutDescriptor . seek ( 0 ) print ( get_descriptor_output ( stdoutDescriptor , key ) ) print ( '\n\nStderr output:\n' ) stderrDescriptor . seek ( 0 ) print ( get_descriptor_output ( stderrDescriptor , key ) )
|
Print stderr and stdout of the current running process .
|
3,440
|
def fetch_track ( self , track_id , terr = KKBOXTerritory . TAIWAN ) : url = 'https://api.kkbox.com/v1.1/tracks/%s' % track_id url += '?' + url_parse . urlencode ( { 'territory' : terr } ) return self . http . _post_data ( url , None , self . http . _headers_with_access_token ( ) )
|
Fetches a song track by given ID .
|
3,441
|
def show ( self , user , feed , id ) : uri = '/users/{}/feeds/{}/indicators/{}' . format ( user , feed , id ) return self . client . get ( uri )
|
Show a specific indicator by id
|
3,442
|
def create ( self ) : uri = '/users/{0}/feeds/{1}/indicators' . format ( self . user , self . feed ) data = { "indicator" : json . loads ( str ( self . indicator ) ) , "comment" : self . comment , "content" : self . content } if self . attachment : attachment = self . _file_to_attachment ( self . attachment , filename = self . attachment_name ) data [ 'attachment' ] = { 'data' : attachment [ 'data' ] , 'filename' : attachment [ 'filename' ] } if not data [ 'indicator' ] . get ( 'indicator' ) : data [ 'indicator' ] [ 'indicator' ] = attachment [ 'sha1' ] if not data [ 'indicator' ] . get ( 'indicator' ) : raise Exception ( 'Missing indicator' ) return self . client . post ( uri , data )
|
Submit action on the Indicator object
|
3,443
|
def create_bulk ( self , indicators , user , feed ) : from . constants import API_VERSION if API_VERSION == '1' : print ( "create_bulk currently un-avail with APIv1" ) raise SystemExit uri = '/users/{0}/feeds/{1}/indicators_bulk' . format ( user , feed ) data = { 'indicators' : [ { 'indicator' : i . args . indicator , 'feed_id' : i . args . feed , 'tag_list' : i . args . tags , "description" : i . args . description , "portlist" : i . args . portlist , "protocol" : i . args . protocol , 'firsttime' : i . args . firsttime , 'lasttime' : i . args . lasttime , 'portlist_src' : i . args . portlist_src , 'comment' : { 'content' : i . args . comment } , 'rdata' : i . args . rdata , 'rtype' : i . args . rtype , 'content' : i . args . content , 'provider' : i . args . provider , } for i in indicators ] } return self . client . post ( uri , data )
|
Submit action against the IndicatorBulk endpoint
|
3,444
|
def parse_keystring ( conn , key_string ) : from PyQt5 . QtGui import QKeySequence from PyQt5 . QtCore import Qt from . qt_keycodes import KeyTbl , ModsTbl keysequence = QKeySequence ( key_string ) ks = keysequence [ 0 ] mods = Qt . NoModifier qtmods = Qt . NoModifier modifiers = 0 if ( ks & Qt . ShiftModifier == Qt . ShiftModifier ) : mods |= ModsTbl . index ( Qt . ShiftModifier ) qtmods |= Qt . ShiftModifier . real modifiers |= getattr ( xproto . KeyButMask , "Shift" , 0 ) if ( ks & Qt . AltModifier == Qt . AltModifier ) : mods |= ModsTbl . index ( Qt . AltModifier ) qtmods |= Qt . AltModifier . real modifiers |= getattr ( xproto . KeyButMask , "Mod1" , 0 ) if ( ks & Qt . ControlModifier == Qt . ControlModifier ) : mods |= ModsTbl . index ( Qt . ControlModifier ) qtmods |= Qt . ControlModifier . real modifiers |= getattr ( xproto . KeyButMask , "Control" , 0 ) qtkeys = ks ^ qtmods key = QKeySequence ( Qt . Key ( qtkeys ) ) . toString ( ) . lower ( ) keycode = lookup_string ( conn , key ) return modifiers , keycode modifiers = 0 keycode = None key_string = "Shift+Control+A" for part in key_string . split ( '+' ) : if hasattr ( xproto . KeyButMask , part ) : modifiers |= getattr ( xproto . KeyButMask , part ) else : if len ( part ) == 1 : part = part . lower ( ) keycode = lookup_string ( conn , part ) return modifiers , keycode
|
A utility function to turn strings like Mod1 + Mod4 + a into a pair corresponding to its modifiers and keycode .
|
3,445
|
def lookup_string ( conn , kstr ) : if kstr in keysyms : return get_keycode ( conn , keysyms [ kstr ] ) elif len ( kstr ) > 1 and kstr . capitalize ( ) in keysyms : return get_keycode ( conn , keysyms [ kstr . capitalize ( ) ] ) return None
|
Finds the keycode associated with a string representation of a keysym .
|
3,446
|
def get_keyboard_mapping ( conn ) : mn , mx = get_min_max_keycode ( conn ) return conn . core . GetKeyboardMapping ( mn , mx - mn + 1 )
|
Return a keyboard mapping cookie that can be used to fetch the table of keysyms in the current X environment .
|
3,447
|
def get_keyboard_mapping_unchecked ( conn ) : mn , mx = get_min_max_keycode ( ) return conn . core . GetKeyboardMappingUnchecked ( mn , mx - mn + 1 )
|
Return an unchecked keyboard mapping cookie that can be used to fetch the table of keysyms in the current X environment .
|
3,448
|
def get_keycode ( conn , keysym ) : mn , mx = get_min_max_keycode ( conn ) cols = __kbmap . keysyms_per_keycode for i in range ( mn , mx + 1 ) : for j in range ( 0 , cols ) : ks = get_keysym ( conn , i , col = j ) if ks == keysym : return i return None
|
Given a keysym find the keycode mapped to it in the current X environment . It is necessary to search the keysym table in order to do this including all columns .
|
3,449
|
def ungrab_key ( conn , wid , modifiers , key ) : try : for mod in TRIVIAL_MODS : conn . core . UngrabKeyChecked ( key , wid , modifiers | mod ) . check ( ) return True except xproto . BadAccess : return False
|
Ungrabs a key that was grabbed by grab_key . Similarly it will return True on success and False on failure .
|
3,450
|
def update_keyboard_mapping ( conn , e ) : global __kbmap , __keysmods newmap = get_keyboard_mapping ( conn ) . reply ( ) if e is None : __kbmap = newmap __keysmods = get_keys_to_mods ( conn ) return if e . request == xproto . Mapping . Keyboard : changes = { } for kc in range ( * get_min_max_keycode ( conn ) ) : knew = get_keysym ( kc , kbmap = newmap ) oldkc = get_keycode ( conn , knew ) if oldkc != kc : changes [ oldkc ] = kc __kbmap = newmap __regrab ( changes ) elif e . request == xproto . Mapping . Modifier : __keysmods = get_keys_to_mods ( )
|
Whenever the keyboard mapping is changed this function needs to be called to update xpybutil's internal representation of the current keysym table . Indeed xpybutil will do this for you automatically .
|
3,451
|
def get_storages ( self , storage_type = 'normal' ) : res = self . get_request ( '/storage/' + storage_type ) return Storage . _create_storage_objs ( res [ 'storages' ] , cloud_manager = self )
|
Return a list of Storage objects from the API .
|
3,452
|
def get_storage ( self , storage ) : res = self . get_request ( '/storage/' + str ( storage ) ) return Storage ( cloud_manager = self , ** res [ 'storage' ] )
|
Return a Storage object from the API .
|
3,453
|
def create_storage ( self , size = 10 , tier = 'maxiops' , title = 'Storage disk' , zone = 'fi-hel1' , backup_rule = { } ) : body = { 'storage' : { 'size' : size , 'tier' : tier , 'title' : title , 'zone' : zone , 'backup_rule' : backup_rule } } res = self . post_request ( '/storage' , body ) return Storage ( cloud_manager = self , ** res [ 'storage' ] )
|
Create a Storage object . Returns an object based on the API's response .
|
3,454
|
def modify_storage ( self , storage , size , title , backup_rule = { } ) : res = self . _modify_storage ( str ( storage ) , size , title , backup_rule ) return Storage ( cloud_manager = self , ** res [ 'storage' ] )
|
Modify a Storage object . Returns an object based on the API's response .
|
3,455
|
def attach_storage ( self , server , storage , storage_type , address ) : body = { 'storage_device' : { } } if storage : body [ 'storage_device' ] [ 'storage' ] = str ( storage ) if storage_type : body [ 'storage_device' ] [ 'type' ] = storage_type if address : body [ 'storage_device' ] [ 'address' ] = address url = '/server/{0}/storage/attach' . format ( server ) res = self . post_request ( url , body ) return Storage . _create_storage_objs ( res [ 'server' ] [ 'storage_devices' ] , cloud_manager = self )
|
Attach a Storage object to a Server . Return a list of the server's storages .
|
3,456
|
def detach_storage ( self , server , address ) : body = { 'storage_device' : { 'address' : address } } url = '/server/{0}/storage/detach' . format ( server ) res = self . post_request ( url , body ) return Storage . _create_storage_objs ( res [ 'server' ] [ 'storage_devices' ] , cloud_manager = self )
|
Detach a Storage object from a Server . Return a list of the server's storages .
|
3,457
|
def _reset ( self , ** kwargs ) : if 'uuid' in kwargs : self . uuid = kwargs [ 'uuid' ] elif 'storage' in kwargs : self . uuid = kwargs [ 'storage' ] if 'title' in kwargs : self . title = kwargs [ 'title' ] elif 'storage_title' in kwargs : self . title = kwargs [ 'storage_title' ] if 'size' in kwargs : self . size = kwargs [ 'size' ] elif 'storage_size' in kwargs : self . size = kwargs [ 'storage_size' ] filtered_kwargs = dict ( ( key , val ) for key , val in kwargs . items ( ) if key not in [ 'uuid' , 'storage' , 'title' , 'storage_title' , 'size' , 'storage_size' ] ) super ( Storage , self ) . _reset ( ** filtered_kwargs )
|
Reset after repopulating from API .
|
3,458
|
def fetch_album ( self , album_id , terr = KKBOXTerritory . TAIWAN ) : url = 'https://api.kkbox.com/v1.1/albums/%s' % album_id url += '?' + url_parse . urlencode ( { 'territory' : terr } ) return self . http . _post_data ( url , None , self . http . _headers_with_access_token ( ) )
|
Fetches an album by given ID .
|
3,459
|
def lookup ( self ) : print "%s by %s, size: %s, uploaded %s ago" % ( self . name , self . author , self . size , self . age )
|
Prints name author size and age
|
3,460
|
def _get_max_page(self, url):
    """Return the number of result pages at *url*.

    The page's <h2> header ends with the total result count; results
    are listed 25 per page, so the page count is ceil(total / 25).

    Raises ValueError ("No results found!") when the header carries no
    numeric count, i.e. the search matched nothing.
    """
    html = requests.get(url).text
    pq = PyQuery(html)
    try:
        total = int(pq("h2").text().split()[-1])
    except ValueError:
        raise ValueError("No results found!")
    # `//` keeps the original Python 2 floor-division behaviour of `/`
    # on ints while also being correct on Python 3.
    if total % 25:
        return total // 25 + 1
    return total // 25
|
Open the URL and return the number of pages
|
3,461
|
def build(self, update=True):
    """Build and return the search URL; refresh ``max_page`` unless *update* is False.

    The URL is <base><query>[ category:<cat>]/<page>/[?field=..&sorder=..].
    (The original started with a dead `ret = self.base + self.query`
    assignment that was immediately overwritten; it is removed here.)
    """
    page = "".join(("/", str(self.page), "/"))
    if self.category:
        category = " category:" + self.category
    else:
        category = ""
    if self.order:
        order = "".join(("?field=", self.order[0],
                         "&sorder=", self.order[1]))
    else:
        order = ""
    ret = "".join((self.base, self.query, category, page, order))
    if update:
        self.max_page = self._get_max_page(ret)
    return ret
|
Build and return url . Also update max_page .
|
3,462
|
def build(self, update=True):
    """Build and return the user-uploads URL; refresh ``max_page`` unless *update* is False.

    Unlike the other result lists, the page number lives in the query
    string rather than in the URL path.
    """
    query = "?page={0}".format(self.page)
    if self.order:
        field, sorder = self.order
        query = "{0}&field={1}&sorder={2}".format(query, field, sorder)
    url = "".join((self.base, self.user, "/uploads/", query))
    if update:
        self.max_page = self._get_max_page(url)
    return url
|
Build and return url . Also update max_page . URL structure for user torrent lists differs from other result lists as the page number is part of the query string and not the URL path
|
3,463
|
def _items(self):
    """Yield a Torrent namedtuple for every torrent row on the page."""
    # Parse all rows up front (the original's py2 map() was eager too),
    # then hand them out one at a time.
    parsed = [self._get_torrent(row) for row in self._get_rows()]
    for torrent in parsed:
        yield torrent
|
Parse url and yield namedtuple Torrent for every torrent on page
|
3,464
|
def _get_torrent(self, row):
    """Parse one result-table row into a Torrent namedtuple."""
    cells = row("td")
    # The markup sometimes injects spaces around dots in the name.
    name = cells("a.cellMainLink").text().replace(" . ", ".").replace(" .", ".")
    author = cells("a.plain").text()
    verified_author = bool(cells(".lightgrey>.ka-verify"))
    category = cells("span").find("strong").find("a").eq(0).text()
    verified_torrent = bool(cells(".icon16>.ka-green"))
    comments = cells(".iaconbox>.icommentjs>.iconvalue").text()
    torrent_link = "http://" + BASE.domain
    href = cells("a.cellMainLink").attr("href")
    if href is not None:
        torrent_link += href
    magnet_link = cells("a[data-nop]").eq(1).attr("href")
    download_link = cells("a[data-download]").attr("href")
    centers = row("td.center")
    size = centers.eq(0).text()
    files = centers.eq(1).text()
    age = " ".join(centers.eq(2).text().split())
    seed = centers.eq(3).text()
    leech = centers.eq(4).text()
    return Torrent(name, author, verified_author, category, size, files,
                   age, seed, leech, verified_torrent, comments,
                   torrent_link, magnet_link, download_link)
|
Parse row into namedtuple
|
3,465
|
def _get_rows(self):
    """Return every torrent row of the results table (header row excluded).

    Returns an empty list when the search matched no documents.
    """
    html = requests.get(self.url.build()).text
    if re.search('did not match any documents', html):
        return []
    pq = PyQuery(html)
    rows = pq("table.data").find("tr")
    # List comprehension replaces `map(rows.eq, ...)[1:]`, which only
    # works on Python 2 where map() returns a subscriptable list.
    return [rows.eq(i) for i in range(rows.size())][1:]
|
Return all rows on page
|
3,466
|
def pages(self, page_from, page_to):
    """Yield torrents from page_from through page_to (inclusive), in page order.

    Pages are fetched concurrently (one thread per page); a chain of
    locks makes each thread append its results only after the previous
    page's results have been appended, preserving page order.

    Raises IndexError when the requested range lies outside 1..max_page.
    """
    if not all([page_from < self.url.max_page, page_from > 0,
                page_to <= self.url.max_page, page_to > page_from]):
        raise IndexError("Invalid page numbers")
    size = (page_to + 1) - page_from
    # NOTE: the original bound `threads` and `ret` to the *same* list
    # object (`threads = ret = []`); they are kept separate here.
    ret = []
    page_list = range(page_from, page_to + 1)
    locks = [threading.Lock() for i in range(size)]
    # All locks except the first start held; thread i+1 may append only
    # after thread i releases lock i+1.
    for lock in locks[1:]:
        lock.acquire()

    def t_function(pos):
        # Fetch concurrently, then wait for our turn before appending.
        res = self.page(page_list[pos]).list()
        locks[pos].acquire()
        ret.extend(res)
        if pos != size - 1:
            locks[pos + 1].release()

    threads = [threading.Thread(target=t_function, args=(i,))
               for i in range(size)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    for torrent in ret:
        yield torrent
|
Yield torrents in range from page_from to page_to
|
3,467
|
def all(self):
    """Yield every torrent from the current page through the last page."""
    first, last = self.url.page, self.url.max_page
    return self.pages(first, last)
|
Yield torrents in range from current page to last page
|
3,468
|
def order(self, field, order=None):
    """Sort results by *field* (descending when *order* is falsy); return self.

    Resets the result list to page 1.
    """
    self.url.order = (field, order or ORDER.DESC)
    self.url.set_page(1)
    return self
|
Set field and order set by arguments
|
3,469
|
def category(self, category):
    """Restrict the search to *category*, reset to page 1, and return self."""
    self.url.set_page(1)
    self.url.category = category
    return self
|
Change category of current search and return self
|
3,470
|
def destroy(self):
    """Remove this FirewallRule from the API via its server's CloudManager.

    Raises Exception when the rule is not associated with a server
    (the original raised a bare, message-less Exception here).
    """
    if not hasattr(self, 'server') or not self.server:
        raise Exception('FirewallRule is not associated with a server '
                        'and cannot be destroyed')
    return self.server.cloud_manager.delete_firewall_rule(self.server.uuid,
                                                          self.position)
|
Remove this FirewallRule from the API .
|
3,471
|
def fetch_new_release_category(self, category_id, terr=KKBOXTerritory.TAIWAN):
    """Fetch a new-release category by *category_id* for the given territory."""
    query = url_parse.urlencode({'territory': terr})
    url = 'https://api.kkbox.com/v1.1/new-release-categories/%s?%s' % (
        category_id, query)
    return self.http._post_data(url, None,
                                self.http._headers_with_access_token())
|
Fetches new release categories by given ID .
|
3,472
|
def fetch_top_tracks_of_artist(self, artist_id, terr=KKBOXTerritory.TAIWAN):
    """Fetch the top tracks of the artist *artist_id* for the given territory."""
    query = url_parse.urlencode({'territory': terr})
    url = 'https://api.kkbox.com/v1.1/artists/%s/top-tracks?%s' % (
        artist_id, query)
    return self.http._post_data(url, None,
                                self.http._headers_with_access_token())
|
Fetch the top tracks belonging to an artist by the given ID.
|
3,473
|
def get_tags(self):
    """Return every tag on the account as a list of Tag objects."""
    tags = self.get_request('/tag')['tags']['tag']
    return [Tag(cloud_manager=self, **tag) for tag in tags]
|
List all tags as Tag objects .
|
3,474
|
def get_tag(self, name):
    """Return the tag called *name* as a Tag object."""
    data = self.get_request('/tag/' + name)['tag']
    return Tag(cloud_manager=self, **data)
|
Return the tag as Tag object .
|
3,475
|
def create_tag(self, name, description=None, servers=None):
    """Create a new Tag; only *name* is mandatory.

    *servers* is an optional iterable of servers (or UUID strings) the
    tag is attached to on creation. The default is None rather than the
    original mutable `[]`, avoiding the shared-default-argument pitfall;
    behaviour for callers is unchanged.
    """
    servers = [str(server) for server in (servers or [])]
    body = {'tag': Tag(name, description, servers).to_dict()}
    res = self.request('POST', '/tag', body)
    return Tag(cloud_manager=self, **res['tag'])
|
Create a new Tag . Only name is mandatory .
|
3,476
|
def remove_tags(self, server, tags):
    """Detach the given tags (Tag objects or names) from *server*."""
    tag_list = ','.join(str(tag) for tag in tags)
    url = '/server/{0}/untag/{1}'.format(str(server), tag_list)
    return self.post_request(url)
|
Remove tags from a server .
|
3,477
|
def assignIfExists(opts, default=None, **kwargs):
    """Return the value of the first key from *opts* found in kwargs, else *default*."""
    matches = (kwargs[opt] for opt in opts if opt in kwargs)
    return next(matches, default)
|
Helper for assigning object attributes from API responses .
|
3,478
|
def expand(self, info=b"", length=32):
    """Derive *length* bytes of output key material bound to *info* (HKDF-Expand)."""
    prk, hash_fn = self._prk, self._hash
    return hkdf_expand(prk, info, length, hash_fn)
|
Generate output key material based on an info value
|
3,479
|
def login_user_block(username, ssh_keys, create_password=True):
    """Build a Server.login_user block dict from the given credentials.

    *username* is only included when truthy; password creation is
    enabled only when *create_password* is exactly True.
    """
    wants_password = 'yes' if create_password is True else 'no'
    block = dict(create_password=wants_password,
                 ssh_keys=dict(ssh_key=ssh_keys))
    if username:
        block['username'] = username
    return block
|
Helper function for creating Server . login_user blocks .
|
3,480
|
def _reset(self, server, **kwargs):
    """Overwrite this object's attributes from a server dict plus extras.

    object.__setattr__ is used so the class's own attribute hook is
    bypassed while repopulating.
    """
    if server:
        Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
        for key, value in server.items():
            object.__setattr__(self, key, value)
    for key, value in kwargs.items():
        object.__setattr__(self, key, value)
|
Reset the server object with new values given as params .
|
3,481
|
def populate(self):
    """Refresh this Server from the API and return self."""
    data, addresses, storages = self.cloud_manager.get_server_data(self.uuid)
    self._reset(data,
                ip_addresses=addresses,
                storage_devices=storages,
                populated=True)
    return self
|
Sync changes from the API to the local object .
|
3,482
|
def save(self):
    """Push locally modified updateable fields to the API."""
    changes = {}
    for field in self.updateable_fields:
        if hasattr(self, field):
            changes[field] = getattr(self, field)
    self.cloud_manager.modify_server(self.uuid, **changes)
    self._reset(changes)
|
Sync local changes in the server's attributes to the API.
|
3,483
|
def restart(self, hard=False, timeout=30, force=True):
    """Restart the server.

    Defaults to a soft restart with a 30 s timeout; when *force* is
    True the server is hard-restarted ('destroy' timeout action) if the
    timeout expires. Locally marks the state as 'maintenance'.
    """
    body = {
        'restart_server': {
            'stop_type': 'hard' if hard else 'soft',
            'timeout': '{0}'.format(timeout),
            'timeout_action': 'destroy' if force else 'ignore',
        }
    }
    self.cloud_manager.post_request('/server/{0}/restart'.format(self.uuid),
                                    body)
    object.__setattr__(self, 'state', 'maintenance')
|
Restart the server . By default issue a soft restart with a timeout of 30s and a hard restart after the timeout .
|
3,484
|
def remove_ip(self, IPAddress):
    """Release *IPAddress* via the API and remove it from ip_addresses."""
    address = IPAddress.address
    self.cloud_manager.release_ip(address)
    self.ip_addresses.remove(IPAddress)
|
Release the specified IP - address from the server .
|
3,485
|
def add_storage(self, storage=None, type='disk', address=None):
    """Attach *storage* to this server and record the attachment locally."""
    self.cloud_manager.attach_storage(server=self.uuid,
                                      storage=storage.uuid,
                                      storage_type=type,
                                      address=address)
    # Mirror the attachment details onto the local Storage object.
    storage.address = address
    storage.type = type
    self.storage_devices.append(storage)
|
Attach the given storage to the Server .
|
3,486
|
def remove_storage(self, storage):
    """Detach *storage* from this server and drop it from storage_devices.

    The storage must carry an `address` attribute, i.e. it must have
    been obtained through Server.storage_devices.
    """
    if not hasattr(storage, 'address'):
        raise Exception(('Storage does not have an address. '
                         'Access the Storage via Server.storage_devices '
                         'so they include an address. '
                         '(This is due how the API handles Storages)'))
    self.cloud_manager.detach_storage(server=self.uuid,
                                      address=storage.address)
    self.storage_devices.remove(storage)
|
Remove Storage from a Server .
|
3,487
|
def configure_firewall(self, FirewallRules):
    """Add several FirewallRules to this server in series.

    (The original comprehension variable shadowed the FirewallRule
    class; it is renamed here.)
    """
    bodies = [rule.to_dict() for rule in FirewallRules]
    return self.cloud_manager.configure_firewall(self, bodies)
|
Helper function for automatically adding several FirewallRules in series .
|
3,488
|
def prepare_post_body(self):
    """Build the JSON-serialisable POST body for creating this server.

    Includes hostname/zone/title, any optional fields that are set on
    the instance, every storage device (cloned from an OS template or
    an existing storage UUID, otherwise created blank) and, when
    present, the ip_addresses block.
    """
    body = dict()
    body['server'] = {
        'hostname': self.hostname,
        'zone': self.zone,
        'title': self.title,
        'storage_devices': {}
    }
    # Copy over whichever optional fields have been set on the instance.
    for optional_field in self.optional_fields:
        if hasattr(self, optional_field):
            body['server'][optional_field] = getattr(self, optional_field)
    if not hasattr(self, 'password_delivery'):
        body['server']['password_delivery'] = 'none'
    body['server']['storage_devices'] = {'storage_device': []}
    # Counter used to number auto-generated titles of non-OS disks.
    storage_title_id = 0
    for storage in self.storage_devices:
        if not hasattr(storage, 'os') or storage.os is None:
            storage_title_id += 1
        storage_body = storage.to_dict()
        # Default title when none was given: "<hostname> OS disk" for an
        # OS disk, "<hostname> storage disk N" otherwise.
        if not hasattr(storage, 'title') or not storage.title:
            if hasattr(storage, 'os') and storage.os:
                storage_body['title'] = self.hostname + ' OS disk'
            else:
                storage_body['title'] = self.hostname + ' storage disk ' + str(storage_title_id)
        if hasattr(storage, 'os') and storage.os:
            # Clone from a named operating-system template.
            storage_body['action'] = 'clone'
            storage_body['storage'] = OperatingSystems.get_OS_UUID(storage.os)
        elif hasattr(storage, 'uuid'):
            # Clone an existing storage by UUID.
            storage_body['action'] = 'clone'
            storage_body['storage'] = storage.uuid
        else:
            storage_body['action'] = 'create'
        body['server']['storage_devices']['storage_device'].append(storage_body)
    if hasattr(self, 'ip_addresses') and self.ip_addresses:
        body['server']['ip_addresses'] = {
            'ip_address': [ip.to_dict() for ip in self.ip_addresses]
        }
    return body
|
Prepare a JSON serializable dict from a Server instance with nested .
|
3,489
|
def to_dict(self):
    """Return a plain-dict snapshot of this server for read-only use.

    When the object has been populated from the API, ip_addresses and
    storage_devices are flattened into lists of plain dicts. The
    internal 'populated' and 'cloud_manager' fields are stripped.
    """
    fields = dict(vars(self).items())
    if self.populated:
        fields['ip_addresses'] = []
        fields['storage_devices'] = []
        for ip in self.ip_addresses:
            fields['ip_addresses'].append({
                'address': ip.address,
                'access': ip.access,
                'family': ip.family
            })
        for storage in self.storage_devices:
            fields['storage_devices'].append({
                'address': storage.address,
                'storage': storage.uuid,
                'storage_size': storage.size,
                'storage_title': storage.title,
                'type': storage.type,
            })
    # NOTE(review): reconstructed from flattened source — these deletes
    # are placed at function level (both keys appear to be always
    # present on the instance); confirm against original indentation.
    del fields['populated']
    del fields['cloud_manager']
    return fields
|
Prepare a JSON serializable dict for read - only purposes .
|
3,490
|
def get_ip(self, access='public', addr_family=None, strict=None):
    """Return one of the server's IP addresses as a string, or None.

    Only addresses with the requested *access* ('public'/'private') are
    considered. *addr_family* ('IPv4'/'IPv6') selects the family; when
    None, IPv4 is preferred but any matching address may be returned as
    a fallback. Populates the object first if needed.

    Raises Exception on invalid *access* or *addr_family* values.
    """
    if addr_family not in ['IPv4', 'IPv6', None]:
        raise Exception("`addr_family` must be 'IPv4', 'IPv6' or None")
    if access not in ['private', 'public']:
        raise Exception("`access` must be 'public' or 'private'")
    if not hasattr(self, 'ip_addresses'):
        self.populate()
    candidates = [ip for ip in self.ip_addresses if ip.access == access]
    wanted = addr_family or 'IPv4'
    for ip in candidates:
        if ip.family == wanted:
            return ip.address
    # Fall back to any matching-access address only when the caller did
    # not pin a family.
    if candidates and not addr_family:
        return candidates[0].address
    return None
|
Return the server's IP address.
|
3,491
|
def _wait_for_state_change(self, target_states, update_interval=10):
    """Poll the API until the server reaches one of *target_states*.

    Blocks the calling thread; *update_interval* is the poll period in
    seconds. Raises Exception if the server enters the 'error' state.
    """
    while True:
        if self.state in target_states:
            break
        if self.state == 'error':
            raise Exception('server is in error state')
        sleep(update_interval)
        self.populate()
|
Blocking wait until target_state reached . update_interval is in seconds .
|
3,492
|
def stop_and_destroy(self, sync=True):
    """Stop the server (if needed) and destroy it together with its storages.

    When *sync* is True the state is refreshed from the API first.
    Transient 'maintenance'/'error' states are waited out, a started
    server is stopped, and destruction only proceeds once the server
    reports 'stopped'.

    Raises Exception when the server ends up in an unrecognised state.
    """
    def _self_destruct():
        # Destroy the server first, then each of its storages; retries
        # mask transient *_STATE_ILLEGAL errors from the API.
        try_it_n_times(operation=self.destroy,
                       expected_error_codes=['SERVER_STATE_ILLEGAL'],
                       custom_error='destroying server failed')
        for storage in self.storage_devices:
            try_it_n_times(operation=storage.destroy,
                           expected_error_codes=['STORAGE_STATE_ILLEGAL'],
                           custom_error='destroying storage failed')
    if sync:
        self.populate()
    # A server in maintenance/error may settle into stopped or started.
    if self.state in ['maintenance', 'error']:
        self._wait_for_state_change(['stopped', 'started'])
    if self.state == 'started':
        try_it_n_times(operation=self.stop,
                       expected_error_codes=['SERVER_STATE_ILLEGAL'],
                       custom_error='stopping server failed')
        self._wait_for_state_change(['stopped'])
    if self.state == 'stopped':
        _self_destruct()
    else:
        raise Exception('unknown server state: ' + self.state)
|
Destroy a server and its storages . Stops the server before destroying .
|
3,493
|
def revert(self):
    """Replace the in-memory state with the version stored on disc.

    Prints a diagnostic (instead of raising) when the file is missing,
    unreadable, or the filepath is unset; an unreadable file resets the
    state to an empty dict.
    """
    if self.filepath:
        if path.isfile(self.filepath):
            serialised_file = open(self.filepath, "r")
            try:
                self.state = json.load(serialised_file)
            except ValueError:
                # File exists but holds no valid JSON (e.g. it is empty).
                print("No JSON information could be read from the persistence file - could be empty: %s" % self.filepath)
                self.state = {}
            finally:
                serialised_file.close()
        else:
            print("The persistence file has not yet been created or does not exist, so the state cannot be read from it yet.")
    else:
        print("Filepath to the persistence file is not set. State cannot be read.")
        # NOTE(review): reconstructed from flattened source — the
        # `return False` appears to belong to this no-filepath branch;
        # confirm against the original indentation.
        return False
|
Revert the state to the version stored on disc .
|
3,494
|
def sync(self):
    """Write the in-memory state to the persistence file as JSON.

    Prints a diagnostic and does nothing when no filepath is set.
    """
    if not self.filepath:
        print("Filepath to the persistence file is not set. State cannot be synced to disc.")
        return
    serialised_file = open(self.filepath, "w")
    json.dump(self.state, serialised_file)
    serialised_file.close()
|
Synchronise and update the stored state to the in - memory state .
|
3,495
|
def _require_bucket(self, bucket_name):
    """Return the named bucket, claiming (creating) it when it does not exist.

    Raises OFSException when the bucket can neither be found nor claimed.
    """
    available = self.exists(bucket_name) or self.claim_bucket(bucket_name)
    if not available:
        raise OFSException("Invalid bucket: %s" % bucket_name)
    return self._get_bucket(bucket_name)
|
Also try to create the bucket .
|
3,496
|
def del_stream(self, bucket, label):
    """Delete the object at *label*; fails if the bucket or label do not exist."""
    target_bucket = self._require_bucket(bucket)
    self._require_key(target_bucket, label).delete()
|
Will fail if the bucket or label don't exist
|
3,497
|
def authenticate_request(self, method, bucket='', key='', headers=None):
    """Build an HTTP request for *bucket*/*key* and fill in its Authorization header."""
    fmt = self.conn.calling_format
    path = fmt.build_path_base(bucket, key)
    auth_path = fmt.build_auth_path(bucket, key)
    request = boto.connection.AWSAuthConnection.build_base_http_request(
        self.conn, method, path, auth_path, {}, headers)
    request.authorize(connection=self.conn)
    return request
|
Authenticate a HTTP request by filling in Authorization field header .
|
3,498
|
def get_resources_to_check(client_site_url, apikey):
    """Fetch the list of resource IDs whose links should be checked.

    Raises CouldNotGetResourceIDsError on any non-2xx response.
    """
    endpoint = client_site_url + u"deadoralive/get_resources_to_check"
    response = requests.get(endpoint, headers=dict(Authorization=apikey))
    if not response.ok:
        raise CouldNotGetResourceIDsError(
            u"Couldn't get resource IDs to check: {code} {reason}".format(
                code=response.status_code, reason=response.reason))
    return response.json()
|
Return a list of resource IDs to check for broken links .
|
3,499
|
def get_url_for_id(client_site_url, apikey, resource_id):
    """Look up the URL recorded for *resource_id* on the client site.

    Raises CouldNotGetURLError on any non-2xx response.
    """
    endpoint = client_site_url + u"deadoralive/get_url_for_resource_id"
    response = requests.get(endpoint,
                            headers=dict(Authorization=apikey),
                            params={"resource_id": resource_id})
    if not response.ok:
        raise CouldNotGetURLError(
            u"Couldn't get URL for resource {id}: {code} {reason}".format(
                id=resource_id, code=response.status_code,
                reason=response.reason))
    return response.json()
|
Return the URL for the given resource ID .
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.