| idx (int64, 0-63k) | question (string, length 61-4.03k) | target (string, length 6-1.23k) |
|---|---|---|
| 3,600 | `def get_form(self, request, obj=None, **kwargs): if obj is None: kwargs['form'] = SlotAdminAddForm return super(SlotAdmin, self).get_form(request, obj, **kwargs)` | Change the form depending on whether we're adding or editing the slot. |
| 3,601 | `def get_cached_menus(): items = cache.get(CACHE_KEY) if items is None: menu = generate_menu() cache.set(CACHE_KEY, menu.items) else: menu = Menu(items) return menu` | Return the menus from the cache, or generate them if needed. |
| 3,602 | `def maybe_obj(str_or_obj): if not isinstance(str_or_obj, six.string_types): return str_or_obj parts = str_or_obj.split(".") mod, modname = None, None for p in parts: modname = p if modname is None else "%s.%s" % (modname, p) try: mod = __import__(modname) except ImportError: if mod is None: raise break obj = mod for p in parts[1:]: obj = getattr(obj, p) return obj` | If the argument is not a string, return it. |
| 3,603 | `def generate_menu(): root_menu = Menu(list(copy.deepcopy(settings.WAFER_MENUS))) for dynamic_menu_func in settings.WAFER_DYNAMIC_MENUS: dynamic_menu_func = maybe_obj(dynamic_menu_func) dynamic_menu_func(root_menu) return root_menu` | Generate a new list of menus. |
| 3,604 | `def lock(self): self.__fd = open(self.__lockfile, "w") if self.__locktype == "wait": fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX) elif self.__locktype == "lock": try: fcntl.flock(self.__fd.fileno(), fcntl.LOCK_EX \| fcntl.LOCK_NB) except IOError: raise AlreadyLocked("File is already locked")` | Try to lock the file: the function will wait until the file is unlocked if 'wait' was defined as the locktype, and will raise an AlreadyLocked exception if 'lock' was defined as the locktype. |
| 3,605 | `def _make_handler(state_token, done_function): class LocalServerHandler(BaseHTTPServer.BaseHTTPRequestHandler): def error_response(self, msg): logging.warn('Error response: %(msg)s. %(path)s', msg=msg, path=self.path) self.send_response(400) self.send_header('Content-type', 'text/plain') self.end_headers() self.wfile.write(msg) def do_GET(self): parsed = urlparse.urlparse(self.path) if len(parsed.query) == 0 or parsed.path != '/callback': self.error_response('We encountered a problem with your request.') return params = urlparse.parse_qs(parsed.query) if params['state'] != [state_token]: self.error_response('Attack detected: state tokens did not match!') return if len(params['code']) != 1: self.error_response('Wrong number of "code" query parameters.') return self.send_response(200) self.send_header('Content-type', 'text/plain') self.end_headers() self.wfile.write("courseraoauth2client: we have captured Coursera's response " "code. Feel free to close this browser window now and return " "to your terminal. Thanks!") done_function(params['code'][0]) return LocalServerHandler` | Makes a handler class to use inside the basic Python HTTP server. |
| 3,606 | `def configuration(): 'Loads configuration from the file system.' defaults = cfg = ConfigParser.SafeConfigParser() cfg.readfp(io.BytesIO(defaults)) cfg.read(['/etc/coursera/courseraoauth2client.cfg', os.path.expanduser('~/.coursera/courseraoauth2client.cfg'), 'courseraoauth2client.cfg',]) return cfg` | Loads configuration from the file system. |
| 3,607 | `def _load_token_cache(self): 'Reads the local fs cache for pre-authorized access tokens' try: logging.debug('About to read from local file cache file %s', self.token_cache_file) with open(self.token_cache_file, 'rb') as f: fs_cached = cPickle.load(f) if self._check_token_cache_type(fs_cached): logging.debug('Loaded from file system: %s', fs_cached) return fs_cached else: logging.warn('Found unexpected value in cache. %s', fs_cached) return None except IOError: logging.debug('Did not find file: %s on the file system.', self.token_cache_file) return None except: logging.info('Encountered exception loading from the file system.', exc_info=True) return None` | Reads the local fs cache for pre-authorized access tokens. |
| 3,608 | `def _save_token_cache(self, new_cache): 'Write out to the filesystem a cache of the OAuth2 information.' logging.debug('Looking to write to local authentication cache...') if not self._check_token_cache_type(new_cache): logging.error('Attempt to save a bad value: %s', new_cache) return try: logging.debug('About to write to fs cache file: %s', self.token_cache_file) with open(self.token_cache_file, 'wb') as f: cPickle.dump(new_cache, f, protocol=cPickle.HIGHEST_PROTOCOL) logging.debug('Finished dumping cache_value to fs cache file.') except: logging.exception('Could not successfully cache OAuth2 secrets on the file ' 'system.')` | Write out to the filesystem a cache of the OAuth2 information. |
| 3,609 | `def _check_token_cache_type(self, cache_value): def check_string_value(name): return (isinstance(cache_value[name], str) or isinstance(cache_value[name], unicode)) def check_refresh_token(): if 'refresh' in cache_value: return check_string_value('refresh') else: return True return (isinstance(cache_value, dict) and 'token' in cache_value and 'expires' in cache_value and check_string_value('token') and isinstance(cache_value['expires'], float) and check_refresh_token())` | Checks the cache_value for appropriate type correctness. |
| 3,610 | ``def _authorize_new_tokens(self): logging.info('About to request new OAuth2 tokens from Coursera.') state_token = uuid.uuid4().hex authorization_url = self._build_authorizaton_url(state_token) sys.stdout.write('Please visit the following URL to authorize this app:\n') sys.stdout.write('\t%s\n\n' % authorization_url) if _platform == 'darwin': sys.stdout.write('Mac OS X detected; attempting to auto-open the url ' 'in your default browser...\n') try: subprocess.check_call(['open', authorization_url]) except: logging.exception('Could not call `open %(url)s`.', url=authorization_url) if self.local_webserver_port is not None: server_address = ('', self.local_webserver_port) code_holder = CodeHolder() local_server = BaseHTTPServer.HTTPServer(server_address, _make_handler(state_token, code_holder)) while not code_holder.has_code(): local_server.handle_request() coursera_code = code_holder.code else: coursera_code = raw_input('Please enter the code from Coursera: ') form_data = {'code': coursera_code, 'client_id': self.client_id, 'client_secret': self.client_secret, 'redirect_uri': self._redirect_uri, 'grant_type': 'authorization_code',} return self._request_tokens_from_token_endpoint(form_data)`` | Stands up a new localhost HTTP server and retrieves new OAuth2 access tokens from the Coursera OAuth2 server. |
| 3,611 | `def _exchange_refresh_tokens(self): 'Exchanges a refresh token for an access token' if self.token_cache is not None and 'refresh' in self.token_cache: refresh_form = {'grant_type': 'refresh_token', 'refresh_token': self.token_cache['refresh'], 'client_id': self.client_id, 'client_secret': self.client_secret,} try: tokens = self._request_tokens_from_token_endpoint(refresh_form) tokens['refresh'] = self.token_cache['refresh'] return tokens except OAuth2Exception: logging.exception('Encountered an exception during refresh token flow.') return None` | Exchanges a refresh token for an access token. |
| 3,612 | `def foreignkey(element, exceptions): label = element.field.__dict__['label'] try: label = unicode(label) except NameError: pass if (not label) or (label in exceptions): return False else: return "_queryset" in element.field.__dict__` | Function to determine whether each select field needs a create button or not. |
| 3,613 | `def deserialize_by_field(value, field): if isinstance(field, forms.DateTimeField): value = parse_datetime(value) elif isinstance(field, forms.DateField): value = parse_date(value) elif isinstance(field, forms.TimeField): value = parse_time(value) return value` | Some types get serialized to JSON as strings. If we know what they are supposed to be, we can deserialize them. |
| 3,614 | `def main(): "Boots up the command line tool" logging.captureWarnings(True) args = build_parser().parse_args() args.setup_logging(args) try: return args.func(args) except SystemExit: raise except: logging.exception('Problem when running command. Sorry!') sys.exit(1)` | Boots up the command line tool. |
| 3,615 | `def objectatrib(instance, atrib): atrib = atrib.replace("__", ".") atribs = [] atribs = atrib.split(".") obj = instance for atrib in atribs: if type(obj) == dict: result = obj[atrib] else: try: result = getattr(obj, atrib)() except Exception: result = getattr(obj, atrib) obj = result return result` | This filter is useful for executing an object method or getting an object attribute dynamically. It takes into account that the atrib param can contain underscores. |
| 3,616 | `def upload_path(instance, filename): path_separator = "/" date_separator = "-" ext_separator = "." empty_string = "" model_name = model_inspect(instance)['modelname'] date = datetime.now().strftime("%Y-%m-%d").split(date_separator) curr_day = date[2] curr_month = date[1] curr_year = date[0] split_filename = filename.split(ext_separator) filename = empty_string.join(split_filename[:-1]) file_ext = split_filename[-1] new_filename = empty_string.join([filename, str(random.random()).split(ext_separator)[1]]) new_filename = ext_separator.join([new_filename, file_ext]) string_path = path_separator.join([model_name, curr_year, curr_month, curr_day, new_filename]) return string_path` | Returns the path to upload files to. This path must be different from any other to avoid problems. |
| 3,617 | `def remove_getdisplay(field_name): str_ini = 'get_' str_end = '_display' if str_ini == field_name[0:len(str_ini)] and str_end == field_name[(-1) * len(str_end):]: field_name = field_name[len(str_ini):(-1) * len(str_end)] return field_name` | For a string get_FIELD_NAME_display, return FIELD_NAME. |
| 3,618 | `def append(self, filename_in_zip, file_contents): self.in_memory_zip.seek(-1, io.SEEK_END) zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False) zf.writestr(filename_in_zip, file_contents) for zfile in zf.filelist: zfile.create_system = 0 zf.close() self.in_memory_zip.seek(0) return self` | Appends a file with name filename_in_zip and contents of file_contents to the in-memory zip. |
| 3,619 | `def writetofile(self, filename): f = open(filename, "w") f.write(self.read()) f.close()` | Writes the in-memory zip to a file. |
| 3,620 | `def sponsor_image_url(sponsor, name): if sponsor.files.filter(name=name).exists(): return sponsor.files.filter(name=name).first().item.url return ''` | Returns the corresponding URL from the sponsor's images. |
| 3,621 | `def sponsor_tagged_image(sponsor, tag): if sponsor.files.filter(tag_name=tag).exists(): return sponsor.files.filter(tag_name=tag).first().tagged_file.item.url return ''` | Returns the corresponding URL from the tagged image list. |
| 3,622 | `def ifusergroup(parser, token): try: tokensp = token.split_contents() groups = [] groups += tokensp[1:] except ValueError: raise template.TemplateSyntaxError("Tag 'ifusergroup' requires at least 1 argument.") nodelist_true = parser.parse(('else', 'endifusergroup')) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse(tuple(['endifusergroup',])) parser.delete_first_token() else: nodelist_false = NodeList() return GroupCheckNode(groups, nodelist_true, nodelist_false)` | Check whether the currently logged-in user belongs to a specific group. Requires the Django authentication contrib app and middleware. |
| 3,623 | `def OpenHandle(self): if hasattr(self, 'handle'): return self.handle else: handle = c_void_p() ret = vmGuestLib.VMGuestLib_OpenHandle(byref(handle)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return handle` | Gets a handle for use with other vSphere Guest API functions. The guest library handle provides a context for accessing information about the virtual machine. |
| 3,624 | `def CloseHandle(self): if hasattr(self, 'handle'): ret = vmGuestLib.VMGuestLib_CloseHandle(self.handle.value) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) del(self.handle)` | Releases a handle acquired with VMGuestLib_OpenHandle. |
| 3,625 | `def UpdateInfo(self): ret = vmGuestLib.VMGuestLib_UpdateInfo(self.handle.value) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)` | Updates information about the virtual machine. This information is associated with the VMGuestLibHandle. |
| 3,626 | `def GetSessionId(self): sid = c_void_p() ret = vmGuestLib.VMGuestLib_GetSessionId(self.handle.value, byref(sid)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return sid` | Retrieves the VMSessionID for the current session. Call this function after calling VMGuestLib_UpdateInfo. If VMGuestLib_UpdateInfo has never been called, VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO. |
| 3,627 | `def GetCpuLimitMHz(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetCpuLimitMHz(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the upper limit of processor use in MHz available to the virtual machine. For information about setting the CPU limit, see Limits and Reservations on page 14. |
| 3,628 | `def GetCpuReservationMHz(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetCpuReservationMHz(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the minimum processing power in MHz reserved for the virtual machine. For information about setting a CPU reservation, see Limits and Reservations on page 14. |
| 3,629 | `def GetCpuShares(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetCpuShares(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the number of CPU shares allocated to the virtual machine. For information about how an ESX server uses CPU shares to manage virtual machine priority, see the vSphere Resource Management Guide. |
| 3,630 | `def GetElapsedMs(self): counter = c_uint64() ret = vmGuestLib.VMGuestLib_GetElapsedMs(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the number of milliseconds that have passed in the virtual machine since it last started running on the server. The count of elapsed time restarts each time the virtual machine is powered on, resumed, or migrated using VMotion. This value counts milliseconds regardless of whether the virtual machine is using processing power during that time. |
| 3,631 | `def GetHostProcessorSpeed(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetHostProcessorSpeed(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the speed of the ESX system's physical CPU in MHz. |
| 3,632 | `def GetMemActiveMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemActiveMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the amount of memory the virtual machine is actively using: its estimated working set size. |
| 3,633 | `def GetMemLimitMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemLimitMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the upper limit of memory that is available to the virtual machine. For information about setting a memory limit, see Limits and Reservations on page 14. |
| 3,634 | `def GetMemMappedMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the amount of memory that is allocated to the virtual machine. Memory that is ballooned, swapped, or has never been accessed is excluded. |
| 3,635 | `def GetMemOverheadMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemOverheadMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the amount of overhead memory associated with this virtual machine that is currently consumed on the host system. Overhead memory is additional memory that is reserved for data structures required by the virtualization layer. |
| 3,636 | `def GetMemReservationMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemReservationMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the minimum amount of memory that is reserved for the virtual machine. For information about setting a memory reservation, see Limits and Reservations on page 14. |
| 3,637 | `def GetMemShares(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemShares(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the number of memory shares allocated to the virtual machine. For information about how an ESX server uses memory shares to manage virtual machine priority, see the vSphere Resource Management Guide. |
| 3,638 | `def GetMemSwappedMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemSwappedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the amount of memory that has been reclaimed from this virtual machine by transparently swapping guest memory to disk. |
| 3,639 | `def GetMemTargetSizeMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemTargetSizeMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the size of the target memory allocation for this virtual machine. |
| 3,640 | `def GetMemUsedMB(self): counter = c_uint() ret = vmGuestLib.VMGuestLib_GetMemUsedMB(self.handle.value, byref(counter)) if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret) return counter.value` | Retrieves the estimated amount of physical host memory currently consumed for this virtual machine's physical memory. |
| 3,641 | `def wafer_form_helper(context, helper_name): request = context.request module, class_name = helper_name.rsplit('.', 1) if module not in sys.modules: __import__(module) mod = sys.modules[module] class_ = getattr(mod, class_name) return class_(request=request)` | Find the specified Crispy FormHelper and instantiate it. Handy when you are crispyifying other apps' forms. |
| 3,642 | `def page_menus(root_menu): for page in Page.objects.filter(include_in_menu=True): path = page.get_path() menu = path[0] if len(path) > 1 else None try: root_menu.add_item(page.name, page.get_absolute_url(), menu=menu) except MenuError as e: logger.error("Bad menu item %r for page with slug %r." % (e, page.slug))` | Add page menus. |
| 3,643 | `def redirect_profile(request): if request.user.is_authenticated: return HttpResponseRedirect(reverse('wafer_user_profile', args=(request.user.username,))) else: return redirect_to_login(next=reverse(redirect_profile))` | The default destination after logging in: redirect to the actual profile URL. |
| 3,644 | `def reviewed_badge(user, talk): context = {'reviewed': False,} review = None if user and not user.is_anonymous(): review = talk.reviews.filter(reviewer=user).first() if review: context['reviewed'] = True context['review_is_current'] = review.is_current() return context` | Returns a badge for the user's reviews of the talk. |
| 3,645 | `def form_valid(self, form, forms): if self.object: form.save() for (formobj, linkerfield) in forms: if form != formobj: formobj.save() else: self.object = form.save() for (formobj, linkerfield) in forms: if form != formobj: setattr(formobj.instance, linkerfield, self.object) formobj.save() return HttpResponseRedirect(self.get_success_url())` | Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions, then redirects to a success page. |
| 3,646 | `def form_invalid(self, form, forms, open_tabs, position_form_default): return self.render_to_response(self.get_context_data(form=form, forms=forms, open_tabs=open_tabs, position_form_default=position_form_default))` | Called if a form is invalid. Re-renders the context data with the data-filled forms and errors. |
| 3,647 | `def univariate_envelope_plot(x, mean, std, ax=None, base_alpha=0.375, envelopes=[1, 3], lb=None, ub=None, expansion=10, **kwargs): if ax is None: f = plt.figure() ax = f.add_subplot(1, 1, 1) elif ax == 'gca': ax = plt.gca() mean = scipy.asarray(mean, dtype=float).copy() std = scipy.asarray(std, dtype=float).copy() if lb is not None and ub is not None and expansion != 1.0: expansion *= ub - lb ub = ub + expansion lb = lb - expansion if ub is not None: mean[mean > ub] = ub if lb is not None: mean[mean < lb] = lb l = ax.plot(x, mean, **kwargs) color = plt.getp(l[0], 'color') e = [] for i in envelopes: lower = mean - i * std upper = mean + i * std if ub is not None: lower[lower > ub] = ub upper[upper > ub] = ub if lb is not None: lower[lower < lb] = lb upper[upper < lb] = lb e.append(ax.fill_between(x, lower, upper, facecolor=color, alpha=base_alpha / i)) return (l, e)` | Make a plot of a mean curve with uncertainty envelopes. |
| 3,648 | `def fetch_token(self): grant_type = 'client_credentials' channel = yield self._tvm.ticket_full(self._client_id, self._client_secret, grant_type, {}) ticket = yield channel.rx.get() raise gen.Return(self._make_token(ticket))` | Obtains a token from the secure backend service. |
| 3,649 | `def process_summary(summaryfile, **kwargs): logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(summaryfile, kwargs["readtype"])) ut.check_existance(summaryfile) if kwargs["readtype"] == "1D": cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_template", "mean_qscore_template"] elif kwargs["readtype"] in ["2D", "1D2"]: cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"] if kwargs["barcoded"]: cols.append("barcode_arrangement") logging.info("Nanoget: Extracting metrics per barcode.") try: datadf = pd.read_csv(filepath_or_buffer=summaryfile, sep="\t", usecols=cols,) except ValueError: logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(summaryfile, ', '.join(cols))) sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(summaryfile, ', '.join(cols))) if kwargs["barcoded"]: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals", "barcode"] else: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"] logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile)) return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())` | Extracting information from an albacore summary file. |
| 3,650 | `def check_bam(bam, samtype="bam"): ut.check_existance(bam) samfile = pysam.AlignmentFile(bam, "rb") if not samfile.has_index(): pysam.index(bam) samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") if not samfile.header['HD']['SO'] == 'coordinate': logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam)) sys.exit("Please use a bam file sorted by coordinate.") if samtype == "bam": logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(bam, samfile.mapped, samfile.unmapped)) if samfile.mapped == 0: logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam)) sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam)) return samfile` | Check if the bam file is valid. |
| 3,651 | `def process_ubam(bam, **kwargs): logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam)) samfile = pysam.AlignmentFile(bam, "rb", check_sq=False) if not samfile.has_index(): pysam.index(bam) samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") datadf = pd.DataFrame(data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length) for read in samfile.fetch(until_eof=True)], columns=["readIDs", "quals", "lengths"]).dropna(axis='columns', how='all').dropna(axis='index', how='any') logging.info("Nanoget: ubam {} contains {} reads.".format(bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)` | Extracting metrics from unaligned bam format: extracting lengths. |
| 3,652 | `def process_bam(bam, **kwargs): logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam)) samfile = check_bam(bam) chromosomes = samfile.references params = zip([bam] * len(chromosomes), chromosomes) with cfutures.ProcessPoolExecutor() as executor: datadf = pd.DataFrame(data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist], columns=["readIDs", "quals", "aligned_quals", "lengths", "aligned_lengths", "mapQ", "percentIdentity"]).dropna(axis='columns', how='all').dropna(axis='index', how='any') logging.info("Nanoget: bam {} contains {} primary alignments.".format(bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)` | Combines metrics from bam after extraction. |
| 3,653 | `def extract_from_bam(params): bam, chromosome = params samfile = pysam.AlignmentFile(bam, "rb") return [(read.query_name, nanomath.ave_qual(read.query_qualities), nanomath.ave_qual(read.query_alignment_qualities), read.query_length, read.query_alignment_length, read.mapping_quality, get_pID(read)) for read in samfile.fetch(reference=chromosome, multiple_iterators=True) if not read.is_secondary]` | Extracts metrics from bam. |
| 3,654 | `def get_pID(read): try: return 100 * (1 - read.get_tag("NM") / read.query_alignment_length) except KeyError: try: return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) / read.query_alignment_length) except KeyError: return None except ZeroDivisionError: return None` | Return the percent identity of a read. |
| 3,655 | `def handle_compressed_input(inputfq, file_type="fastq"): ut.check_existance(inputfq) if inputfq.endswith(('.gz', 'bgz')): import gzip logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq)) return gzip.open(inputfq, 'rt') elif inputfq.endswith('.bz2'): import bz2 logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq)) return bz2.open(inputfq, 'rt') elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')): return open(inputfq, 'r') else: logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq)) sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n' 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))` | Return handles from compressed files according to extension. |
| 3,656 | `def process_fasta(fasta, **kwargs): logging.info("Nanoget: Starting to collect statistics from a fasta file.") inputfasta = handle_compressed_input(fasta, file_type="fasta") return ut.reduce_memory_usage(pd.DataFrame(data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")], columns=["lengths"]).dropna())` | Combine metrics extracted from a fasta file. |
| 3,657 | `def process_fastq_plain(fastq, **kwargs): logging.info("Nanoget: Starting to collect statistics from plain fastq file.") inputfastq = handle_compressed_input(fastq) return ut.reduce_memory_usage(pd.DataFrame(data=[res for res in extract_from_fastq(inputfastq) if res], columns=["quals", "lengths"]).dropna())` | Combine metrics extracted from a fastq file. |
| 3,658 | `def stream_fastq_full(fastq, threads): logging.info("Nanoget: Starting to collect full metrics from plain fastq file.") inputfastq = handle_compressed_input(fastq) with cfutures.ProcessPoolExecutor(max_workers=threads) as executor: for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")): yield results logging.info("Nanoget: Finished collecting statistics from plain fastq file.")` | Generator for returning metrics extracted from fastq. |
| 3,659 | `def process_fastq_rich(fastq, **kwargs): logging.info("Nanoget: Starting to collect statistics from rich fastq file.") inputfastq = handle_compressed_input(fastq) res = [] for record in SeqIO.parse(inputfastq, "fastq"): try: read_info = info_to_dict(record.description) res.append((nanomath.ave_qual(record.letter_annotations["phred_quality"]), len(record), read_info["ch"], read_info["start_time"], read_info["runid"])) except KeyError: logging.error("Nanoget: keyerror when processing record {}".format(record.description)) sys.exit("Unexpected fastq identifier:\n{}\n\n \ missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(record.description)) df = pd.DataFrame(data=res, columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna() df["channelIDs"] = df["channelIDs"].astype("int64") return ut.reduce_memory_usage(df)` | Extract metrics from a richer fastq file. |
| 3,660 | `def fq_minimal(fq): try: while True: time = next(fq)[1:].split(" ")[4][11:-1] length = len(next(fq)) next(fq) next(fq) yield time, length except StopIteration: yield None` | Minimal fastq metrics extractor. |
| 3,661 | `def _get_piece(string, index): piece = string[index].strip() piece = piece.upper() piece_dict = {'R': Rook, 'P': Pawn, 'B': Bishop, 'N': Knight, 'Q': Queen, 'K': King} try: return piece_dict[piece] except KeyError: raise ValueError("Piece {} is invalid".format(piece))` | Returns the Piece subclass given the index of the piece. |
| 3,662 | `def short_alg(algebraic_string, input_color, position): return make_legal(incomplete_alg(algebraic_string, input_color, position), position)` | Converts a string written in short algebraic form, the color of the side whose turn it is, and the corresponding position into a complete move that can be played. If no moves match, None is returned. |
| 3,663 | `def implicify_hydrogens(self): explicit = defaultdict(list) c = 0 for n, atom in self.atoms(): if atom.element == 'H': for m in self.neighbors(n): if self._node[m].element != 'H': explicit[m].append(n) for n, h in explicit.items(): atom = self._node[n] len_h = len(h) for i in range(len_h, 0, -1): hi = h[:i] if atom.get_implicit_h([y.order for x, y in self._adj[n].items() if x not in hi]) == i: for x in hi: self.remove_node(x) c += 1 break self.flush_cache() return c` | Remove explicit hydrogens if possible. |
| 3,664 | `def explicify_hydrogens(self): tmp = [] for n, atom in self.atoms(): if atom.element != 'H': for _ in range(atom.get_implicit_h([x.order for x in self._adj[n].values()])): tmp.append(n) for n in tmp: self.add_bond(n, self.add_atom(H), Bond()) self.flush_cache() return len(tmp)` | Add explicit hydrogens to atoms. |
| 3,665 | `def check_valence(self): return [x for x, atom in self.atoms() if not atom.check_valence(self.environment(x))]` | Check valences of all atoms. |
| 3,666 | `def _matcher(self, other): if isinstance(other, (self._get_subclass('CGRContainer'), MoleculeContainer)): return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y) raise TypeError('only cgr-cgr possible')` | Return a VF2 GraphMatcher. |
| 3,667 | `def to_datetime(date): return datetime.datetime.combine(date, datetime.datetime.min.time())` | Turn a date into a datetime at midnight. |
| 3,668 | `def iter_size_changes(self, issue): try: size_changes = list(filter(lambda h: h.field == 'Story Points', itertools.chain.from_iterable([c.items for c in issue.changelog.histories]))) except AttributeError: return try: current_size = issue.fields.__dict__[self.fields['StoryPoints']] except: current_size = None size = (size_changes[0].fromString) if len(size_changes) else current_size yield IssueSizeSnapshot(change=None, key=issue.key, date=dateutil.parser.parse(issue.fields.created), size=size) for change in issue.changelog.histories: change_date = dateutil.parser.parse(change.created) for item in change.items: if item.field == 'Story Points': size = item.toString yield IssueSizeSnapshot(change=item.field, key=issue.key, date=change_date, size=size)` | Yield an IssueSnapshot for each time the issue size changed. |
| 3,669 | `def iter_changes(self, issue, include_resolution_changes=True): is_resolved = False try: status_changes = list(filter(lambda h: h.field == 'status', itertools.chain.from_iterable([c.items for c in issue.changelog.histories]))) except AttributeError: return last_status = status_changes[0].fromString if len(status_changes) > 0 else issue.fields.status.name last_resolution = None yield IssueSnapshot(change=None, key=issue.key, date=dateutil.parser.parse(issue.fields.created), status=last_status, resolution=None, is_resolved=is_resolved) for change in issue.changelog.histories: change_date = dateutil.parser.parse(change.created) resolutions = list(filter(lambda i: i.field == 'resolution', change.items)) is_resolved = (resolutions[-1].to is not None) if len(resolutions) > 0 else is_resolved for item in change.items: if item.field == 'status': last_status = item.toString yield IssueSnapshot(change=item.field, key=issue.key, date=change_date, status=last_status, resolution=last_resolution, is_resolved=is_resolved) elif item.field == 'resolution': last_resolution = item.toString if include_resolution_changes: yield IssueSnapshot(change=item.field, key=issue.key, date=change_date, status=last_status, resolution=last_resolution, is_resolved=is_resolved)` | Yield an IssueSnapshot for each time the issue changed status or resolution. |
| 3,670 | `def find_issues(self, criteria={}, jql=None, order='KEY ASC', verbose=False, changelog=True): query = [] if criteria.get('project', False): query.append('project IN (%s)' % ', '.join(['"%s"' % p for p in criteria['project']])) if criteria.get('issue_types', False): query.append('issueType IN (%s)' % ', '.join(['"%s"' % t for t in criteria['issue_types']])) if criteria.get('valid_resolutions', False): query.append('(resolution IS EMPTY OR resolution IN (%s))' % ', '.join(['"%s"' % r for r in criteria['valid_resolutions']])) if criteria.get('jql_filter') is not None: query.append('(%s)' % criteria['jql_filter']) if jql is not None: query.append('(%s)' % jql) queryString = "%s ORDER BY %s" % (' AND '.join(query), order,) if verbose: print("Fetching issues with query:", queryString) fromRow = 0 issues = [] while True: try: if changelog: pageofissues = self.jira.search_issues(queryString, expand='changelog', maxResults=self.settings['max_results'], startAt=fromRow) else: pageofissues = self.jira.search_issues(queryString, maxResults=self.settings['max_results'], startAt=fromRow) fromRow = fromRow + int(self.settings['max_results']) issues += pageofissues if verbose: print("Got %s lines per jira query from result starting at line number %s " % (self.settings['max_results'], fromRow)) if len(pageofissues) == 0: break except JIRAError as e: print("Jira query error with: {}\n{}".format(queryString, e)) return [] if verbose: print("Fetched", len(issues), "issues") return issues` | Return a list of issues with changelog metadata. |
| 3,671 | `def list_catalogs(self): _form = CatalogSelectForm(current=self.current) _form.set_choices_of('catalog', [(i, i) for i in fixture_bucket.get_keys()]) self.form_out(_form)` | Lists existing catalogs with respect to the UI view template format. |
| 3,672 | `def get_catalog(self): catalog_data = fixture_bucket.get(self.input['form']['catalog']) add_or_edit = "Edit" if catalog_data.exists else "Add" catalog_edit_form = CatalogEditForm(current=self.current, title='%s: %s' % (add_or_edit, self.input['form']['catalog'])) if catalog_data.exists: if type(catalog_data.data) == list: for key, data in enumerate(catalog_data.data): catalog_edit_form.CatalogDatas(catalog_key=key or "0", en='', tr=data) if type(catalog_data.data) == dict: for key, data in catalog_data.data.items(): catalog_edit_form.CatalogDatas(catalog_key=key, en=data['en'], tr=data['tr']) else: catalog_edit_form.CatalogDatas(catalog_key="0", en='', tr='') self.form_out(catalog_edit_form) self.output["object_key"] = self.input['form']['catalog']` | Get an existing catalog and fill the form with the model data. If the given key is not found as a catalog, an empty catalog data form is generated. |
| 3,673 | `def save_catalog(self): if self.input["cmd"] == 'save_catalog': try: edited_object = dict() for i in self.input["form"]["CatalogDatas"]: edited_object[i["catalog_key"]] = {"en": i["en"], "tr": i["tr"]} newobj = fixture_bucket.get(self.input["object_key"]) newobj.data = edited_object newobj.store() self.output["notify"] = "catalog: %s successfully updated." % self.input["object_key"] except: raise HTTPError(500, "Form object could not be saved") if self.input["cmd"] == 'cancel': self.output["notify"] = "catalog: %s canceled." % self.input["object_key"]` | Saves the catalog data to the given key. Cancels if the cmd is cancel. Notifies the user about the process. |
| 3,674 | `def merge_truthy(*dicts): merged = {} for d in dicts: for k, v in d.items(): merged[k] = v or merged.get(k, v) return merged` | Merge multiple dictionaries, keeping the truthy values in case of key collisions. |
| 3,675 | `def perform(self): db_versions = self.table.versions() version = self.version if (version.is_processed(db_versions) and not self.config.force_version == self.version.number): self.log(u'version {} is already installed'.format(version.number)) return self.start() try: self._perform_version(version) except Exception: if sys.version_info < (3, 4): msg = traceback.format_exc().decode('utf8', errors='ignore') else: msg = traceback.format_exc() error = u'\n'.join(self.logs + [u'\n', msg]) self.table.record_log(version.number, error) raise self.finish()` | Perform the version upgrade on the database. |
| 3,676 | `def _perform_version(self, version): if version.is_noop(): self.log(u'version {} is a noop'.format(version.number)) else: self.log(u'execute base pre-operations') for operation in version.pre_operations(): operation.execute(self.log) if self.config.mode: self.log(u'execute %s pre-operations' % self.config.mode) for operation in version.pre_operations(mode=self.config.mode): operation.execute(self.log) self.perform_addons() self.log(u'execute base post-operations') for operation in version.post_operations(): operation.execute(self.log) if self.config.mode: self.log(u'execute %s post-operations' % self.config.mode) for operation in version.post_operations(self.config.mode): operation.execute(self.log)` | Inner method for version upgrade. |
| 3,677 | `def _do_upgrade(self): self.current.output['cmd'] = 'upgrade' self.current.output['user_id'] = self.current.user_id self.terminate_existing_login() self.current.user.bind_private_channel(self.current.session.sess_id) user_sess = UserSessionID(self.current.user_id) user_sess.set(self.current.session.sess_id) self.current.user.is_online(True) for k in translation.DEFAULT_PREFS.keys(): self.current.session[k] = ''` | Open a websocket connection. |
| 3,678 | `def do_view(self): self.current.output['login_process'] = True self.current.task_data['login_successful'] = False if self.current.is_auth: self._do_upgrade() else: try: auth_result = self.current.auth.authenticate(self.current.input['username'], self.current.input['password']) self.current.task_data['login_successful'] = auth_result if auth_result: self._do_upgrade() except ObjectDoesNotExist: self.current.log.exception("Wrong username or another error occurred") pass except: raise if self.current.output.get('cmd') != 'upgrade': self.current.output['status_code'] = 403 else: KeepAlive(self.current.user_id).reset()` | Authenticate the user with the given credentials. Connects the user's queue and exchange. |
| 3,679 | `def __get_mapping(self, structures): for c in permutations(structures, len(self.__patterns)): for m in product(*(x.get_substructure_mapping(y, limit=0) for x, y in zip(self.__patterns, c))): mapping = {} for i in m: mapping.update(i) if mapping: yield mapping` | Match each pattern to each molecule. If all patterns match all molecules, return a generator of all possible mappings. |
| 3,680 | `def get(self, default=None): d = cache.get(self.key) return ((json.loads(d.decode('utf-8')) if self.serialize else d) if d is not None else default)` | Return the cached value, or default if it can't be found. |
| 3,681 | `def set(self, val, lifetime=None): cache.set(self.key, (json.dumps(val) if self.serialize else val), lifetime or settings.DEFAULT_CACHE_EXPIRE_TIME) return val` | Set the cache value. |
| 3,682 | `def get_all(self): result = cache.lrange(self.key, 0, -1) return (json.loads(item.decode('utf-8')) for item in result if item) if self.serialize else result` | Get all list items. |
| 3,683 | `def remove_item(self, val): return cache.lrem(self.key, json.dumps(val))` | Removes the given item from the list. |
| 3,684 | `def flush(cls, *args): return _remove_keys([], [(cls._make_key(args) if args else cls.PREFIX) + '*'])` | Removes all keys of this namespace. Without args, clears all keys starting with cls.PREFIX; if called with args, clears keys starting with the given cls.PREFIX + args. |
| 3,685 | `def update_or_expire_session(self): if not hasattr(self, 'key'): return now = time.time() timestamp = float(self.get() or 0) or now sess_id = self.sess_id or UserSessionID(self.user_id).get() if sess_id and now - timestamp > self.SESSION_EXPIRE_TIME: Session(sess_id).delete() return False else: self.set(now) return True` | Deletes the session if the keepalive request expired; otherwise updates the keepalive timestamp value. |
| 3,686 | `def send_message_for_lane_change(sender, **kwargs): current = kwargs['current'] owners = kwargs['possible_owners'] if 'lane_change_invite' in current.task_data: msg_context = current.task_data.pop('lane_change_invite') else: msg_context = DEFAULT_LANE_CHANGE_INVITE_MSG wfi = WFCache(current).get_instance() TaskInvitation.objects.filter(instance=wfi, role=current.role, wf_name=wfi.wf.name).delete() today = datetime.today() for recipient in owners: inv = TaskInvitation(instance=wfi, role=recipient, wf_name=wfi.wf.name, progress=30, start_date=today, finish_date=today + timedelta(15)) inv.title = current.task_data.get('INVITATION_TITLE') or wfi.wf.title inv.save() try: recipient.send_notification(title=msg_context['title'], message="%s %s" % (wfi.wf.title, msg_context['body']), typ=1, url='', sender=sender) except: pass` | Sends a message to possible owners of the current workflow's next lane. |
| 3,687 | `def set_password(sender, **kwargs): if sender.model_class.__name__ == 'User': usr = kwargs['object'] if not usr.password.startswith('$pbkdf2'): usr.set_password(usr.password) usr.save()` | Encrypts the password of the user. |
| 3,688 | `def channel_list(self): if self.current.task_data.get('msg', False): if self.current.task_data.get('target_channel_key', False): self.current.output['msgbox'] = {'type': 'info', "title": _(u"Successful Operation"), "msg": self.current.task_data['msg']} del self.current.task_data['msg'] else: self.show_warning_messages() self.current.task_data['new_channel'] = False _form = ChannelListForm(title=_(u'Public Channel List'), help_text=CHANNEL_CHOICE_TEXT) for channel in Channel.objects.filter(typ=15): owner_name = channel.owner.username _form.ChannelList(choice=False, name=channel.name, owner=owner_name, key=channel.key) _form.new_channel = fields.Button(_(u"Merge At New Channel"), cmd="create_new_channel") _form.existing_channel = fields.Button(_(u"Merge With An Existing Channel"), cmd="choose_existing_channel") _form.find_chosen_channel = fields.Button(_(u"Split Channel"), cmd="find_chosen_channel") self.form_out(_form)` | Main screen for channel management. Channels are listed and operations can be chosen on the screen. If there is an error message, such as a missing choice, it is shown here. |
| 3,689 | `def channel_choice_control(self): self.current.task_data['control'], self.current.task_data['msg'] = self.selection_error_control(self.input['form']) if self.current.task_data['control']: self.current.task_data['option'] = self.input['cmd'] self.current.task_data['split_operation'] = False keys, names = self.return_selected_form_items(self.input['form']['ChannelList']) self.current.task_data['chosen_channels'] = keys self.current.task_data['chosen_channels_names'] = names` | It controls errors. If there is an error, it returns the channel list screen with an error message. |
| 3,690 | `def create_new_channel(self): self.current.task_data['new_channel'] = True _form = NewChannelForm(Channel(), current=self.current) _form.title = _(u"Specify Features of New Channel to Create") _form.forward = fields.Button(_(u"Create"), flow="find_target_channel") self.form_out(_form)` | Features of the new channel are specified, like the channel's name, owner, etc. |
| 3,691 | `def save_new_channel(self): form_info = self.input['form'] channel = Channel(typ=15, name=form_info['name'], description=form_info['description'], owner_id=form_info['owner_id']) channel.blocking_save() self.current.task_data['target_channel_key'] = channel.key` | It saves the new channel according to the specified channel features. |
| 3,692 | `def choose_existing_channel(self): if self.current.task_data.get('msg', False): self.show_warning_messages() _form = ChannelListForm() _form.title = _(u"Choose a Channel Which Will Be Merged With Chosen Channels") for channel in Channel.objects.filter(typ=15).exclude(key__in=self.current.task_data['chosen_channels']): owner_name = channel.owner.username _form.ChannelList(choice=False, name=channel.name, owner=owner_name, key=channel.key) _form.choose = fields.Button(_(u"Choose")) self.form_out(_form)` | It is a channel choice list; channels chosen at the previous step shouldn't be on the screen. |
| 3,693 | `def existing_choice_control(self): self.current.task_data['existing'] = False self.current.task_data['msg'] = _(u"You should choose just one channel to do operation.") keys, names = self.return_selected_form_items(self.input['form']['ChannelList']) if len(keys) == 1: self.current.task_data['existing'] = True self.current.task_data['target_channel_key'] = keys[0]` | It controls errors. It generates an error message if zero or more than one channels are selected. |
| 3,694 | `def split_channel(self): if self.current.task_data.get('msg', False): self.show_warning_messages() self.current.task_data['split_operation'] = True channel = Channel.objects.get(self.current.task_data['chosen_channels'][0]) _form = SubscriberListForm(title=_(u'Choose Subscribers to Migrate')) for subscriber in Subscriber.objects.filter(channel=channel): subscriber_name = subscriber.user.username _form.SubscriberList(choice=False, name=subscriber_name, key=subscriber.key) _form.new_channel = fields.Button(_(u"Move to a New Channel"), cmd="create_new_channel") _form.existing_channel = fields.Button(_(u"Move to an Existing Channel"), cmd="choose_existing_channel") self.form_out(_form)` | A channel can be split into a new channel or another existing channel. It creates the subscribers list as selectable to be moved. |
| 3,695 | `def subscriber_choice_control(self): self.current.task_data['option'] = None self.current.task_data['chosen_subscribers'], names = self.return_selected_form_items(self.input['form']['SubscriberList']) self.current.task_data['msg'] = "You should choose at least one subscriber for migration operation." if self.current.task_data['chosen_subscribers']: self.current.task_data['option'] = self.input['cmd'] del self.current.task_data['msg']` | It controls the subscriber choice and generates an error message if nothing is chosen. |
| 3,696 | `def move_complete_channel(self): to_channel = Channel.objects.get(self.current.task_data['target_channel_key']) chosen_channels = self.current.task_data['chosen_channels'] chosen_channels_names = self.current.task_data['chosen_channels_names'] with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}): for s in Subscriber.objects.filter(channel_id__in=chosen_channels, typ=15): s.channel = to_channel s.save() with BlockDelete(Message): Message.objects.filter(channel_id__in=chosen_channels, typ=15).delete() with BlockDelete(Channel): Channel.objects.filter(key__in=chosen_channels).delete() self.current.task_data['msg'] = _(u"Chosen channels(%s) have been merged to '%s' channel successfully.") % (', '.join(chosen_channels_names), to_channel.name)` | Channels and their subscribers are moved completely to the new channel or the existing channel. |
| 3,697 | `def move_chosen_subscribers(self): from_channel = Channel.objects.get(self.current.task_data['chosen_channels'][0]) to_channel = Channel.objects.get(self.current.task_data['target_channel_key']) with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}): for subscriber in Subscriber.objects.filter(key__in=self.current.task_data['chosen_subscribers']): subscriber.channel = to_channel subscriber.save() if self.current.task_data['new_channel']: self.copy_and_move_messages(from_channel, to_channel) self.current.task_data['msg'] = _(u"Chosen subscribers and messages of them migrated from '%s' channel to " u"'%s' channel successfully.") % (from_channel.name, to_channel.name)` | After the splitting operation, only the chosen subscribers are moved to the new channel or the existing channel. |
| 3,698 | `def copy_and_move_messages(from_channel, to_channel): with BlockSave(Message, query_dict={'channel_id': to_channel.key}): for message in Message.objects.filter(channel=from_channel, typ=15): message.key = '' message.channel = to_channel message.save()` | While splitting a channel and moving chosen subscribers to a new channel, the old channel's messages are copied and moved to the new channel. |
| 3,699 | `def show_warning_messages(self, title=_(u"Incorrect Operation"), box_type='warning'): msg = self.current.task_data['msg'] self.current.output['msgbox'] = {'type': box_type, "title": title, "msg": msg} del self.current.task_data['msg']` | It shows incorrect-operation or successful-operation messages. |
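
Each row above pairs tokenized Python source (`question`) with a one-line natural-language summary (`target`). A minimal sketch of loading and inspecting such a split with the Hugging Face `datasets` library follows; the dataset identifier used here is a hypothetical placeholder, since this page does not name one:

```python
# Minimal sketch: load a code-to-docstring split and inspect one row.
# "your-org/code-docstrings" is a hypothetical placeholder identifier;
# substitute the real dataset path before running.
from datasets import load_dataset

ds = load_dataset("your-org/code-docstrings", split="train")

row = ds[0]                  # fields: idx (int64), question (code), target (docstring)
print(row["question"][:80])  # tokenized Python source
print(row["target"])         # one-line summary
```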