idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
47,000 | def getCursor ( self ) : if self . connection is None : self . Connect ( ) return self . connection . cursor ( MySQLdb . cursors . DictCursor ) | Get a Dictionary Cursor for executing queries |
47,001 | def Connect ( self ) : if self . connection is None : self . connection = MySQLdb . connect ( * [ ] , ** self . connectionInfo . info ) if self . connectionInfo . commitOnEnd is True : self . connection . autocommit ( ) self . _updateCheckTime ( ) | Creates a new physical connection to the database |
47,002 | def being ( self ) : try : if self . connection is not None : self . lock ( ) c = self . getCursor ( ) c . execute ( 'BEGIN;' ) c . close ( ) except Exception , e : pass | Begin a Transaction |
47,003 | def Close ( self ) : if self . connection is not None : try : self . connection . commit ( ) self . connection . close ( ) self . connection = None except Exception , e : pass | Commits and closes the current connection |
47,004 | def configure_api ( app , manager ) : if not hasattr ( app , 'extensions' ) : app . extensions = { } app . extensions [ 'babbage' ] = manager return blueprint | Configure the current Flask app with an instance of CubeManager that will be used to load and query data . |
47,005 | def get_cube ( name ) : manager = get_manager ( ) if not manager . has_cube ( name ) : raise NotFound ( 'No such cube: %r' % name ) return manager . get_cube ( name ) | Load the named cube from the current registered CubeManager . |
47,006 | def cubes ( ) : cubes = [ ] for cube in get_manager ( ) . list_cubes ( ) : cubes . append ( { 'name' : cube } ) return jsonify ( { 'status' : 'ok' , 'data' : cubes } ) | Get a listing of all publicly available cubes . |
47,007 | def aggregate ( name ) : cube = get_cube ( name ) result = cube . aggregate ( aggregates = request . args . get ( 'aggregates' ) , drilldowns = request . args . get ( 'drilldown' ) , cuts = request . args . get ( 'cut' ) , order = request . args . get ( 'order' ) , page = request . args . get ( 'page' ) , page_size = request . args . get ( 'pagesize' ) ) result [ 'status' ] = 'ok' if request . args . get ( 'format' , '' ) . lower ( ) == 'csv' : return create_csv_response ( result [ 'cells' ] ) else : return jsonify ( result ) | Perform an aggregation request . |
47,008 | def facts ( name ) : cube = get_cube ( name ) result = cube . facts ( fields = request . args . get ( 'fields' ) , cuts = request . args . get ( 'cut' ) , order = request . args . get ( 'order' ) , page = request . args . get ( 'page' ) , page_size = request . args . get ( 'pagesize' ) ) result [ 'status' ] = 'ok' return jsonify ( result ) | List the fact table entries in the current cube . This is the full materialized dataset . |
47,009 | def members ( name , ref ) : cube = get_cube ( name ) result = cube . members ( ref , cuts = request . args . get ( 'cut' ) , order = request . args . get ( 'order' ) , page = request . args . get ( 'page' ) , page_size = request . args . get ( 'pagesize' ) ) result [ 'status' ] = 'ok' return jsonify ( result ) | List the members of a specific dimension or the distinct values of a given attribute . |
47,010 | def apply ( self , q , bindings , drilldowns ) : info = [ ] for drilldown in self . parse ( drilldowns ) : for attribute in self . cube . model . match ( drilldown ) : info . append ( attribute . ref ) table , column = attribute . bind ( self . cube ) bindings . append ( Binding ( table , attribute . ref ) ) q = q . column ( column ) q = q . group_by ( column ) return info , q , bindings | Apply a set of grouping criteria and project them . |
47,011 | def check_attribute_exists ( instance ) : attributes = instance . get ( 'attributes' , { } ) . keys ( ) if instance . get ( 'key_attribute' ) not in attributes : return False label_attr = instance . get ( 'label_attribute' ) if label_attr and label_attr not in attributes : return False return True | Additional check for the dimension model to ensure that attributes given as the key and label attribute on the dimension exist . |
47,012 | def check_valid_hierarchies ( instance ) : hierarchies = instance . get ( 'hierarchies' , { } ) . values ( ) dimensions = set ( instance . get ( 'dimensions' , { } ) . keys ( ) ) all_levels = set ( ) for hierarcy in hierarchies : levels = set ( hierarcy . get ( 'levels' , [ ] ) ) if len ( all_levels . intersection ( levels ) ) > 0 : return False all_levels = all_levels . union ( levels ) if not dimensions . issuperset ( levels ) : return False return True | Additional check for the hierarchies model to ensure that levels given are pointing to actual dimensions |
47,013 | def load_validator ( name ) : with open ( os . path . join ( SCHEMA_PATH , name ) ) as fh : schema = json . load ( fh ) Draft4Validator . check_schema ( schema ) return Draft4Validator ( schema , format_checker = checker ) | Load the JSON Schema Draft 4 validator with the given name from the local schema directory . |
47,014 | def get_cube ( self , name ) : return Cube ( self . get_engine ( ) , name , self . get_cube_model ( name ) ) | Given a cube name construct that cube and return it . Do not overwrite this method unless you need to . |
47,015 | def list_cubes ( self ) : for file_name in os . listdir ( self . directory ) : if '.' in file_name : name , ext = file_name . rsplit ( '.' , 1 ) if ext . lower ( ) == 'json' : yield name | List all available JSON files . |
47,016 | def Terminate ( self ) : self . lock . acquire ( ) try : for bucket in self . connections . values ( ) : try : for conn in bucket : conn . lock ( ) try : conn . Close ( ) except Exception : pass conn . release ( ) except Exception : pass self . connections = { } finally : self . lock . release ( ) | Close all open connections Loop through all the connections and commit all queries and close all the connections . This should be called at the end of your application . |
47,017 | def Cleanup ( self ) : self . lock . acquire ( ) try : for bucket in self . connections . values ( ) : try : for conn in bucket : conn . lock ( ) try : open = conn . TestConnection ( forceCheck = True ) if open is True : conn . commit ( ) else : index = bucket . index ( conn ) del bucket [ index ] conn . release ( ) except Exception : conn . release ( ) except Exception : pass finally : self . lock . release ( ) | Cleanup Timed out connections Loop through all the connections and test if still active . If inactive close socket . |
47,018 | def Commit ( self ) : self . lock . acquire ( ) try : for bucket in self . connections . values ( ) : try : for conn in bucket : conn . lock ( ) try : conn . commit ( ) conn . release ( ) except Exception : conn . release ( ) except Exception : pass finally : self . lock . release ( ) | Commits all currently open connections |
47,019 | def GetConnection ( self , ConnectionObj ) : key = ConnectionObj . getKey ( ) connection = None if self . connections . has_key ( key ) : connection = self . _getConnectionFromPoolSet ( key ) if connection is None : self . lock . acquire ( ) if len ( self . connections [ key ] ) < self . maxActiveConnections : connection = self . _createConnection ( ConnectionObj ) self . connections [ key ] . append ( connection ) self . lock . release ( ) else : while connection is None : connection = self . _getConnectionFromPoolSet ( key ) self . lock . release ( ) else : self . lock . acquire ( ) if not self . connections . has_key ( key ) : self . connections [ key ] = [ ] if len ( self . connections [ key ] ) < self . maxActiveConnections : connection = self . _createConnection ( ConnectionObj ) self . connections [ key ] . append ( connection ) else : while connection is None : connection = self . _getConnectionFromPoolSet ( key ) self . lock . release ( ) return connection | Get a Open and active connection Returns a PySQLConnectionManager if one is open else it will create a new one if the max active connections hasn t been hit . If all possible connections are used . Then None is returned . |
47,020 | async def on_date ( self , date : datetime . date ) -> dict : return await self . _request ( 'get' , 'dailystats/{0}' . format ( date . strftime ( '%Y-%m-%d' ) ) ) | Get statistics for a certain date . |
47,021 | async def upcoming ( self , details : bool = False ) -> list : endpoint = 'dailystats' key = 'DailyStats' if details : endpoint += '/details' key = 'DailyStatsDetails' data = await self . _request ( 'get' , endpoint ) return data [ key ] | Return watering statistics for the next 6 days . |
47,022 | def _raise_for_remote_status ( url : str , data : dict ) -> None : if data . get ( 'errorType' ) and data [ 'errorType' ] > 0 : raise_remote_error ( data [ 'errorType' ] ) if data . get ( 'statusCode' ) and data [ 'statusCode' ] != 200 : raise RequestError ( 'Error requesting data from {0}: {1} {2}' . format ( url , data [ 'statusCode' ] , data [ 'message' ] ) ) | Raise an error from the remote API if necessary . |
47,023 | async def login ( host : str , password : str , websession : ClientSession , * , port : int = 8080 , ssl : bool = True , request_timeout : int = DEFAULT_TIMEOUT ) -> Controller : print ( 'regenmaschine.client.login() is deprecated; see documentation!' ) client = Client ( websession , request_timeout ) await client . load_local ( host , password , port , ssl ) return next ( iter ( client . controllers . values ( ) ) ) | Authenticate against a RainMachine device . |
47,024 | def raise_remote_error ( error_code : int ) -> None : try : error = next ( ( v for k , v in ERROR_CODES . items ( ) if k == error_code ) ) raise RequestError ( error ) except StopIteration : raise RequestError ( 'Unknown remote error code returned: {0}' . format ( error_code ) ) | Raise the appropriate error with a remote error code . |
47,025 | def quarter_boundaries ( quarter ) : year , quarter = quarter . split ( 'Q' ) year = int ( year ) quarter = int ( quarter ) first_month_of_quarter = 3 * quarter - 2 last_month_of_quarter = 3 * quarter first_day = date ( year , first_month_of_quarter , 1 ) last_day = date ( year , last_month_of_quarter , monthrange ( year , last_month_of_quarter ) [ 1 ] ) return first_day , last_day | Returns first and last day of a quarter |
47,026 | def metta_config ( quarter , num_dimensions ) : first_day , last_day = quarter_boundaries ( quarter ) return { 'start_time' : first_day , 'end_time' : last_day , 'prediction_window' : 3 , 'label_name' : 'onet_soc_code' , 'label_type' : 'categorical' , 'matrix_id' : 'job_postings_{}' . format ( quarter ) , 'feature_names' : [ 'doc2vec_{}' . format ( i ) for i in range ( num_dimensions ) ] , } | Returns metta metadata for a quarter s SOC code classifier matrix |
47,027 | def upload_to_metta ( train_features_path , train_labels_path , test_features_path , test_labels_path , train_quarter , test_quarter , num_dimensions ) : train_config = metta_config ( train_quarter , num_dimensions ) test_config = metta_config ( test_quarter , num_dimensions ) X_train = pd . read_csv ( train_features_path , sep = ',' ) X_train . columns = [ 'doc2vec_' + str ( i ) for i in range ( X_train . shape [ 1 ] ) ] Y_train = pd . read_csv ( train_labels_path ) Y_train . columns = [ 'onet_soc_code' ] train = pd . concat ( [ X_train , Y_train ] , axis = 1 ) X_test = pd . read_csv ( test_features_path , sep = ',' ) X_test . columns = [ 'doc2vec_' + str ( i ) for i in range ( X_test . shape [ 1 ] ) ] Y_test = pd . read_csv ( test_labels_path ) Y_test . columns = [ 'onet_soc_code' ] test = pd . concat ( [ X_test , Y_test ] , axis = 1 ) metta . archive_train_test ( train_config , X_train , test_config , X_test , directory = 'wdi' ) | Store train and test matrices using metta |
47,028 | def upload ( s3_conn , filepath , s3_path ) : bucket_name , prefix = split_s3_path ( s3_path ) bucket = s3_conn . get_bucket ( bucket_name ) filename = os . path . basename ( filepath ) key = boto . s3 . key . Key ( bucket = bucket , name = '{}/{}' . format ( prefix , filename ) ) logging . info ( 'uploading from %s to %s' , filepath , key ) key . set_contents_from_filename ( filepath ) | Uploads the given file to s3 |
47,029 | def upload_dict ( s3_conn , s3_prefix , data_to_sync ) : bucket_name , prefix = split_s3_path ( s3_prefix ) bucket = s3_conn . get_bucket ( bucket_name ) for key , value in data_to_sync . items ( ) : full_name = '{}/{}.json' . format ( prefix , key ) s3_key = boto . s3 . key . Key ( bucket = bucket , name = full_name ) logging . info ( 'uploading key %s' , full_name ) s3_key . set_contents_from_string ( json . dumps ( value ) ) | Syncs a dictionary to an S3 bucket serializing each value in the dictionary as a JSON file with the key as its name . |
47,030 | def download ( s3_conn , out_filename , s3_path ) : bucket_name , prefix = split_s3_path ( s3_path ) bucket = s3_conn . get_bucket ( bucket_name ) key = boto . s3 . key . Key ( bucket = bucket , name = prefix ) logging . info ( 'loading from %s into %s' , key , out_filename ) key . get_contents_to_filename ( out_filename , cb = log_download_progress ) | Downloads the given s3_path |
47,031 | async def _request ( self , method : str , endpoint : str , * , headers : dict = None , params : dict = None , json : dict = None , ssl : bool = True ) -> dict : return await self . _client_request ( method , '{0}/{1}' . format ( self . _host , endpoint ) , access_token = self . _access_token , access_token_expiration = self . _access_token_expiration , headers = headers , params = params , json = json , ssl = ssl ) | Wrap the generic request method to add access token etc . |
47,032 | async def get ( self , zone_id : int , * , details : bool = False ) -> dict : endpoint = 'zone/{0}' . format ( zone_id ) if details : endpoint += '/properties' return await self . _request ( 'get' , endpoint ) | Return a specific zone . |
47,033 | async def start ( self , zone_id : int , time : int ) -> dict : return await self . _request ( 'post' , 'zone/{0}/start' . format ( zone_id ) , json = { 'time' : time } ) | Start a program . |
47,034 | def add_observer ( self , observer , identify_observed = False ) : if hasattr ( observer , "__self__" ) : result = self . _add_bound_method ( observer , identify_observed ) else : result = self . _add_function ( observer , identify_observed ) return result | Register an observer to observe me . |
47,035 | def _add_function ( self , func , identify_observed ) : key = self . make_key ( func ) if key not in self . observers : self . observers [ key ] = ObserverFunction ( func , identify_observed , ( key , self . observers ) ) return True else : return False | Add a function as an observer . |
47,036 | def _add_bound_method ( self , bound_method , identify_observed ) : inst = bound_method . __self__ method_name = bound_method . __name__ key = self . make_key ( bound_method ) if key not in self . observers : self . observers [ key ] = ObserverBoundMethod ( inst , method_name , identify_observed , ( key , self . observers ) ) return True else : return False | Add an bound method as an observer . |
47,037 | def discard_observer ( self , observer ) : discarded = False key = self . make_key ( observer ) if key in self . observers : del self . observers [ key ] discarded = True return discarded | Un - register an observer . |
47,038 | def make_key ( observer ) : if hasattr ( observer , "__self__" ) : inst = observer . __self__ method_name = observer . __name__ key = ( id ( inst ) , method_name ) else : key = id ( observer ) return key | Construct a unique hashable immutable key for an observer . |
47,039 | def build_command_tree ( pattern , cmd_params ) : from docopt import Either , Optional , OneOrMore , Required , Option , Command , Argument if type ( pattern ) in [ Either , Optional , OneOrMore ] : for child in pattern . children : build_command_tree ( child , cmd_params ) elif type ( pattern ) in [ Required ] : for child in pattern . children : cmd_params = build_command_tree ( child , cmd_params ) elif type ( pattern ) in [ Option ] : suffix = "=" if pattern . argcount else "" if pattern . short : cmd_params . options . append ( pattern . short + suffix ) if pattern . long : cmd_params . options . append ( pattern . long + suffix ) elif type ( pattern ) in [ Command ] : cmd_params = cmd_params . get_subcommand ( pattern . name ) elif type ( pattern ) in [ Argument ] : cmd_params . arguments . append ( pattern . name ) return cmd_params | Recursively fill in a command tree in cmd_params according to a docopt - parsed pattern object . |
47,040 | def group ( self ) : yield self . current for num , item in enumerate ( self . iterator , 1 ) : self . current = item if num == self . limit : break yield item else : self . on_going = False | Yield a group from the iterable |
47,041 | async def log ( self , date : datetime . date = None , days : int = None , details : bool = False ) -> list : endpoint = 'watering/log' if details : endpoint += '/details' if date and days : endpoint = '{0}/{1}/{2}' . format ( endpoint , date . strftime ( '%Y-%m-%d' ) , days ) data = await self . _request ( 'get' , endpoint ) return data [ 'waterLog' ] [ 'days' ] | Get watering information for X days from Y date . |
47,042 | async def runs ( self , date : datetime . date = None , days : int = None ) -> list : endpoint = 'watering/past' if date and days : endpoint = '{0}/{1}/{2}' . format ( endpoint , date . strftime ( '%Y-%m-%d' ) , days ) data = await self . _request ( 'get' , endpoint ) return data [ 'pastValues' ] | Return all program runs for X days from Y date . |
47,043 | async def all ( self , include_inactive : bool = False ) -> list : data = await self . _request ( 'get' , 'program' ) return [ p for p in data [ 'programs' ] if include_inactive or p [ 'active' ] ] | Return all programs . |
47,044 | def cache_json ( filename ) : def cache_decorator ( cacheable_function ) : @ wraps ( cacheable_function ) def cache_wrapper ( * args , ** kwargs ) : path = CACHE_DIRECTORY + filename check_create_folder ( path ) if os . path . exists ( path ) : with open ( path ) as infile : return json . load ( infile ) else : function_output = cacheable_function ( * args , ** kwargs ) with open ( path , 'w' ) as outfile : json . dump ( function_output , outfile ) return function_output return cache_wrapper return cache_decorator | Caches the JSON - serializable output of the function to a given file |
47,045 | def check_create_folder ( filename ) : os . makedirs ( os . path . dirname ( filename ) , exist_ok = True ) | Check if the folder exists . If not create the folder |
47,046 | def postings ( self , quarter , stats_counter = None ) : logging . info ( 'Finding postings for %s' , quarter ) for posting in self . _iter_postings ( quarter ) : transformed = self . _transform ( posting ) transformed [ 'id' ] = '{}_{}' . format ( self . partner_id , self . _id ( posting ) ) if stats_counter : stats_counter . track ( input_document = posting , output_document = transformed ) yield transformed | Yield job postings in common schema format |
47,047 | def afterSummaryReport ( self , event ) : logger . info ( 'Generating HTML report...' ) sorted_test_results = self . _sort_test_results ( ) context = { 'test_report_title' : 'Test Report' , 'test_summary' : self . summary_stats , 'test_results' : sorted_test_results , 'autocomplete_terms' : json . dumps ( self . _generate_search_terms ( ) ) , 'timestamp' : datetime . utcnow ( ) . strftime ( '%Y/%m/%d %H:%M:%S UTC' ) } template = load_template ( self . _config [ 'template' ] ) rendered_template = render_template ( template , context ) with open ( self . _config [ 'report_path' ] , 'w' ) as template_file : template_file . write ( rendered_template ) | After everything is done generate the report |
47,048 | def dates_in_range ( start_date , end_date ) : return [ start_date + timedelta ( n ) for n in range ( int ( ( end_date - start_date ) . days ) ) ] | Returns all dates between two dates . |
47,049 | def load_hooks ( ) : hooks = { } for entrypoint in pkg_resources . iter_entry_points ( ENTRYPOINT ) : name = str ( entrypoint ) . split ( '=' ) [ 0 ] . strip ( ) try : hook = entrypoint . load ( ) except Exception as e : write_message ( 'failed to load entry-point %r (error="%s")' % ( name , e ) , 'yellow' ) else : hooks [ name ] = hook return hooks | Load the exposed hooks . |
47,050 | def gettext ( ui_file_path ) : with open ( ui_file_path , 'r' ) as fin : content = fin . read ( ) content = re . sub ( r'_translate\(".*",\s' , '_(' , content ) content = content . replace ( ' _translate = QtCore.QCoreApplication.translate' , '' ) with open ( ui_file_path , 'w' ) as fout : fout . write ( content ) | Let you use gettext instead of the Qt tools for l18n |
47,051 | def basic_client ( ) : es_connected = False while not es_connected : try : ES = Elasticsearch ( hosts = [ HOSTNAME ] ) es_connected = True except TransportError as e : logging . info ( 'Not yet connected: %s, sleeping for 1s' , e ) time . sleep ( 1 ) return ES | Returns an Elasticsearch basic client that is responsive to the environment variable ELASTICSEARCH_ENDPOINT |
47,052 | def create_index ( index_name , index_config , client ) : client . create ( index = index_name , body = index_config ) | Creates an index with a given configuration |
47,053 | def get_index_from_alias ( alias_name , index_client = None ) : index_client = index_client or indices_client ( ) if not index_client . exists_alias ( name = alias_name ) : return None return list ( index_client . get_alias ( name = alias_name ) . keys ( ) ) [ 0 ] | Retrieve the base index name from an alias |
47,054 | def atomic_swap ( alias_name , new_index_name , index_client ) : logging . info ( 'Performing atomic index alias swap' ) if index_client . exists_alias ( name = alias_name ) : old_index_name = get_index_from_alias ( alias_name , index_client ) logging . info ( 'Removing old as well as adding new' ) actions = { 'actions' : [ { 'remove' : { 'index' : old_index_name , 'alias' : alias_name } } , { 'add' : { 'index' : new_index_name , 'alias' : alias_name } } ] } index_client . update_aliases ( body = actions ) index_client . delete ( index = old_index_name ) else : logging . info ( 'Old alias not found, only adding new' ) actions = { 'actions' : [ { 'add' : { 'index' : new_index_name , 'alias' : alias_name } } ] } index_client . update_aliases ( body = actions ) | Points an alias to a new index then delete the old index if needed |
47,055 | def zero_downtime_index ( index_name , index_config ) : client = indices_client ( ) temporary_name = index_name + '_' + str ( uuid . uuid4 ( ) ) logging . info ( 'creating index with config %s' , index_config ) create_index ( temporary_name , index_config , client ) try : yield temporary_name atomic_swap ( index_name , temporary_name , client ) except Exception : logging . error ( 'deleting temporary index %s due to error:' , temporary_name , exc_info = True ) client . delete ( index = temporary_name ) | Context manager to create a new index based on a given alias allow the caller to index it and then point the alias to the new index |
47,056 | def replace ( self ) : with zero_downtime_index ( self . alias_name , self . index_config ( ) ) as target_index : self . index_all ( target_index ) | Replace index with a new one zero_downtime_index for safety and rollback |
47,057 | def append ( self ) : target_index = get_index_from_alias ( self . alias_name ) if not target_index : self . replace ( ) else : self . index_all ( target_index ) | Index documents onto an existing index |
47,058 | def build_args ( cmd , src , dst ) : cmd = cmd % ( quote ( src ) , quote ( dst ) ) args = shlex . split ( cmd ) return [ arg for arg in args if arg ] | Build arguments list for passing to subprocess . call_check |
47,059 | def output_format_lock ( self , packages , ** kwargs ) : self . _output_config [ 'type' ] = PLAIN text = '' tmp_packages = OrderedDict ( ) columns = self . _config . get_columns ( ) widths = { } for _pkg in packages . values ( ) : _pkg_name = _pkg . package_name _params = _pkg . get_params ( columns , merged = True , raw = False ) if _pkg_name not in tmp_packages : tmp_packages [ _pkg_name ] = _params comment = 1 for _col in columns : widths [ _col ] = max ( widths . get ( _col , len ( _col ) ) , len ( str ( _params . get ( _col , '' ) ) ) ) + comment comment = 0 comment = 1 for _col in columns : text += '{}{} ' . format ( _col , ' ' * ( widths [ _col ] - len ( _col ) - comment ) ) comment = 0 text = '#{}\n' . format ( text . strip ( ) ) for _pkg_name in sorted ( tmp_packages , key = lambda x : str ( x ) . lower ( ) ) : _pkg = tmp_packages [ _pkg_name ] line = '' for _col in columns : line += '{}{} ' . format ( _pkg [ _col ] , ' ' * ( widths [ _col ] - len ( str ( _pkg [ _col ] ) ) ) ) text += '{}\n' . format ( line . strip ( ) ) return text | Text to lock file |
47,060 | def output_format_module ( self , packages , esc_path = False ) : def create_ordered_list ( packages_ ) : list_ = [ ] for _pkg_name in packages_ : _pkg = packages_ [ _pkg_name ] if _pkg and _pkg . packages : list_ . extend ( create_ordered_list ( _pkg . packages ) ) if _pkg : _pkg_params = _pkg . get_params ( self . _columns , True ) _res_item = { } for item in self . _output_config [ 'columns' ] : name = item [ 'name' ] . format ( OutFormat ( item [ 'column' ] ) ) value = _pkg_params . get ( item [ 'column' ] , '' ) if not isinstance ( value , ( list , dict , tuple ) ) : try : value = item [ 'value' ] . format ( OutFormat ( value , ( item [ 'column' ] == 'path' ) if esc_path else False ) ) except Exception : value = '' _res_item [ name ] = value list_ . append ( _res_item ) return list_ result_list = create_ordered_list ( packages , ) if self . _output_config [ 'type' ] == LIST : return result_list result = OrderedDict ( ) for item in result_list : name = item [ self . _output_config [ 'key' ] ] if self . _output_config [ 'value' ] : value = item [ self . _output_config [ 'value' ] ] else : value = OrderedDict ( [ ( k , v ) for k , v in item . items ( ) if k != self . _output_config [ 'key' ] ] ) result [ name ] = value return result | Create out with child first position |
47,061 | def smart ( ** kwargs ) : def decorator ( func ) : for key , value in kwargs . items ( ) : setattr ( func , key , value ) return func return decorator | Simple decorator to get custom fields on admin class using this you will use less line codes |
47,062 | def create_buffer ( params , value ) : try : fv = float ( value ) return np . full ( params . length , fv , np . float ) except TypeError : if isinstance ( value , np . ndarray ) : if ( len ( value ) >= params . length ) : return value raise TypeError ( 'Value must be a float or a numpy array ofthe required length' ) | If the value is a float create a numpy array of the required length filled with value If the value is a numpy array check its length Otherwise throw a type error |
47,063 | def make_request ( self , method , service , path , body = None , query_params : QueryParams = None , headers : dict = None , correlation_id : str = None , content_type : str = 'application/json' , context : Request = None , timeout = 30 , ** kwargs ) -> asyncio . coroutine : if not isinstance ( method , Methods ) : method = Methods ( method . upper ( ) ) if content_type == 'application/json' and isinstance ( body , dict ) : body = json . dumps ( body ) if isinstance ( query_params , dict ) : query_string = parse . urlencode ( query_params ) elif isinstance ( query_params , QueryParams ) : query_string = str ( query_params ) else : query_string = '' headers = headers or { } ctx = request_context . get ( ) if context : warnings . warn ( "Passing in a context to waspy client is deprecated. " "Passed in context will be ignored" , DeprecationWarning ) if not correlation_id : correlation_id = ctx [ 'correlation_id' ] headers = { ** headers , ** ctx [ 'ctx_headers' ] } exchange = headers . get ( 'ctx-exchange-override' , None ) if exchange : kwargs [ 'exchange' ] = 'amq.headers' if isinstance ( body , str ) : body = body . encode ( ) response = asyncio . wait_for ( self . transport . make_request ( service , method . name , path , body = body , query = query_string , headers = headers , correlation_id = correlation_id , content_type = content_type , timeout = timeout , ** kwargs ) , timeout = timeout ) return response | Make a request to another service . If context is provided then context and correlation will be pulled from the provided request object for you . This includes credentials correlationid and service - headers . |
47,064 | def process_request ( self , request_object ) : identifier = request_object . identifier resource = request_object . entity_cls . get ( identifier ) return ResponseSuccess ( Status . SUCCESS , resource ) | Fetch Resource and return Entity |
47,065 | def from_dict ( cls , entity_cls , adict ) : invalid_req = InvalidRequestObject ( ) page = int ( adict . pop ( 'page' , 1 ) ) per_page = int ( adict . pop ( 'per_page' , getattr ( active_config , 'PER_PAGE' , 10 ) ) ) order_by = adict . pop ( 'order_by' , ( ) ) if page < 0 : invalid_req . add_error ( 'page' , 'is invalid' ) if invalid_req . has_errors : return invalid_req return cls ( entity_cls , page , per_page , order_by , adict ) | Initialize a ListRequestObject object from a dictionary . |
47,066 | def process_request ( self , request_object ) : resources = ( request_object . entity_cls . query . filter ( ** request_object . filters ) . offset ( ( request_object . page - 1 ) * request_object . per_page ) . limit ( request_object . per_page ) . order_by ( request_object . order_by ) . all ( ) ) return ResponseSuccess ( Status . SUCCESS , resources ) | Return a list of resources |
47,067 | def process_request ( self , request_object ) : resource = request_object . entity_cls . create ( ** request_object . data ) return ResponseSuccessCreated ( resource ) | Process Create Resource Request |
47,068 | def process_request ( self , request_object ) : entity = request_object . entity_cls . get ( request_object . identifier ) resource = entity . update ( request_object . data ) return ResponseSuccess ( Status . SUCCESS , resource ) | Process Update Resource Request |
47,069 | def process_request ( self , request_object ) : entity = request_object . entity_cls . get ( request_object . identifier ) entity . delete ( ) return ResponseSuccessWithNoContent ( ) | Process the Delete Resource Request |
47,070 | def _cast_to_type ( self , value ) : if isinstance ( value , str ) or value is None : return value return str ( value ) | Convert the value to its string representation |
47,071 | def _cast_to_type ( self , value ) : try : return int ( value ) except ( ValueError , TypeError ) : self . fail ( 'invalid' , value = value ) | Convert the value to an int and raise error on failures |
47,072 | def _cast_to_type ( self , value ) : try : return float ( value ) except ( ValueError , TypeError ) : self . fail ( 'invalid' , value = value ) | Convert the value to a float and raise error on failures |
47,073 | def _cast_to_type ( self , value ) : if value in ( True , False ) : return bool ( value ) if value in ( 't' , 'True' , '1' ) : return True if value in ( 'f' , 'False' , '0' ) : return False self . fail ( 'invalid' , value = value ) | Convert the value to a boolean and raise error on failures |
47,074 | def _cast_to_type ( self , value ) : if not isinstance ( value , list ) : self . fail ( 'invalid' , value = value ) return value | Raise error if the value is not a list |
47,075 | def _cast_to_type ( self , value ) : if not isinstance ( value , dict ) : self . fail ( 'invalid' , value = value ) return value | Raise error if the value is not a dict |
47,076 | def _cast_to_type ( self , value ) : if isinstance ( value , datetime . datetime ) : return value . date ( ) if isinstance ( value , datetime . date ) : return value try : value = date_parser ( value ) return value . date ( ) except ValueError : self . fail ( 'invalid' , value = value ) | Convert the value to a date and raise error on failures |
47,077 | def _cast_to_type ( self , value ) : if isinstance ( value , datetime . datetime ) : return value if isinstance ( value , datetime . date ) : value = datetime . datetime ( value . year , value . month , value . day ) return value try : value = date_parser ( value ) return value except ValueError : self . fail ( 'invalid' , value = value ) | Convert the value to a datetime and raise error on failures |
def execute(self, request_object):
    """Generic executor for all UseCases.

    Validates the request object, delegates to ``process_request`` and
    maps known exception types onto the matching failure responses.
    """
    if not request_object.is_valid:
        return ResponseFailure.build_from_invalid_request(request_object)

    try:
        return self.process_request(request_object)
    except ValidationError as err:
        # Entity-level validation failed even though the request was valid.
        return ResponseFailure.build_unprocessable_error(err.normalized_messages)
    except ObjectNotFoundError:
        return ResponseFailure.build_not_found(
            [{'identifier': 'Object with this ID does not exist.'}])
    except Exception as exc:
        # Catch-all boundary: log with traceback and surface a system error.
        logger.error(
            f'{self.__class__.__name__} execution failed due to error {exc}',
            exc_info=True)
        return ResponseFailure.build_system_error([{exc.__class__.__name__: exc}])
async def exchange_declare(self):
    """Declare the exchange on the channel.

    Override this method to change how an exchange is declared.
    """
    await self.channel.exchange_declare(
        self.exchange,
        self.exchange_type,
        durable=self.durable,
        auto_delete=self.auto_delete,
        no_wait=self.no_wait,
    )
async def queue_declare(self):
    """Declare the queue on the channel.

    Override this method to change how a queue is declared.
    """
    await self.channel.queue_declare(
        self.queue,
        durable=self.durable,
        exclusive=self.exclusive,
        no_wait=self.no_wait,
    )
def from_entity(cls, entity: Entity) -> 'DictModel':
    """Convert *entity* into a plain dictionary record, one key per attribute."""
    return {name: getattr(entity, name) for name in entity.meta_.attributes}
def get_connection(self):
    """Return the dictionary database handle for this provider.

    Lazily registers the data store and its lock under this provider's
    identifier; the counters table is shared across providers.
    """
    return {
        'data': _databases.setdefault(self.identifier, defaultdict(dict)),
        'lock': _locks.setdefault(self.identifier, Lock()),
        'counters': _counters,
    }
def get_repository(self, entity_cls):
    """Return a DictRepository for *entity_cls*, wired to this live provider."""
    return DictRepository(self, entity_cls, self.get_model(entity_cls))
47,084 | def _evaluate_lookup ( self , key , value , negated , db ) : results = { } for record_key , record_value in db . items ( ) : match = True stripped_key , lookup_class = self . _extract_lookup ( key ) lookup = lookup_class ( record_value [ stripped_key ] , value ) if negated : match &= not eval ( lookup . as_expression ( ) ) else : match &= eval ( lookup . as_expression ( ) ) if match : results [ record_key ] = record_value return results | Extract values from DB that match the given criteria |
def raw(self, query: Any, data: Any = None):
    """Run a raw JSON-criteria query across every schema in the database."""
    assert isinstance(query, str)

    conn = self.get_connection()
    matches = []
    for schema_name in conn['data']:
        schema_db = conn['data'][schema_name]
        try:
            # Normalize single quotes so the criteria parse as JSON.
            query = query.replace("'", "\"")
            criteria = json.loads(query)
            for key, value in criteria.items():
                schema_db = self._evaluate_lookup(key, value, False, schema_db)
            matches.extend(list(schema_db.values()))
        except json.JSONDecodeError:
            raise Exception("Query Malformed")
        except KeyError:
            # Schemas lacking the queried attribute are simply skipped.
            pass
    return matches
47,086 | def _set_auto_fields ( self , model_obj ) : for field_name , field_obj in self . entity_cls . meta_ . auto_fields : counter_key = f'{self.schema_name}_{field_name}' if not ( field_name in model_obj and model_obj [ field_name ] is not None ) : counter = next ( self . conn [ 'counters' ] [ counter_key ] ) if not counter : counter = next ( self . conn [ 'counters' ] [ counter_key ] ) model_obj [ field_name ] = counter return model_obj | Set the values of the auto field using counter |
def create(self, model_obj):
    """Insert *model_obj* into the in-memory store, keyed by its identifier."""
    model_obj = self._set_auto_fields(model_obj)
    identifier = model_obj[self.entity_cls.meta_.id_field.field_name]

    # Serialize writers so concurrent creates cannot interleave.
    with self.conn['lock']:
        self.conn['data'][self.schema_name][identifier] = model_obj

    return model_obj
def _filter(self, criteria: Q, db):
    """Recursively narrow *db* to the records satisfying *criteria*.

    AND nodes filter sequentially (an intersection); any other connector
    unions the results produced by each child.
    """
    negated = criteria.negated

    if criteria.connector == criteria.AND:
        narrowed = db
        for child in criteria.children:
            if isinstance(child, Q):
                narrowed = self._filter(child, narrowed)
            else:
                narrowed = self.provider._evaluate_lookup(
                    child[0], child[1], negated, narrowed)
        return narrowed

    merged = {}
    for child in criteria.children:
        if isinstance(child, Q):
            partial = self._filter(child, db)
        else:
            partial = self.provider._evaluate_lookup(child[0], child[1], negated, db)
        merged.update(partial)
    return merged
def filter(self, criteria: Q, offset: int = 0, limit: int = 10, order_by: list = ()):
    """Query the in-memory store and return a paginated, ordered ResultSet."""
    schema_db = self.conn['data'][self.schema_name]
    if criteria.children:
        matches = list(self._filter(criteria, schema_db).values())
    else:
        matches = list(schema_db.values())

    # Apply each ordering key in turn; a leading '-' means descending.
    for sort_key in order_by:
        descending = sort_key.startswith('-')
        if descending:
            sort_key = sort_key[1:]
        matches = sorted(matches, key=itemgetter(sort_key), reverse=descending)

    return ResultSet(
        offset=offset,
        limit=limit,
        total=len(matches),
        items=matches[offset: offset + limit])
def update(self, model_obj):
    """Replace an existing record with *model_obj*; raise if it is absent."""
    identifier = model_obj[self.entity_cls.meta_.id_field.field_name]

    with self.conn['lock']:
        if identifier not in self.conn['data'][self.schema_name]:
            raise ObjectNotFoundError(
                f'`{self.__class__.__name__}` object with identifier {identifier} '
                f'does not exist.')
        self.conn['data'][self.schema_name][identifier] = model_obj

    return model_obj
def update_all(self, criteria: Q, *args, **kwargs):
    """Update every record matching *criteria*; return the count changed.

    Positional arguments must be dicts (merged via ``dict.update``);
    keyword arguments are applied last and win on key collisions.
    """
    matches = self._filter(criteria, self.conn['data'][self.schema_name])

    updated = 0
    # FIX: hold the shared lock while mutating, consistent with create(),
    # update() and delete_all(), so concurrent writers cannot interleave.
    with self.conn['lock']:
        for identifier, record in matches.items():
            record.update(*args)
            record.update(kwargs)
            self.conn['data'][self.schema_name][identifier] = record
            updated += 1
    return updated
def delete_all(self, criteria: Q = None):
    """Delete matching records, or drop the whole schema when no criteria given.

    Returns the number of records removed when *criteria* is supplied;
    returns ``None`` after dropping the entire schema.
    """
    if criteria:
        matches = self._filter(criteria, self.conn['data'][self.schema_name])
        with self.conn['lock']:
            for identifier in matches:
                self.conn['data'][self.schema_name].pop(identifier, None)
        return len(matches)

    with self.conn['lock']:
        if self.schema_name in self.conn['data']:
            del self.conn['data'][self.schema_name]
def raw(self, query: Any, data: Any = None):
    """Run a raw JSON-criteria query against this repository's schema."""
    assert isinstance(query, str)

    schema_db = self.conn['data'][self.schema_name]
    result = None
    try:
        # Normalize single quotes so the criteria parse as JSON.
        query = query.replace("'", "\"")
        criteria = json.loads(query)
        for key, value in criteria.items():
            schema_db = self.provider._evaluate_lookup(key, value, False, schema_db)
        items = list(schema_db.values())
        result = ResultSet(offset=1, limit=len(items), total=len(items), items=items)
    except json.JSONDecodeError:
        raise Exception("Query Malformed")
    return result
def process_source(self):
    """Return the source operand, quoting it when it is a string.

    String sources are wrapped in double quotes after converting any
    embedded double quotes to single quotes, keeping the wrapping
    unambiguous. Non-string sources are returned untouched.

    FIX: removed the original replace of a single quote with an escaped
    single quote — the escape sequence denotes the same character, so the
    call was a no-op and the intended escaping never happened.
    """
    if isinstance(self.source, str):
        self.source = self.source.replace('"', "'")
        return '"{source}"'.format(source=self.source)
    return self.source
def process_target(self):
    """Return the target operand, quoting it when it is a string.

    String targets are wrapped in double quotes after converting any
    embedded double quotes to single quotes, keeping the wrapping
    unambiguous. Non-string targets are returned untouched.

    FIX: removed the original replace of a single quote with an escaped
    single quote — the escape sequence denotes the same character, so the
    call was a no-op and the intended escaping never happened.
    """
    if isinstance(self.target, str):
        self.target = self.target.replace('"', "'")
        return '"{target}"'.format(target=self.target)
    return self.target
def parse_url_to_topic(method, route):
    """Transform an HTTP method plus URL route into a dotted topic string.

    Literal dots in the route become '?', path separators become dots, and
    any '{param}' placeholder segment collapses into a '*' wildcard.
    """
    normalized = route.replace('.', '?').replace('/', '.').strip('.')
    topic = f'{method.value.lower()}.{normalized}'
    return re.sub(r"\.\{[^\}]*\}[:\w\d_-]*", ".*", topic)
def wrap(anything: bytes, encoding: str) -> str:
    """Decode *anything*, parse it as JSON and re-serialize it under a "wrap" key.

    Example of a Transformer.function.
    """
    decoded = anything.decode(encoding)
    payload = {"wrap": load_json(decoded)}
    return json.dumps(payload, ensure_ascii=False)
def construct(cls, name: str, declared_fields: typing.List[tuple]):
    """Dynamically build a Request Object dataclass named *name*.

    Each declared field becomes a dataclass field; the generated class
    gains a ``from_dict`` constructor that validates required fields and
    types, returning an ``InvalidRequestObject`` on failure, plus an
    ``is_valid`` flag that defaults to True.
    """
    @classmethod
    def from_dict(cls, adict):
        """Build an instance from *adict*, collecting validation errors."""
        invalid_req = InvalidRequestObject()
        values = {}

        for item in fields(cls):
            value = None

            if item.metadata and 'required' in item.metadata and item.metadata['required']:
                if item.name not in adict or adict.get(item.name) is None:
                    invalid_req.add_error(item.name, 'is required')
                else:
                    value = adict[item.name]
            elif item.name in adict:
                value = adict[item.name]
            elif item.default is not None:
                # FIX: was a truthiness test (`elif item.default:`), which
                # silently skipped falsy defaults such as 0, False or ''.
                value = item.default

            try:
                if item.type not in [typing.Any, 'typing.Any'] and value is not None:
                    if item.type in [int, float, str, bool, list, dict, tuple,
                                     datetime.date, datetime.datetime]:
                        value = item.type(value)
                    elif not (isinstance(value, item.type) or issubclass(value, item.type)):
                        invalid_req.add_error(
                            item.name,
                            '{} should be of type {}'.format(item.name, item.type))
            except Exception:
                invalid_req.add_error(
                    item.name,
                    'Value {} for {} is invalid'.format(value, item.name))

            values[item.name] = value

        if invalid_req.has_errors:
            return invalid_req

        return cls(**values)

    formatted_fields = cls._format_fields(declared_fields)
    dc = make_dataclass(
        name, formatted_fields,
        namespace={'from_dict': from_dict, 'is_valid': True})
    return dc
47,099 | def _format_fields ( cls , declared_fields : typing . List [ tuple ] ) : formatted_fields = [ ] for declared_field in declared_fields : field_name = field_type = field_defn = None if isinstance ( declared_field , str ) or len ( declared_field ) == 1 : field_name = declared_field field_type = typing . Any field_defn = field ( default = None ) elif len ( declared_field ) == 2 : field_name = declared_field [ 0 ] field_type = declared_field [ 1 ] field_defn = field ( default = None ) elif len ( declared_field ) == 3 : field_name = declared_field [ 0 ] field_type = declared_field [ 1 ] assert isinstance ( declared_field [ 2 ] , dict ) metadata = default = None if 'required' in declared_field [ 2 ] and declared_field [ 2 ] [ 'required' ] : metadata = { 'required' : True } if 'default' in declared_field [ 2 ] : default = declared_field [ 2 ] [ 'default' ] field_defn = field ( default = default , metadata = metadata ) formatted_fields . append ( ( field_name , field_type , field_defn ) ) return formatted_fields | Process declared fields and construct a list of tuples that can be fed into dataclass constructor factory . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.