idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
51,900
def perform_query(self, query, **params):
    """Perform a query, where *query* is a string.

    Returns the raw result of ``self.engine.execute``.  On failure the
    offending query is printed and the original exception is re-raised.
    """
    try:
        return self.engine.execute(query, params)
    except Exception:
        # was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
        # are not intercepted (they were re-raised anyway, but narrowing is
        # the idiomatic form)
        print("Error with query: {}".format(query))
        raise
Perform a query where query is a string .
51,901
# Fetch entities for which we have data.
# Builds a mapping domain -> [entity, ...] from "domain.entity" ids returned
# by the query, and caches it on self._domains / self._entities.
# NOTE(review): detokenized single-line dump; the SQL template passed to
# text() appears stripped -- confirm the original query string.
def fetch_entities ( self ) : query = text ( ) response = self . perform_query ( query ) entities = { } domains = set ( ) for [ entity ] in response : domain = entity . split ( "." ) [ 0 ] domains . add ( domain ) entities . setdefault ( domain , [ ] ) . append ( entity ) self . _domains = list ( domains ) self . _entities = entities print ( "There are {} entities with data" . format ( len ( entities ) ) )
Fetch entities for which we have data .
51,902
def fetch_all_data(self, limit=50000):
    """Fetch data for all entities into ``self._master_df`` and parse it.

    Parameters:
        limit: maximum number of rows to request from the database.

    Raises:
        ValueError: if the query fails, chained to the original error.
    """
    # NOTE(review): the SQL template passed to text() appears stripped in
    # this dump -- confirm the original query string.
    query = text()
    try:
        print("Querying the database, this could take a while")
        response = self.perform_query(query, limit=limit)
        master_df = pd.DataFrame(response.fetchall())
        print("master_df created successfully.")
        self._master_df = master_df.copy()
        self.parse_all_data()
    except Exception as exc:
        # was a bare `except:` that discarded the cause; chain it so the
        # real database error remains visible
        raise ValueError("Error querying the database.") from exc
Fetch data for all entities .
51,903
def parse_all_data(self):
    """Parse the master dataframe: name the columns, flag states that are
    numeric, and install a hierarchical index."""
    df = self._master_df
    df.columns = ["domain", "entity", "state", "last_changed"]
    # mark rows whose state string parses as a float
    df["numerical"] = df["state"].apply(functions.isfloat)
    df.set_index(
        ["domain", "entity", "numerical", "last_changed"], inplace=True
    )
Parses the master df .
51,904
def correlations(self):
    """Calculate pairwise correlation coefficients of the numeric sensors.

    Returns a single-column DataFrame ("value") indexed by "col-row" name
    pairs, with self-correlations (+/-1), NaNs and duplicate pairs removed,
    sorted descending.
    """
    corr_df = self._sensors_num_df.corr()
    corr_names = []
    corrs = []
    for i in range(len(corr_df.index)):
        for j in range(len(corr_df.index)):
            c_name = corr_df.index[i]
            r_name = corr_df.columns[j]
            corr_names.append("%s-%s" % (c_name, r_name))
            # .ix was deprecated and removed from pandas; .iloc is the
            # positional equivalent
            corrs.append(corr_df.iloc[i, j])
    corrs_all = pd.DataFrame(index=corr_names)
    corrs_all["value"] = corrs
    # drop NaNs and the trivial +/-1 self-correlations
    corrs_all = corrs_all.dropna().drop(
        corrs_all[(corrs_all["value"] == 1.0)].index
    )
    corrs_all = corrs_all.drop(corrs_all[corrs_all["value"] == -1.0].index)
    corrs_all = corrs_all.sort_values("value", ascending=False)
    # each pair appears twice (a-b and b-a) with the same value
    corrs_all = corrs_all.drop_duplicates()
    return corrs_all
Calculate the correlation coefficients .
51,905
def plot(self, entities: List[str]):
    """Basic line plot of numerical sensor data for the given entities."""
    ax = self._sensors_num_df[entities].plot(figsize=[12, 6])
    # place the legend outside the axes so it never hides the traces
    ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
    ax.set_xlabel("Date")
    ax.set_ylabel("Reading")
    return
Basic plot of a numerical sensor data .
51,906
def plot(self, entity):
    """Basic plot of a single binary sensor as on/off colored bands."""
    df = self._binary_df[[entity]]
    # forward-fill to one-second resolution so bands are contiguous
    resampled = df.resample("s").ffill()
    resampled.columns = ["value"]
    fig, ax = plt.subplots(1, 1, figsize=(16, 2))
    # paint the whole strip blue ("off"), then overlay red where "on"
    ax.fill_between(
        resampled.index, y1=0, y2=1, facecolor="royalblue", label="off"
    )
    ax.fill_between(
        resampled.index,
        y1=0,
        y2=1,
        where=(resampled["value"] > 0),
        facecolor="red",
        label="on",
    )
    ax.set_title(entity)
    ax.set_xlabel("Date")
    ax.set_frame_on(False)
    ax.set_yticks([])
    plt.legend(loc=(1.01, 0.7))
    plt.show()
    return
Basic plot of a single binary sensor data .
51,907
def is_sf_database(db, model=None):
    """Return True if the alias *db* refers to a Salesforce database.

    With ``db is None`` the decision falls back to the model's
    ``_salesforce_object`` marker attribute.
    """
    from django.db import connections
    if db is None:
        return getattr(model, '_salesforce_object', False)
    engine = connections[db].settings_dict['ENGINE']
    return engine == 'salesforce.backend' or connections[db].vendor == 'salesforce'
The alias is a Salesforce database .
51,908
def allow_migrate(self, db, app_label, model_name=None, **hints):
    """Don't attempt to sync SF models to non-SF databases and vice versa.

    Returns False to veto the migration, or None (implicit) for
    "no opinion", per Django's database-router contract.
    """
    if model_name:
        model = apps.get_model(app_label, model_name)
    else:
        model = hints.get('model')
    if hasattr(model, '_salesforce_object'):
        # an SF model may only migrate onto an SF database
        if not (is_sf_database(db) or db == self.sf_alias):
            return False
    else:
        # a non-SF model may not migrate onto an SF database
        # (parentheses added for readability; `and` binds tighter than `or`)
        if is_sf_database(db) or (self.sf_alias != 'default' and db == self.sf_alias):
            return False
    # removed dead trailing `if hasattr(model, '_salesforce_object'): pass`
    # -- it was a no-op left over from debugging
Don't attempt to sync SF models to non-SF databases and vice versa.
51,909
def update(self, **kwargs):
    """Customize the lazy field.

    Merges *kwargs* into the stored keyword arguments; returns ``self``
    so calls can be chained.  Must not be called after the real field
    has been created.
    """
    assert not self.called
    self.kw.update(kwargs)
    return self
Customize the lazy field
51,910
def create(self):
    """Create a normal field instance from this lazy field."""
    assert not self.called
    return self.klass(*self.args, **self.kw)
Create a normal field from the lazy field
51,911
# Returns a QuerySet that accesses remote SF objects when self.db is a
# Salesforce database; otherwise falls back to the default Django manager
# behavior via super().
# NOTE(review): detokenized single-line dump; shown formatting is not
# runnable Python as-is.
def get_queryset ( self ) : if router . is_sf_database ( self . db ) : q = models_sql_query . SalesforceQuery ( self . model , where = compiler . SalesforceWhereNode ) return query . SalesforceQuerySet ( self . model , query = q , using = self . db ) return super ( SalesforceManager , self ) . get_queryset ( )
Returns a QuerySet which access remote SF objects .
51,912
def get_attname_column(self):
    """Get the database column name, derived automatically in most cases.

    An explicit ``db_column`` always wins; otherwise mixed-case names are
    used verbatim while lowercase names are CamelCased, and custom SF
    fields get the namespace prefix and '__c' suffix.
    """
    attname = self.get_attname()
    if self.db_column is not None:
        column = self.db_column
    else:
        if self.name.islower():
            column = self.name.title().replace('_', '')
        else:
            column = self.name
        if self.sf_custom:
            column = self.sf_namespace + column + '__c'
    return attname, column
Get the database column name automatically in most cases .
51,913
def extract_values(query):
    """Extract values from an insert or update query (supports bulk_create).

    Update queries yield one row dict; insert queries yield a list with
    one entry per object.  Any other query type is unsupported.
    """
    if isinstance(query, subqueries.UpdateQuery):
        return extract_values_inner(query.values, query)
    if isinstance(query, subqueries.InsertQuery):
        return [extract_values_inner(obj, query) for obj in query.objs]
    raise NotSupportedError
Extract values from insert or update query . Supports bulk_create
51,914
# Send a query to the Salesforce API and record cursor bookkeeping:
# rowcount from 'totalSize', lastrowid from single/composite/batch insert
# responses, first_row for SELECTs.  Raises DatabaseError when a batch
# response reports errors.  Routes plain-cursor calls to execute_select
# and Django-compiled queries to execute_django.
# NOTE(review): detokenized single-line dump; not runnable as shown.
def execute ( self , q , args = ( ) ) : self . rowcount = None response = None if self . query is None : self . execute_select ( q , args ) else : response = self . execute_django ( q , args ) if isinstance ( response , list ) : return if response and response . text : data = response . json ( parse_float = decimal . Decimal ) if 'totalSize' in data : self . rowcount = data [ 'totalSize' ] elif ( 'success' in data and 'id' in data ) : self . lastrowid = data [ 'id' ] return elif 'compositeResponse' in data : self . lastrowid = [ x [ 'body' ] [ 'id' ] if x [ 'body' ] is not None else x [ 'referenceId' ] for x in data [ 'compositeResponse' ] ] return elif data [ 'hasErrors' ] is False : if data [ 'results' ] and data [ 'results' ] [ 0 ] [ 'result' ] : self . lastrowid = [ item [ 'result' ] [ 'id' ] for item in data [ 'results' ] ] return else : raise DatabaseError ( data ) if not q . upper ( ) . startswith ( 'SELECT COUNT() FROM' ) : self . first_row = data [ 'records' ] [ 0 ] if data [ 'records' ] else None
Send a query to the Salesforce API .
51,915
# Dispatch a Django-compiled query to the matching executor: insert/update/
# delete subqueries get their own handlers, raw and plain queries run
# through execute_select, and transaction no-ops (SAVEPOINT/ROLLBACK/
# RELEASE) are logged and ignored.  Anything else is a DatabaseError.
# NOTE(review): detokenized single-line dump; not runnable as shown.
def execute_django ( self , soql , args = ( ) ) : response = None sqltype = soql . split ( None , 1 ) [ 0 ] . upper ( ) if isinstance ( self . query , subqueries . InsertQuery ) : response = self . execute_insert ( self . query ) elif isinstance ( self . query , subqueries . UpdateQuery ) : response = self . execute_update ( self . query ) elif isinstance ( self . query , subqueries . DeleteQuery ) : response = self . execute_delete ( self . query ) elif isinstance ( self . query , RawQuery ) : self . execute_select ( soql , args ) elif sqltype in ( 'SAVEPOINT' , 'ROLLBACK' , 'RELEASE' ) : log . info ( "Ignored SQL command '%s'" , sqltype ) return elif isinstance ( self . query , Query ) : self . execute_select ( soql , args ) else : raise DatabaseError ( "Unsupported query: type %s: %s" % ( type ( self . query ) , self . query ) ) return response
Fixed execute for queries coming from Django query compilers
51,916
# Prepare the list of primary keys affected by an update/delete query.
# Fast path: a single un-negated AND filter on Id (exact or in) yields the
# pks directly -- from a literal, a list/tuple, or by compiling a nested
# SalesforceQuery.  Otherwise the WHERE clause is compiled to SOQL and a
# "SELECT Id FROM ..." is executed to collect the ids.
# NOTE(review): detokenized single-line dump; statement order is
# significant here, so the code is left untouched.
def get_pks_from_query ( self , query ) : where = query . where sql = None if where . connector == 'AND' and not where . negated and len ( where . children ) == 1 : child = where . children [ 0 ] if ( child . lookup_name in ( 'exact' , 'in' ) and child . lhs . target . column == 'Id' and not child . bilateral_transforms and child . lhs . target . model is self . query . model ) : pks = child . rhs if child . lookup_name == 'exact' : assert isinstance ( pks , text_type ) return [ pks ] assert not child . bilateral_transforms if isinstance ( pks , ( tuple , list ) ) : return pks if DJANGO_111_PLUS : assert isinstance ( pks , Query ) and type ( pks ) . __name__ == 'SalesforceQuery' sql , params = pks . get_compiler ( 'salesforce' ) . as_sql ( ) else : assert isinstance ( pks , salesforce . backend . query . SalesforceQuerySet ) return [ x . pk for x in pks ] if not sql : where_sql , params = where . as_sql ( query . get_compiler ( 'salesforce' ) , self . db . connection ) sql = "SELECT Id FROM {} WHERE {}" . format ( query . model . _meta . db_table , where_sql ) with self . db . cursor ( ) as cur : cur . execute ( sql , params ) assert len ( cur . description ) == 1 and cur . description [ 0 ] [ 0 ] == 'Id' return [ x [ 0 ] for x in cur ]
Prepare primary keys for update and delete queries
51,917
def versions_request(self):
    """List the available Salesforce REST API versions."""
    # empty api_ver hits the unversioned root endpoint
    ret = self.handle_api_exceptions('GET', '', api_ver='')
    return [str_dict(item) for item in ret.json()]
List Available REST API Versions
51,918
# Fix escaped international characters back to utf-8 (Python 2 only; the
# function is a no-op under PY3).  Scans for \xNN / \uNNNN escapes, decodes
# them, and keeps printable non-ASCII characters while leaving control-range
# escapes untouched.  The SmartInternational str subclass relaxes
# endswith() comparisons.
# NOTE(review): detokenized single-line dump; PY2-era code left untouched.
def fix_international ( text ) : "Fix excaped international characters back to utf-8" class SmartInternational ( str ) : def __new__ ( cls , text ) : return str . __new__ ( cls , text ) def endswith ( self , string ) : return super ( SmartInternational , self ) . endswith ( str ( string ) ) if PY3 : return text out = [ ] last = 0 for match in re . finditer ( r'(?<=[^\\])(?:\\x[0-9a-f]{2}|\\u[0-9a-f]{4})' , text ) : start , end , group = match . start ( ) , match . end ( ) , match . group ( ) out . append ( text [ last : start ] ) c = group . decode ( 'unicode_escape' ) out . append ( c if ord ( c ) > 160 and ord ( c ) != 173 else group ) last = end out . append ( text [ last : ] ) return SmartInternational ( '' . join ( out ) . encode ( 'utf-8' ) )
Fix escaped international characters back to UTF-8.
51,919
def get_meta(self, table_name, constraints=None, column_to_field_name=None,
             is_view=False, is_partition=None):
    """Return the lines of code for the inner Meta class of the model that
    corresponds to *table_name*.

    Salesforce connections may contribute extra introspected Meta lines.
    """
    # NOTE(review): leading whitespace inside these literals may have been
    # collapsed by detokenization of this dump -- confirm against upstream.
    meta = [
        " class Meta(models.Model.Meta):",
        " db_table = '%s'" % table_name,
    ]
    if self.connection.vendor == 'salesforce':
        for line in self.connection.introspection.get_additional_meta(table_name):
            meta.append(" " + line)
    meta.append("")
    return meta
Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name .
51,920
def relative_path(path):
    """Return *path* interpreted relative to the directory of this file."""
    here = os.path.dirname(__file__)
    return os.path.join(here, path)
Return the given path relative to this file .
51,921
def get_tagged_version():
    """Determine the current version of this package.

    Reads ``__version__`` out of salesforce/__init__.py.
    """
    with open(relative_path('salesforce/__init__.py'), 'r') as fd:
        content = fd.read()
    match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                      content, re.MULTILINE)
    return match.group(1)
Determine the current version of this package .
51,922
def dynamic_start(self, access_token, instance_url=None, **kw):
    """Set the access token dynamically according to the current user.

    Stores the token, instance URL and any extra keyword data on
    ``self.dynamic``.
    """
    self.dynamic = {
        'access_token': str(access_token),
        'instance_url': str(instance_url),
    }
    self.dynamic.update(kw)
Set the access token dynamically according to the current user .
51,923
def mark_quoted_strings(sql):
    """Mark all quoted strings in the SOQL by '@' placeholders.

    Returns ``(sql_with_placeholders, params)`` where *params* holds the
    un-escaped string values in order of appearance.  Asserts that the
    text outside strings contains only characters legal in SOQL.
    """
    pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
    bs_pattern = re.compile(r"\\([\\'])")
    out_pattern = re.compile(r"^(?:[-!()*+,.:<=>\w\s|%s])*$")
    missing_apostrophe = "invalid character in SOQL or a missing apostrophe"
    start = 0
    out, params = [], []
    for match in pm_pattern.finditer(sql):
        segment = sql[start:match.start()]
        out.append(segment)
        assert out_pattern.match(segment), missing_apostrophe
        # strip the surrounding quotes and unescape \\ and \'
        params.append(
            bs_pattern.sub('\\1', sql[match.start() + 1:match.end() - 1])
        )
        start = match.end()
    tail = sql[start:]
    out.append(tail)
    assert out_pattern.match(tail), missing_apostrophe
    return '@'.join(out), params
Mark all quoted strings in the SOQL by replacing them with '@' placeholders, returning the stripped values separately.
51,924
def subst_quoted_strings(sql, params):
    """Reverse operation to mark_quoted_strings: substitute the stripped
    quoted strings back in place of the '@' placeholders."""
    parts = sql.split('@')
    params_dont_match = "number of parameters doesn' match the transformed query"
    assert len(parts) == len(params) + 1, params_dont_match
    out = []
    for part, param in zip(parts, params):
        out.append(part)
        # re-escape backslashes and apostrophes before re-quoting
        out.append("'%s'" % param.replace('\\', '\\\\').replace("\'", "\\\'"))
    out.append(parts[-1])
    return ''.join(out)
Reverse operation to mark_quoted_strings - substitutes the stripped quoted strings back into the query.
51,925
def find_closing_parenthesis(sql, startpos):
    """Find the pair of opening and closing parentheses in *sql* at or
    after *startpos*.

    Returns ``(opening_start, closing_end)`` character offsets, or ``None``
    (implicitly, as before) when no balanced pair follows *startpos*.

    Raises AssertionError on a ')' with no matching '('.
    """
    pattern = re.compile(r'[()]')
    level = 0
    # was initialized to an empty list -- a type-confusing placeholder;
    # None states "not found yet" honestly
    opening = None
    for match in pattern.finditer(sql, startpos):
        par = match.group()
        if par == '(':
            if level == 0:
                opening = match.start()
            level += 1
        if par == ')':
            assert level > 0, "missing '(' before ')'"
            level -= 1
            if level == 0:
                closing = match.end()
                return opening, closing
Find the pair of opening and closing parentheses .
51,926
# Split the SOQL on subqueries and replace each "(SELECT ...)" by "(&)",
# recursing into nested subqueries.  Returns (outer_sql, [subquery, ...])
# where each subquery entry has the same (sql, subqueries) shape.  Quoted
# strings are stripped first via mark_quoted_strings (params discarded).
# NOTE(review): detokenized single-line dump; recursion and offsets are
# order-sensitive, so the code is left untouched.
def split_subquery ( sql ) : sql , params = mark_quoted_strings ( sql ) sql = simplify_expression ( sql ) _ = params start = 0 out = [ ] subqueries = [ ] pattern = re . compile ( r'\(SELECT\b' , re . I ) match = pattern . search ( sql , start ) while match : out . append ( sql [ start : match . start ( ) + 1 ] + '&' ) start , pos = find_closing_parenthesis ( sql , match . start ( ) ) start , pos = start + 1 , pos - 1 subqueries . append ( split_subquery ( sql [ start : pos ] ) ) start = pos match = pattern . search ( sql , start ) out . append ( sql [ start : len ( sql ) ] ) return '' . join ( out ) , subqueries
Split on subqueries and replace them by & .
51,927
# Remove all unnecessary whitespace around punctuation, then re-insert the
# single spaces that SOQL requires: after a ')' followed by a word, and
# between a reserved word (or comma) and a following '('.  Depends on the
# module-level RESERVED_WORDS collection.
# NOTE(review): detokenized single-line dump; the nested-regex order is
# significant, so the code is left untouched.
def simplify_expression ( txt ) : minimal = re . sub ( r'\s' , ' ' , re . sub ( r'\s(?=\W)' , '' , re . sub ( r'(?<=\W)\s' , '' , txt . strip ( ) ) ) ) return re . sub ( r'\)(?=\w)' , ') ' , re . sub ( r'(,|\b(?:{}))\(' . format ( '|' . join ( RESERVED_WORDS ) ) , '\\1 (' , minimal ) )
Remove all unnecessary whitespace and normalize some very usual spacing.
51,928
# Replace the nested dict objects of a Salesforce row by a flat dict with
# dotted lowercase keys ("object.object.name").  Nested relationship dicts
# (those carrying 'attributes') recurse; a missing relationship fills every
# matching alias with None; nested query results (done/records/totalSize)
# are kept as-is.
# NOTE(review): detokenized single-line dump; key-path bookkeeping is
# order-sensitive, so the code is left untouched.
def _make_flat ( self , row_dict , path , subroots ) : out = { } for k , v in row_dict . items ( ) : klc = k . lower ( ) if ( not ( isinstance ( v , dict ) and 'attributes' in v ) or ( 'done' in v and 'records' in v and 'totalSize' in v ) ) : if klc not in subroots : out [ klc ] = v else : strpath = '.' . join ( path + ( klc , ) ) + '.' strip_pos = len ( strpath ) - len ( klc + '.' ) for alias in self . aliases : if alias . lower ( ) . startswith ( strpath ) : out [ alias . lower ( ) [ strip_pos : ] ] = None else : new_subroots = subroots [ klc ] if k != 'attributes' else { } for sub_k , sub_v in self . _make_flat ( v , path + ( klc , ) , new_subroots ) . items ( ) : out [ k . lower ( ) + '.' + sub_k ] = sub_v return out
Replace the nested dict objects by a flat dict with keys object . object . name .
51,929
# Parse the REST API response into flat DB-API cursor rows.  A plain
# COUNT() query yields just the rowcount; otherwise each record is
# flattened via _make_flat and emitted as a list (or dict, if row_type is
# a dict subclass) ordered by self.aliases.
# NOTE(review): the `while True: ... break` wrapper executes exactly once
# and looks vestigial -- confirm against upstream before simplifying.
# NOTE(review): detokenized single-line dump; left untouched.
def parse_rest_response ( self , records , rowcount , row_type = list ) : if self . is_plain_count : assert list ( records ) == [ ] yield rowcount else : while True : for row_deep in records : assert self . is_aggregation == ( row_deep [ 'attributes' ] [ 'type' ] == 'AggregateResult' ) row_flat = self . _make_flat ( row_deep , path = ( ) , subroots = self . subroots ) assert all ( not isinstance ( x , dict ) or x [ 'done' ] for x in row_flat ) if issubclass ( row_type , dict ) : yield { k : fix_data_type ( row_flat [ k . lower ( ) ] ) for k in self . aliases } else : yield [ fix_data_type ( row_flat [ k . lower ( ) ] ) for k in self . aliases ] break
Parse the REST API response to DB API cursor flat response
51,930
# Add Salesforce fields from a pattern_module's models.py to `attrs`.
# Requires Meta.db_table; compiles enable/disable regex patterns
# (a leading '-' disables), records columns already declared explicitly,
# locates the ModelTemplate class matching db_table in pattern_module,
# then instantiates every enabled LazyField (fixing ForeignKey 'to'
# references to template classes by name) into attrs.
# NOTE(review): detokenized dump split across two source lines mid-token;
# far too order-sensitive to reformat safely -- left untouched.
def make_dynamic_fields ( pattern_module , dynamic_field_patterns , attrs ) : import re attr_meta = attrs [ 'Meta' ] db_table = getattr ( attr_meta , 'db_table' , None ) if not db_table : raise RuntimeError ( 'The "db_table" must be set in Meta if "dynamic_field_patterns" is used.' ) is_custom_model = getattr ( attr_meta , 'custom' , False ) patterns = [ ] for pat in dynamic_field_patterns : enabled = True if pat . startswith ( '-' ) : enabled = False pat = pat [ 1 : ] patterns . append ( ( enabled , re . compile ( r'^(?:{})$' . format ( pat ) , re . I ) ) ) used_columns = [ ] for name , attr in attrs . items ( ) : if isinstance ( attr , SfField ) : field = attr if field . sf_custom is None and is_custom_model : field . sf_custom = True if not field . name : field . name = name attname , column = field . get_attname_column ( ) used_columns . append ( column ) if not pattern_module : raise RuntimeError ( "a pattern_module is required for dynamic fields." ) for name , obj in vars ( pattern_module ) . items ( ) : if not name . startswith ( '_' ) and isclass ( obj ) and issubclass ( obj , ModelTemplate ) : default_table = obj . __name__ if getattr ( getattr ( obj , 'Meta' , None ) , 'db_table' , default_table ) == db_table : cls = obj break else : if any ( not x . startswith ( '__' ) for x in dir ( pattern_module ) ) : raise RuntimeError ( "No Model for table '%s' found in the module '%s'" % ( db_table , pattern_module . __name__ ) ) warnings . warn ( "The module '%s' is empty. (It is OK if you are " "rewriting new Models by pipe from inspectdb command.)" % pattern_module . __name__ ) return lazy_fields = [ ( name , obj ) for name , obj in vars ( cls ) . items ( ) if isinstance ( obj , LazyField ) and issubclass ( obj . klass , SfField ) ] for name , obj in sorted ( lazy_fields , key = lambda name_obj : name_obj [ 1 ] . counter ) : for enabled , pat in patterns : if pat . match ( name ) : break else : enabled = False if enabled : if issubclass ( obj . 
klass , ForeignKey ) : to = obj . kw [ 'to' ] if isclass ( to ) and issubclass ( to , ModelTemplate ) : obj . kw [ 'to' ] = to . __name__ field = obj . create ( ) attrs [ name ] = field assert pattern_module
Add some Salesforce fields from a pattern_module models . py
51,931
# Prepare exception params, or only an exception message when obj is None.
# Extracts errorCode / fields / subrequest reference from a JSON SFDC error
# response, optionally appends the request "METHOD url" (verb
# 'method+url'), joins everything with indented newlines, and attaches
# data/response/verbs to the exception object.
# NOTE(review): detokenized single-line dump; left untouched.
def prepare_exception ( obj , messages = None , response = None , verbs = None ) : verbs = set ( verbs or [ ] ) known_options = [ 'method+url' ] if messages is None : messages = [ ] if isinstance ( messages , ( text_type , str ) ) : messages = [ messages ] assert isinstance ( messages , list ) assert not verbs . difference ( known_options ) data = None if response is not None and 'json' in response . headers . get ( 'Content-Type' , '' ) and response . text : data = json . loads ( response . text ) if data : data_0 = data [ 0 ] if 'errorCode' in data_0 : subreq = '' if 'referenceId' in data_0 : subreq = " (in subrequest {!r})" . format ( data_0 [ 'referenceId' ] ) messages = [ data_0 [ 'errorCode' ] + subreq ] + messages if data_0 . get ( 'fields' ) : messages . append ( 'FIELDS: {}' . format ( data_0 [ 'fields' ] ) ) if len ( data ) > 1 : messages . append ( 'MORE_ERRORS ({})' . format ( len ( data ) ) ) if 'method+url' in verbs : method = response . request . method url = response . request . url if len ( url ) > 100 : url = url [ : 100 ] + '...' data_info = '' if ( method in ( 'POST' , 'PATCH' ) and ( not response . request . body or 'json' not in response . request . headers [ 'content-type' ] ) ) : data_info = ' (without json request data)' messages . append ( 'in {} "{}"{}' . format ( method , url , data_info ) ) separ = '\n  ' if not PY3 : messages = [ x if isinstance ( x , str ) else x . encode ( 'utf-8' ) for x in messages ] messages = [ x . replace ( '\n' , separ ) for x in messages ] message = separ . join ( messages ) if obj : obj . data = data obj . response = response obj . verbs = verbs return message
Prepare exception params, or only an exception message.
51,932
def warn_sf(messages, response, verbs=None, klass=SalesforceWarning):
    """Issue a SalesforceWarning combining *messages* with data extracted
    from the SFDC *response*, attributed to the caller's frame."""
    warning = klass(messages, response, verbs)
    warnings.warn(warning, stacklevel=2)
Issue a warning SalesforceWarning with message combined from message and data from SFDC response
51,933
def get_from_clause(self):
    """Return the FROM clause converted to the SOQL dialect.

    Returns a (tables, params) pair; SOQL queries always target a single
    root table and take no FROM parameters.
    """
    self.query_topology()
    root_table = self.soql_trans[self.root_alias]
    return [root_table], []
Return the FROM clause converted the SOQL dialect .
51,934
def quote_name_unless_alias(self, name):
    """A wrapper around connection.ops.quote_name that caches the quoted
    form per name (aliases for table names are not quoted; mostly used
    during the ORDER BY clause)."""
    quoted = self.connection.ops.quote_name(name)
    self.quote_cache[name] = quoted
    return quoted
A wrapper around connection . ops . quote_name that doesn t quote aliases for table names . Mostly used during the ORDER BY clause .
51,935
# Create the SOAP client for the user currently logged in on db_alias.
# Requires the optional Beatbox package; primes the REST session, derives
# the org id from the access token prefix ("<15 chars>!..."), and points
# the SOAP client at the org's Soap/u endpoint with the same session.
# NOTE(review): detokenized single-line dump; left untouched.
def get_soap_client ( db_alias , client_class = None ) : if not beatbox : raise InterfaceError ( "To use SOAP API, you'll need to install the Beatbox package." ) if client_class is None : client_class = beatbox . PythonClient soap_client = client_class ( ) connection = connections [ db_alias ] cursor = connection . cursor ( ) cursor . urls_request ( ) auth_info = connections [ db_alias ] . sf_session . auth access_token = auth_info . get_auth ( ) [ 'access_token' ] assert access_token [ 15 ] == '!' org_id = access_token [ : 15 ] url = '/services/Soap/u/{version}/{org_id}' . format ( version = salesforce . API_VERSION , org_id = org_id ) soap_client . useSession ( access_token , auth_info . instance_url + url ) return soap_client
Create the SOAP client for the current user logged in the db_alias
51,936
def signalize_extensions():
    """DB API 2.0 extensions are reported by warnings at run-time."""
    warnings.warn("DB-API extension cursor.rownumber used", SalesforceWarning)
    warnings.warn("DB-API extension connection.<exception> used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.connection used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.messages used", SalesforceWarning)
    warnings.warn("DB-API extension connection.messages used", SalesforceWarning)
    # BUG FIX: in the next two calls the warning category had been pasted
    # inside the message string ("cursor.next(, SalesforceWarning) used"),
    # so they were emitted as plain UserWarning with a garbled message.
    warnings.warn("DB-API extension cursor.next() used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.__iter__() used", SalesforceWarning)
    warnings.warn("DB-API extension cursor.lastrowid used", SalesforceWarning)
    warnings.warn("DB-API extension .errorhandler used", SalesforceWarning)
DB API 2 . 0 extension are reported by warnings at run - time .
51,937
def arg_to_soql(arg):
    """Perform the necessary SOQL quoting on *arg*.

    Looks up an exact-type conversion first, then a registered superclass
    conversion, and finally falls back to the str conversion.
    """
    conversion = sql_conversions.get(type(arg))
    if not conversion:
        for type_ in subclass_conversions:
            if isinstance(arg, type_):
                conversion = sql_conversions[type_]
                break
        else:
            conversion = sql_conversions[str]
    return conversion(arg)
Perform necessary SOQL quoting on the arg .
51,938
def arg_to_json(arg):
    """Perform the necessary JSON conversion on *arg*.

    Looks up an exact-type conversion first, then a registered superclass
    conversion, and finally falls back to the str conversion.
    """
    conversion = json_conversions.get(type(arg))
    if not conversion:
        for type_ in subclass_conversions:
            if isinstance(arg, type_):
                conversion = json_conversions[type_]
                break
        else:
            conversion = json_conversions[str]
    return conversion(arg)
Perform necessary JSON conversion on the arg .
51,939
def merge_dict(dict_1, *other, **kw):
    """Merge two or more dicts, including *kw*, into a new result dict.

    Inputs are not modified; later sources win on key collisions.
    """
    merged = dict_1.copy()
    for extra in other:
        merged.update(extra)
    merged.update(kw)
    return merged
Merge two or more dict including kw into result dict .
51,940
# Authenticate and get the name of the assigned SFDC data server.
# Lazily builds a requests.Session guarded by the global connect_lock,
# authenticates with SalesforcePasswordAuth, and mounts a retrying
# HTTPAdapter for the instance URL.
# NOTE(review): detokenized single-line dump; left untouched.
def make_session ( self ) : with connect_lock : if self . _sf_session is None : sf_session = requests . Session ( ) sf_session . auth = SalesforcePasswordAuth ( db_alias = self . alias , settings_dict = self . settings_dict ) sf_instance_url = sf_session . auth . instance_url sf_requests_adapter = HTTPAdapter ( max_retries = get_max_retries ( ) ) sf_session . mount ( sf_instance_url , sf_requests_adapter ) self . _sf_session = sf_session
Authenticate and get the name of assigned SFDC data server
51,941
def rest_api_url(self, *url_parts, **kwargs):
    """Join the URL of the REST API from its parts.

    Keyword args: ``relative`` (omit the instance URL) and ``api_ver``
    (override ``self.api_ver``).  An absolute 'https://' or 'mock://'
    first part is joined verbatim; a leading '/' in the first part skips
    the 'services/data/vXX.X' prefix.
    """
    url_parts = list(url_parts)
    if url_parts and re.match(r'^(?:https|mock)://', url_parts[0]):
        return '/'.join(url_parts)
    relative = kwargs.pop('relative', False)
    api_ver = kwargs.pop('api_ver', None)
    if api_ver is None:
        api_ver = self.api_ver
    assert not kwargs
    base = [''] if relative else [self.sf_session.auth.instance_url]
    if url_parts and url_parts[0].startswith('/'):
        prefix = []
        url_parts[0] = url_parts[0][1:]
    else:
        prefix = ['services/data']
        # NOTE(review): the detokenized dump is ambiguous about whether
        # this version segment is appended inside or after the else branch;
        # inside matches the leading-'/' escape hatch -- confirm upstream.
        if api_ver:
            prefix.append('v{api_ver}'.format(api_ver=api_ver))
    return '/'.join(base + prefix + url_parts)
Join the URL of REST_API
51,942
# The innermost error reporter: raise exceptions from an SFDC error
# response.  Non-JSON or malformed payloads become OperationalError with
# the HTTP status; a DELETE on an already-deleted object only warns and
# returns None; NOT_FOUND / METHOD_NOT_ALLOWED include the request line;
# everything else raises SalesforceError with the first error message.
# NOTE(review): detokenized single-line dump; left untouched.
def raise_errors ( self , response ) : verb = self . debug_verbs method = response . request . method data = None is_json = 'json' in response . headers . get ( 'Content-Type' , '' ) and response . text if is_json : data = json . loads ( response . text ) if not ( isinstance ( data , list ) and data and 'errorCode' in data [ 0 ] ) : messages = [ response . text ] if is_json else [ ] raise OperationalError ( [ 'HTTP error "%d %s":' % ( response . status_code , response . reason ) ] + messages , response , [ 'method+url' ] ) err_msg = data [ 0 ] [ 'message' ] err_code = data [ 0 ] [ 'errorCode' ] if response . status_code == 404 : if method == 'DELETE' and err_code in ( 'ENTITY_IS_DELETED' , 'INVALID_CROSS_REFERENCE_KEY' ) : warn_sf ( [ err_msg , "Object is deleted before delete or update" ] , response , [ 'method+url' ] ) return None if err_code in ( 'NOT_FOUND' , 'METHOD_NOT_ALLOWED' , ) : raise SalesforceError ( [ err_msg ] , response , [ 'method+url' ] ) raise SalesforceError ( [ err_msg ] , response )
The innermost part - report errors by exceptions
51,943
# Call an all-or-none composite request and unwrap subrequest errors.
# On failure, exactly one subrequest is expected to carry a real error
# (the rest are PROCESSING_HALTED / rollback placeholders); that one is
# replayed through raise_errors as a synthesized single response.
# NOTE(review): the InternalError message "Too much or too many" reads
# like it should be "Too few or too many" -- runtime string left as-is.
# NOTE(review): detokenized single-line dump; left untouched.
def composite_request ( self , data ) : post_data = { 'compositeRequest' : data , 'allOrNone' : True } resp = self . handle_api_exceptions ( 'POST' , 'composite' , json = post_data ) comp_resp = resp . json ( ) [ 'compositeResponse' ] is_ok = all ( x [ 'httpStatusCode' ] < 400 for x in comp_resp ) if is_ok : return resp bad_responses = { i : x for i , x in enumerate ( comp_resp ) if not ( x [ 'httpStatusCode' ] == 400 and x [ 'body' ] [ 0 ] [ 'errorCode' ] in ( 'PROCESSING_HALTED' , 'ALL_OR_NONE_OPERATION_ROLLED_BACK' ) ) } if len ( bad_responses ) != 1 : raise InternalError ( "Too much or too many subrequests with an individual error" ) bad_i , bad_response = bad_responses . popitem ( ) bad_request = data [ bad_i ] bad_req = FakeReq ( bad_request [ 'method' ] , bad_request [ 'url' ] , bad_request . get ( 'body' ) , bad_request . get ( 'httpHeaders' , { } ) , context = { bad_i : bad_request [ 'referenceId' ] } ) body = [ merge_dict ( x , referenceId = bad_response [ 'referenceId' ] ) for x in bad_response [ 'body' ] ] bad_resp_headers = bad_response [ 'httpHeaders' ] . copy ( ) bad_resp_headers . update ( { 'Content-Type' : resp . headers [ 'Content-Type' ] } ) bad_resp = FakeResp ( bad_response [ 'httpStatusCode' ] , json . dumps ( body ) , bad_req , bad_resp_headers ) self . raise_errors ( bad_resp )
Call a composite request with subrequests error handling
51,944
# Search for an Avro sync marker after `offset` and align the reader just
# after it.  offset <= 0 rewinds to the start and re-reads the header;
# otherwise the file is scanned window-by-window for the sync marker.
# NOTE(review): a marker straddling a FORWARD_WINDOW_SIZE boundary looks
# like it could be missed, since windows do not overlap by len(marker)-1
# bytes -- confirm against the file format guarantees.
# NOTE(review): detokenized single-line dump; left untouched.
def align_after ( self , offset ) : f = self . reader if offset <= 0 : f . seek ( 0 ) self . _block_count = 0 self . _read_header ( ) return sm = self . sync_marker sml = len ( sm ) pos = offset while pos < self . file_length - sml : f . seek ( pos ) data = f . read ( self . FORWARD_WINDOW_SIZE ) sync_offset = data . find ( sm ) if sync_offset > - 1 : f . seek ( pos + sync_offset ) self . _block_count = 0 return pos += len ( data )
Search for a sync point after offset and align just after that .
51,945
def get_progress(self):
    """Give a rough estimate of the progress done through the region,
    capped at 1.0."""
    pos = self.reader.reader.tell()
    span = float(self.region_end - self.region_start)
    return min((pos - self.region_start) / span, 1.0)
Give a rough estimate of the progress done .
51,946
def is_exe(fpath):
    """Return True if *fpath* references an executable regular file."""
    return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
Path references an executable file .
51,947
def is_readable(fpath):
    """Return True if *fpath* references a readable regular file."""
    return os.path.isfile(fpath) and os.access(fpath, os.R_OK)
Path references a readable file .
51,948
def is_local(self, hadoop_conf=None, hadoop_home=None):
    """Is Hadoop configured to run in local mode?

    Local mode is the default; any of the framework/jobtracker keys set
    to something other than 'local' means distributed mode.
    """
    conf = self.hadoop_params(hadoop_conf, hadoop_home)
    keys = (
        'mapreduce.framework.name',
        'mapreduce.jobtracker.address',
        'mapred.job.tracker',
    )
    return all(conf.get(k, 'local').lower() == 'local' for k in keys)
\ Is Hadoop configured to run in local mode?
51,949
def abspath(hdfs_path, user=None, local=False):
    """Return an absolute path for *hdfs_path*.

    With ``local=True`` the path is resolved on the local filesystem;
    an already-full HDFS path is returned unchanged; otherwise the host
    and port are resolved through an hdfs connection.
    """
    if local:
        return 'file:%s' % os.path.abspath(hdfs_path)
    if isfull(hdfs_path):
        return hdfs_path
    hostname, port, path = split(hdfs_path, user=user)
    if not hostname:
        return "file:%s" % os.path.abspath(path)
    fs = hdfs_fs.hdfs(hostname, port)
    apath = join("hdfs://%s:%s" % (fs.host, fs.port), path)
    fs.close()
    return apath
Return an absolute path for hdfs_path .
51,950
def dirname(hdfs_path):
    """Return the directory component of *hdfs_path*."""
    scheme, netloc, path = parse(hdfs_path)
    parent = os.path.dirname(path)
    return unparse(scheme, netloc, parent)
Return the directory component of hdfs_path .
51,951
def expanduser(path):
    """Replace an initial ``~`` or ``~user`` with the user's home directory.

    On a local default filesystem this defers to os.path.expanduser;
    on HDFS the home is '/user/<name>'.
    """
    if hdfs_fs.default_is_local():
        return os.path.expanduser(path)
    m = re.match(r'^~([^/]*)', path)
    if m is None:
        return path
    user = m.groups()[0] or common.DEFAULT_USER
    return '/user/%s%s' % (user, path[m.end(1):])
Replace initial ~ or ~user with the user s home directory .
51,952
def normpath(path):
    """Normalize *path*, collapsing redundant separators and up-level
    references in the path component."""
    scheme, netloc, path_ = parse(path)
    return unparse(scheme, netloc, os.path.normpath(path_))
Normalize path collapsing redundant separators and up - level refs .
51,953
def realpath(path):
    """Return *path* with symlinks resolved.

    Only meaningful for local paths; HDFS paths are returned unchanged.
    """
    scheme, netloc, path_ = parse(path)
    if scheme == 'file' or hdfs_fs.default_is_local():
        return unparse(scheme, netloc, os.path.realpath(path_))
    return path
Return path with symlinks resolved .
51,954
def default_is_local(hadoop_conf=None, hadoop_home=None):
    """Is Hadoop configured to use the local file system?

    True unless either default-FS key points at a non-'file:' scheme.
    """
    params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
    keys = ('fs.defaultFS', 'fs.default.name')
    return all(params.get(k, 'file:').startswith('file:') for k in keys)
\ Is Hadoop configured to use the local file system?
51,955
# Open an HDFS file.  Parses the mode into a binary mode plus a text flag;
# host-less filesystems get a local_file wrapped (for text) in a buffered
# TextIOWrapper, while real HDFS files go through self.fs.open_file and
# are wrapped in FileIO or hdfs_file.
# NOTE(review): `cls = FileIO if is_text else hdfs_file` pairs FileIO with
# text mode, which reads inverted relative to the local branch -- confirm
# against upstream before relying on it.
# NOTE(review): detokenized single-line dump; left untouched.
def open_file ( self , path , mode = "r" , buff_size = 0 , replication = 0 , blocksize = 0 , encoding = None , errors = None ) : _complain_ifclosed ( self . closed ) if not path : raise ValueError ( "Empty path" ) m , is_text = common . parse_mode ( mode ) if not self . host : fret = local_file ( self , path , m ) if is_text : cls = io . BufferedReader if m == "r" else io . BufferedWriter fret = TextIOWrapper ( cls ( fret ) , encoding , errors ) return fret f = self . fs . open_file ( path , m , buff_size , replication , blocksize ) cls = FileIO if is_text else hdfs_file fret = cls ( f , self , mode ) return fret
Open an HDFS file .
51,956
def capacity(self):
    """Return the raw capacity of the filesystem.

    Raises RuntimeError for a local (host-less) filesystem, where the
    notion of capacity is undefined.
    """
    _complain_ifclosed(self.closed)
    if not self.__status.host:
        raise RuntimeError('Capacity is not defined for a local fs')
    return self.fs.get_capacity()
Return the raw capacity of the filesystem .
51,957
def copy(self, from_path, to_hdfs, to_path):
    """Copy a file from this filesystem to another.

    *to_hdfs* may be either a wrapper of this class or a raw fs handle.
    """
    _complain_ifclosed(self.closed)
    if isinstance(to_hdfs, self.__class__):
        # unwrap to the underlying fs handle
        to_hdfs = to_hdfs.fs
    return self.fs.copy(from_path, to_hdfs, to_path)
Copy file from one filesystem to another .
51,958
def delete(self, path, recursive=True):
    """Delete *path*, recursively by default."""
    _complain_ifclosed(self.closed)
    return self.fs.delete(path, recursive)
Delete path .
51,959
def exists(self, path):
    """Check whether *path* exists on the filesystem."""
    _complain_ifclosed(self.closed)
    return self.fs.exists(path)
Check if a given path exists on the filesystem .
51,960
def get_path_info(self, path):
    """Get information about *path* as a dict of properties."""
    _complain_ifclosed(self.closed)
    return self.fs.get_path_info(path)
Get information about path as a dict of properties .
51,961
def list_directory(self, path):
    r"""Get the list of files and directories under *path*\ ."""
    _complain_ifclosed(self.closed)
    return self.fs.list_directory(path)
r Get list of files and directories for path \ .
51,962
def rename(self, from_path, to_path):
    """Rename a file from *from_path* to *to_path*."""
    _complain_ifclosed(self.closed)
    return self.fs.rename(from_path, to_path)
Rename file .
51,963
def set_replication(self, path, replication):
    r"""Set the replication factor of *path* to *replication*\ ."""
    _complain_ifclosed(self.closed)
    return self.fs.set_replication(path, replication)
r Set the replication of path to replication \ .
51,964
def set_working_directory(self, path):
    r"""Set the working directory to *path*\ .

    All relative paths will subsequently be resolved relative to it.
    """
    _complain_ifclosed(self.closed)
    return self.fs.set_working_directory(path)
r Set the working directory to path \ . All relative paths will be resolved relative to it .
51,965
def working_directory(self):
    """Return the current working directory of this filesystem."""
    _complain_ifclosed(self.closed)
    return self.fs.get_working_directory()
Get the current working directory .
51,966
def __compute_mode_from_string(self, path, mode_string):
    """
    Scan a unix-style symbolic mode string (e.g. ``u+rwx``, ``go-w``)
    and apply it to the current permissions of ``path``, returning the
    resulting numeric mode.

    Raises ValueError for a malformed mode string.
    """
    char_to_perm_byte = {'r': 4, 'w': 2, 'x': 1}
    # (who-letter, bit shift) for the user/group/other octal digits.
    fields = (('u', 6), ('g', 3), ('o', 0))
    m = re.match(r"\s*([ugoa]*)([-+=])([rwx]*)\s*", mode_string)
    if not m:
        raise ValueError("Invalid mode string %s" % mode_string)
    who = m.group(1)
    what_op = m.group(2)
    which_perm = m.group(3)
    old_mode = self.fs.get_path_info(path)['permissions']
    # FIX: pass an initializer so an empty permission list (e.g. "u=")
    # yields 0 instead of reduce() raising TypeError.
    op_perm = [
        reduce(ops.ior, [char_to_perm_byte[c] for c in which_perm], 0)
    ] * 3
    if 'a' in who:
        who = 'ugo'
    elif who == '':
        who = 'ugo'
        # No explicit "who": filter the requested bits through the
        # umask, mirroring chmod(1) semantics.
        inverted_umask = ~self.__get_umask()
        for i, field in enumerate(fields):
            op_perm[i] &= (inverted_umask >> field[1]) & 0x7
    new_mode = 0
    for i, (field, shift) in enumerate(fields):
        old = (old_mode >> shift) & 0x7
        if field in who:
            if what_op == '-':
                new = old & ~op_perm[i]
            elif what_op == '=':
                new = op_perm[i]
            elif what_op == '+':
                new = old | op_perm[i]
            else:
                raise RuntimeError(
                    "unexpected permission operation %s" % what_op)
        else:
            # This who-class was not named: keep its old bits.
            new = old
        new_mode |= new << shift
    return new_mode
Scan a unix - style mode string and apply it to path .
51,967
def utime(self, path, mtime, atime):
    """Set last-modification and last-access times (seconds) on ``path``."""
    _complain_ifclosed(self.closed)
    # The underlying fs expects integer timestamps.
    return self.fs.utime(path, int(mtime), int(atime))
Change file last access and modification times .
51,968
def rm_rf(path, dry_run=False):
    """Remove a file or directory tree, ignoring failures.

    With ``dry_run`` set, only log what would be removed.
    """
    log.info("removing %s" % path)
    if dry_run:
        return
    # Real directories get a tree removal; files and symlinks a plain unlink.
    remover = (
        shutil.rmtree
        if os.path.isdir(path) and not os.path.islink(path)
        else os.remove
    )
    try:
        remover(path)
    except OSError:
        pass
Remove a file or directory tree .
51,969
def __finalize_hdfs(self, ext):
    """\
    Adds a few bits that depend on the specific environment.

    Patches the given Extension in place with JVM include dirs,
    libraries, macros and rpath settings needed to build against libjvm.
    """
    java_home = jvm.get_java_home()
    jvm_lib_path, _ = jvm.get_jvm_lib_path_and_name(java_home)
    ext.include_dirs = jvm.get_include_dirs() + ext.include_dirs
    ext.libraries = jvm.get_libraries()
    # NOTE(review): the "Libraries" subdir looks macOS-specific -- confirm
    # this is intentional on other platforms.
    ext.library_dirs = [os.path.join(java_home, "Libraries"), jvm_lib_path]
    ext.define_macros = jvm.get_macros()
    # Embed the JVM lib dir so the built extension finds libjvm at runtime.
    ext.extra_link_args = ['-Wl,-rpath,%s' % jvm_lib_path]
    if self.__have_better_tls():
        ext.define_macros.append(("HAVE_BETTER_TLS", None))
    # Best effort: drop a flag some compilers do not understand.
    try:
        self.compiler.compiler_so.remove("-Wsign-compare")
    except (AttributeError, ValueError):
        pass
\ Adds a few bits that depend on the specific environment .
51,970
def run_tool_cmd(tool, cmd, args=None, properties=None, hadoop_conf_dir=None,
                 logger=None, keep_streams=True):
    """
    Run a Hadoop command-line tool.

    Builds the argument vector from ``properties`` and ``args`` (which
    may be a string or a sequence), runs it via subprocess and returns
    the captured stdout (empty when ``keep_streams`` is false).

    Raises RunCmdError on a non-zero exit status.
    """
    if logger is None:
        logger = utils.NullLogger()
    _args = [tool]
    if hadoop_conf_dir:
        _args.extend(["--config", hadoop_conf_dir])
    _args.append(cmd)
    if properties:
        _args.extend(_construct_property_args(properties))
    if args:
        if isinstance(args, basestring):
            args = shlex.split(args)
        _merge_csv_args(args)
        # Generic (hadoop-wide) options must precede command options.
        gargs = _pop_generic_args(args)
        for seq in gargs, args:
            _args.extend(map(str, seq))
    logger.debug('final args: %r', (_args,))
    if keep_streams:
        p = subprocess.Popen(
            _args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        error = ""
        for raw_line in iter(p.stderr.readline, b""):
            # FIX: stderr yields bytes; decode before concatenating to a
            # str (the original raised TypeError on Python 3).
            line = raw_line.decode("utf-8", "replace")
            error += line
            logger.info("cmd stderr line: %s", line.strip())
        output, _ = p.communicate()
    else:
        p = subprocess.Popen(_args, stdout=None, stderr=None, bufsize=1)
        ret = p.wait()
        error = 'command exited with %d status' % ret if ret else ''
        output = ''
    if p.returncode:
        raise RunCmdError(p.returncode, ' '.join(_args), error)
    return output
Run a Hadoop command .
51,971
def get_task_trackers(properties=None, hadoop_conf_dir=None, offline=False):
    """
    Get the list of task trackers in the Hadoop cluster.

    Each item is a ``(hostname, port)`` pair.  In offline mode the
    cluster is not contacted: hostnames are read from the "slaves" file
    in the Hadoop conf dir and every port is reported as 0.
    """
    if offline:
        if not hadoop_conf_dir:
            hadoop_conf_dir = pydoop.hadoop_conf()
        slaves = os.path.join(hadoop_conf_dir, "slaves")
        try:
            with open(slaves) as f:
                task_trackers = [(l.strip(), 0) for l in f]
        except IOError:
            # Best effort: a missing slaves file means no known trackers.
            task_trackers = []
    else:
        stdout = run_class(
            "org.apache.hadoop.mapred.JobClient",
            ["-list-active-trackers"],
            properties=properties,
            hadoop_conf_dir=hadoop_conf_dir,
            keep_streams=True)
        task_trackers = []
        for line in stdout.splitlines():
            if not line:
                continue
            line = line.split(":")
            # Tracker names look like "tracker_<host>:...:<port>"; keep
            # the host part and the trailing port.
            task_trackers.append((line[0].split("_")[1], int(line[-1])))
    return task_trackers
Get the list of task trackers in the Hadoop cluster .
51,972
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
    """Return the number of task trackers in the Hadoop cluster."""
    trackers = get_task_trackers(properties, hadoop_conf_dir, offline)
    return len(trackers)
Get the number of task trackers in the Hadoop cluster .
51,973
def dfs(args=None, properties=None, hadoop_conf_dir=None):
    """Run the Hadoop file system shell and return its captured output."""
    return run_class(
        "org.apache.hadoop.fs.FsShell",
        args,
        properties,
        hadoop_conf_dir=hadoop_conf_dir,
        keep_streams=True,
    )
Run the Hadoop file system shell .
51,974
def run_pipes(executable, input_path, output_path, more_args=None,
              properties=None, force_pydoop_submitter=False,
              hadoop_conf_dir=None, logger=None, keep_streams=False):
    """
    Run a Hadoop pipes command.

    ``executable`` must exist on HDFS and ``input_path`` must exist or
    be a glob pattern.  Chooses between the stock pipes runner and
    Pydoop's own submitter class.
    """
    if logger is None:
        logger = utils.NullLogger()
    if not hdfs.path.exists(executable):
        raise IOError("executable %s not found" % executable)
    # A path containing glob characters is allowed to not exist literally.
    if not hdfs.path.exists(input_path) and not (set(input_path) & GLOB_CHARS):
        raise IOError("input path %s not found" % input_path)
    if properties is None:
        properties = {}
    properties.setdefault('mapreduce.pipes.isjavarecordreader', 'true')
    properties.setdefault('mapreduce.pipes.isjavarecordwriter', 'true')
    if force_pydoop_submitter:
        use_pydoop_submit = True
    else:
        use_pydoop_submit = False
        ver = pydoop.hadoop_version_info()
        if ver.has_security():
            if ver.is_cdh_mrv2() and hdfs.default_is_local():
                raise RuntimeError("mrv2 on local fs not supported yet")
            # On secured clusters, local-fs jobs go through Pydoop's
            # own submitter.
            use_pydoop_submit = hdfs.default_is_local()
    args = [
        "-program", executable,
        "-input", input_path,
        "-output", output_path,
    ]
    if more_args is not None:
        args.extend(more_args)
    if use_pydoop_submit:
        submitter = "it.crs4.pydoop.pipes.Submitter"
        pydoop_jar = pydoop.jar_path()
        args.extend(("-libjars", pydoop_jar))
        return run_class(submitter, args, properties,
                         classpath=pydoop_jar, logger=logger,
                         keep_streams=keep_streams)
    else:
        return run_mapred_cmd("pipes", args=args, properties=properties,
                              hadoop_conf_dir=hadoop_conf_dir,
                              logger=logger, keep_streams=keep_streams)
Run a pipes command .
51,975
def collect_output(mr_out_dir, out_file=None):
    """Collect all MapReduce output found under ``mr_out_dir``.

    Return it as a single string when ``out_file`` is None; otherwise
    append it, chunk by chunk, to the local file ``out_file``.
    """
    if out_file is None:
        parts = []
        for name in iter_mr_out_files(mr_out_dir):
            with hdfs.open(name, "rt") as fin:
                parts.append(fin.read())
        return "".join(parts)
    chunk_size = 16777216  # 16 MiB read buffer
    with open(out_file, 'a') as sink:
        for name in iter_mr_out_files(mr_out_dir):
            with hdfs.open(name) as fin:
                while True:
                    chunk = fin.read(chunk_size)
                    if len(chunk) == 0:
                        break
                    sink.write(chunk)
Return all mapreduce output in mr_out_dir .
51,976
def set_output(self, output):
    """Set the job's output path.

    Optional if the runner was instantiated with a prefix.
    """
    self.output = output
    self.logger.info("assigning output to %s", output)
Set the output path for the job . Optional if the runner has been instantiated with a prefix .
51,977
def set_exe(self, pipes_code):
    """Dump the launcher code to HDFS, next to the output directory.

    Raises RuntimeError when no output directory has been set.
    """
    if not self.output:
        raise RuntimeError("no output directory, can't create launcher")
    out_parent = hdfs.path.dirname(
        hdfs.path.abspath(self.output.rstrip("/")))
    self.exe = hdfs.path.join(out_parent, utils.make_random_str())
    hdfs.dump(pipes_code, self.exe)
Dump launcher code to the distributed file system .
51,978
def dump(data, hdfs_path, **kwargs):
    """\
    Write ``data`` to ``hdfs_path``, picking binary or text mode from
    the type of ``data``.
    """
    kwargs["mode"] = "w" if isinstance(data, bintype) else "wt"
    with open(hdfs_path, **kwargs) as fo:
        # Write in fixed-size slices rather than all at once.
        for start in range(0, len(data), common.BUFSIZE):
            fo.write(data[start:start + common.BUFSIZE])
    fo.fs.close()
\ Write data to hdfs_path .
51,979
def load(hdfs_path, **kwargs):
    """\
    Read and return the full content of ``hdfs_path``.

    Raises ValueError unless the requested mode is read-only.
    """
    mode_char, _ = common.parse_mode(kwargs.get("mode", "r"))
    if mode_char != "r":
        raise ValueError("opening mode must be readonly")
    with open(hdfs_path, **kwargs) as fi:
        content = fi.read()
    fi.fs.close()
    return content
\ Read the content of hdfs_path and return it .
51,980
def cp(src_hdfs_path, dest_hdfs_path, **kwargs):
    """\
    Copy the contents of ``src_hdfs_path`` to ``dest_hdfs_path``.

    Works recursively on directories and across filesystems.  Raises
    IOError when the source is missing or the destination already
    exists as a file.
    """
    src, dest = {}, {}
    try:
        # Resolve host/port/path and open an fs handle for both ends.
        for d, p in ((src, src_hdfs_path), (dest, dest_hdfs_path)):
            d["host"], d["port"], d["path"] = path.split(p)
            d["fs"] = hdfs(d["host"], d["port"])
        try:
            src["info"] = src["fs"].get_path_info(src["path"])
        except IOError:
            raise IOError("no such file or directory: %r" % (src["path"]))
        try:
            dest["info"] = dest["fs"].get_path_info(dest["path"])
        except IOError:
            # Destination does not exist: copy the file directly, or
            # create the directory and recurse into the source.
            if src["info"]["kind"] == "file":
                _cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
                         **kwargs)
                return
            else:
                dest["fs"].create_directory(dest["path"])
                dest_hdfs_path = dest["fs"].get_path_info(
                    dest["path"])["name"]
                for item in src["fs"].list_directory(src["path"]):
                    cp(item["name"], dest_hdfs_path, **kwargs)
                return
        # Destination exists: must be a directory we copy *into*.
        if dest["info"]["kind"] == "file":
            raise IOError("%r already exists" % (dest["path"]))
        dest["path"] = path.join(dest["path"], path.basename(src["path"]))
        if dest["fs"].exists(dest["path"]):
            raise IOError("%r already exists" % (dest["path"]))
        if src["info"]["kind"] == "file":
            _cp_file(src["fs"], src["path"], dest["fs"], dest["path"],
                     **kwargs)
        else:
            dest["fs"].create_directory(dest["path"])
            dest_hdfs_path = dest["fs"].get_path_info(dest["path"])["name"]
            for item in src["fs"].list_directory(src["path"]):
                cp(item["name"], dest_hdfs_path, **kwargs)
    finally:
        # Close whichever fs handles were successfully opened.
        for d in src, dest:
            try:
                d["fs"].close()
            except KeyError:
                pass
\ Copy the contents of src_hdfs_path to dest_hdfs_path .
51,981
def put(src_path, dest_hdfs_path, **kwargs):
    """\
    Copy the local file or directory ``src_path`` to ``dest_hdfs_path``.
    """
    local_src = path.abspath(src_path, local=True)
    cp(local_src, dest_hdfs_path, **kwargs)
\ Copy the contents of src_path to dest_hdfs_path .
51,982
def get(src_hdfs_path, dest_path, **kwargs):
    """\
    Copy ``src_hdfs_path`` to the local file or directory ``dest_path``.
    """
    local_dest = path.abspath(dest_path, local=True)
    cp(src_hdfs_path, local_dest, **kwargs)
\ Copy the contents of src_hdfs_path to dest_path .
51,983
def mkdir(hdfs_path, user=None):
    """Create a directory and its parents as needed."""
    host, port, dir_path = path.split(hdfs_path, user)
    fs = hdfs(host, port, user)
    result = fs.create_directory(dir_path)
    fs.close()
    return result
Create a directory and its parents as needed .
51,984
def lsl(hdfs_path, user=None, recursive=False):
    """Return a list of dicts of file properties under ``hdfs_path``."""
    host, port, path_ = path.split(hdfs_path, user)
    fs = hdfs(host, port, user)
    if recursive:
        walker = fs.walk(path_)
        # The walk yields the starting entry first; for a directory,
        # keep only what follows it.
        top = next(walker)
        entries = list(walker) if top['kind'] == 'directory' else [top]
    else:
        entries = fs.list_directory(path_)
    fs.close()
    return entries
Return a list of dictionaries of file properties .
51,985
def ls(hdfs_path, user=None, recursive=False):
    """Return the hdfs paths (names only) listed under ``hdfs_path``."""
    return [entry["name"] for entry in lsl(hdfs_path, user, recursive)]
Return a list of hdfs paths .
51,986
def move(src, dest, user=None):
    """Move or rename ``src`` to ``dest``, possibly across filesystems."""
    s_host, s_port, s_path = path.split(src, user)
    d_host, d_port, d_path = path.split(dest, user)
    src_fs = hdfs(s_host, s_port, user)
    dest_fs = hdfs(d_host, d_port, user)
    try:
        return src_fs.move(s_path, dest_fs, d_path)
    finally:
        src_fs.close()
        dest_fs.close()
Move or rename src to dest .
51,987
def renames(from_path, to_path, user=None):
    """Rename ``from_path`` to ``to_path``, creating parents as needed."""
    parent = path.dirname(to_path)
    if parent:
        mkdir(parent, user=user)
    rename(from_path, to_path, user=user)
Rename from_path to to_path creating parents as needed .
51,988
def readline(self):
    """Read and return one line, decoding it if an encoding is set."""
    _complain_ifclosed(self.closed)
    raw = self.f.readline()
    if not self.__encoding:
        return raw
    return raw.decode(self.__encoding, self.__errors)
Read and return a line of text .
51,989
def pread(self, position, length):
    """Read ``length`` bytes of data from the file, starting at
    ``position``, without moving the file pointer.

    A negative ``length`` means "up to EOF".  Raises IOError when
    ``position`` is past EOF.
    """
    # FIX: dropped a stray ``r`` token (raw-docstring extraction residue)
    # that would have raised NameError at runtime.
    _complain_ifclosed(self.closed)
    if position > self.size:
        raise IOError("position cannot be past EOF")
    if length < 0:
        length = self.size - position
    data = self.f.raw.pread(position, length)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    else:
        return data
Read length bytes of data from the file, starting from position.
51,990
def read(self, length=-1):
    """Read ``length`` bytes from the file.

    When ``length`` is negative or omitted, read all remaining data.
    Decodes the result if an encoding is set.
    """
    _complain_ifclosed(self.closed)
    if length < 0:
        length = self.size
    pieces = []
    remaining = length
    while remaining > 0:
        chunk = self.f.read(min(self.buff_size, remaining))
        if chunk == b"":
            break  # EOF
        pieces.append(chunk)
        remaining -= len(chunk)
    data = b"".join(pieces)
    if self.__encoding:
        return data.decode(self.__encoding, self.__errors)
    return data
Read length bytes from the file . If length is negative or omitted read all data until EOF .
51,991
def seek(self, position, whence=os.SEEK_SET):
    """Seek to ``position``, interpreted according to ``whence``."""
    _complain_ifclosed(self.closed)
    return self.f.seek(position, whence)
Seek to position in file .
51,992
def write(self, data):
    """Write ``data`` to the file, encoding it if an encoding is set.

    Returns the number of items written.
    """
    _complain_ifclosed(self.closed)
    if not self.__encoding:
        return self.f.write(data)
    self.f.write(data.encode(self.__encoding, self.__errors))
    return len(data)
Write data to the file .
51,993
def set_args(self, args, unknown_args=None):
    """
    Configure the job from parsed command-line arguments.

    Sets up the remote working dir and executable paths, fills in the
    job properties and records files/archives for the distributed cache.
    """
    if unknown_args is None:
        unknown_args = []
    self.logger.setLevel(getattr(logging, args.log_level))
    parent = hdfs.path.dirname(hdfs.path.abspath(args.output.rstrip("/")))
    self.remote_wd = hdfs.path.join(
        parent, utils.make_random_str(prefix="pydoop_submit_"))
    self.remote_exe = hdfs.path.join(self.remote_wd, str(uuid.uuid4()))
    self.properties[JOB_NAME] = args.job_name or 'pydoop'
    self.properties[IS_JAVA_RR] = (
        'false' if args.do_not_use_java_record_reader else 'true')
    self.properties[IS_JAVA_RW] = (
        'false' if args.do_not_use_java_record_writer else 'true')
    self.properties[JOB_REDUCES] = args.num_reducers
    # NOTE(review): redundant -- JOB_NAME was already set above via
    # ``args.job_name or 'pydoop'``.
    if args.job_name:
        self.properties[JOB_NAME] = args.job_name
    self.properties.update(args.job_conf or {})
    self.__set_files_to_cache(args)
    self.__set_archives_to_cache(args)
    self.requested_env = self._env_arg_to_dict(args.set_env or [])
    self.args = args
    self.unknown_args = unknown_args
Configure job based on the arguments provided .
51,994
def __warn_user_if_wd_maybe_unreadable(self, abs_remote_path):
    """
    Check each directory above the remote module and issue a warning if
    any of them is not traversable (a+x) by all users, since the task
    tracker could then fail to read it when filling the distributed
    cache.
    """
    host, port, path = hdfs.path.split(abs_remote_path)
    if host == '' and port == 0:
        host_port = "file:///"
    else:
        host_port = "hdfs://%s:%s/" % (host, port)
    path_pieces = path.strip('/').split(os.path.sep)
    fs = hdfs.hdfs(host, port)
    for i in range(0, len(path_pieces)):
        part = os.path.join(
            host_port, os.path.sep.join(path_pieces[0:i + 1]))
        permissions = fs.get_path_info(part)['permissions']
        # 0o111 == execute (traverse) bit for user, group and other.
        if permissions & 0o111 != 0o111:
            self.logger.warning(
                ("remote module %s may not be readable by the task "
                 "tracker when initializing the distributed cache. "
                 "Permissions on %s: %s"),
                abs_remote_path, part, oct(permissions))
            # One warning is enough.
            break
Check directories above the remote module and issue a warning if they are not traversable by all users .
51,995
def __setup_remote_paths(self):
    """
    Actually create the remote working directory, dump the generated
    pipes launcher into it and upload the files to be cached.

    With ``--pretend`` everything is only logged; nothing is created.
    """
    self.logger.debug("remote_wd: %s", self.remote_wd)
    self.logger.debug("remote_exe: %s", self.remote_exe)
    self.logger.debug("remotes: %s", self.files_to_upload)
    if self.args.module:
        # NOTE(review): the pipes code is generated twice -- here for
        # logging and again below for the actual dump.
        self.logger.debug(
            'Generated pipes_code:\n\n %s', self._generate_pipes_code())
    if not self.args.pretend:
        hdfs.mkdir(self.remote_wd)
        # World-traversable so task trackers can read from it.
        hdfs.chmod(self.remote_wd, "a+rx")
        self.logger.debug("created and chmod-ed: %s", self.remote_wd)
        pipes_code = self._generate_pipes_code()
        hdfs.dump(pipes_code, self.remote_exe)
        self.logger.debug("dumped pipes_code to: %s", self.remote_exe)
        hdfs.chmod(self.remote_exe, "a+rx")
        self.__warn_user_if_wd_maybe_unreadable(self.remote_wd)
        for (l, h, _) in self.files_to_upload:
            self.logger.debug("uploading: %s to %s", l, h)
            hdfs.cp(l, h)
    self.logger.debug("Created%sremote paths:" % (
        ' [simulation] ' if self.args.pretend else ' '))
Actually create the working directory and copy the module into it .
51,996
def docker_client():
    """
    Return a docker-py client configured from the environment, using the
    same variables (DOCKER_HOST, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY,
    DOCKER_CLIENT_TIMEOUT) as the official Docker client.
    """
    cert_path = os.environ.get('DOCKER_CERT_PATH', '')
    if cert_path == '':
        # Fall back to the default cert location under the home dir.
        cert_path = os.path.join(os.environ.get('HOME', ''), '.docker')
    base_url = os.environ.get('DOCKER_HOST')
    tls_config = None
    if os.environ.get('DOCKER_TLS_VERIFY', '') != '':
        # TLS requested: force the https scheme and build a TLS config
        # from the certificate directory.
        parts = base_url.split('://', 1)
        base_url = '%s://%s' % ('https', parts[1])
        client_cert = (os.path.join(cert_path, 'cert.pem'),
                       os.path.join(cert_path, 'key.pem'))
        ca_cert = os.path.join(cert_path, 'ca.pem')
        tls_config = tls.TLSConfig(
            ssl_version=ssl.PROTOCOL_TLSv1,
            verify=True,
            assert_hostname=False,
            client_cert=client_cert,
            ca_cert=ca_cert,
        )
    timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60))
    return Client(
        base_url=base_url, tls=tls_config, version='1.15', timeout=timeout)
Returns a docker - py client configured using environment variables according to the same logic as the official Docker client .
51,997
def get_java_home():
    """\
    Return the JVM installation directory.

    Tries JAVA_HOME first; otherwise compiles and runs a tiny Java
    program (JPROG) that prints the JVM's own idea of its home, then
    checks that dir and its parent for include/jni.h.

    Raises RuntimeError when no usable JVM home can be found.
    """
    error = RuntimeError("java home not found, try setting JAVA_HOME")
    try:
        return os.environ["JAVA_HOME"]
    except KeyError:
        wd = tempfile.mkdtemp(prefix='pydoop_')
        jclass = "Temp"
        jsrc = os.path.join(wd, "%s.java" % jclass)
        with open(jsrc, "w") as f:
            f.write(JPROG.substitute(classname=jclass))
        try:
            subprocess.check_call(["javac", jsrc])
            path = subprocess.check_output(
                ["java", "-cp", wd, jclass], universal_newlines=True)
        except (OSError, UnicodeDecodeError, subprocess.CalledProcessError):
            raise error
        finally:
            # Always remove the scratch dir, even on failure.
            shutil.rmtree(wd)
        path = os.path.normpath(path.strip())
        # The reported home may be a jre subdir; check it and its parent
        # for the JNI header.
        if os.path.exists(os.path.join(path, "include", "jni.h")):
            return path
        path = os.path.dirname(path)
        if os.path.exists(os.path.join(path, "include", "jni.h")):
            return path
        raise error
\ Try getting JAVA_HOME from system properties .
51,998
def run_task(factory, **kwargs):
    """\
    Run a MapReduce task.

    When profiling is requested (``pstats_dir`` kwarg or the PSTATS_DIR
    env var), the task runs under cProfile and the stats file is
    uploaded to HDFS with a name built from the task type, partition
    and a unique suffix.
    """
    context = TaskContext(factory, **kwargs)
    pstats_dir = kwargs.get("pstats_dir", os.getenv(PSTATS_DIR))
    if pstats_dir:
        # Imported lazily: only needed on the profiling path.
        import cProfile
        import tempfile
        import pydoop.hdfs as hdfs
        hdfs.mkdir(pstats_dir)
        fd, pstats_fn = tempfile.mkstemp(suffix=".pstats")
        os.close(fd)
        cProfile.runctx(
            "_run(context, **kwargs)", globals(), locals(),
            filename=pstats_fn)
        pstats_fmt = kwargs.get(
            "pstats_fmt", os.getenv(PSTATS_FMT, DEFAULT_PSTATS_FMT))
        name = pstats_fmt % (
            context.task_type, context.get_task_partition(),
            os.path.basename(pstats_fn))
        hdfs.put(pstats_fn, hdfs.path.join(pstats_dir, name))
    else:
        _run(context, **kwargs)
\ Run a MapReduce task .
51,999
def progress(self):
    """\
    Report progress (and any pending status) to the Java side, at most
    once per second.
    """
    now = time()
    if now - self.last_progress_t <= 1:
        return  # throttled: reported less than a second ago
    self.last_progress_t = now
    if self.status:
        self.uplink.status(self.status)
        self.status = None
    self.__spill_counters()
    self.uplink.progress(self.progress_value)
    self.uplink.flush()
\ Report progress to the Java side .