idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
53,200
def auto_delete_files_on_instance_delete ( instance : Any , fieldnames : Iterable [ str ] ) -> None : for fieldname in fieldnames : filefield = getattr ( instance , fieldname , None ) if filefield : if os . path . isfile ( filefield . path ) : os . remove ( filefield . path )
Deletes files from filesystem when object is deleted .
53,201
def auto_delete_files_on_instance_change ( instance : Any , fieldnames : Iterable [ str ] , model_class ) -> None : if not instance . pk : return try : old_instance = model_class . objects . get ( pk = instance . pk ) except model_class . DoesNotExist : return for fieldname in fieldnames : old_filefield = getattr ( old_instance , fieldname , None ) if not old_filefield : continue new_filefield = getattr ( instance , fieldname , None ) if old_filefield != new_filefield : if os . path . isfile ( old_filefield . path ) : os . remove ( old_filefield . path )
Deletes files from filesystem when object is changed .
53,202
def axis_transform ( pca_axes ) : from_ = N . identity ( 3 ) to_ = pca_axes trans_matrix = N . linalg . lstsq ( from_ , to_ ) [ 0 ] return trans_matrix
Creates an affine transformation matrix to rotate data in PCA axes into Cartesian plane
53,203
def covariance_matrix ( self ) : a = N . dot ( self . U , self . sigma ) cv = N . dot ( a , a . T ) return cv
Constructs the covariance matrix of input data from the singular value decomposition . Note that this is different than a covariance matrix of residuals which is what we want for calculating fit errors .
53,204
def U ( self ) : if self . _U is None : sinv = N . diag ( 1 / self . singular_values ) self . _U = dot ( self . arr , self . V . T , sinv ) return self . _U
Property to support lazy evaluation of residuals
53,205
def _covariance_matrix ( self , type = 'noise' ) : if type == 'sampling' : return self . sigma ** 2 / ( self . n - 1 ) elif type == 'noise' : return 4 * self . sigma * N . var ( self . rotated ( ) , axis = 0 )
Constructs the covariance matrix from PCA residuals
53,206
def as_hyperbola ( self , rotated = False ) : idx = N . diag_indices ( 3 ) _ = 1 / self . covariance_matrix [ idx ] d = list ( _ ) d [ - 1 ] *= - 1 arr = N . identity ( 4 ) * - 1 arr [ idx ] = d hyp = conic ( arr ) if rotated : R = augment ( self . axes ) hyp = hyp . transform ( R ) return hyp
Hyperbolic error area
53,207
def run ( self , line ) : words = [ ] for word in self . clean_unicode ( line . lower ( ) ) . split ( ) : if word . startswith ( 'http' ) : continue cleaned = self . clean_punctuation ( word ) if len ( cleaned ) > 1 and cleaned not in self . stopwords : words . append ( cleaned ) return words
Extract words from tweet
53,208
def bool_from_exists_clause ( session : Session , exists_clause : Exists ) -> bool : if session . get_bind ( ) . dialect . name == SqlaDialectName . MSSQL : result = session . query ( literal ( True ) ) . filter ( exists_clause ) . scalar ( ) else : result = session . query ( exists_clause ) . scalar ( ) return bool ( result )
Database dialects are not consistent in how EXISTS clauses can be converted to a boolean answer . This function manages the inconsistencies .
53,209
def exists_orm ( session : Session , ormclass : DeclarativeMeta , * criteria : Any ) -> bool : q = session . query ( ormclass ) for criterion in criteria : q = q . filter ( criterion ) exists_clause = q . exists ( ) return bool_from_exists_clause ( session = session , exists_clause = exists_clause )
Detects whether a database record exists for the specified ormclass and criteria .
53,210
def get_or_create ( session : Session , model : DeclarativeMeta , defaults : Dict [ str , Any ] = None , ** kwargs : Any ) -> Tuple [ Any , bool ] : instance = session . query ( model ) . filter_by ( ** kwargs ) . first ( ) if instance : return instance , False else : params = dict ( ( k , v ) for k , v in kwargs . items ( ) if not isinstance ( v , ClauseElement ) ) params . update ( defaults or { } ) instance = model ( ** params ) session . add ( instance ) return instance , True
Fetches an ORM object from the database or creates one if none existed .
53,211
def create_base64encoded_randomness ( num_bytes : int ) -> str : randbytes = os . urandom ( num_bytes ) return base64 . urlsafe_b64encode ( randbytes ) . decode ( 'ascii' )
Create and return num_bytes of random data .
53,212
def tablename ( self ) -> str : if self . _tablename : return self . _tablename return self . table . name
Returns the string name of the table .
53,213
def is_running ( process_id : int ) -> bool : pstr = str ( process_id ) encoding = sys . getdefaultencoding ( ) s = subprocess . Popen ( [ "ps" , "-p" , pstr ] , stdout = subprocess . PIPE ) for line in s . stdout : strline = line . decode ( encoding ) if pstr in strline : return True return False
Uses the Unix ps program to see if a process is running .
53,214
def bootstrap_noise ( data , func , n = 10000 , std = 1 , symmetric = True ) : boot_dist = [ ] arr = N . zeros ( data . shape ) for i in range ( n ) : if symmetric : arr = N . random . randn ( * data . shape ) * std else : arr [ : , - 1 ] = N . random . randn ( data . shape [ 0 ] ) * std boot_dist . append ( func ( data + arr ) ) return N . array ( boot_dist )
Bootstrap by adding noise
53,215
def modelrepr ( instance ) -> str : elements = [ ] for f in instance . _meta . get_fields ( ) : if f . auto_created : continue if f . is_relation and f . related_model is None : continue fieldname = f . name try : value = repr ( getattr ( instance , fieldname ) ) except ObjectDoesNotExist : value = "<RelatedObjectDoesNotExist>" elements . append ( "{}={}" . format ( fieldname , value ) ) return "<{} <{}>>" . format ( type ( instance ) . __name__ , ", " . join ( elements ) )
Default repr version of a Django model object for debugging .
53,216
def assert_processor_available ( processor : str ) -> None : if processor not in [ Processors . XHTML2PDF , Processors . WEASYPRINT , Processors . PDFKIT ] : raise AssertionError ( "rnc_pdf.set_pdf_processor: invalid PDF processor" " specified" ) if processor == Processors . WEASYPRINT and not weasyprint : raise RuntimeError ( "rnc_pdf: Weasyprint requested, but not available" ) if processor == Processors . XHTML2PDF and not xhtml2pdf : raise RuntimeError ( "rnc_pdf: xhtml2pdf requested, but not available" ) if processor == Processors . PDFKIT and not pdfkit : raise RuntimeError ( "rnc_pdf: pdfkit requested, but not available" )
Assert that a specific PDF processor is available .
53,217
def get_pdf_from_html ( html : str , header_html : str = None , footer_html : str = None , wkhtmltopdf_filename : str = _WKHTMLTOPDF_FILENAME , wkhtmltopdf_options : Dict [ str , Any ] = None , file_encoding : str = "utf-8" , debug_options : bool = False , debug_content : bool = False , debug_wkhtmltopdf_args : bool = True , fix_pdfkit_encoding_bug : bool = None , processor : str = _DEFAULT_PROCESSOR ) -> bytes : result = make_pdf_from_html ( on_disk = False , html = html , header_html = header_html , footer_html = footer_html , wkhtmltopdf_filename = wkhtmltopdf_filename , wkhtmltopdf_options = wkhtmltopdf_options , file_encoding = file_encoding , debug_options = debug_options , debug_content = debug_content , debug_wkhtmltopdf_args = debug_wkhtmltopdf_args , fix_pdfkit_encoding_bug = fix_pdfkit_encoding_bug , processor = processor , ) return result
Takes HTML and returns a PDF .
53,218
def append_pdf ( input_pdf : bytes , output_writer : PdfFileWriter ) : append_memory_pdf_to_writer ( input_pdf = input_pdf , writer = output_writer )
Appends a PDF to a pyPDF writer . Legacy interface .
53,219
def get_concatenated_pdf_from_disk ( filenames : Iterable [ str ] , start_recto : bool = True ) -> bytes : if start_recto : writer = PdfFileWriter ( ) for filename in filenames : if filename : if writer . getNumPages ( ) % 2 != 0 : writer . addBlankPage ( ) writer . appendPagesFromReader ( PdfFileReader ( open ( filename , 'rb' ) ) ) return pdf_from_writer ( writer ) else : merger = PdfFileMerger ( ) for filename in filenames : if filename : merger . append ( open ( filename , 'rb' ) ) return pdf_from_writer ( merger )
Concatenates PDFs from disk and returns them as an in - memory binary PDF .
53,220
def get_concatenated_pdf_in_memory ( pdf_plans : Iterable [ PdfPlan ] , start_recto : bool = True ) -> bytes : writer = PdfFileWriter ( ) for pdfplan in pdf_plans : pdfplan . add_to_writer ( writer , start_recto = start_recto ) return pdf_from_writer ( writer )
Concatenates PDFs and returns them as an in - memory binary PDF .
53,221
def add_to_writer ( self , writer : PdfFileWriter , start_recto : bool = True ) -> None : if self . is_html : pdf = get_pdf_from_html ( html = self . html , header_html = self . header_html , footer_html = self . footer_html , wkhtmltopdf_filename = self . wkhtmltopdf_filename , wkhtmltopdf_options = self . wkhtmltopdf_options ) append_memory_pdf_to_writer ( pdf , writer , start_recto = start_recto ) elif self . is_filename : if start_recto and writer . getNumPages ( ) % 2 != 0 : writer . addBlankPage ( ) writer . appendPagesFromReader ( PdfFileReader ( open ( self . filename , 'rb' ) ) ) else : raise AssertionError ( "PdfPlan: shouldn't get here!" )
Add the PDF described by this class to a PDF writer .
53,222
def trunc_if_integer ( n : Any ) -> Any : if n == int ( n ) : return int ( n ) return n
Truncates floats that are integers to their integer representation . That is converts 1 . 0 to 1 etc . Otherwise returns the starting value . Will raise an exception if the input cannot be converted to int .
53,223
def timedelta_days ( days : int ) -> timedelta64 : int_days = int ( days ) if int_days != days : raise ValueError ( "Fractional days passed to timedelta_days: " "{!r}" . format ( days ) ) try : return timedelta64 ( int_days , 'D' ) except ValueError as e : raise ValueError ( "Failure in timedelta_days; value was {!r}; original " "error was: {}" . format ( days , e ) )
Convert a duration in days to a NumPy timedelta64 object .
53,224
def _get_generic_two_antidep_episodes_result ( rowdata : Tuple [ Any , ... ] = None ) -> DataFrame : data = [ rowdata ] if rowdata else [ ] return DataFrame ( array ( data , dtype = [ ( RCN_PATIENT_ID , DTYPE_STRING ) , ( RCN_DRUG_A_NAME , DTYPE_STRING ) , ( RCN_DRUG_A_FIRST_MENTION , DTYPE_DATE ) , ( RCN_DRUG_A_SECOND_MENTION , DTYPE_DATE ) , ( RCN_DRUG_B_NAME , DTYPE_STRING ) , ( RCN_DRUG_B_FIRST_MENTION , DTYPE_DATE ) , ( RCN_DRUG_B_SECOND_MENTION , DTYPE_DATE ) , ( RCN_EXPECT_RESPONSE_BY_DATE , DTYPE_DATE ) , ( RCN_END_OF_SYMPTOM_PERIOD , DTYPE_DATE ) , ] ) )
Create a results row for this application .
53,225
def get_filelikeobject ( filename : str = None , blob : bytes = None ) -> BinaryIO : if not filename and not blob : raise ValueError ( "no filename and no blob" ) if filename and blob : raise ValueError ( "specify either filename or blob" ) if filename : return open ( filename , 'rb' ) else : return io . BytesIO ( blob )
Open a file - like object .
53,226
def get_file_contents ( filename : str = None , blob : bytes = None ) -> bytes : if not filename and not blob : raise ValueError ( "no filename and no blob" ) if filename and blob : raise ValueError ( "specify either filename or blob" ) if blob : return blob with open ( filename , 'rb' ) as f : return f . read ( )
Returns the binary contents of a file or of a BLOB .
53,227
def get_chardet_encoding ( binary_contents : bytes ) -> Optional [ str ] : if not binary_contents : return None if chardet is None or UniversalDetector is None : log . warning ( "chardet not installed; limits detection of encodings" ) return None detector = UniversalDetector ( ) for byte_line in binary_contents . split ( b"\n" ) : detector . feed ( byte_line ) if detector . done : break guess = detector . result if 'encoding' not in guess : log . warning ( "Something went wrong within chardet; no encoding" ) return None return guess [ 'encoding' ]
Guess the character set encoding of the specified binary_contents .
53,228
def get_file_contents_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : binary_contents = get_file_contents ( filename = filename , blob = blob ) if config . encoding : try : return binary_contents . decode ( config . encoding ) except ValueError : pass sysdef = sys . getdefaultencoding ( ) if sysdef != config . encoding : try : return binary_contents . decode ( sysdef ) except ValueError : pass if chardet : guess = chardet . detect ( binary_contents ) if guess [ 'encoding' ] : return binary_contents . decode ( guess [ 'encoding' ] ) raise ValueError ( "Unknown encoding ({})" . format ( "filename={}" . format ( repr ( filename ) ) if filename else "blob" ) )
Returns the string contents of a file or of a BLOB .
53,229
def get_cmd_output ( * args , encoding : str = SYS_ENCODING ) -> str : log . debug ( "get_cmd_output(): args = {!r}" , args ) p = subprocess . Popen ( args , stdout = subprocess . PIPE ) stdout , stderr = p . communicate ( ) return stdout . decode ( encoding , errors = 'ignore' )
Returns text output of a command .
53,230
def get_cmd_output_from_stdin ( stdint_content_binary : bytes , * args , encoding : str = SYS_ENCODING ) -> str : p = subprocess . Popen ( args , stdin = subprocess . PIPE , stdout = subprocess . PIPE ) stdout , stderr = p . communicate ( input = stdint_content_binary ) return stdout . decode ( encoding , errors = 'ignore' )
Returns text output of a command passing binary data in via stdin .
53,231
def convert_pdf_to_txt ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : pdftotext = tools [ 'pdftotext' ] if pdftotext : if filename : return get_cmd_output ( pdftotext , filename , '-' ) else : return get_cmd_output_from_stdin ( blob , pdftotext , '-' , '-' ) elif pdfminer : with get_filelikeobject ( filename , blob ) as fp : rsrcmgr = pdfminer . pdfinterp . PDFResourceManager ( ) retstr = StringIO ( ) codec = ENCODING laparams = pdfminer . layout . LAParams ( ) device = pdfminer . converter . TextConverter ( rsrcmgr , retstr , codec = codec , laparams = laparams ) interpreter = pdfminer . pdfinterp . PDFPageInterpreter ( rsrcmgr , device ) password = "" maxpages = 0 caching = True pagenos = set ( ) for page in pdfminer . pdfpage . PDFPage . get_pages ( fp , pagenos , maxpages = maxpages , password = password , caching = caching , check_extractable = True ) : interpreter . process_page ( page ) text = retstr . getvalue ( ) . decode ( ENCODING ) return text else : raise AssertionError ( "No PDF-reading tool available" )
Converts a PDF file to text . Pass either a filename or a binary object .
53,232
def availability_pdf ( ) -> bool : pdftotext = tools [ 'pdftotext' ] if pdftotext : return True elif pdfminer : log . warning ( "PDF conversion: pdftotext missing; " "using pdfminer (less efficient)" ) return True else : return False
Is a PDF - to - text tool available?
53,233
def docx_text_from_xml ( xml : str , config : TextProcessingConfig ) -> str : root = ElementTree . fromstring ( xml ) return docx_text_from_xml_node ( root , 0 , config )
Converts an XML tree of a DOCX file to string contents .
53,234
def docx_text_from_xml_node ( node : ElementTree . Element , level : int , config : TextProcessingConfig ) -> str : text = '' if node . tag == DOCX_TEXT : text += node . text or '' elif node . tag == DOCX_TAB : text += '\t' elif node . tag in DOCX_NEWLINES : text += '\n' elif node . tag == DOCX_NEWPARA : text += '\n\n' if node . tag == DOCX_TABLE : text += '\n\n' + docx_table_from_xml_node ( node , level , config ) else : for child in node : text += docx_text_from_xml_node ( child , level + 1 , config ) return text
Returns text from an XML node within a DOCX file .
53,235
def docx_table_from_xml_node ( table_node : ElementTree . Element , level : int , config : TextProcessingConfig ) -> str : table = CustomDocxTable ( ) for row_node in table_node : if row_node . tag != DOCX_TABLE_ROW : continue table . new_row ( ) for cell_node in row_node : if cell_node . tag != DOCX_TABLE_CELL : continue table . new_cell ( ) for para_node in cell_node : text = docx_text_from_xml_node ( para_node , level , config ) if text : table . add_paragraph ( text ) return docx_process_table ( table , config )
Converts an XML node representing a DOCX table into a textual representation .
53,236
def docx_process_simple_text ( text : str , width : int ) -> str : if width : return '\n' . join ( textwrap . wrap ( text , width = width ) ) else : return text
Word - wraps text .
53,237
def docx_process_table ( table : DOCX_TABLE_TYPE , config : TextProcessingConfig ) -> str : def get_cell_text ( cell_ ) -> str : cellparagraphs = [ paragraph . text . strip ( ) for paragraph in cell_ . paragraphs ] cellparagraphs = [ x for x in cellparagraphs if x ] return '\n\n' . join ( cellparagraphs ) ncols = 1 for row in table . rows : ncols = max ( ncols , len ( row . cells ) ) pt = prettytable . PrettyTable ( field_names = list ( range ( ncols ) ) , encoding = ENCODING , header = False , border = True , hrules = prettytable . ALL , vrules = prettytable . NONE if config . plain else prettytable . ALL , ) pt . align = 'l' pt . valign = 't' pt . max_width = max ( config . width // ncols , config . min_col_width ) if config . plain : for row in table . rows : for i , cell in enumerate ( row . cells ) : n_before = i n_after = ncols - i - 1 ptrow = ( [ '' ] * n_before + [ get_cell_text ( cell ) ] + [ '' ] * n_after ) assert ( len ( ptrow ) == ncols ) pt . add_row ( ptrow ) else : for row in table . rows : ptrow = [ ] for cell in row . cells : ptrow . append ( get_cell_text ( cell ) ) ptrow += [ '' ] * ( ncols - len ( ptrow ) ) assert ( len ( ptrow ) == ncols ) pt . add_row ( ptrow ) return pt . get_string ( )
Converts a DOCX table to text .
53,238
def docx_docx_iter_block_items ( parent : DOCX_CONTAINER_TYPE ) -> Iterator [ DOCX_BLOCK_ITEM_TYPE ] : if isinstance ( parent , docx . document . Document ) : parent_elm = parent . element . body elif isinstance ( parent , docx . table . _Cell ) : parent_elm = parent . _tc else : raise ValueError ( "something's not right" ) for child in parent_elm . iterchildren ( ) : if isinstance ( child , docx . oxml . text . paragraph . CT_P ) : yield docx . text . paragraph . Paragraph ( child , parent ) elif isinstance ( child , docx . oxml . table . CT_Tbl ) : yield docx . table . Table ( child , parent )
Iterate through items of a DOCX file .
53,239
def docx_docx_gen_text ( doc : DOCX_DOCUMENT_TYPE , config : TextProcessingConfig ) -> Iterator [ str ] : if in_order : for thing in docx_docx_iter_block_items ( doc ) : if isinstance ( thing , docx . text . paragraph . Paragraph ) : yield docx_process_simple_text ( thing . text , config . width ) elif isinstance ( thing , docx . table . Table ) : yield docx_process_table ( thing , config ) else : for paragraph in doc . paragraphs : yield docx_process_simple_text ( paragraph . text , config . width ) for table in doc . tables : yield docx_process_table ( table , config )
Iterate through a DOCX file and yield text .
53,240
def convert_docx_to_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : if True : text = '' with get_filelikeobject ( filename , blob ) as fp : for xml in gen_xml_files_from_docx ( fp ) : text += docx_text_from_xml ( xml , config ) return text
Converts a DOCX file to text . Pass either a filename or a binary object .
53,241
def convert_odt_to_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : with get_filelikeobject ( filename , blob ) as fp : z = zipfile . ZipFile ( fp ) tree = ElementTree . fromstring ( z . read ( 'content.xml' ) ) textlist = [ ] for element in tree . iter ( ) : if element . text : textlist . append ( element . text . strip ( ) ) return '\n\n' . join ( textlist )
Converts an OpenOffice ODT file to text .
53,242
def convert_html_to_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : with get_filelikeobject ( filename , blob ) as fp : soup = bs4 . BeautifulSoup ( fp ) return soup . get_text ( )
Converts HTML to text .
53,243
def convert_xml_to_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : with get_filelikeobject ( filename , blob ) as fp : soup = bs4 . BeautifulStoneSoup ( fp ) return soup . get_text ( )
Converts XML to text .
53,244
def convert_rtf_to_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : unrtf = tools [ 'unrtf' ] if unrtf : args = [ unrtf , '--text' , '--nopict' ] if UNRTF_SUPPORTS_QUIET : args . append ( '--quiet' ) if filename : args . append ( filename ) return get_cmd_output ( * args ) else : return get_cmd_output_from_stdin ( blob , * args ) elif pyth : with get_filelikeobject ( filename , blob ) as fp : doc = pyth . plugins . rtf15 . reader . Rtf15Reader . read ( fp ) return ( pyth . plugins . plaintext . writer . PlaintextWriter . write ( doc ) . getvalue ( ) ) else : raise AssertionError ( "No RTF-reading tool available" )
Converts RTF to text .
53,245
def availability_rtf ( ) -> bool : unrtf = tools [ 'unrtf' ] if unrtf : return True elif pyth : log . warning ( "RTF conversion: unrtf missing; " "using pyth (less efficient)" ) return True else : return False
Is an RTF processor available?
53,246
def convert_doc_to_text ( filename : str = None , blob : bytes = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : antiword = tools [ 'antiword' ] if antiword : if filename : return get_cmd_output ( antiword , '-w' , str ( config . width ) , filename ) else : return get_cmd_output_from_stdin ( blob , antiword , '-w' , str ( config . width ) , '-' ) else : raise AssertionError ( "No DOC-reading tool available" )
Converts Microsoft Word DOC files to text .
53,247
def document_to_text ( filename : str = None , blob : bytes = None , extension : str = None , config : TextProcessingConfig = _DEFAULT_CONFIG ) -> str : if not filename and not blob : raise ValueError ( "document_to_text: no filename and no blob" ) if filename and blob : raise ValueError ( "document_to_text: specify either filename or blob" ) if blob and not extension : raise ValueError ( "document_to_text: need extension hint for blob" ) if filename : stub , extension = os . path . splitext ( filename ) else : if extension [ 0 ] != "." : extension = "." + extension extension = extension . lower ( ) log . debug ( "filename: {}, blob type: {}, blob length: {}, extension: {}" . format ( filename , type ( blob ) , len ( blob ) if blob is not None else None , extension ) ) if filename and not os . path . isfile ( filename ) : raise ValueError ( "document_to_text: no such file: {!r}" . format ( filename ) ) info = ext_map . get ( extension ) if info is None : log . warning ( "Unknown filetype: {}; using generic tool" , extension ) info = ext_map [ None ] func = info [ CONVERTER ] return func ( filename , blob , config )
Converts a document to text .
53,248
def is_text_extractor_available ( extension : str ) -> bool : if extension is not None : extension = extension . lower ( ) info = ext_map . get ( extension ) if info is None : return False availability = info [ AVAILABILITY ] if type ( availability ) == bool : return availability elif callable ( availability ) : return availability ( ) else : raise ValueError ( "Bad information object for extension: {}" . format ( extension ) )
Is a text extractor available for the specified extension?
53,249
def set_verbose_logging ( verbose : bool ) -> None : if verbose : set_loglevel ( logging . DEBUG ) else : set_loglevel ( logging . INFO )
Chooses basic or verbose logging .
53,250
def debug_sql ( sql : str , * args : Any ) -> None : log . debug ( "SQL: %s" % sql ) if args : log . debug ( "Args: %r" % args )
Writes SQL and arguments to the log .
53,251
def get_sql_insert ( table : str , fieldlist : Sequence [ str ] , delims : Tuple [ str , str ] = ( "" , "" ) ) -> str : return ( "INSERT INTO " + delimit ( table , delims ) + " (" + "," . join ( [ delimit ( x , delims ) for x in fieldlist ] ) + ") VALUES (" + "," . join ( [ "?" ] * len ( fieldlist ) ) + ")" )
Returns ? - marked SQL for an INSERT statement .
53,252
def sql_dequote_string ( s : str ) -> str : if len ( s ) < 2 : return s s = s [ 1 : - 1 ] return s . replace ( "''" , "'" )
Reverses sql_quote_string .
53,253
def datetime2literal_rnc ( d : datetime . datetime , c : Optional [ Dict ] ) -> str : dt = d . isoformat ( " " ) return _mysql . string_literal ( dt , c )
Format a DateTime object as something MySQL will actually accept .
53,254
def assign_from_list ( obj : T , fieldlist : Sequence [ str ] , valuelist : Sequence [ any ] ) -> None : if len ( fieldlist ) != len ( valuelist ) : raise AssertionError ( "assign_from_list: fieldlist and valuelist of " "different length" ) for i in range ( len ( valuelist ) ) : setattr ( obj , fieldlist [ i ] , valuelist [ i ] )
Within obj assigns the values from the value list to the fields in the fieldlist .
53,255
def blank_object ( obj : T , fieldlist : Sequence [ str ] ) -> None : for f in fieldlist : setattr ( obj , f , None )
Within obj sets all fields in the fieldlist to None .
53,256
def debug_query_result ( rows : Sequence [ Any ] ) -> None : log . info ( "Retrieved {} rows" , len ( rows ) ) for i in range ( len ( rows ) ) : log . info ( "Row {}: {}" , i , rows [ i ] )
Writes a query result to the log .
53,257
def is_read_only ( cls , db : DATABASE_SUPPORTER_FWD_REF , logger : logging . Logger = None ) -> bool : def convert_enums ( row_ ) : return [ True if x == 'Y' else ( False if x == 'N' else None ) for x in row_ ] try : sql = rows = db . fetchall ( sql ) for row in rows : dbname = row [ 0 ] prohibited = convert_enums ( row [ 1 : ] ) if any ( prohibited ) : if logger : logger . debug ( "MySQL.is_read_only(): FAIL: database privileges " "wrong: dbname={}, prohibited={}" . format ( dbname , prohibited ) ) return False except mysql . OperationalError : pass try : sql = rows = db . fetchall ( sql ) if not rows or len ( rows ) > 1 : return False prohibited = convert_enums ( rows [ 0 ] ) if any ( prohibited ) : if logger : logger . debug ( "MySQL.is_read_only(): FAIL: GLOBAL privileges " "wrong: prohibited={}" . format ( prohibited ) ) return False except mysql . OperationalError : pass return True
Do we have read - only access?
53,258
def ping ( self ) -> None : if self . db is None or self . db_pythonlib not in [ PYTHONLIB_MYSQLDB , PYTHONLIB_PYMYSQL ] : return try : self . db . ping ( True ) except mysql . OperationalError : self . db = None self . connect_to_database_mysql ( self . _database , self . _user , self . _password , self . _server , self . _port , self . _charset , self . _use_unicode )
Pings a database connection reconnecting if necessary .
53,259
def connect_to_database_odbc_mysql ( self , database : str , user : str , password : str , server : str = "localhost" , port : int = 3306 , driver : str = "{MySQL ODBC 5.1 Driver}" , autocommit : bool = True ) -> None : self . connect ( engine = ENGINE_MYSQL , interface = INTERFACE_ODBC , database = database , user = user , password = password , host = server , port = port , driver = driver , autocommit = autocommit )
Connects to a MySQL database via ODBC .
53,260
def connect_to_database_odbc_sqlserver ( self , odbc_connection_string : str = None , dsn : str = None , database : str = None , user : str = None , password : str = None , server : str = "localhost" , driver : str = "{SQL Server}" , autocommit : bool = True ) -> None : self . connect ( engine = ENGINE_SQLSERVER , interface = INTERFACE_ODBC , odbc_connection_string = odbc_connection_string , dsn = dsn , database = database , user = user , password = password , host = server , driver = driver , autocommit = autocommit )
Connects to an SQL Server database via ODBC .
53,261
def connect_to_database_odbc_access ( self , dsn : str , autocommit : bool = True ) -> None : self . connect ( engine = ENGINE_ACCESS , interface = INTERFACE_ODBC , dsn = dsn , autocommit = autocommit )
Connects to an Access database via ODBC with the DSN prespecified .
53,262
def localize_sql ( self , sql : str ) -> str : if self . db_pythonlib in [ PYTHONLIB_PYMYSQL , PYTHONLIB_MYSQLDB ] : sql = _PERCENT_REGEX . sub ( "%%" , sql ) sql = _QUERY_VALUE_REGEX . sub ( "%s" , sql ) return sql
Translates ? - placeholder SQL to appropriate dialect .
53,263
def insert_record_by_fieldspecs_with_values ( self , table : str , fieldspeclist : FIELDSPECLIST_TYPE ) -> int : fields = [ ] values = [ ] for fs in fieldspeclist : fields . append ( fs [ "name" ] ) values . append ( fs [ "value" ] ) return self . insert_record ( table , fields , values )
Inserts a record into the database using a list of fieldspecs having their value stored under the value key .
53,264
def db_exec_with_cursor ( self , cursor , sql : str , * args ) -> int : sql = self . localize_sql ( sql ) try : debug_sql ( sql , args ) cursor . execute ( sql , args ) return cursor . rowcount except : log . exception ( "db_exec_with_cursor: SQL was: " + sql ) raise
Executes SQL on a supplied cursor with ? placeholders substituting in the arguments . Returns number of rows affected .
53,265
def db_exec_and_commit ( self , sql : str , * args ) -> int : rowcount = self . db_exec ( sql , * args ) self . commit ( ) return rowcount
Execute SQL and commit .
53,266
def db_exec_literal ( self , sql : str ) -> int : self . ensure_db_open ( ) cursor = self . db . cursor ( ) debug_sql ( sql ) try : cursor . execute ( sql ) return cursor . rowcount except : log . exception ( "db_exec_literal: SQL was: " + sql ) raise
Executes SQL without modification . Returns rowcount .
53,267
def fetchvalue ( self , sql : str , * args ) -> Optional [ Any ] : row = self . fetchone ( sql , * args ) if row is None : return None return row [ 0 ]
Executes SQL ; returns the first value of the first row or None .
53,268
def fetchone ( self , sql : str , * args ) -> Optional [ Sequence [ Any ] ] : self . ensure_db_open ( ) cursor = self . db . cursor ( ) self . db_exec_with_cursor ( cursor , sql , * args ) try : return cursor . fetchone ( ) except : log . exception ( "fetchone: SQL was: " + sql ) raise
Executes SQL ; returns the first row or None .
53,269
def fetchallfirstvalues ( self , sql : str , * args ) -> List [ Any ] : rows = self . fetchall ( sql , * args ) return [ row [ 0 ] for row in rows ]
Executes SQL ; returns list of first values of each row .
53,270
def fetch_fieldnames ( self , sql : str , * args ) -> List [ str ] : self . ensure_db_open ( ) cursor = self . db . cursor ( ) self . db_exec_with_cursor ( cursor , sql , * args ) try : return [ i [ 0 ] for i in cursor . description ] except : log . exception ( "fetch_fieldnames: SQL was: " + sql ) raise
Executes SQL ; returns just the output fieldnames .
53,271
def delete_by_field ( self , table : str , field : str , value : Any ) -> int : sql = ( "DELETE FROM " + self . delimit ( table ) + " WHERE " + self . delimit ( field ) + "=?" ) return self . db_exec ( sql , value )
Deletes all records where field is value .
53,272
def fetch_all_objects_from_db ( self , cls : Type [ T ] , table : str , fieldlist : Sequence [ str ] , construct_with_pk : bool , * args ) -> List [ T ] : return self . fetch_all_objects_from_db_where ( cls , table , fieldlist , construct_with_pk , None , * args )
Fetches all objects from a table returning an array of objects of class cls .
53,273
def fetch_all_objects_from_db_by_pklist(self, cls: Type, table: str,
                                        fieldlist: Sequence[str],
                                        pklist: Sequence[Any],
                                        construct_with_pk: bool,
                                        *args) -> List[T]:
    """Fetch one object per primary key in ``pklist`` from ``table``.

    If ``construct_with_pk`` is true, objects are built as
    ``cls(pk, *args)``; otherwise as ``cls(*args)``. Each is then
    populated via fetch_object_from_db_by_pk.
    """
    objects = []
    for pk in pklist:
        obj = cls(pk, *args) if construct_with_pk else cls(*args)
        self.fetch_object_from_db_by_pk(obj, table, fieldlist, pk)
        objects.append(obj)
    return objects
Fetches all objects from a table given a list of PKs .
53,274
def save_object_to_db(self, obj: Any, table: str,
                      fieldlist: Sequence[str],
                      is_new_record: bool) -> None:
    """Write ``obj`` to ``table``: UPDATE for existing records, INSERT
    for new ones (choosing the PK-known/PK-unknown insert path based on
    the first field of ``fieldlist``, which is taken to be the PK)."""
    if not is_new_record:
        self.update_object_in_db(obj, table, fieldlist)
        return
    if getattr(obj, fieldlist[0]) is None:
        self.insert_object_into_db_pk_unknown(obj, table, fieldlist)
    else:
        self.insert_object_into_db_pk_known(obj, table, fieldlist)
Saves an object to the database, inserting or updating as necessary.
53,275
def create_index_from_fieldspec(self, table: str,
                                fieldspec: FIELDSPEC_TYPE,
                                indexname: str = None) -> None:
    """Create an index on the fieldspec's column if the fieldspec has a
    truthy ``indexed`` entry; honours an optional ``index_nchar`` prefix
    length."""
    if fieldspec.get("indexed"):
        self.create_index(table, fieldspec["name"],
                          fieldspec.get("index_nchar"),
                          indexname=indexname)
Calls create_index based on a fieldspec if the fieldspec has indexed = True .
53,276
def fielddefsql_from_fieldspec(fieldspec: FIELDSPEC_TYPE) -> str:
    """Build the SQL column-definition fragment for one fieldspec:
    name, type, then NOT NULL / AUTO_INCREMENT / PRIMARY KEY (or UNIQUE
    for non-PK columns) / COMMENT, as requested."""
    parts = [fieldspec["name"] + " " + fieldspec["sqltype"]]
    if fieldspec.get("notnull"):
        parts.append("NOT NULL")
    if fieldspec.get("autoincrement"):
        parts.append("AUTO_INCREMENT")
    if fieldspec.get("pk"):
        parts.append("PRIMARY KEY")
    elif fieldspec.get("unique"):
        # UNIQUE is redundant for a PK, so only considered otherwise.
        parts.append("UNIQUE")
    if "comment" in fieldspec:
        parts.append("COMMENT " + sql_quote_string(fieldspec["comment"]))
    return " ".join(parts)
Returns SQL fragment to define a field .
53,277
def fielddefsql_from_fieldspeclist(
        self, fieldspeclist: FIELDSPECLIST_TYPE) -> str:
    """Join the per-field column-definition SQL fragments with commas, as
    used inside a CREATE TABLE statement."""
    return ",".join(self.fielddefsql_from_fieldspec(spec)
                    for spec in fieldspeclist)
Returns list of field - defining SQL fragments .
53,278
def fieldspec_subset_by_name(
        fieldspeclist: FIELDSPECLIST_TYPE,
        fieldnames: Container[str]) -> FIELDSPECLIST_TYPE:
    """Return only those fieldspecs whose "name" entry appears in
    ``fieldnames`` (original order preserved)."""
    return [spec for spec in fieldspeclist if spec["name"] in fieldnames]
Returns a subset of the fieldspecs matching the fieldnames list .
53,279
def drop_table(self, tablename: str) -> int:
    """DROP TABLE IF EXISTS. Use caution! Returns the rowcount."""
    log.info("Dropping table " + tablename + " (ignore any warning here)")
    return self.db_exec_literal(
        "DROP TABLE IF EXISTS {}".format(tablename))
Drops a table . Use caution!
53,280
def drop_view(self, viewname: str) -> int:
    """DROP VIEW IF EXISTS. Returns the rowcount."""
    log.info("Dropping view " + viewname + " (ignore any warning here)")
    return self.db_exec_literal(
        "DROP VIEW IF EXISTS {}".format(viewname))
Drops a view .
53,281
def rename_table(self, from_table: str, to_table: str) -> Optional[int]:
    """Rename a table (MySQL-specific RENAME TABLE syntax).

    Returns None without acting if the source table doesn't exist;
    raises RuntimeError if the destination table already exists.
    """
    if not self.table_exists(from_table):
        log.info("Skipping renaming of table " + from_table +
                 " (doesn't exist)")
        return None
    if self.table_exists(to_table):
        raise RuntimeError("Can't rename table {} to {}: destination "
                           "already exists!".format(from_table, to_table))
    log.info("Renaming table {} to {}", from_table, to_table)
    return self.db_exec_literal(
        "RENAME TABLE {} TO {}".format(from_table, to_table))
Renames a table . MySQL - specific .
53,282
def add_column(self, tablename: str, fieldspec: FIELDSPEC_TYPE) -> int:
    """ALTER TABLE ... ADD COLUMN, using the fieldspec's definition SQL.
    Returns the rowcount."""
    column_def = self.fielddefsql_from_fieldspec(fieldspec)
    sql = "ALTER TABLE {} ADD COLUMN {}".format(tablename, column_def)
    log.info(sql)
    return self.db_exec_literal(sql)
Adds a column to an existing table .
53,283
def modify_column_if_table_exists(self, tablename: str, fieldname: str,
                                  newdef: str) -> Optional[int]:
    """ALTER TABLE ... MODIFY COLUMN (same name, new definition), but
    only if the table exists; otherwise returns None."""
    if not self.table_exists(tablename):
        return None
    statement = "ALTER TABLE {t} MODIFY COLUMN {field} {newdef}".format(
        newdef=newdef, field=fieldname, t=tablename)
    log.info(statement)
    return self.db_exec_literal(statement)
Alters a column's definition without renaming it.
53,284
def change_column_if_table_exists(self, tablename: str, oldfieldname: str,
                                  newfieldname: str,
                                  newdef: str) -> Optional[int]:
    """ALTER TABLE ... CHANGE COLUMN (rename a column and set its new
    definition), but only if both the table and the old column exist;
    otherwise returns None."""
    if not (self.table_exists(tablename) and
            self.column_exists(tablename, oldfieldname)):
        return None
    statement = "ALTER TABLE {t} CHANGE COLUMN {old} {new} {newdef}".format(
        t=tablename, old=oldfieldname, new=newfieldname, newdef=newdef)
    log.info(statement)
    return self.db_exec_literal(statement)
Renames a column and alters its definition .
53,285
def create_or_update_table(self, tablename: str,
                           fieldspeclist: FIELDSPECLIST_TYPE,
                           drop_superfluous_columns: bool = False,
                           dynamic: bool = False,
                           compressed: bool = False) -> None:
    """Bring a table into line with the given fieldspecs.

    - Makes the table if it doesn't exist.
    - Adds fields that aren't there.
    - Warns about superfluous fields, but doesn't delete them unless
      drop_superfluous_columns is True.
    - Makes indexes, if requested by the fieldspecs.

    Args:
        tablename: table to create/update
        fieldspeclist: desired field specifications
        drop_superfluous_columns: drop unexpected columns rather than
            just warning about them
        dynamic: passed through to make_table (storage option)
        compressed: passed through to make_table (storage option)
    """
    # 1. Create the table if absent.
    self.make_table(tablename, fieldspeclist, dynamic=dynamic,
                    compressed=compressed)
    # 2. Add columns that are wanted but not yet present.
    fields_in_db = set(self.fetch_column_names(tablename))
    desired_fieldnames = set(
        self.fieldnames_from_fieldspeclist(fieldspeclist))
    missing_fieldnames = desired_fieldnames - fields_in_db
    missing_fieldspecs = self.fieldspec_subset_by_name(fieldspeclist,
                                                       missing_fieldnames)
    for f in missing_fieldspecs:
        self.add_column(tablename, f)
    # 3. Columns present in the DB but absent from the spec: warn, and
    #    drop only if explicitly asked to.
    superfluous_fieldnames = fields_in_db - desired_fieldnames
    for f in superfluous_fieldnames:
        if drop_superfluous_columns:
            log.warning("... dropping superfluous field: " + f)
            self.drop_column(tablename, f)
        else:
            log.warning("... superfluous field (ignored): " + f)
    # 4. Create any indexes requested by the fieldspecs.
    for fs in fieldspeclist:
        self.create_index_from_fieldspec(tablename, fs)
- Make table if it doesn t exist . - Add fields that aren t there . - Warn about superfluous fields but don t delete them unless drop_superfluous_columns == True . - Make indexes if requested .
53,286
def get_comment(self, table: str, column: str) -> str:
    """Return the SQL comment attached to ``table.column``, as reported
    by the database flavour in use."""
    return self.flavour.get_comment(self, table, column)
Returns database SQL comment for a column .
53,287
def debug_query(self, sql: str, *args) -> None:
    """Run the query and dump its result rows to the log."""
    debug_query_result(self.fetchall(sql, *args))
Executes SQL and writes the result to the log .
53,288
def wipe_table(self, table: str) -> int:
    """DELETE every record from ``table`` (the table itself remains).
    Use caution! Returns the number of rows deleted."""
    return self.db_exec("DELETE FROM " + self.delimit(table))
Delete all records from a table . Use caution!
53,289
def noise_covariance(fit, dof=2, **kw):
    """Covariance estimate taking into account the noise covariance of
    the data; technically more realistic for continuously sampled data.
    From Faber 1993.

    The smallest eigenvalue, scaled by the residual degrees of freedom
    (n - dof), is treated as the measurement-noise level.
    """
    eigenvalues = fit.eigenvalues
    noise_level = eigenvalues[-1] / (fit.n - dof)
    return 4 * eigenvalues * noise_level
Covariance taking into account the noise covariance of the data . This is technically more realistic for continuously sampled data . From Faber 1993
53,290
def create_view(operations, operation):
    """Implement CREATE VIEW: emit "CREATE VIEW <name> AS <sqltext>" for
    the operation's target via the operations object."""
    target = operation.target
    operations.execute(
        "CREATE VIEW %s AS %s" % (target.name, target.sqltext))
Implements CREATE VIEW .
53,291
def create_sp(operations, operation):
    """Implement CREATE FUNCTION: emit "CREATE FUNCTION <name> <sqltext>"
    for the operation's target via the operations object."""
    target = operation.target
    operations.execute(
        "CREATE FUNCTION %s %s" % (target.name, target.sqltext))
Implements CREATE FUNCTION .
53,292
def iso_string_to_python_datetime(
        isostring: str) -> Optional[datetime.datetime]:
    """Parse an ISO-8601 string into a datetime; empty/None input gives
    None."""
    if isostring:
        return dateutil.parser.parse(isostring)
    return None
Takes an ISO - 8601 string and returns a datetime .
53,293
def python_utc_datetime_to_sqlite_strftime_string(
        value: datetime.datetime) -> str:
    """Convert a Python datetime to a string literal compatible with
    SQLite, including a 3-digit millisecond field, e.g.
    "2020-01-02 03:04:05.678".

    Bug fix: milliseconds are truncated rather than rounded. The previous
    round(microsecond / 1000) produced a four-digit "1000" field for
    microseconds >= 999500 (e.g. "...:05.1000"), an invalid literal,
    because the carry into the seconds was never applied.
    """
    # %f is always 6 zero-padded digits of microseconds; dropping the last
    # three truncates to milliseconds with no overflow possible.
    return value.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
Converts a Python datetime to a string literal compatible with SQLite including the millisecond field .
53,294
def from_db_value(self, value, expression, connection, context):
    """Convert a database value to a Python value; called when data is
    loaded from the database (Django-style field hook).

    NULL stays None; the empty string also becomes None; anything else
    is parsed as an ISO datetime string.
    """
    if value is None or value == '':
        return None
    return iso_string_to_python_datetime(value)
Convert database value to Python value . Called when data is loaded from the database .
53,295
def get_prep_value(self, value):
    """Convert a Python value to the database value used for QUERYING:
    datetimes are converted to UTC; falsy values become ''."""
    log.debug("get_prep_value: {}, {}", value, type(value))
    if value:
        return value.astimezone(timezone.utc)
    return ''
Convert Python value to database value for QUERYING . We query with UTC so this function converts datetime values to UTC .
53,296
def get_db_prep_save(self, value, connection, prepared=False):
    """Convert a Python value to the database value used for SAVING:
    an ISO string carrying full timezone information; falsy values
    become ''."""
    log.debug("get_db_prep_save: {}, {}", value, type(value))
    if value:
        return python_localized_datetime_to_human_iso(value)
    return ''
Convert Python value to database value for SAVING . We save with full timezone information .
53,297
def list_file_extensions(path: str, reportevery: int = 1) -> List[str]:
    """Walk ``path`` recursively and return a sorted list of the distinct
    file extensions found (with leading dot; '' for extensionless files).

    Progress is logged once every ``reportevery`` directories.
    """
    extensions = set()
    n_dirs = 0
    for root, dirs, files in os.walk(path):
        n_dirs += 1
        if n_dirs % reportevery == 0:
            log.debug("Walking directory {}: {!r}", n_dirs, root)
        extensions.update(os.path.splitext(f)[1] for f in files)
    return sorted(extensions)
Returns a sorted list of every file extension found in a directory and its subdirectories .
53,298
def are_debian_packages_installed(packages: List[str]) -> Dict[str, bool]:
    """Check which of a list of Debian packages are installed, via
    ``dpkg-query -W``.

    Args:
        packages: package names to check; must be non-empty

    Returns:
        ordered dict mapping package name -> True if installed
    """
    assert len(packages) >= 1
    require_executable(DPKG_QUERY)
    args = [
        DPKG_QUERY,
        "-W",  # -W = --show: report package name/status
        "-f=${Package} ${Status}\n",  # -f = --showformat
    ] + packages
    # check=False: dpkg-query exits non-zero when some packages are
    # unknown; those are reported on stderr and handled below.
    completed_process = subprocess.run(args,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       check=False)
    encoding = sys.getdefaultencoding()
    stdout = completed_process.stdout.decode(encoding)
    stderr = completed_process.stderr.decode(encoding)
    present = OrderedDict()
    # stdout lines look like: "<package> <status words...>", per the -f
    # format above; "installed" among the status words means installed.
    for line in stdout.split("\n"):
        if line:
            words = line.split()
            assert len(words) >= 2
            package = words[0]
            present[package] = "installed" in words[1:]
    # stderr lines report packages dpkg-query doesn't know about.
    # NOTE(review): assumes the package name is the last word of each
    # stderr line — confirm against dpkg-query's error message format.
    for line in stderr.split("\n"):
        if line:
            words = line.split()
            assert len(words) >= 2
            package = words[-1]
            present[package] = False
    log.debug("Debian package presence: {}", present)
    return present
Check which of a list of Debian packages are installed via dpkg - query .
53,299
def require_debian_packages(packages: List[str]) -> None:
    """Ensure the named Debian packages are installed; if any are
    missing, log critically and raise ValueError with an
    "apt install" suggestion."""
    installed = are_debian_packages_installed(packages)
    missing = sorted(name for name, ok in installed.items() if not ok)
    if not missing:
        return
    msg = ("Debian packages are missing, as follows. Suggest:\n\n"
           "sudo apt install {}".format(" ".join(missing)))
    log.critical(msg)
    raise ValueError(msg)
Ensure specific packages are installed under Debian .