idx
int64 0
63k
| question
stringlengths 61
4.03k
| target
stringlengths 6
1.23k
|
|---|---|---|
10,600
|
def get(tickers, provider=None, common_dates=True, forward_fill=False,
        clean_tickers=True, column_names=None, ticker_field_sep=':',
        mrefresh=False, existing=None, **kwargs):
    """
    Helper function for retrieving data as a DataFrame.

    Args:
        tickers: ticker(s) to fetch; parsed via utils.parse_arg.
        provider: data provider callable; DEFAULT_PROVIDER when None.
        common_dates: drop rows containing any missing value.
        forward_fill: forward-fill missing values.
        clean_tickers: standardize column names via utils.clean_ticker.
        column_names: explicit column names; must match number of columns.
        ticker_field_sep: separator between ticker and field ('spy:open').
        mrefresh: refresh the memoization cache (providers that support it).
        existing: existing DataFrame to merge the new data into.
        **kwargs: passed through to the provider.

    Returns:
        pd.DataFrame with one column per requested ticker.

    Raises:
        ValueError: if column_names length does not match the columns.
    """
    if provider is None:
        provider = DEFAULT_PROVIDER

    tickers = utils.parse_arg(tickers)

    data = {}
    for ticker in tickers:
        t = ticker
        f = None

        # check for a field suffix, e.g. 'spy:open'
        bits = ticker.split(ticker_field_sep, 1)
        if len(bits) == 2:
            t, f = bits

        # providers exposing an 'mcache' attribute support memoization
        # refresh; only those accept the mrefresh kwarg
        if hasattr(provider, 'mcache'):
            data[ticker] = provider(ticker=t, field=f,
                                    mrefresh=mrefresh, **kwargs)
        else:
            data[ticker] = provider(ticker=t, field=f, **kwargs)

    df = pd.DataFrame(data)
    # preserve the requested column order
    df = df[tickers]

    if existing is not None:
        df = ffn.merge(existing, df)

    if common_dates:
        df = df.dropna()

    if forward_fill:
        # fillna(method='ffill') is deprecated and removed in newer pandas;
        # DataFrame.ffill() is the long-standing equivalent
        df = df.ffill()

    if column_names:
        cnames = utils.parse_arg(column_names)
        if len(cnames) != len(df.columns):
            raise ValueError('column_names must be of same length as tickers')
        df.columns = cnames
    elif clean_tickers:
        # materialize the map so the column assignment is safe across
        # pandas versions (some reject lazy iterators)
        df.columns = list(map(utils.clean_ticker, df.columns))

    return df
|
Helper function for retrieving data as a DataFrame .
|
10,601
|
def web(ticker, field=None, start=None, end=None, mrefresh=False,
        source='yahoo'):
    """
    Data provider wrapper around the pandas.io.data provider.
    Provides memoization.
    """
    # yahoo's sensible default series is the adjusted close
    if source == 'yahoo' and field is None:
        field = 'Adj Close'

    tmp = _download_web(ticker, data_source=source, start=start, end=end)

    if tmp is None:
        raise ValueError('failed to retrieve data for %s:%s'
                         % (ticker, field))

    return tmp[field] if field else tmp
|
Data provider wrapper around pandas . io . data provider . Provides memoization .
|
10,602
|
def csv(ticker, path='data.csv', field='', mrefresh=False, **kwargs):
    """
    Data provider wrapper around pandas read_csv. Provides memoization.

    Args:
        ticker: column name (optionally combined with `field` as
            'ticker:field') to extract from the CSV file.
        path: path of the CSV file.
        field: optional field name appended to the ticker.
        mrefresh: refresh the memoization cache
            (presumably consumed by a memoize decorator — confirm).
        **kwargs: passed through to pd.read_csv.

    Raises:
        ValueError: if the ticker(:field) column is not present.
    """
    kwargs.setdefault('index_col', 0)
    kwargs.setdefault('parse_dates', True)

    df = pd.read_csv(path, **kwargs)

    tf = ticker
    # the original used `field is not ''`, an identity comparison against a
    # literal (fragile, and a SyntaxWarning on Python 3.8+); use equality
    if field != '' and field is not None:
        tf = '%s:%s' % (tf, field)

    if tf not in df:
        raise ValueError('Ticker(field) not present in csv file!')

    return df[tf]
|
Data provider wrapper around pandas read_csv . Provides memoization .
|
10,603
|
def display(table, limit=0, vrepr=None, index_header=None, caption=None,
            tr_style=None, td_styles=None, encoding=None, truncate=None,
            epilogue=None):
    """
    Display a table inline within an IPython notebook.
    """
    from IPython.core.display import display_html
    rendered = _display_html(table, limit=limit, vrepr=vrepr,
                             index_header=index_header, caption=caption,
                             tr_style=tr_style, td_styles=td_styles,
                             encoding=encoding, truncate=truncate,
                             epilogue=epilogue)
    display_html(rendered, raw=True)
|
Display a table inline within an IPython notebook .
|
10,604
|
def fromxlsx(filename, sheet=None, range_string=None, row_offset=0,
             column_offset=0, **kwargs):
    """
    Extract a table from a sheet in an Excel .xlsx file.
    """
    return XLSXView(filename, sheet=sheet, range_string=range_string,
                    row_offset=row_offset, column_offset=column_offset,
                    **kwargs)
|
Extract a table from a sheet in an Excel . xlsx file .
|
10,605
|
def toxlsx(tbl, filename, sheet=None, encoding=None):
    """
    Write a table to a new Excel .xlsx file.
    """
    import openpyxl
    if encoding is None:
        # NOTE(review): computed but never passed on — openpyxl's
        # write-only workbook takes no encoding argument; kept for
        # behavioral parity
        encoding = locale.getpreferredencoding()
    workbook = openpyxl.Workbook(write_only=True)
    worksheet = workbook.create_sheet(title=sheet)
    for row in tbl:
        worksheet.append(row)
    workbook.save(filename)
|
Write a table to a new Excel . xlsx file .
|
10,606
|
def teepickle(table, source=None, protocol=-1, write_header=True):
    """
    Return a table that writes rows to a pickle file as they are
    iterated over.
    """
    return TeePickleView(table, source=source, protocol=protocol,
                         write_header=write_header)
|
Return a table that writes rows to a pickle file as they are iterated over .
|
10,607
|
def format(table, field, fmt, **kwargs):
    """
    Convenience function to format all values in the given field using
    the `fmt` format string.
    """
    def conv(v):
        return fmt.format(v)
    return convert(table, field, conv, **kwargs)
|
Convenience function to format all values in the given field using the fmt format string .
|
10,608
|
def formatall(table, fmt, **kwargs):
    """
    Convenience function to format all values in all fields using the
    `fmt` format string.
    """
    def conv(v):
        return fmt.format(v)
    return convertall(table, conv, **kwargs)
|
Convenience function to format all values in all fields using the fmt format string .
|
10,609
|
def interpolate(table, field, fmt, **kwargs):
    """
    Convenience function to interpolate all values in the given field
    using the `fmt` string.
    """
    def conv(v):
        return fmt % v
    return convert(table, field, conv, **kwargs)
|
Convenience function to interpolate all values in the given field using the fmt string .
|
10,610
|
def interpolateall(table, fmt, **kwargs):
    """
    Convenience function to interpolate all values in all fields using
    the `fmt` string.
    """
    def conv(v):
        return fmt % v
    return convertall(table, conv, **kwargs)
|
Convenience function to interpolate all values in all fields using the fmt string .
|
10,611
|
def recordlookup(table, key, dictionary=None):
    """
    Load a dictionary with data from the given table, mapping key values
    to lists of record objects.
    """
    if dictionary is None:
        dictionary = dict()
    it = iter(table)
    hdr = next(it)
    flds = list(map(text_type, hdr))
    keyindices = asindices(hdr, key)
    assert len(keyindices) > 0, 'no key selected'
    getkey = operator.itemgetter(*keyindices)
    for row in it:
        k = getkey(row)
        rec = Record(row, flds)
        if k in dictionary:
            recs = dictionary[k]
            recs.append(rec)
            # reassign rather than relying on in-place mutation
            # (NOTE(review): presumably so shelve-like mappings that do
            # not persist in-place changes still work — confirm)
            dictionary[k] = recs
        else:
            dictionary[k] = [rec]
    return dictionary
|
Load a dictionary with data from the given table mapping to record objects .
|
10,612
|
def appendbcolz(table, obj, check_names=True):
    """
    Append data into a bcolz ctable. The `obj` argument can be either an
    existing ctable or the name of a directory where an on-disk ctable
    is stored.
    """
    import bcolz
    import numpy as np
    if isinstance(obj, string_types):
        # obj is a rootdir: open the on-disk ctable for appending
        ctbl = bcolz.open(obj, mode='a')
    else:
        # duck-type check for a ctable-like object
        assert hasattr(obj, 'append') and hasattr(obj, 'names'), \
            'expected rootdir or ctable, found %r' % obj
        ctbl = obj
    dtype = ctbl.dtype
    it = iter(table)
    hdr = next(it)  # header row
    flds = list(map(text_type, hdr))
    if check_names:
        assert tuple(flds) == tuple(ctbl.names), 'column names do not match'
    # mean chunk length across columns: append in batches of roughly
    # this many rows
    chunklen = sum(ctbl.cols[name].chunklen
                   for name in ctbl.names) // len(ctbl.names)
    while True:
        # take the next batch of rows and convert to a structured array
        data = list(itertools.islice(it, chunklen))
        data = np.array(data, dtype=dtype)
        ctbl.append(data)
        if len(data) < chunklen:
            # short batch means the source table is exhausted
            break
    ctbl.flush()
    return ctbl
|
Append data into a bcolz ctable . The obj argument can be either an existing ctable or the name of a directory where an on - disk ctable is stored .
|
10,613
|
def teetext(table, source=None, encoding=None, errors='strict',
            template=None, prologue=None, epilogue=None):
    """
    Return a table that writes rows to a text file as they are
    iterated over.
    """
    assert template is not None, 'template is required'
    return TeeTextView(table, source=source, encoding=encoding,
                       errors=errors, template=template,
                       prologue=prologue, epilogue=epilogue)
|
Return a table that writes rows to a text file as they are iterated over .
|
10,614
|
def groupcountdistinctvalues(table, key, value):
    """
    Group by the key field, then count the number of distinct values in
    the value field.
    """
    distinct_pairs = distinct(cut(table, key, value))
    return aggregate(distinct_pairs, key, len)
|
Group by the key field then count the number of distinct values in the value field .
|
10,615
|
def appendtextindex(table, index_or_dirname, indexname=None, merge=True,
                    optimize=False):
    """
    Load all rows from `table` into a Whoosh index, adding them to any
    existing data in the index.
    """
    import whoosh.index
    if isinstance(index_or_dirname, string_types):
        # given a directory name: open the index ourselves, so we are
        # responsible for closing it
        dirname = index_or_dirname
        index = whoosh.index.open_dir(dirname, indexname=indexname,
                                      readonly=False)
        needs_closing = True
    elif isinstance(index_or_dirname, whoosh.index.Index):
        # given an already-open index: the caller owns its lifetime
        index = index_or_dirname
        needs_closing = False
    else:
        raise ArgumentError('expected string or index, found %r'
                            % index_or_dirname)
    writer = index.writer()
    try:
        # each row becomes one document, fields keyed by header
        for d in dicts(table):
            writer.add_document(**d)
        writer.commit(merge=merge, optimize=optimize)
    except Exception:
        # roll back any partially-added documents, then re-raise
        writer.cancel()
        raise
    finally:
        # only close indexes this function itself opened
        if needs_closing:
            index.close()
|
Load all rows from table into a Whoosh index adding them to any existing data in the index .
|
10,616
|
def searchtextindexpage(index_or_dirname, query, pagenum, pagelen=10,
                        indexname=None, docnum_field=None,
                        score_field=None, fieldboosts=None,
                        search_kwargs=None):
    """
    Search an index using a query, returning a result page.
    """
    return SearchTextIndexView(index_or_dirname, query,
                               pagenum=pagenum, pagelen=pagelen,
                               indexname=indexname,
                               docnum_field=docnum_field,
                               score_field=score_field,
                               fieldboosts=fieldboosts,
                               search_kwargs=search_kwargs)
|
Search an index using a query returning a result page .
|
10,617
|
def fromxls(filename, sheet=None, use_view=True):
    """
    Extract a table from a sheet in an Excel .xls file. The sheet is
    identified by its name or index number; the sheet name is
    case sensitive.
    """
    return XLSView(filename, sheet=sheet, use_view=use_view)
|
Extract a table from a sheet in an Excel . xls file . Sheet is identified by its name or index number . N . B . the sheet name is case sensitive .
|
10,618
|
def toxls(tbl, filename, sheet, encoding=None, style_compression=0,
          styles=None):
    """
    Write a table to a new Excel .xls file.
    """
    import xlwt
    if encoding is None:
        encoding = locale.getpreferredencoding()
    wb = xlwt.Workbook(encoding=encoding,
                       style_compression=style_compression)
    ws = wb.add_sheet(sheet)
    if styles is None:
        # no styles given: write every cell with default formatting
        for r, row in enumerate(tbl):
            for c, v in enumerate(row):
                ws.write(r, c, label=v)
    else:
        # write the header row, filling in a default style for any
        # field without one
        it = iter(tbl)
        hdr = next(it)
        flds = list(map(str, hdr))
        for c, f in enumerate(flds):
            ws.write(0, c, label=f)
            if f not in styles or styles[f] is None:
                styles[f] = xlwt.Style.default_style
        # rebind styles as a list aligned with field order
        styles = [styles[f] for f in flds]
        # data rows start at row 1; izip_longest pads short rows
        for r, row in enumerate(it):
            for c, (v, style) in enumerate(izip_longest(row, styles,
                                                        fillvalue=None)):
                ws.write(r + 1, c, label=v, style=style)
    wb.save(filename)
|
Write a table to a new Excel . xls file .
|
10,619
|
def asindices(hdr, spec):
    """
    Convert the given field spec into a list of field indices.
    """
    flds = list(map(text_type, hdr))
    if not isinstance(spec, (list, tuple)):
        spec = (spec,)
    indices = list()
    for s in spec:
        if isinstance(s, int) and s < len(hdr):
            # spec item is already a valid positional index
            indices.append(s)
        elif s in flds:
            # spec item is a field name
            indices.append(flds.index(s))
        else:
            raise FieldSelectionError(s)
    return indices
|
Convert the given field spec into a list of field indices .
|
10,620
|
def expr(s):
    """
    Construct a function operating on a table record.

    Occurrences of ``{name}`` in the string are replaced with
    ``rec['name']`` and the result is compiled into a one-argument
    lambda.

    NOTE: uses eval(); only pass trusted expression strings.
    """
    # raw string: '\{' is an invalid escape in a plain string literal
    # (DeprecationWarning, and a SyntaxError in future Python versions)
    prog = re.compile(r'\{([^}]+)\}')

    def repl(matchobj):
        return "rec['%s']" % matchobj.group(1)

    return eval("lambda rec: " + prog.sub(repl, s))
|
Construct a function operating on a table record .
|
10,621
|
def teecsv(table, source=None, encoding=None, errors='strict',
           write_header=True, **csvargs):
    """
    Return a table that writes rows to a CSV file as they are
    iterated over.
    """
    csvargs.setdefault('dialect', 'excel')
    return teecsv_impl(table, source=write_source_from_arg(source),
                       encoding=encoding, errors=errors,
                       write_header=write_header, **csvargs)
|
Returns a table that writes rows to a CSV file as they are iterated over .
|
10,622
|
def distinct(table, key=None, count=None, presorted=False,
             buffersize=None, tempdir=None, cache=True):
    """
    Return only distinct rows in the table.
    """
    return DistinctView(table, key=key, count=count, presorted=presorted,
                        buffersize=buffersize, tempdir=tempdir,
                        cache=cache)
|
Return only distinct rows in the table .
|
10,623
|
def make_sqlalchemy_column(col, colname, constraints=True):
    """
    Infer an appropriate SQLAlchemy column type based on a sequence of
    values, checking progressively wider type categories.
    """
    import sqlalchemy
    col_not_none = [v for v in col if v is not None]
    sql_column_kwargs = {}
    sql_type_kwargs = {}
    if len(col_not_none) == 0:
        # all values are None: fall back to a bounded string column
        sql_column_type = sqlalchemy.String
        if constraints:
            sql_type_kwargs['length'] = NULL_COLUMN_MAX_LENGTH
    elif all(isinstance(v, bool) for v in col_not_none):
        # bool must be tested before int (bool is a subclass of int)
        sql_column_type = sqlalchemy.Boolean
    elif all(isinstance(v, int) for v in col_not_none):
        # promote to BigInteger when values exceed 32-bit range
        if max(col_not_none) > SQL_INTEGER_MAX \
                or min(col_not_none) < SQL_INTEGER_MIN:
            sql_column_type = sqlalchemy.BigInteger
        else:
            sql_column_type = sqlalchemy.Integer
    elif all(isinstance(v, long) for v in col_not_none):
        # NOTE(review): `long` is a Python 2 builtin; presumably aliased
        # (e.g. long = int) by a compat shim at module level — confirm
        sql_column_type = sqlalchemy.BigInteger
    elif all(isinstance(v, (int, long)) for v in col_not_none):
        sql_column_type = sqlalchemy.BigInteger
    elif all(isinstance(v, (int, long, float)) for v in col_not_none):
        sql_column_type = sqlalchemy.Float
    elif all(isinstance(v, datetime.datetime) for v in col_not_none):
        sql_column_type = sqlalchemy.DateTime
    elif all(isinstance(v, datetime.date) for v in col_not_none):
        sql_column_type = sqlalchemy.Date
    elif all(isinstance(v, datetime.time) for v in col_not_none):
        sql_column_type = sqlalchemy.Time
    else:
        # mixed/unknown types: store as text, sized to the widest value
        sql_column_type = sqlalchemy.String
        if constraints:
            sql_type_kwargs['length'] = max(
                [len(text_type(v)) for v in col])
    if constraints:
        # nullable iff any original value was None
        sql_column_kwargs['nullable'] = len(col_not_none) < len(col)
    return sqlalchemy.Column(colname, sql_column_type(**sql_type_kwargs),
                             **sql_column_kwargs)
|
Infer an appropriate SQLAlchemy column type based on a sequence of values .
|
10,624
|
def make_sqlalchemy_table(table, tablename, schema=None, constraints=True,
                          metadata=None):
    """
    Create an SQLAlchemy table definition based on data in `table`.
    """
    import sqlalchemy
    if not metadata:
        metadata = sqlalchemy.MetaData()
    sql_table = sqlalchemy.Table(tablename, metadata, schema=schema)
    cols = columns(table)
    for fld in list(cols.keys()):
        # infer each column's type from its values
        sql_table.append_column(
            make_sqlalchemy_column(cols[fld], fld,
                                   constraints=constraints))
    return sql_table
|
Create an SQLAlchemy table definition based on data in table .
|
10,625
|
def make_create_table_statement(table, tablename, schema=None,
                                constraints=True, metadata=None,
                                dialect=None):
    """
    Generate a CREATE TABLE statement based on data in `table`.
    """
    import sqlalchemy
    sql_table = make_sqlalchemy_table(table, tablename, schema=schema,
                                      constraints=constraints,
                                      metadata=metadata)
    if dialect:
        module = __import__('sqlalchemy.dialects.%s' % DIALECTS[dialect],
                            fromlist=['dialect'])
        sql_dialect = module.dialect()
    else:
        sql_dialect = None
    compiled = sqlalchemy.schema.CreateTable(sql_table).compile(
        dialect=sql_dialect)
    return text_type(compiled).strip()
|
Generate a CREATE TABLE statement based on data in table .
|
10,626
|
def create_table(table, dbo, tablename, schema=None, commit=True,
                 constraints=True, metadata=None, dialect=None,
                 sample=1000):
    """
    Create a database table based on a sample of data in the given table.
    """
    if sample > 0:
        # only inspect the first `sample` rows when inferring types
        table = head(table, sample)
    sql = make_create_table_statement(table, tablename, schema=schema,
                                      constraints=constraints,
                                      metadata=metadata, dialect=dialect)
    _execute(sql, dbo, commit=commit)
|
Create a database table based on a sample of data in the given table .
|
10,627
|
def drop_table(dbo, tablename, schema=None, commit=True):
    """
    Drop a database table.
    """
    tablename = _quote(tablename)
    if schema is not None:
        tablename = '.'.join((_quote(schema), tablename))
    sql = u'DROP TABLE %s' % tablename
    _execute(sql, dbo, commit)
|
Drop a database table .
|
10,628
|
def typecounter(table, field):
    """
    Count the number of values found for each Python type.

    Returns:
        collections.Counter mapping type name -> occurrence count.
    """
    counter = Counter()
    # the original wrapped the increment in `try/except IndexError: pass`,
    # but neither __class__.__name__ access nor a Counter increment can
    # raise IndexError, so the handler was unreachable and has been removed
    for v in values(table, field):
        counter[v.__class__.__name__] += 1
    return counter
|
Count the number of values found for each Python type .
|
10,629
|
def teehtml(table, source=None, encoding=None, errors='strict',
            caption=None, vrepr=text_type, lineterminator='\n',
            index_header=False, tr_style=None, td_styles=None,
            truncate=None):
    """
    Return a table that writes rows to a Unicode HTML file as they are
    iterated over.
    """
    return TeeHTMLView(table, source=write_source_from_arg(source),
                       encoding=encoding, errors=errors, caption=caption,
                       vrepr=vrepr, lineterminator=lineterminator,
                       index_header=index_header, tr_style=tr_style,
                       td_styles=td_styles, truncate=truncate)
|
Return a table that writes rows to a Unicode HTML file as they are iterated over .
|
10,630
|
def tupletree(table, start='start', stop='stop', value=None):
    """
    Construct an interval tree for the given table, where each node in
    the tree is a row of the table.
    """
    import intervaltree
    tree = intervaltree.IntervalTree()
    rows = iter(table)
    hdr = next(rows)
    flds = list(map(text_type, hdr))
    assert start in flds, 'start field not recognised'
    assert stop in flds, 'stop field not recognised'
    getstart = itemgetter(flds.index(start))
    getstop = itemgetter(flds.index(stop))
    if value is None:
        # no value spec: store the whole row (as a tuple)
        getvalue = tuple
    else:
        vidx = asindices(hdr, value)
        assert len(vidx) > 0, 'invalid value field specification'
        getvalue = itemgetter(*vidx)
    for row in rows:
        tree.addi(getstart(row), getstop(row), getvalue(row))
    return tree
|
Construct an interval tree for the given table where each node in the tree is a row of the table .
|
10,631
|
def facettupletrees(table, key, start='start', stop='stop', value=None):
    """
    Construct faceted interval trees for the given table, where each node
    in a tree is a row of the table.
    """
    import intervaltree
    rows = iter(table)
    hdr = next(rows)
    flds = list(map(text_type, hdr))
    assert start in flds, 'start field not recognised'
    assert stop in flds, 'stop field not recognised'
    getstart = itemgetter(flds.index(start))
    getstop = itemgetter(flds.index(stop))
    if value is None:
        getvalue = tuple
    else:
        vidx = asindices(hdr, value)
        assert len(vidx) > 0, 'invalid value field specification'
        getvalue = itemgetter(*vidx)
    kidx = asindices(hdr, key)
    assert len(kidx) > 0, 'invalid key'
    getkey = itemgetter(*kidx)
    trees = dict()
    for row in rows:
        k = getkey(row)
        if k not in trees:
            trees[k] = intervaltree.IntervalTree()
        trees[k].addi(getstart(row), getstop(row), getvalue(row))
    return trees
|
Construct faceted interval trees for the given table where each node in the tree is a row of the table .
|
10,632
|
def recordtree(table, start='start', stop='stop'):
    """
    Construct an interval tree for the given table, where each node in
    the tree is a row of the table represented as a record object.
    """
    import intervaltree
    start_of = attrgetter(start)
    stop_of = attrgetter(stop)
    tree = intervaltree.IntervalTree()
    for rec in records(table):
        tree.addi(start_of(rec), stop_of(rec), rec)
    return tree
|
Construct an interval tree for the given table where each node in the tree is a row of the table represented as a record object .
|
10,633
|
def facetrecordtrees(table, key, start='start', stop='stop'):
    """
    Construct faceted interval trees for the given table, where each node
    in a tree is a record.
    """
    import intervaltree
    start_of = attrgetter(start)
    stop_of = attrgetter(stop)
    key_of = attrgetter(key)
    trees = dict()
    for rec in records(table):
        k = key_of(rec)
        if k not in trees:
            trees[k] = intervaltree.IntervalTree()
        trees[k].addi(start_of(rec), stop_of(rec), rec)
    return trees
|
Construct faceted interval trees for the given table where each node in the tree is a record .
|
10,634
|
def facetintervallookupone(table, key, start='start', stop='stop',
                           value=None, include_stop=False, strict=True):
    """
    Construct a faceted interval lookup for the given table, returning
    at most one result per query. With strict=True, queries returning
    more than one result raise DuplicateKeyError; with strict=False the
    first result is returned.
    """
    trees = facettupletrees(table, key, start=start, stop=stop,
                            value=value)
    return {
        k: IntervalTreeLookupOne(tree, include_stop=include_stop,
                                 strict=strict)
        for k, tree in trees.items()
    }
|
Construct a faceted interval lookup for the given table returning at most one result for each query . If strict = True queries returning more than one result will raise a DuplicateKeyError . If strict = False and there is more than one result the first result is returned .
|
10,635
|
def intervalantijoin(left, right, lstart='start', lstop='stop',
                     rstart='start', rstop='stop', lkey=None, rkey=None,
                     include_stop=False, missing=None):
    """
    Return rows from the left table with no overlapping rows from the
    right table.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    return IntervalAntiJoinView(left, right, lstart=lstart, lstop=lstop,
                                rstart=rstart, rstop=rstop, lkey=lkey,
                                rkey=rkey, include_stop=include_stop,
                                missing=missing)
|
Return rows from the left table with no overlapping rows from the right table .
|
10,636
|
def intervaljoinvalues(left, right, value, lstart='start', lstop='stop',
                       rstart='start', rstop='stop', lkey=None, rkey=None,
                       include_stop=False):
    """
    Convenience function to join the left table with values from a
    specific field in the right-hand table. Start coordinates are
    included and stop coordinates excluded from the interval; pass
    include_stop=True to include the upper bound when finding overlaps.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    if lkey is None:
        lkp = intervallookup(right, start=rstart, stop=rstop, value=value,
                             include_stop=include_stop)

        def finder(row):
            return lkp.search(row[lstart], row[lstop])
    else:
        lkp = facetintervallookup(right, rkey, start=rstart, stop=rstop,
                                  value=value, include_stop=include_stop)

        def finder(row):
            return lkp[row[lkey]].search(row[lstart], row[lstop])
    return addfield(left, value, finder)
|
Convenience function to join the left table with values from a specific field in the right hand table . Note start coordinates are included and stop coordinates are excluded from the interval . Use the include_stop keyword argument to include the upper bound of the interval when finding overlaps .
|
10,637
|
def intervalsubtract(left, right, lstart='start', lstop='stop',
                     rstart='start', rstop='stop', lkey=None, rkey=None,
                     include_stop=False):
    """
    Subtract intervals in the right-hand table from intervals in the
    left-hand table.
    """
    assert (lkey is None) == (rkey is None), \
        'facet key field must be provided for both or neither table'
    return IntervalSubtractView(left, right, lstart=lstart, lstop=lstop,
                                rstart=rstart, rstop=rstop, lkey=lkey,
                                rkey=rkey, include_stop=include_stop)
|
Subtract intervals in the right hand table from intervals in the left hand table .
|
10,638
|
def _collapse(intervals):
    """
    Collapse an iterable of intervals sorted by start coord into
    non-overlapping spans, yielded in order.
    """
    span = None
    for start, stop in intervals:
        if span is None:
            # the first interval opens the current span
            span = _Interval(start, stop)
        elif start <= span.stop < stop:
            # overlaps and extends the current span
            span = _Interval(span.start, stop)
        elif start > span.stop:
            # disjoint: emit the finished span and open a new one
            yield span
            span = _Interval(start, stop)
        # otherwise the interval is contained within the span: skip it
    if span is not None:
        yield span
|
Collapse an iterable of intervals sorted by start coord .
|
10,639
|
def _subtract(start, stop, intervals):
    """
    Subtract intervals from a spanning interval, yielding the remaining
    gaps as _Interval objects.
    """
    remainder_start = start
    sub_stop = None
    for sub_start, sub_stop in _collapse(intervals):
        if remainder_start < sub_start:
            # gap before this subtracted interval
            yield _Interval(remainder_start, sub_start)
        remainder_start = sub_stop
    if sub_stop is not None and sub_stop < stop:
        # remainder after the last subtracted interval
        yield _Interval(sub_stop, stop)
    # NOTE(review): when `intervals` is empty, nothing is yielded — the
    # spanning interval vanishes rather than surviving intact. Presumably
    # callers only invoke this when at least one overlap exists — confirm.
|
Subtract intervals from a spanning interval .
|
10,640
|
def rowgroupmap(table, key, mapper, header=None, presorted=False,
                buffersize=None, tempdir=None, cache=True):
    """
    Group rows under the given key, then apply `mapper` to yield zero or
    more output rows for each input group of rows.
    """
    return RowGroupMapView(table, key, mapper, header=header,
                           presorted=presorted, buffersize=buffersize,
                           tempdir=tempdir, cache=cache)
|
Group rows under the given key then apply mapper to yield zero or more output rows for each input group of rows .
|
10,641
|
def rowlenselect(table, n, complement=False):
    """
    Select rows of length `n`.
    """
    def has_length_n(row):
        return len(row) == n
    return select(table, has_length_n, complement=complement)
|
Select rows of length n .
|
10,642
|
def selectop(table, field, value, op, complement=False):
    """
    Select rows where the function `op` applied to the given field and
    the given value returns True.
    """
    def test(v):
        return op(v, value)
    return select(table, field, test, complement=complement)
|
Select rows where the function op applied to the given field and the given value returns True .
|
10,643
|
def selecteq(table, field, value, complement=False):
    """
    Select rows where the given field equals the given value.
    """
    return selectop(table, field, value, operator.eq,
                    complement=complement)
|
Select rows where the given field equals the given value .
|
10,644
|
def selectne(table, field, value, complement=False):
    """
    Select rows where the given field does not equal the given value.
    """
    return selectop(table, field, value, operator.ne,
                    complement=complement)
|
Select rows where the given field does not equal the given value .
|
10,645
|
def selectlt(table, field, value, complement=False):
    """
    Select rows where the given field is less than the given value.
    """
    # wrap in Comparable so mixed-type comparisons behave consistently
    return selectop(table, field, Comparable(value), operator.lt,
                    complement=complement)
|
Select rows where the given field is less than the given value .
|
10,646
|
def selectle(table, field, value, complement=False):
    """
    Select rows where the given field is less than or equal to the
    given value.
    """
    return selectop(table, field, Comparable(value), operator.le,
                    complement=complement)
|
Select rows where the given field is less than or equal to the given value .
|
10,647
|
def selectgt(table, field, value, complement=False):
    """
    Select rows where the given field is greater than the given value.
    """
    return selectop(table, field, Comparable(value), operator.gt,
                    complement=complement)
|
Select rows where the given field is greater than the given value .
|
10,648
|
def selectge(table, field, value, complement=False):
    """
    Select rows where the given field is greater than or equal to the
    given value.
    """
    return selectop(table, field, Comparable(value), operator.ge,
                    complement=complement)
|
Select rows where the given field is greater than or equal to the given value .
|
10,649
|
def selectcontains(table, field, value, complement=False):
    """
    Select rows where the given field contains the given value.
    """
    return selectop(table, field, value, operator.contains,
                    complement=complement)
|
Select rows where the given field contains the given value .
|
10,650
|
def selectin(table, field, value, complement=False):
    """
    Select rows where the given field is a member of the given value.
    """
    def is_member(v):
        return v in value
    return select(table, field, is_member, complement=complement)
|
Select rows where the given field is a member of the given value .
|
10,651
|
def selectnotin(table, field, value, complement=False):
    """
    Select rows where the given field is not a member of the given value.
    """
    def not_member(v):
        return v not in value
    return select(table, field, not_member, complement=complement)
|
Select rows where the given field is not a member of the given value .
|
10,652
|
def selectis(table, field, value, complement=False):
    """
    Select rows where the given field is (identity) the given value.
    """
    return selectop(table, field, value, operator.is_,
                    complement=complement)
|
Select rows where the given field is the given value .
|
10,653
|
def selectisnot(table, field, value, complement=False):
    """
    Select rows where the given field is not (identity) the given value.
    """
    return selectop(table, field, value, operator.is_not,
                    complement=complement)
|
Select rows where the given field is not the given value .
|
10,654
|
def selectisinstance(table, field, value, complement=False):
    """
    Select rows where the given field is an instance of the given type.
    """
    return selectop(table, field, value, isinstance,
                    complement=complement)
|
Select rows where the given field is an instance of the given type .
|
10,655
|
def selectrangeopenleft(table, field, minv, maxv, complement=False):
    """
    Select rows where the given field is greater than or equal to `minv`
    and less than `maxv`.
    """
    minv = Comparable(minv)
    maxv = Comparable(maxv)

    def in_range(v):
        return minv <= v < maxv
    return select(table, field, in_range, complement=complement)
|
Select rows where the given field is greater than or equal to minv and less than maxv .
|
10,656
|
def selecttrue(table, field, complement=False):
    """
    Select rows where the given field evaluates True.
    """
    def truthy(v):
        return bool(v)
    return select(table, field, truthy, complement=complement)
|
Select rows where the given field evaluates True .
|
10,657
|
def selectfalse(table, field, complement=False):
    """
    Select rows where the given field evaluates False.
    """
    def falsy(v):
        return not bool(v)
    return select(table, field, falsy, complement=complement)
|
Select rows where the given field evaluates False .
|
10,658
|
def selectnone(table, field, complement=False):
    """
    Select rows where the given field is None.
    """
    def is_none(v):
        return v is None
    return select(table, field, is_none, complement=complement)
|
Select rows where the given field is None .
|
10,659
|
def selectnotnone(table, field, complement=False):
    """
    Select rows where the given field is not None.
    """
    def not_none(v):
        return v is not None
    return select(table, field, not_none, complement=complement)
|
Select rows where the given field is not None .
|
10,660
|
def stdchannel_redirected(stdchannel):
    """
    Redirect stdout or stderr to a StringIO object. As of Python 3.4 the
    standard library provides contextlib.redirect_stdout/redirect_stderr;
    this helper is kept for backwards compatibility.

    Args:
        stdchannel: attribute name on sys to redirect ('stdout'/'stderr').

    Yields:
        the io.StringIO object capturing the channel's output.
    """
    # look up the original stream BEFORE entering the try block: in the
    # previous version, a failure during setup left `old` unbound and the
    # finally clause raised NameError instead of the real error
    old = getattr(sys, stdchannel)
    try:
        s = io.StringIO()
        setattr(sys, stdchannel, s)
        yield s
    finally:
        # always restore the original stream
        setattr(sys, stdchannel, old)
|
Redirects stdout or stderr to a StringIO object . As of Python 3.4 the standard library provides a context manager for this ( contextlib . redirect_stdout ) , but this helper is kept for backwards compatibility .
|
10,661
|
def load(param):
    """
    If the supplied parameter is a string, assume it is a simple pattern.
    """
    if param is None:
        return Null()
    if isinstance(param, str):
        return Pattern(param)
    return param
|
If the supplied parameter is a string , assume it is a simple pattern .
|
10,662
|
def _multi_permission_mask(mode):
    """
    Support multiple comma-separated Unix chmod symbolic modes.
    """
    def compose(f, g):
        return lambda *args, **kwargs: g(f(*args, **kwargs))
    masks = map(_permission_mask, mode.split(','))
    return functools.reduce(compose, masks)
|
Support multiple comma - separated Unix chmod symbolic modes .
|
10,663
|
def _permission_mask ( mode ) : parsed = re . match ( '(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$' , mode ) if not parsed : raise ValueError ( "Unrecognized symbolic mode" , mode ) spec_map = dict ( r = 4 , w = 2 , x = 1 ) specs = ( spec_map [ perm ] for perm in parsed . group ( 'what' ) ) spec = functools . reduce ( operator . or_ , specs , 0 ) shift_map = dict ( u = 6 , g = 3 , o = 0 ) who = parsed . group ( 'who' ) . replace ( 'a' , 'ugo' ) masks = ( spec << shift_map [ subj ] for subj in who ) mask = functools . reduce ( operator . or_ , masks ) op = parsed . group ( 'op' ) if op == '-' : mask ^= 0o777 if op == '=' : masks = ( 0o7 << shift_map [ subj ] for subj in who ) retain = functools . reduce ( operator . or_ , masks ) ^ 0o777 op_map = { '+' : operator . or_ , '-' : operator . and_ , '=' : lambda mask , target : target & retain ^ mask , } return functools . partial ( op_map [ op ] , mask )
|
Convert a Unix chmod symbolic mode like ugo + rwx to a function suitable for applying to a mask to affect that change .
|
10,664
|
def uncshare(self):
    """
    The UNC mount point for this path. This is empty for paths on local
    drives.

    NOTE(review): relies on self.module.splitunc, which was removed from
    ntpath in Python 3.12 — confirm a compat shim is in place.
    """
    unc, _ = self.module.splitunc(self)
    return self._next_class(unc)
|
The UNC mount point for this path . This is empty for paths on local drives .
|
10,665
|
def splitall(self):
    r"""Return a list of the path components in this path."""
    parts = []
    loc = self
    while loc not in (os.curdir, os.pardir):
        prev = loc
        loc, child = prev.splitpath()
        if loc == prev:
            # reached the root; stop without recording it twice
            break
        parts.append(child)
    parts.append(loc)
    parts.reverse()
    return parts
|
Return a list of the path components in this path .
|
10,666
|
def relpath(self, start='.'):
    """
    Return this path as a relative path, based from `start`, which
    defaults to the current working directory.
    """
    base = self._next_class(start)
    return base.relpathto(self)
|
Return this path as a relative path based from start which defaults to the current working directory .
|
10,667
|
def fnmatch(self, pattern, normcase=None):
    """
    Return True if self.name matches the given pattern.
    """
    if normcase is None:
        # patterns may carry their own normcase; otherwise use the
        # path flavour's
        normcase = getattr(pattern, 'normcase', self.module.normcase)
    return fnmatch.fnmatchcase(normcase(self.name), normcase(pattern))
|
Return True if self . name matches the given pattern .
|
10,668
|
def glob(self, pattern):
    """
    Return a list of Path objects that match the pattern.
    """
    cls = self._next_class
    matches = glob.glob(self / pattern)
    return [cls(match) for match in matches]
|
Return a list of Path objects that match the pattern .
|
10,669
|
def chunks(self, size, *args, **kwargs):
    """
    Return a generator yielding chunks of the file, so it can be read
    piece by piece with a simple for loop. Extra arguments are passed
    to `self.open`.
    """
    with self.open(*args, **kwargs) as f:
        while True:
            piece = f.read(size)
            if not piece:
                break
            yield piece
|
Returns a generator yielding chunks of the file so it can be read piece by piece with a simple for loop .
|
10,670
|
def lines(self, encoding=None, errors='strict', retain=True):
    r"""Open this file, read all lines, and return them in a list."""
    content = self.text(encoding, errors)
    return content.splitlines(retain)
|
Open this file , read all lines , and return them in a list .
|
10,671
|
def _hash(self, hash_name):
    """
    Return a hash object for the file at the current path, computed by
    streaming the file in binary chunks.
    """
    hasher = hashlib.new(hash_name)
    for chunk in self.chunks(8192, mode="rb"):
        hasher.update(chunk)
    return hasher
|
Returns a hash object for the file at the current path .
|
10,672
|
def chown(self, uid=-1, gid=-1):
    """
    Change the owner and group by names rather than the uid or gid
    numbers.
    """
    if not hasattr(os, 'chown'):
        msg = "Ownership not available on this platform."
        raise NotImplementedError(msg)
    # translate symbolic names to numeric ids where the lookup modules
    # were importable
    if 'pwd' in globals() and isinstance(uid, str):
        uid = pwd.getpwnam(uid).pw_uid
    if 'grp' in globals() and isinstance(gid, str):
        gid = grp.getgrnam(gid).gr_gid
    os.chown(self, uid, gid)
    return self
|
Change the owner and group by names rather than the uid or gid numbers .
|
10,673
|
def in_place(
        self, mode='r', buffering=-1, encoding=None, errors=None,
        newline=None, backup_extension=None,
):
    """
    A context in which a file may be re-written in-place with new
    content.

    Yields a pair ``(readable, writable)``: *readable* reads from a
    backup copy of the original file, while *writable* writes to a
    fresh file at the original path. On error the backup is restored;
    on success it is deleted.

    NOTE(review): this generator is used as a context manager, so it is
    presumably decorated with ``@contextmanager`` at its definition
    site (not visible here) — confirm.
    """
    import io

    # Only read modes make sense for *mode*; writing happens through
    # the second handle yielded by this context.
    if set(mode).intersection('wa+'):
        raise ValueError('Only read-only file modes can be used')

    # Move the current file aside as a backup; the reader works on the
    # backup while the writer re-creates the original path.
    backup_fn = self + (backup_extension or os.extsep + 'bak')
    try:
        os.unlink(backup_fn)
    except os.error:
        pass  # no stale backup to remove
    os.rename(self, backup_fn)
    readable = io.open(
        backup_fn, mode, buffering=buffering, encoding=encoding,
        errors=errors, newline=newline,
    )
    try:
        # Try to preserve the original file's permission bits.
        perm = os.fstat(readable.fileno()).st_mode
    except OSError:
        # Could not stat the handle: fall back to a plain open().
        writable = open(
            self, 'w' + mode.replace('r', ''), buffering=buffering,
            encoding=encoding, errors=errors, newline=newline,
        )
    else:
        os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
        if hasattr(os, 'O_BINARY'):
            os_mode |= os.O_BINARY
        fd = os.open(self, os_mode, perm)
        writable = io.open(
            fd, "w" + mode.replace('r', ''), buffering=buffering,
            encoding=encoding, errors=errors, newline=newline,
        )
        try:
            if hasattr(os, 'chmod'):
                os.chmod(self, perm)
        except OSError:
            pass  # best effort: continue without the mode bits
    try:
        yield readable, writable
    except Exception:
        # Rewriting failed: discard the partial file and restore the
        # backup before re-raising.
        readable.close()
        writable.close()
        try:
            os.unlink(self)
        except os.error:
            pass
        os.rename(backup_fn, self)
        raise
    else:
        readable.close()
        writable.close()
    finally:
        try:
            os.unlink(backup_fn)
        except os.error:
            pass  # backup already gone (e.g. restored above)
|
A context in which a file may be re - written in - place with new content .
|
10,674
|
def get_dir(self, scope, class_):
    """Look up the appdirs ``<scope>_<class_>_dir`` attribute on the
    wrapper and return it wrapped in self.path_class (via the Multi
    machinery, so multi-path values are handled)."""
    prop_name = '{scope}_{class_}_dir'.format(**locals())
    raw_value = getattr(self.wrapper, prop_name)
    multi_cls = Multi.for_class(self.path_class)
    return multi_cls.detect(raw_value)
|
Return the callable function from appdirs but with the result wrapped in self . path_class
|
10,675
|
def _next_class(cls):
    """Return the first non-Multi class in the MRO: Multi-subclasses
    should use the parent class."""
    for candidate in cls.__mro__:
        if not issubclass(candidate, Multi):
            return candidate
    # Mirror next() on an exhausted generator when no candidate exists.
    raise StopIteration
|
Multi - subclasses should use the parent class
|
10,676
|
def render_math(self, token):
    """Ensure Math tokens are all enclosed in two dollar signs.

    Content already fenced with ``$$`` is emitted untouched; anything
    else is wrapped in single dollar signs.
    """
    if token.content.startswith('$$'):
        return self.render_raw_text(token)
    return '$' + self.render_raw_text(token) + '$'
|
Ensure Math tokens are all enclosed in two dollar signs .
|
10,677
|
def markdown(iterable, renderer=HTMLRenderer):
    """Render *iterable* (lines of Markdown) with default settings.

    Enables inline and block-level HTML tags.
    """
    with renderer() as active:
        return active.render(Document(iterable))
|
Output HTML with default settings . Enables inline and block - level HTML tags .
|
10,678
|
def convert_file(filename, renderer):
    """Parse the Markdown file *filename* and dump the rendered output
    to stdout.

    Exits the interpreter with a message if the file cannot be opened
    (or another OSError occurs while rendering/printing).
    """
    try:
        with open(filename, 'r') as fin:
            print(mistletoe.markdown(fin, renderer), end='')
    except OSError:
        sys.exit('Cannot open file "{}".'.format(filename))
|
Parse a Markdown file and dump the output to stdout .
|
10,679
|
def interactive(renderer):
    """Parse user input, dump to stdout, rinse and repeat — Python
    REPL style.

    Ctrl-D flushes the accumulated document; Ctrl-C exits.
    """
    _import_readline()
    _print_heading(renderer)
    contents = []
    more = False
    while True:
        try:
            prompt = '... ' if more else '>>> '
            more = True
            contents.append(input(prompt) + '\n')
        except EOFError:
            print('\n' + mistletoe.markdown(contents, renderer), end='')
            more = False
            contents = []
        except KeyboardInterrupt:
            print('\nExiting.')
            break
|
Parse user input dump to stdout rinse and repeat . Python REPL style .
|
10,680
|
def toc(self):
    """Return the collected table of contents as a
    block_token.List instance."""
    from mistletoe.block_token import List

    def indent_for(level):
        # With the title omitted, every heading is promoted one level.
        if self.omit_title:
            level -= 1
        return ' ' * 4 * (level - 1)

    items = [
        '{indent}- {content}\n'.format(indent=indent_for(level), content=content)
        for level, content in self._headings
    ]
    return List(items)
|
Returns table of contents as a block_token . List instance .
|
10,681
|
def render_inner(self, token):
    """Recursively render all children of *token* and concatenate the
    results with no separator in between."""
    return ''.join(self.render(child) for child in token.children)
|
Recursively renders child tokens . Joins the rendered strings with no space in between .
|
10,682
|
def complexity_entropy_multiscale(signal, max_scale_factor=20, m=2, r="default"):
    """
    Compute the Multiscale Entropy (MSE) of *signal*.

    Sample entropy (Chebyshev distance) is computed on coarse-grained
    copies of the signal for scale factors 1..max_scale_factor.

    Parameters
    ----------
    signal : array-like
        The raw signal.
    max_scale_factor : int
        Largest coarse-graining factor to evaluate.
    m : int
        Embedding dimension for sample entropy.
    r : float or "default"
        Tolerance; "default" uses ``0.15 * std(signal)``.

    Returns
    -------
    dict
        Parameters used, per-scale entropy values, their AUC and sum.
    """
    if r == "default":
        r = 0.15 * np.std(signal)

    n = len(signal)
    per_scale_entropy_values = np.zeros(max_scale_factor)
    for i in range(max_scale_factor):
        # Coarse-grain: average non-overlapping windows of length i + 1.
        b = int(np.fix(n / (i + 1)))
        temp_ts = [0] * int(b)
        for j in range(b):
            num = sum(signal[j * (i + 1): (j + 1) * (i + 1)])
            den = i + 1
            temp_ts[j] = float(num) / float(den)

        se = nolds.sampen(temp_ts, m, r, nolds.measures.rowwise_chebyshev,
                          debug_plot=False, plot_file=None)

        if np.isinf(se):
            # The coarse-grained series became too short for SampEn:
            # truncate the scale range here. (Message typo fixed:
            # "to short" -> "too short".)
            print("NeuroKit warning: complexity_entropy_multiscale(): Signal might be too short to compute SampEn for scale factors > " + str(i) + ". Setting max_scale_factor to " + str(i) + ".")
            max_scale_factor = i
            break
        else:
            per_scale_entropy_values[i] = se

    all_entropy_values = per_scale_entropy_values[0:max_scale_factor]

    parameters = {"max_scale_factor": max_scale_factor, "r": r, "m": m}
    mse = {"MSE_Parameters": parameters,
           "MSE_Values": all_entropy_values,
           "MSE_AUC": np.trapz(all_entropy_values),
           "MSE_Sum": np.sum(all_entropy_values)}
    return (mse)
|
Computes the Multiscale Entropy . Uses sample entropy with Chebyshev distance .
|
10,683
|
def eeg_gfp(raws, gflp_method="GFPL1", scale=True, normalize=True, smoothing=None):
    """
    Run the Global Field Power (GFP) analysis for each participant/run.

    Returns a nested dict ``gfp[participant][run]`` holding the data at
    GFP peaks plus acquisition metadata. Parameters mirror
    ``eeg_gfp_peaks()``.
    """
    gfp = {}
    for participant in raws:
        gfp[participant] = {}

        for run in raws[participant]:
            gfp[participant][run] = {}
            raw = raws[participant][run].copy()

            # Decide MEG vs EEG from the channel names.
            if True in set(["MEG" in ch for ch in raw.info["ch_names"]]):
                meg = True
                eeg = False
            else:
                meg = False
                eeg = True

            # Keep the ECG channel if present; NaN placeholder otherwise
            # (pick_types raises ValueError when no ECG channel exists).
            try:
                gfp[participant][run]["ecg"] = np.array(raw.copy().pick_types(meg=False, eeg=False, ecg=True).to_data_frame())
            except ValueError:
                gfp[participant][run]["ecg"] = np.nan

            data = raw.copy().pick_types(meg=meg, eeg=eeg)

            gfp[participant][run]["data_info"] = data.info
            gfp[participant][run]["data_freq"] = data.info["sfreq"]
            gfp[participant][run]["run_duration"] = len(data) / data.info["sfreq"]

            data = np.array(data.to_data_frame())

            # Extract the GFP curve and its peaks (microstate candidates).
            data, gfp_curve, gfp_peaks = eeg_gfp_peaks(data,
                                                       gflp_method=gflp_method,
                                                       smoothing=smoothing,
                                                       smoothing_window=100,
                                                       peak_method="wavelet",
                                                       normalize=normalize)
            gfp[participant][run]["microstates_times"] = gfp_peaks

            # Keep only the topographies at GFP peaks, optionally z-scored.
            data_peaks = data[gfp_peaks]
            if scale is True:
                gfp[participant][run]["data"] = z_score(data_peaks)
            else:
                gfp[participant][run]["data"] = data_peaks

            # Record the preprocessing settings alongside the data.
            gfp[participant][run]["data_scale"] = scale
            gfp[participant][run]["data_normalize"] = normalize
            gfp[participant][run]["data_smoothing"] = smoothing

    return (gfp)
|
Run the GFP analysis .
|
10,684
|
def eeg_microstates_clustering(data, n_microstates=4, clustering_method="kmeans", n_jobs=1, n_init=25, occurence_rejection_treshold=0.05, max_refitting=5, verbose=True):
    """
    Fit a clustering algorithm to microstate (GFP-peak) data.

    Parameters
    ----------
    data : array
        Observations (GFP peaks) x features matrix.
    n_microstates : int
        Number of clusters (ignored by dbscan/affinity).
    clustering_method : str
        'kmeans', 'spectral', 'agglom', 'dbscan' or 'affinity'.
    n_jobs, n_init : int
        Forwarded to the sklearn estimator where applicable.
    occurence_rejection_treshold : float
        Minimum fraction of samples a cluster must contain; smaller
        clusters are dropped and the model refitted.
    max_refitting : int
        Maximum number of refits before keeping the current fit.
    verbose : bool
        Print progress messages.

    Returns
    -------
    Fitted sklearn estimator.

    Raises
    ------
    ValueError
        If *clustering_method* is not one of the supported names.
    """
    training_set = data.copy()

    if verbose is True:
        print("- Initializing the clustering algorithm...")
    if clustering_method == "kmeans":
        algorithm = sklearn.cluster.KMeans(init='k-means++', n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)
    elif clustering_method == "spectral":
        algorithm = sklearn.cluster.SpectralClustering(n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)
    elif clustering_method == "agglom":
        algorithm = sklearn.cluster.AgglomerativeClustering(n_clusters=n_microstates, linkage="complete")
    elif clustering_method == "dbscan":
        algorithm = sklearn.cluster.DBSCAN(min_samples=100)
    elif clustering_method == "affinity":
        algorithm = sklearn.cluster.AffinityPropagation(damping=0.5)
    else:
        # Previously this only printed a message and then crashed with
        # a NameError on the unbound `algorithm`; fail fast instead.
        raise ValueError("NeuroKit Error: eeg_microstates(): clustering_method must be 'kmeans', 'spectral', 'dbscan', 'affinity' or 'agglom'")

    refitting = 0  # number of refits performed so far
    good_fit_achieved = False
    while good_fit_achieved is False:
        good_fit_achieved = True
        if verbose is True:
            print("- Fitting the classifier...")
        algorithm.fit(training_set)

        if verbose is True:
            print("- Clustering back the initial data...")
        predicted = algorithm.fit_predict(training_set)

        if verbose is True:
            print("- Check for abnormalities...")
        occurences = dict(collections.Counter(predicted))
        masks = [np.array([True] * len(training_set))]
        for microstate in occurences:
            # Reject clusters containing fewer samples than the treshold.
            if occurences[microstate] < len(data) * occurence_rejection_treshold:
                good_fit_achieved = False
                refitting += 1
                print("NeuroKit Warning: eeg_microstates(): detected some outliers: refitting the classifier (n=" + str(refitting) + ").")
                masks.append(predicted != microstate)
        mask = np.all(masks, axis=0)
        training_set = training_set[mask]

        if good_fit_achieved is False and refitting >= max_refitting:
            # `max_refitting` was accepted but never used before,
            # allowing an unbounded refit loop; enforce it here.
            print("NeuroKit Warning: eeg_microstates(): max_refitting (" + str(max_refitting) + ") reached, keeping the current fit.")
            break
    return (algorithm)
|
Fit the clustering algorithm .
|
10,685
|
def eeg_microstates_plot(method, path="", extension=".png", show_sensors_position=False, show_sensors_name=False, plot=True, save=True, dpi=150, contours=0, colorbar=False, separate=False):
    """
    Plot the microstates as topographic maps.

    One topomap is produced per microstate (entries labelled "Bad" are
    skipped); if *save* is True the individual images are pasted into a
    single combined image on disk.

    Returns the list of matplotlib figures.
    """
    figures = []
    names = []
    # Prefer the refitted ("good fit") microstates when available.
    try:
        microstates = method["microstates_good_fit"]
    except KeyError:
        microstates = method["microstates"]

    for microstate in set(microstates):
        if microstate != "Bad":
            # Average all topographies assigned to this microstate.
            values = np.mean(method["data"][np.where(microstates == microstate)], axis=0)
            values = np.array(values, ndmin=2).T
            evoked = mne.EvokedArray(values, method["raw.info_example"], 0)

            fig = evoked.plot_topomap(times=0, title=microstate, size=6, contours=contours, time_format="", show=plot, colorbar=colorbar, show_names=show_sensors_name, sensors=show_sensors_position)

            figures.append(fig)

            # File name encodes the preprocessing/clustering settings.
            name = path + "microstate_%s_%s%s%s_%s%i_%s%s" % (microstate, method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
            fig.savefig(name, dpi=dpi)
            names.append(name)

    if save is True:
        # Paste the individual images onto one combined canvas, laid
        # out on a half-count x half-count grid.
        image_template = PIL.Image.open(names[0])
        X, Y = image_template.size
        image_template.close()

        combined = PIL.Image.new('RGB', (int(X * len(set(microstates)) / 2), int(Y * len(set(microstates)) / 2)))
        fig = 0
        for x in np.arange(0, len(set(microstates)) / 2 * int(X), int(X)):
            for y in np.arange(0, len(set(microstates)) / 2 * int(Y), int(Y)):
                try:
                    newfig = PIL.Image.open(names[fig])
                    combined.paste(newfig, (int(x), int(y)))
                    newfig.close()
                except:  # noqa: E722 -- the grid may have more cells than images
                    pass
                fig += 1

        combined_name = path + "microstates_%s%s%s_%s%i_%s%s" % (method["data_scale"], method["data_normalize"], method["data_smoothing"], method["feature_reduction_method"], method["n_features"], method["clustering_method"], extension)
        combined.save(combined_name)

    # Remove per-microstate files unless they were explicitly requested.
    if separate is False or save is False:
        for name in names:
            os.remove(name)

    return (figures)
|
Plot the microstates .
|
10,686
|
def eeg_microstates_relabel(method, results, microstates_labels, reverse_microstates=None):
    """
    Relabel the microstates.

    Parameters
    ----------
    method : dict
        Pipeline output; its 'microstates' array is replaced and the
        matching 'data' rows are polarity-inverted for reversed states.
    results : dict
        Passed through unchanged.
    microstates_labels : dict
        Mapping old label -> new label.
    reverse_microstates : dict or None
        Mapping old label -> new label for microstates whose polarity
        should also be inverted. The previous code crashed with an
        AttributeError when left at its None default; None now simply
        means "no reversals".

    Returns
    -------
    (results, method)
    """
    if reverse_microstates is None:
        reverse_microstates = {}
    if microstates_labels is None:
        microstates_labels = {}

    microstates = list(method['microstates'])
    for index, microstate in enumerate(method['microstates']):
        if microstate in list(reverse_microstates.keys()):
            microstates[index] = reverse_microstates[microstate]
            method["data"][index] = -1 * method["data"][index]
        if microstate in list(microstates_labels.keys()):
            microstates[index] = microstates_labels[microstate]

    method['microstates'] = np.array(microstates)
    return (results, method)
|
Relabel the microstates .
|
10,687
|
def bio_process(ecg=None, rsp=None, eda=None, emg=None, add=None, sampling_rate=1000, age=None, sex=None, position=None, ecg_filter_type="FIR", ecg_filter_band="bandpass", ecg_filter_frequency=[3, 45], ecg_segmenter="hamilton", ecg_quality_model="default", ecg_hrv_features=["time", "frequency"], eda_alpha=8e-4, eda_gamma=1e-2, scr_method="makowski", scr_treshold=0.1, emg_names=None, emg_envelope_freqs=[10, 400], emg_envelope_lfreq=4, emg_activation_treshold="default", emg_activation_n_above=0.25, emg_activation_n_below=1):
    """
    Automated processing of bio signals; wrapper around ecg_process(),
    rsp_process(), eda_process() and emg_process().

    Any of *ecg*, *rsp*, *eda*, *emg* may be None to skip that
    modality; *add* is an extra DataFrame concatenated unchanged.
    Returns a dict with one entry per processed modality plus a
    combined "df" DataFrame.

    NOTE(review): the list defaults (e.g. ecg_filter_frequency) are
    shared across calls; safe only while callees never mutate them —
    consider None-sentinels.
    """
    processed_bio = {}
    bio_df = pd.DataFrame({})

    # ECG (RSP, when given, is processed alongside it so RSA can be
    # computed from both signals).
    if ecg is not None:
        ecg = ecg_process(ecg=ecg, rsp=rsp, sampling_rate=sampling_rate, filter_type=ecg_filter_type, filter_band=ecg_filter_band, filter_frequency=ecg_filter_frequency, segmenter=ecg_segmenter, quality_model=ecg_quality_model, hrv_features=ecg_hrv_features, age=age, sex=sex, position=position)
        processed_bio["ECG"] = ecg["ECG"]
        if rsp is not None:
            processed_bio["RSP"] = ecg["RSP"]
        bio_df = pd.concat([bio_df, ecg["df"]], axis=1)

    # RSP on its own (only when no ECG is available).
    if rsp is not None and ecg is None:
        rsp = rsp_process(rsp=rsp, sampling_rate=sampling_rate)
        processed_bio["RSP"] = rsp["RSP"]
        bio_df = pd.concat([bio_df, rsp["df"]], axis=1)

    # EDA
    if eda is not None:
        eda = eda_process(eda=eda, sampling_rate=sampling_rate, alpha=eda_alpha, gamma=eda_gamma, scr_method=scr_method, scr_treshold=scr_treshold)
        processed_bio["EDA"] = eda["EDA"]
        bio_df = pd.concat([bio_df, eda["df"]], axis=1)

    # EMG (possibly several channels, hence the dict-merge loop).
    if emg is not None:
        emg = emg_process(emg=emg, sampling_rate=sampling_rate, emg_names=emg_names, envelope_freqs=emg_envelope_freqs, envelope_lfreq=emg_envelope_lfreq, activation_treshold=emg_activation_treshold, activation_n_above=emg_activation_n_above, activation_n_below=emg_activation_n_below)
        bio_df = pd.concat([bio_df, emg.pop("df")], axis=1)
        for i in emg:
            processed_bio[i] = emg[i]

    # Additional channels carried along unchanged.
    if add is not None:
        add = add.reset_index(drop=True)
        bio_df = pd.concat([bio_df, add], axis=1)

    processed_bio["df"] = bio_df
    return (processed_bio)
|
Automated processing of bio signals . Wrapper for other bio processing functions .
|
10,688
|
def ecg_process(ecg, rsp=None, sampling_rate=1000, filter_type="FIR", filter_band="bandpass", filter_frequency=[3, 45], segmenter="hamilton", quality_model="default", hrv_features=["time", "frequency"], age=None, sex=None, position=None):
    """
    Automated processing of ECG and (optionally) RSP signals.

    Pipeline: preprocessing (filtering + R-peak segmentation), optional
    signal-quality classification, HRV computation (with demographic
    adjustment when age/sex/position are all given), and — when *rsp*
    is provided — respiration processing plus RSA.

    Returns the processed dict with a combined "df" DataFrame.
    """
    # Preprocessing: filtering + R-peak segmentation.
    processed_ecg = ecg_preprocess(ecg, sampling_rate=sampling_rate, filter_type=filter_type, filter_band=filter_band, filter_frequency=filter_frequency, segmenter=segmenter)

    # Signal quality (experimental); skipped when quality_model is None.
    if quality_model is not None:
        quality = ecg_signal_quality(cardiac_cycles=processed_ecg["ECG"]["Cardiac_Cycles"], sampling_rate=sampling_rate, rpeaks=processed_ecg["ECG"]["R_Peaks"], quality_model=quality_model)
        processed_ecg["ECG"].update(quality)
        processed_ecg["df"] = pd.concat([processed_ecg["df"], quality["ECG_Signal_Quality"]], axis=1)

    # Heart-rate variability.
    if hrv_features is not None:
        hrv = ecg_hrv(rpeaks=processed_ecg["ECG"]["R_Peaks"], sampling_rate=sampling_rate, hrv_features=hrv_features)
        try:
            processed_ecg["df"] = pd.concat([processed_ecg["df"], hrv.pop("df")], axis=1)
        except KeyError:
            pass  # no continuous HRV signal to merge
        processed_ecg["ECG"]["HRV"] = hrv
        if age is not None and sex is not None and position is not None:
            processed_ecg["ECG"]["HRV_Adjusted"] = ecg_hrv_assessment(hrv, age, sex, position)

    # RSP + respiratory sinus arrhythmia.
    if rsp is not None:
        rsp = rsp_process(rsp=rsp, sampling_rate=sampling_rate)
        processed_ecg["RSP"] = rsp["RSP"]
        processed_ecg["df"] = pd.concat([processed_ecg["df"], rsp["df"]], axis=1)

        rsa = ecg_rsa(processed_ecg["ECG"]["R_Peaks"], rsp["df"]["RSP_Filtered"], sampling_rate=sampling_rate)
        processed_ecg["ECG"]["RSA"] = rsa
        processed_ecg["df"] = pd.concat([processed_ecg["df"], rsa.pop("df")], axis=1)
    return (processed_ecg)
|
Automated processing of ECG and RSP signals .
|
10,689
|
def ecg_signal_quality(cardiac_cycles, sampling_rate, rpeaks=None, quality_model="default"):
    """
    Attempt to find the recording lead and the overall and individual
    quality of the heartbeats signal.

    Although used as a routine, this feature is experimental: cardiac
    cycles are resampled/padded towards a fixed 192-row shape (rows
    8..199 after normalisation) expected by the bundled classifier,
    which predicts the most probable lead and a per-cycle quality
    probability.
    """
    # Normalise every cardiac-cycle frame towards 200 rows before slicing.
    if len(cardiac_cycles) > 200:
        cardiac_cycles = cardiac_cycles.rolling(20).mean().resample("3L").pad()
    if len(cardiac_cycles) < 200:
        cardiac_cycles = cardiac_cycles.resample("1L").pad()
        cardiac_cycles = cardiac_cycles.rolling(20).mean().resample("3L").pad()
    if len(cardiac_cycles) < 200:
        # Still short: left-pad with NaN rows and back-fill them.
        fill_dict = {}
        for i in cardiac_cycles.columns:
            fill_dict[i] = [np.nan] * (200 - len(cardiac_cycles))
        cardiac_cycles = pd.concat([pd.DataFrame(fill_dict), cardiac_cycles], ignore_index=True)
        cardiac_cycles = cardiac_cycles.fillna(method="bfill")

    cardiac_cycles = cardiac_cycles.reset_index(drop=True)[8:200]
    cardiac_cycles = z_score(cardiac_cycles).T
    cardiac_cycles = np.array(cardiac_cycles)

    # Load the heartbeat classifier (bundled model by default).
    if quality_model == "default":
        model = sklearn.externals.joblib.load(Path.materials() + 'heartbeat_classification.model')
    else:
        model = sklearn.externals.joblib.load(quality_model)

    quality = {}

    # The most frequently predicted class is taken as the probable lead.
    lead = model.predict(cardiac_cycles)
    lead = pd.Series(lead).value_counts().index[0]
    quality["Probable_Lead"] = lead

    predict = pd.DataFrame(model.predict_proba(cardiac_cycles))
    predict.columns = model.classes_
    quality["Cardiac_Cycles_Signal_Quality"] = predict[lead].values
    quality["Average_Signal_Quality"] = predict[lead].mean()

    # Interpolate per-cycle quality into a continuous signal over time.
    if rpeaks is not None:
        signal = quality["Cardiac_Cycles_Signal_Quality"]
        signal = interpolate(signal, rpeaks, sampling_rate)
        signal.name = "ECG_Signal_Quality"
        quality["ECG_Signal_Quality"] = signal
    return (quality)
|
Attempt to find the recording lead and the overall and individual quality of heartbeats signal . Although used as a routine this feature is experimental .
|
10,690
|
def ecg_simulate(duration=10, sampling_rate=1000, bpm=60, noise=0.01):
    """
    Simulate an ECG-like signal.

    The heartbeat template is a Daubechies-10 wavelet (roughly QRS
    shaped) padded with a flat segment, tiled to match the requested
    duration/bpm, corrupted with Gaussian noise and resampled to
    *sampling_rate*.

    Returns a 1-D numpy array of length ``duration * sampling_rate``.
    """
    # One synthetic heartbeat, padded with a flat tail.
    beat = scipy.signal.wavelets.daub(10)
    beat = np.concatenate([beat, np.zeros(10)])

    n_beats = int(duration * bpm / 60)
    ecg = np.tile(beat, n_beats)

    # Additive Gaussian noise, then resampling to the target rate.
    ecg = ecg + np.random.normal(0, noise, len(ecg))
    return scipy.signal.resample(ecg, sampling_rate * duration)
|
Simulates an ECG signal .
|
10,691
|
def rsp_process(rsp, sampling_rate=1000):
    """
    Automated processing of RSP signals: filtering, respiration rate,
    inspiration/expiration cycle detection and respiratory variability.

    Returns a dict with a "df" DataFrame of continuous signals and an
    "RSP" dict of cycle onsets/lengths and variability indices.
    """
    processed_rsp = {"df": pd.DataFrame({"RSP_Raw": np.array(rsp)})}

    # biosppy handles filtering and respiration-rate estimation.
    biosppy_rsp = dict(biosppy.signals.resp.resp(rsp, sampling_rate=sampling_rate, show=False))
    processed_rsp["df"]["RSP_Filtered"] = biosppy_rsp["filtered"]

    # Respiration rate: convert Hz to breaths/min and interpolate onto
    # the signal's sample index.
    rsp_rate = biosppy_rsp["resp_rate"] * 60
    rsp_times = biosppy_rsp["resp_rate_ts"]
    rsp_times = np.round(rsp_times * sampling_rate).astype(int)
    try:
        rsp_rate = interpolate(rsp_rate, rsp_times, sampling_rate)
        processed_rsp["df"]["RSP_Rate"] = rsp_rate
    except TypeError:
        print("NeuroKit Warning: rsp_process(): Sequence too short to compute respiratory rate.")
        processed_rsp["df"]["RSP_Rate"] = np.nan

    # Inspiration/expiration cycle detection.
    rsp_cycles = rsp_find_cycles(biosppy_rsp["filtered"])
    processed_rsp["df"]["RSP_Inspiration"] = rsp_cycles["RSP_Inspiration"]

    processed_rsp["RSP"] = {}
    processed_rsp["RSP"]["Cycles_Onsets"] = rsp_cycles["RSP_Cycles_Onsets"]
    processed_rsp["RSP"]["Expiration_Onsets"] = rsp_cycles["RSP_Expiration_Onsets"]
    processed_rsp["RSP"]["Cycles_Length"] = rsp_cycles["RSP_Cycles_Length"] / sampling_rate

    # Respiratory variability indices over the cycle lengths.
    rsp_diff = processed_rsp["RSP"]["Cycles_Length"]
    processed_rsp["RSP"]["Respiratory_Variability"] = {}
    processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_SD"] = np.std(rsp_diff)
    processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD"] = np.sqrt(np.mean(rsp_diff ** 2))
    processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD_Log"] = np.log(processed_rsp["RSP"]["Respiratory_Variability"]["RSPV_RMSSD"])

    return (processed_rsp)
|
Automated processing of RSP signals .
|
10,692
|
def rsp_find_cycles(signal):
    """
    Find respiratory cycles' onsets, durations and phases from a
    filtered respiration signal.

    Zero crossings of the signal's gradient separate inspiration
    (rising gradient) from expiration (falling gradient).
    """
    # Gradient and its zero crossings.
    gradient = np.gradient(signal)
    zeros, = biosppy.tools.zero_cross(signal=gradient, detrend=True)

    # Classify each crossing by the gradient's local direction.
    phases_indices = []
    for i in zeros:
        if gradient[i + 1] > gradient[i - 1]:
            phases_indices.append("Inspiration")
        else:
            phases_indices.append("Expiration")

    inspiration_onsets = []
    expiration_onsets = []
    for index, onset in enumerate(zeros):
        if phases_indices[index] == "Inspiration":
            inspiration_onsets.append(onset)
        if phases_indices[index] == "Expiration":
            expiration_onsets.append(onset)

    # The phase before the first crossing is the opposite of the first
    # detected phase.
    if phases_indices[0] == "Inspiration":
        phase = "Expiration"
    else:
        phase = "Inspiration"

    # Build a per-sample phase vector, switching phase at each crossing.
    inspiration = []
    phase_counter = 0
    for i, value in enumerate(signal):
        if i == zeros[phase_counter]:
            phase = phases_indices[phase_counter]
            if phase_counter < len(zeros) - 1:
                phase_counter += 1
        inspiration.append(phase)

    # The phase after the last crossing is the opposite of the last one.
    if phases_indices[len(phases_indices) - 1] == "Inspiration":
        last_phase = "Expiration"
    else:
        last_phase = "Inspiration"
    inspiration = np.array(inspiration)
    inspiration[max(zeros):] = last_phase

    # Encode phases numerically: 1 = inspiration, 0 = expiration.
    inspiration[inspiration == "Inspiration"] = 1
    inspiration[inspiration == "Expiration"] = 0
    inspiration = pd.to_numeric(inspiration)

    # Cycle length = spacing between consecutive inspiration onsets.
    cycles_length = np.diff(inspiration_onsets)

    rsp_cycles = {"RSP_Inspiration": inspiration,
                  "RSP_Expiration_Onsets": expiration_onsets,
                  "RSP_Cycles_Onsets": inspiration_onsets,
                  "RSP_Cycles_Length": cycles_length}
    return (rsp_cycles)
|
Find Respiratory cycles onsets durations and phases .
|
10,693
|
def eeg_select_channels(raw, channel_names):
    """Select one or several channels by name from an mne Raw object.

    Returns a DataFrame (several channels) or a named Series (one
    channel).
    """
    if isinstance(channel_names, list) is False:
        channel_names = [channel_names]

    channels, time_index = raw.copy().pick_channels(channel_names)[:]
    if len(channel_names) == 1:
        selected = pd.Series(channels[0])
        selected.name = channel_names[0]
    else:
        selected = pd.DataFrame(channels.T, columns=channel_names)
    return (selected)
|
Select one or several channels by name and returns them in a dataframe .
|
10,694
|
def eeg_create_mne_events(onsets, conditions=None):
    """
    Create an MNE-compatible events array and event_id dict from onset
    samples and optional condition labels.

    Prints a warning and returns an empty tuple when *conditions* and
    *onsets* differ in length.
    """
    event_id = {}
    if conditions is None:
        conditions = ["Event"] * len(onsets)
    if len(conditions) != len(onsets):
        print("NeuroKit Warning: eeg_create_events(): conditions parameter of different length than onsets. Aborting.")
        return()

    # Map each distinct condition name to an integer code and replace
    # the labels by their codes.
    for code, name in enumerate(list(set(conditions))):
        conditions = [code if value == name else value for value in conditions]
        event_id[name] = code

    events = np.array([onsets, [0] * len(onsets), conditions]).T
    return (events, event_id)
|
Create MNE compatible events .
|
10,695
|
def eeg_add_events(raw, events_channel, conditions=None, treshold="auto", cut="higher", time_index=None, number="all", after=0, before=None, min_duration=1):
    """
    Find events on a channel, convert them into an MNE-compatible
    format and add them to the raw data.

    Parameters
    ----------
    raw : mne.io.Raw
        Recording to annotate (modified in place).
    events_channel : str or array-like
        Channel name (looked up in *raw*) or the event signal itself.
    conditions : list or None
        Optional labels, one per detected event.
    Remaining arguments are forwarded to find_events().

    Returns
    -------
    (raw, events, event_id)
    """
    if isinstance(events_channel, str):
        # The previous bare `except:` swallowed the lookup failure and
        # let execution continue with the channel *name* as the signal,
        # producing a confusing error downstream; fail fast instead.
        try:
            events_channel = eeg_select_channels(raw, events_channel)
        except Exception:
            print("NeuroKit error: eeg_add_events(): Wrong events_channel name provided.")
            raise

    events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration)
    events, event_id = eeg_create_mne_events(events["onsets"], conditions)

    raw.add_events(events)
    return (raw, events, event_id)
|
Find events on a channel convert them into an MNE compatible format and add them to the raw data .
|
10,696
|
def eeg_to_all_evokeds(all_epochs, conditions=None):
    """Average each participant's epochs per condition, converting
    all_epochs into a dict of dicts of Evoked objects."""
    if conditions is None:
        # Collect every condition seen across participants.
        conditions = {}
        for participant, epochs in all_epochs.items():
            conditions.update(epochs.event_id)

    all_evokeds = {}
    for participant, epochs in all_epochs.items():
        per_condition = {}
        for cond in conditions:
            try:
                per_condition[cond] = epochs[cond].average()
            except KeyError:
                pass  # this participant has no epochs for the condition
        all_evokeds[participant] = per_condition
    return (all_evokeds)
|
Convert all_epochs to all_evokeds .
|
10,697
|
def eeg_to_df(eeg, index=None, include="all", exclude=None, hemisphere="both", central=True):
    """Convert an mne Raw or Epochs object into a DataFrame (Raw) or a
    dict of per-epoch DataFrames (Epochs), optionally restricted to an
    electrode selection."""
    if isinstance(eeg, mne.Epochs):
        data = {}
        if index is None:
            index = range(len(eeg))
        for epoch_index, epoch in zip(index, eeg.get_data()):
            frame = pd.DataFrame(epoch.T)
            frame.columns = eeg.ch_names
            frame.index = eeg.times
            selection = eeg_select_electrodes(eeg, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
            data[epoch_index] = frame[selection]
        return (data)

    frame = pd.DataFrame(eeg.get_data().T)
    frame.columns = eeg.ch_names
    frame.index = eeg.times
    return (frame)
|
Convert mne Raw or Epochs object to dataframe or dict of dataframes .
|
10,698
|
def plot_polarbar(scores, labels=None, labels_size=15, colors="default", distribution_means=None, distribution_sds=None, treshold=1.28, fig_size=(15, 15)):
    """
    Draw a polar bar chart of *scores* (0-10 scale), optionally with a
    translucent "normal range" layer derived from distribution
    means/SDs.

    Returns the matplotlib figure.
    """
    # Accept a dict mapping label -> score.
    if isinstance(scores, dict):
        if labels is None:
            labels = list(scores.keys())
        try:
            scores = [scores[key] for key in labels]
        except KeyError:
            print("NeuroKit Error: plot_polarbar(): labels and scores keys not matching. Recheck them.")

    if colors == "default":
        # NOTE(review): `< 9` excludes the exactly-nine-scores case even
        # though nine colours are defined — possibly meant `<= 9`; confirm.
        if len(scores) < 9:
            colors = ["#f44336", "#9C27B0", "#3F51B5", "#03A9F4", "#009688", "#8BC34A", "#FFEB3B", "#FF9800", "#795548"]
        else:
            colors = None

    if labels is None:
        labels = range(len(scores))

    # Angular layout: one wedge per score, laid out clockwise.
    N = len(scores)
    theta = np.linspace(0.0, -2 * np.pi, N, endpoint=False)
    width = 2 * np.pi / N

    # Layer 1: the scores themselves.
    plot = plt.figure(figsize=fig_size)
    layer1 = plot.add_subplot(111, projection="polar")
    bars1 = layer1.bar(theta + np.pi / len(scores), scores, width=width, bottom=0.0)
    layer1.yaxis.set_ticks(range(11))
    layer1.yaxis.set_ticklabels([])
    layer1.xaxis.set_ticks(theta + np.pi / len(scores))
    layer1.xaxis.set_ticklabels(labels, fontsize=labels_size)
    for index, bar in enumerate(bars1):
        if colors is not None:
            bar.set_facecolor(colors[index])
        bar.set_alpha(1)

    # Layer 2: translucent normal-range band on the same axes.
    if distribution_means is not None and distribution_sds is not None:
        if isinstance(distribution_means, int):
            distribution_means = [distribution_means] * N
        if isinstance(distribution_sds, int):
            distribution_sds = [distribution_sds] * N
        bottoms, tops = normal_range(np.array(distribution_means), np.array(distribution_sds), treshold=treshold)
        tops = tops - bottoms

        layer2 = plot.add_subplot(111, polar=True)
        bars2 = layer2.bar(theta, tops, width=width, bottom=bottoms, linewidth=0)
        layer2.xaxis.set_ticks(theta + np.pi / len(scores))
        layer2.xaxis.set_ticklabels(labels, fontsize=labels_size)
        for index, bar in enumerate(bars2):
            bar.set_facecolor("#607D8B")
            bar.set_alpha(0.3)

    return (plot)
|
Polar bar chart .
|
10,699
|
def feature_reduction(data, method, n_features):
    """
    Reduce *data* to *n_features* dimensions.

    Parameters
    ----------
    data : array
        Observations x features matrix.
    method : str
        One of "PCA", "agglom", "ica", "kernelPCA", "sparsePCA",
        "incrementalPCA", "nmf"; any other value returns a copy of the
        data unchanged.
    n_features : int
        Target dimensionality.

    Returns
    -------
    array
        The transformed (or copied) data.
    """
    if method == "PCA":
        reducer = sklearn.decomposition.PCA(n_components=n_features)
    elif method == "agglom":
        reducer = sklearn.cluster.FeatureAgglomeration(n_clusters=n_features)
    elif method == "ica":
        reducer = sklearn.decomposition.FastICA(n_components=n_features)
    elif method == "kernelPCA":
        # The original code repeated this branch twice; the duplicate
        # (dead) branch has been removed.
        reducer = sklearn.decomposition.KernelPCA(n_components=n_features, kernel='linear')
    elif method == "sparsePCA":
        reducer = sklearn.decomposition.SparsePCA(n_components=n_features)
    elif method == "incrementalPCA":
        reducer = sklearn.decomposition.IncrementalPCA(n_components=n_features)
    elif method == "nmf":
        # NMF needs non-negative input; shift without mutating the
        # caller's array (the original used in-place `-=`).
        if np.min(data) < 0:
            data = data - np.min(data)
        reducer = sklearn.decomposition.NMF(n_components=n_features)
    else:
        return data.copy()
    return reducer.fit_transform(data)
|
Feature reduction .
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.