idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
10,600 | def get ( tickers , provider = None , common_dates = True , forward_fill = False , clean_tickers = True , column_names = None , ticker_field_sep = ':' , mrefresh = False , existing = None , ** kwargs ) : if provider is None : provider = DEFAULT_PROVIDER tickers = utils . parse_arg ( tickers ) data = { } for ticker in t... | Helper function for retrieving data as a DataFrame . |
10,601 | def web ( ticker , field = None , start = None , end = None , mrefresh = False , source = 'yahoo' ) : if source == 'yahoo' and field is None : field = 'Adj Close' tmp = _download_web ( ticker , data_source = source , start = start , end = end ) if tmp is None : raise ValueError ( 'failed to retrieve data for %s:%s' % (... | Data provider wrapper around pandas . io . data provider . Provides memoization . |
10,602 | def csv ( ticker , path = 'data.csv' , field = '' , mrefresh = False , ** kwargs ) : if 'index_col' not in kwargs : kwargs [ 'index_col' ] = 0 if 'parse_dates' not in kwargs : kwargs [ 'parse_dates' ] = True df = pd . read_csv ( path , ** kwargs ) tf = ticker if field is not '' and field is not None : tf = '%s:%s' % ( ... | Data provider wrapper around pandas read_csv . Provides memoization . |
10,603 | def display ( table , limit = 0 , vrepr = None , index_header = None , caption = None , tr_style = None , td_styles = None , encoding = None , truncate = None , epilogue = None ) : from IPython . core . display import display_html html = _display_html ( table , limit = limit , vrepr = vrepr , index_header = index_heade... | Display a table inline within an IPython notebook . |
10,604 | def fromxlsx ( filename , sheet = None , range_string = None , row_offset = 0 , column_offset = 0 , ** kwargs ) : return XLSXView ( filename , sheet = sheet , range_string = range_string , row_offset = row_offset , column_offset = column_offset , ** kwargs ) | Extract a table from a sheet in an Excel . xlsx file . |
10,605 | def toxlsx ( tbl , filename , sheet = None , encoding = None ) : import openpyxl if encoding is None : encoding = locale . getpreferredencoding ( ) wb = openpyxl . Workbook ( write_only = True ) ws = wb . create_sheet ( title = sheet ) for row in tbl : ws . append ( row ) wb . save ( filename ) | Write a table to a new Excel . xlsx file . |
10,606 | def teepickle ( table , source = None , protocol = - 1 , write_header = True ) : return TeePickleView ( table , source = source , protocol = protocol , write_header = write_header ) | Return a table that writes rows to a pickle file as they are iterated over . |
10,607 | def format ( table , field , fmt , ** kwargs ) : conv = lambda v : fmt . format ( v ) return convert ( table , field , conv , ** kwargs ) | Convenience function to format all values in the given field using the fmt format string . |
10,608 | def formatall ( table , fmt , ** kwargs ) : conv = lambda v : fmt . format ( v ) return convertall ( table , conv , ** kwargs ) | Convenience function to format all values in all fields using the fmt format string . |
10,609 | def interpolate ( table , field , fmt , ** kwargs ) : conv = lambda v : fmt % v return convert ( table , field , conv , ** kwargs ) | Convenience function to interpolate all values in the given field using the fmt string . |
10,610 | def interpolateall ( table , fmt , ** kwargs ) : conv = lambda v : fmt % v return convertall ( table , conv , ** kwargs ) | Convenience function to interpolate all values in all fields using the fmt string . |
10,611 | def recordlookup ( table , key , dictionary = None ) : if dictionary is None : dictionary = dict ( ) it = iter ( table ) hdr = next ( it ) flds = list ( map ( text_type , hdr ) ) keyindices = asindices ( hdr , key ) assert len ( keyindices ) > 0 , 'no key selected' getkey = operator . itemgetter ( * keyindices ) for ro... | Load a dictionary with data from the given table mapping to record objects . |
10,612 | def appendbcolz ( table , obj , check_names = True ) : import bcolz import numpy as np if isinstance ( obj , string_types ) : ctbl = bcolz . open ( obj , mode = 'a' ) else : assert hasattr ( obj , 'append' ) and hasattr ( obj , 'names' ) , 'expected rootdir or ctable, found %r' % obj ctbl = obj dtype = ctbl . dtype it ... | Append data into a bcolz ctable . The obj argument can be either an existing ctable or the name of a directory where an on - disk ctable is stored . |
10,613 | def teetext ( table , source = None , encoding = None , errors = 'strict' , template = None , prologue = None , epilogue = None ) : assert template is not None , 'template is required' return TeeTextView ( table , source = source , encoding = encoding , errors = errors , template = template , prologue = prologue , epil... | Return a table that writes rows to a text file as they are iterated over . |
10,614 | def groupcountdistinctvalues ( table , key , value ) : s1 = cut ( table , key , value ) s2 = distinct ( s1 ) s3 = aggregate ( s2 , key , len ) return s3 | Group by the key field then count the number of distinct values in the value field . |
10,615 | def appendtextindex ( table , index_or_dirname , indexname = None , merge = True , optimize = False ) : import whoosh . index if isinstance ( index_or_dirname , string_types ) : dirname = index_or_dirname index = whoosh . index . open_dir ( dirname , indexname = indexname , readonly = False ) needs_closing = True elif ... | Load all rows from table into a Whoosh index adding them to any existing data in the index . |
10,616 | def searchtextindexpage ( index_or_dirname , query , pagenum , pagelen = 10 , indexname = None , docnum_field = None , score_field = None , fieldboosts = None , search_kwargs = None ) : return SearchTextIndexView ( index_or_dirname , query , pagenum = pagenum , pagelen = pagelen , indexname = indexname , docnum_field =... | Search an index using a query returning a result page . |
10,617 | def fromxls ( filename , sheet = None , use_view = True ) : return XLSView ( filename , sheet = sheet , use_view = use_view ) | Extract a table from a sheet in an Excel . xls file . Sheet is identified by its name or index number . N . B . the sheet name is case sensitive . |
10,618 | def toxls ( tbl , filename , sheet , encoding = None , style_compression = 0 , styles = None ) : import xlwt if encoding is None : encoding = locale . getpreferredencoding ( ) wb = xlwt . Workbook ( encoding = encoding , style_compression = style_compression ) ws = wb . add_sheet ( sheet ) if styles is None : for r , r... | Write a table to a new Excel . xls file . |
10,619 | def asindices ( hdr , spec ) : flds = list ( map ( text_type , hdr ) ) indices = list ( ) if not isinstance ( spec , ( list , tuple ) ) : spec = ( spec , ) for s in spec : if isinstance ( s , int ) and s < len ( hdr ) : indices . append ( s ) elif s in flds : indices . append ( flds . index ( s ) ) else : raise FieldSe... | Convert the given field spec into a list of field indices . |
10,620 | def expr ( s ) : prog = re . compile ( '\{([^}]+)\}' ) def repl ( matchobj ) : return "rec['%s']" % matchobj . group ( 1 ) return eval ( "lambda rec: " + prog . sub ( repl , s ) ) | Construct a function operating on a table record . |
10,621 | def teecsv ( table , source = None , encoding = None , errors = 'strict' , write_header = True , ** csvargs ) : source = write_source_from_arg ( source ) csvargs . setdefault ( 'dialect' , 'excel' ) return teecsv_impl ( table , source = source , encoding = encoding , errors = errors , write_header = write_header , ** c... | Returns a table that writes rows to a CSV file as they are iterated over . |
10,622 | def distinct ( table , key = None , count = None , presorted = False , buffersize = None , tempdir = None , cache = True ) : return DistinctView ( table , key = key , count = count , presorted = presorted , buffersize = buffersize , tempdir = tempdir , cache = cache ) | Return only distinct rows in the table . |
10,623 | def make_sqlalchemy_column ( col , colname , constraints = True ) : import sqlalchemy col_not_none = [ v for v in col if v is not None ] sql_column_kwargs = { } sql_type_kwargs = { } if len ( col_not_none ) == 0 : sql_column_type = sqlalchemy . String if constraints : sql_type_kwargs [ 'length' ] = NULL_COLUMN_MAX_LENG... | Infer an appropriate SQLAlchemy column type based on a sequence of values . |
10,624 | def make_sqlalchemy_table ( table , tablename , schema = None , constraints = True , metadata = None ) : import sqlalchemy if not metadata : metadata = sqlalchemy . MetaData ( ) sql_table = sqlalchemy . Table ( tablename , metadata , schema = schema ) cols = columns ( table ) flds = list ( cols . keys ( ) ) for f in fl... | Create an SQLAlchemy table definition based on data in table . |
10,625 | def make_create_table_statement ( table , tablename , schema = None , constraints = True , metadata = None , dialect = None ) : import sqlalchemy sql_table = make_sqlalchemy_table ( table , tablename , schema = schema , constraints = constraints , metadata = metadata ) if dialect : module = __import__ ( 'sqlalchemy.dia... | Generate a CREATE TABLE statement based on data in table . |
10,626 | def create_table ( table , dbo , tablename , schema = None , commit = True , constraints = True , metadata = None , dialect = None , sample = 1000 ) : if sample > 0 : table = head ( table , sample ) sql = make_create_table_statement ( table , tablename , schema = schema , constraints = constraints , metadata = metadata... | Create a database table based on a sample of data in the given table . |
10,627 | def drop_table ( dbo , tablename , schema = None , commit = True ) : tablename = _quote ( tablename ) if schema is not None : tablename = _quote ( schema ) + '.' + tablename sql = u'DROP TABLE %s' % tablename _execute ( sql , dbo , commit ) | Drop a database table . |
10,628 | def typecounter ( table , field ) : counter = Counter ( ) for v in values ( table , field ) : try : counter [ v . __class__ . __name__ ] += 1 except IndexError : pass return counter | Count the number of values found for each Python type . |
10,629 | def teehtml ( table , source = None , encoding = None , errors = 'strict' , caption = None , vrepr = text_type , lineterminator = '\n' , index_header = False , tr_style = None , td_styles = None , truncate = None ) : source = write_source_from_arg ( source ) return TeeHTMLView ( table , source = source , encoding = enc... | Return a table that writes rows to a Unicode HTML file as they are iterated over . |
10,630 | def tupletree ( table , start = 'start' , stop = 'stop' , value = None ) : import intervaltree tree = intervaltree . IntervalTree ( ) it = iter ( table ) hdr = next ( it ) flds = list ( map ( text_type , hdr ) ) assert start in flds , 'start field not recognised' assert stop in flds , 'stop field not recognised' getsta... | Construct an interval tree for the given table where each node in the tree is a row of the table . |
10,631 | def facettupletrees ( table , key , start = 'start' , stop = 'stop' , value = None ) : import intervaltree it = iter ( table ) hdr = next ( it ) flds = list ( map ( text_type , hdr ) ) assert start in flds , 'start field not recognised' assert stop in flds , 'stop field not recognised' getstart = itemgetter ( flds . in... | Construct faceted interval trees for the given table where each node in the tree is a row of the table . |
10,632 | def recordtree ( table , start = 'start' , stop = 'stop' ) : import intervaltree getstart = attrgetter ( start ) getstop = attrgetter ( stop ) tree = intervaltree . IntervalTree ( ) for rec in records ( table ) : tree . addi ( getstart ( rec ) , getstop ( rec ) , rec ) return tree | Construct an interval tree for the given table where each node in the tree is a row of the table represented as a record object . |
10,633 | def facetrecordtrees ( table , key , start = 'start' , stop = 'stop' ) : import intervaltree getstart = attrgetter ( start ) getstop = attrgetter ( stop ) getkey = attrgetter ( key ) trees = dict ( ) for rec in records ( table ) : k = getkey ( rec ) if k not in trees : trees [ k ] = intervaltree . IntervalTree ( ) tree... | Construct faceted interval trees for the given table where each node in the tree is a record . |
10,634 | def facetintervallookupone ( table , key , start = 'start' , stop = 'stop' , value = None , include_stop = False , strict = True ) : trees = facettupletrees ( table , key , start = start , stop = stop , value = value ) out = dict ( ) for k in trees : out [ k ] = IntervalTreeLookupOne ( trees [ k ] , include_stop = incl... | Construct a faceted interval lookup for the given table returning at most one result for each query . If strict = True queries returning more than one result will raise a DuplicateKeyError . If strict = False and there is more than one result the first result is returned . |
10,635 | def intervalantijoin ( left , right , lstart = 'start' , lstop = 'stop' , rstart = 'start' , rstop = 'stop' , lkey = None , rkey = None , include_stop = False , missing = None ) : assert ( lkey is None ) == ( rkey is None ) , 'facet key field must be provided for both or neither table' return IntervalAntiJoinView ( lef... | Return rows from the left table with no overlapping rows from the right table . |
10,636 | def intervaljoinvalues ( left , right , value , lstart = 'start' , lstop = 'stop' , rstart = 'start' , rstop = 'stop' , lkey = None , rkey = None , include_stop = False ) : assert ( lkey is None ) == ( rkey is None ) , 'facet key field must be provided for both or neither table' if lkey is None : lkp = intervallookup (... | Convenience function to join the left table with values from a specific field in the right hand table . Note start coordinates are included and stop coordinates are excluded from the interval . Use the include_stop keyword argument to include the upper bound of the interval when finding overlaps . |
10,637 | def intervalsubtract ( left , right , lstart = 'start' , lstop = 'stop' , rstart = 'start' , rstop = 'stop' , lkey = None , rkey = None , include_stop = False ) : assert ( lkey is None ) == ( rkey is None ) , 'facet key field must be provided for both or neither table' return IntervalSubtractView ( left , right , lstar... | Subtract intervals in the right hand table from intervals in the left hand table . |
10,638 | def _collapse ( intervals ) : span = None for start , stop in intervals : if span is None : span = _Interval ( start , stop ) elif start <= span . stop < stop : span = _Interval ( span . start , stop ) elif start > span . stop : yield span span = _Interval ( start , stop ) if span is not None : yield span | Collapse an iterable of intervals sorted by start coord . |
10,639 | def _subtract ( start , stop , intervals ) : remainder_start = start sub_stop = None for sub_start , sub_stop in _collapse ( intervals ) : if remainder_start < sub_start : yield _Interval ( remainder_start , sub_start ) remainder_start = sub_stop if sub_stop is not None and sub_stop < stop : yield _Interval ( sub_stop ... | Subtract intervals from a spanning interval . |
10,640 | def rowgroupmap ( table , key , mapper , header = None , presorted = False , buffersize = None , tempdir = None , cache = True ) : return RowGroupMapView ( table , key , mapper , header = header , presorted = presorted , buffersize = buffersize , tempdir = tempdir , cache = cache ) | Group rows under the given key then apply mapper to yield zero or more output rows for each input group of rows . |
10,641 | def rowlenselect ( table , n , complement = False ) : where = lambda row : len ( row ) == n return select ( table , where , complement = complement ) | Select rows of length n . |
10,642 | def selectop ( table , field , value , op , complement = False ) : return select ( table , field , lambda v : op ( v , value ) , complement = complement ) | Select rows where the function op applied to the given field and the given value returns True . |
10,643 | def selecteq ( table , field , value , complement = False ) : return selectop ( table , field , value , operator . eq , complement = complement ) | Select rows where the given field equals the given value . |
10,644 | def selectne ( table , field , value , complement = False ) : return selectop ( table , field , value , operator . ne , complement = complement ) | Select rows where the given field does not equal the given value . |
10,645 | def selectlt ( table , field , value , complement = False ) : value = Comparable ( value ) return selectop ( table , field , value , operator . lt , complement = complement ) | Select rows where the given field is less than the given value . |
10,646 | def selectle ( table , field , value , complement = False ) : value = Comparable ( value ) return selectop ( table , field , value , operator . le , complement = complement ) | Select rows where the given field is less than or equal to the given value . |
10,647 | def selectgt ( table , field , value , complement = False ) : value = Comparable ( value ) return selectop ( table , field , value , operator . gt , complement = complement ) | Select rows where the given field is greater than the given value . |
10,648 | def selectge ( table , field , value , complement = False ) : value = Comparable ( value ) return selectop ( table , field , value , operator . ge , complement = complement ) | Select rows where the given field is greater than or equal to the given value . |
10,649 | def selectcontains ( table , field , value , complement = False ) : return selectop ( table , field , value , operator . contains , complement = complement ) | Select rows where the given field contains the given value . |
10,650 | def selectin ( table , field , value , complement = False ) : return select ( table , field , lambda v : v in value , complement = complement ) | Select rows where the given field is a member of the given value . |
10,651 | def selectnotin ( table , field , value , complement = False ) : return select ( table , field , lambda v : v not in value , complement = complement ) | Select rows where the given field is not a member of the given value . |
10,652 | def selectis ( table , field , value , complement = False ) : return selectop ( table , field , value , operator . is_ , complement = complement ) | Select rows where the given field is the given value . |
10,653 | def selectisnot ( table , field , value , complement = False ) : return selectop ( table , field , value , operator . is_not , complement = complement ) | Select rows where the given field is not the given value . |
10,654 | def selectisinstance ( table , field , value , complement = False ) : return selectop ( table , field , value , isinstance , complement = complement ) | Select rows where the given field is an instance of the given type . |
10,655 | def selectrangeopenleft ( table , field , minv , maxv , complement = False ) : minv = Comparable ( minv ) maxv = Comparable ( maxv ) return select ( table , field , lambda v : minv <= v < maxv , complement = complement ) | Select rows where the given field is greater than or equal to minv and less than maxv . |
10,656 | def selecttrue ( table , field , complement = False ) : return select ( table , field , lambda v : bool ( v ) , complement = complement ) | Select rows where the given field evaluates True . |
10,657 | def selectfalse ( table , field , complement = False ) : return select ( table , field , lambda v : not bool ( v ) , complement = complement ) | Select rows where the given field evaluates False . |
10,658 | def selectnone ( table , field , complement = False ) : return select ( table , field , lambda v : v is None , complement = complement ) | Select rows where the given field is None . |
10,659 | def selectnotnone ( table , field , complement = False ) : return select ( table , field , lambda v : v is not None , complement = complement ) | Select rows where the given field is not None . |
10,660 | def stdchannel_redirected ( stdchannel ) : try : s = io . StringIO ( ) old = getattr ( sys , stdchannel ) setattr ( sys , stdchannel , s ) yield s finally : setattr ( sys , stdchannel , old ) | Redirects stdout or stderr to a StringIO object . As of python 3 . 4 there is a standard library contextmanager for this but backwards compatibility! |
10,661 | def load ( param ) : return ( Pattern ( param ) if isinstance ( param , str ) else param if param is not None else Null ( ) ) | If the supplied parameter is a string assume it's a simple pattern . |
10,662 | def _multi_permission_mask ( mode ) : def compose ( f , g ) : return lambda * args , ** kwargs : g ( f ( * args , ** kwargs ) ) return functools . reduce ( compose , map ( _permission_mask , mode . split ( ',' ) ) ) | Support multiple comma - separated Unix chmod symbolic modes . |
10,663 | def _permission_mask ( mode ) : parsed = re . match ( '(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$' , mode ) if not parsed : raise ValueError ( "Unrecognized symbolic mode" , mode ) spec_map = dict ( r = 4 , w = 2 , x = 1 ) specs = ( spec_map [ perm ] for perm in parsed . group ( 'what' ) ) spec = functools . reduce... | Convert a Unix chmod symbolic mode like ugo + rwx to a function suitable for applying to a mask to affect that change . |
10,664 | def uncshare ( self ) : unc , r = self . module . splitunc ( self ) return self . _next_class ( unc ) | The UNC mount point for this path . This is empty for paths on local drives . |
10,665 | def splitall ( self ) : r parts = [ ] loc = self while loc != os . curdir and loc != os . pardir : prev = loc loc , child = prev . splitpath ( ) if loc == prev : break parts . append ( child ) parts . append ( loc ) parts . reverse ( ) return parts | r Return a list of the path components in this path . |
10,666 | def relpath ( self , start = '.' ) : cwd = self . _next_class ( start ) return cwd . relpathto ( self ) | Return this path as a relative path based from start which defaults to the current working directory . |
10,667 | def fnmatch ( self , pattern , normcase = None ) : default_normcase = getattr ( pattern , 'normcase' , self . module . normcase ) normcase = normcase or default_normcase name = normcase ( self . name ) pattern = normcase ( pattern ) return fnmatch . fnmatchcase ( name , pattern ) | Return True if self . name matches the given pattern . |
10,668 | def glob ( self , pattern ) : cls = self . _next_class return [ cls ( s ) for s in glob . glob ( self / pattern ) ] | Return a list of Path objects that match the pattern . |
10,669 | def chunks ( self , size , * args , ** kwargs ) : with self . open ( * args , ** kwargs ) as f : for chunk in iter ( lambda : f . read ( size ) or None , None ) : yield chunk | Returns a generator yielding chunks of the file so it can be read piece by piece with a simple for loop . |
10,670 | def lines ( self , encoding = None , errors = 'strict' , retain = True ) : r return self . text ( encoding , errors ) . splitlines ( retain ) | r Open this file read all lines return them in a list . |
10,671 | def _hash ( self , hash_name ) : m = hashlib . new ( hash_name ) for chunk in self . chunks ( 8192 , mode = "rb" ) : m . update ( chunk ) return m | Returns a hash object for the file at the current path . |
10,672 | def chown ( self , uid = - 1 , gid = - 1 ) : if hasattr ( os , 'chown' ) : if 'pwd' in globals ( ) and isinstance ( uid , str ) : uid = pwd . getpwnam ( uid ) . pw_uid if 'grp' in globals ( ) and isinstance ( gid , str ) : gid = grp . getgrnam ( gid ) . gr_gid os . chown ( self , uid , gid ) else : msg = "Ownership not... | Change the owner and group by names rather than the uid or gid numbers . |
10,673 | def in_place ( self , mode = 'r' , buffering = - 1 , encoding = None , errors = None , newline = None , backup_extension = None , ) : import io if set ( mode ) . intersection ( 'wa+' ) : raise ValueError ( 'Only read-only file modes can be used' ) backup_fn = self + ( backup_extension or os . extsep + 'bak' ) try : os ... | A context in which a file may be re - written in - place with new content . |
10,674 | def get_dir ( self , scope , class_ ) : prop_name = '{scope}_{class_}_dir' . format ( ** locals ( ) ) value = getattr ( self . wrapper , prop_name ) MultiPath = Multi . for_class ( self . path_class ) return MultiPath . detect ( value ) | Return the callable function from appdirs but with the result wrapped in self . path_class |
10,675 | def _next_class ( cls ) : return next ( class_ for class_ in cls . __mro__ if not issubclass ( class_ , Multi ) ) | Multi - subclasses should use the parent class |
10,676 | def render_math ( self , token ) : if token . content . startswith ( '$$' ) : return self . render_raw_text ( token ) return '${}$' . format ( self . render_raw_text ( token ) ) | Ensure Math tokens are all enclosed in two dollar signs . |
10,677 | def markdown ( iterable , renderer = HTMLRenderer ) : with renderer ( ) as renderer : return renderer . render ( Document ( iterable ) ) | Output HTML with default settings . Enables inline and block - level HTML tags . |
10,678 | def convert_file ( filename , renderer ) : try : with open ( filename , 'r' ) as fin : rendered = mistletoe . markdown ( fin , renderer ) print ( rendered , end = '' ) except OSError : sys . exit ( 'Cannot open file "{}".' . format ( filename ) ) | Parse a Markdown file and dump the output to stdout . |
10,679 | def interactive ( renderer ) : _import_readline ( ) _print_heading ( renderer ) contents = [ ] more = False while True : try : prompt , more = ( '... ' , True ) if more else ( '>>> ' , True ) contents . append ( input ( prompt ) + '\n' ) except EOFError : print ( '\n' + mistletoe . markdown ( contents , renderer ) , en... | Parse user input dump to stdout rinse and repeat . Python REPL style . |
10,680 | def toc ( self ) : from mistletoe . block_token import List def get_indent ( level ) : if self . omit_title : level -= 1 return ' ' * 4 * ( level - 1 ) def build_list_item ( heading ) : level , content = heading template = '{indent}- {content}\n' return template . format ( indent = get_indent ( level ) , content = cont... | Returns table of contents as a block_token . List instance . |
10,681 | def render_inner ( self , token ) : rendered = [ self . render ( child ) for child in token . children ] return '' . join ( rendered ) | Recursively renders child tokens . Joins the rendered strings with no space in between . |
10,682 | def complexity_entropy_multiscale ( signal , max_scale_factor = 20 , m = 2 , r = "default" ) : if r == "default" : r = 0.15 * np . std ( signal ) n = len ( signal ) per_scale_entropy_values = np . zeros ( max_scale_factor ) for i in range ( max_scale_factor ) : b = int ( np . fix ( n / ( i + 1 ) ) ) temp_ts = [ 0 ] * i... | Computes the Multiscale Entropy . Uses sample entropy with Chebyshev distance . |
10,683 | def eeg_gfp ( raws , gflp_method = "GFPL1" , scale = True , normalize = True , smoothing = None ) : gfp = { } for participant in raws : gfp [ participant ] = { } for run in raws [ participant ] : gfp [ participant ] [ run ] = { } raw = raws [ participant ] [ run ] . copy ( ) if True in set ( [ "MEG" in ch for ch in raw... | Run the GFP analysis . |
10,684 | def eeg_microstates_clustering ( data , n_microstates = 4 , clustering_method = "kmeans" , n_jobs = 1 , n_init = 25 , occurence_rejection_treshold = 0.05 , max_refitting = 5 , verbose = True ) : training_set = data . copy ( ) if verbose is True : print ( "- Initializing the clustering algorithm..." ) if clustering_meth... | Fit the clustering algorithm . |
10,685 | def eeg_microstates_plot ( method , path = "" , extension = ".png" , show_sensors_position = False , show_sensors_name = False , plot = True , save = True , dpi = 150 , contours = 0 , colorbar = False , separate = False ) : figures = [ ] names = [ ] try : microstates = method [ "microstates_good_fit" ] except KeyError ... | Plot the microstates . |
10,686 | def eeg_microstates_relabel ( method , results , microstates_labels , reverse_microstates = None ) : microstates = list ( method [ 'microstates' ] ) for index , microstate in enumerate ( method [ 'microstates' ] ) : if microstate in list ( reverse_microstates . keys ( ) ) : microstates [ index ] = reverse_microstates [... | Relabel the microstates . |
10,687 | def bio_process ( ecg = None , rsp = None , eda = None , emg = None , add = None , sampling_rate = 1000 , age = None , sex = None , position = None , ecg_filter_type = "FIR" , ecg_filter_band = "bandpass" , ecg_filter_frequency = [ 3 , 45 ] , ecg_segmenter = "hamilton" , ecg_quality_model = "default" , ecg_hrv_features... | Automated processing of bio signals . Wrapper for other bio processing functions . |
10,688 | def ecg_process ( ecg , rsp = None , sampling_rate = 1000 , filter_type = "FIR" , filter_band = "bandpass" , filter_frequency = [ 3 , 45 ] , segmenter = "hamilton" , quality_model = "default" , hrv_features = [ "time" , "frequency" ] , age = None , sex = None , position = None ) : processed_ecg = ecg_preprocess ( ecg ,... | Automated processing of ECG and RSP signals . |
10,689 | def ecg_signal_quality ( cardiac_cycles , sampling_rate , rpeaks = None , quality_model = "default" ) : if len ( cardiac_cycles ) > 200 : cardiac_cycles = cardiac_cycles . rolling ( 20 ) . mean ( ) . resample ( "3L" ) . pad ( ) if len ( cardiac_cycles ) < 200 : cardiac_cycles = cardiac_cycles . resample ( "1L" ) . pad ... | Attempt to find the recording lead and the overall and individual quality of heartbeats signal . Although used as a routine this feature is experimental . |
10,690 | def ecg_simulate ( duration = 10 , sampling_rate = 1000 , bpm = 60 , noise = 0.01 ) : cardiac = scipy . signal . wavelets . daub ( 10 ) cardiac = np . concatenate ( [ cardiac , np . zeros ( 10 ) ] ) num_heart_beats = int ( duration * bpm / 60 ) ecg = np . tile ( cardiac , num_heart_beats ) noise = np . random . normal ... | Simulates an ECG signal . |
10,691 | def rsp_process ( rsp , sampling_rate = 1000 ) : processed_rsp = { "df" : pd . DataFrame ( { "RSP_Raw" : np . array ( rsp ) } ) } biosppy_rsp = dict ( biosppy . signals . resp . resp ( rsp , sampling_rate = sampling_rate , show = False ) ) processed_rsp [ "df" ] [ "RSP_Filtered" ] = biosppy_rsp [ "filtered" ] rsp_rate ... | Automated processing of RSP signals . |
10,692 | def rsp_find_cycles ( signal ) : gradient = np . gradient ( signal ) zeros , = biosppy . tools . zero_cross ( signal = gradient , detrend = True ) phases_indices = [ ] for i in zeros : if gradient [ i + 1 ] > gradient [ i - 1 ] : phases_indices . append ( "Inspiration" ) else : phases_indices . append ( "Expiration" ) ... | Find Respiratory cycles onsets durations and phases . |
10,693 | def eeg_select_channels ( raw , channel_names ) : if isinstance ( channel_names , list ) is False : channel_names = [ channel_names ] channels , time_index = raw . copy ( ) . pick_channels ( channel_names ) [ : ] if len ( channel_names ) > 1 : channels = pd . DataFrame ( channels . T , columns = channel_names ) else : ... | Select one or several channels by name and returns them in a dataframe . |
10,694 | def eeg_create_mne_events ( onsets , conditions = None ) : event_id = { } if conditions is None : conditions = [ "Event" ] * len ( onsets ) if len ( conditions ) != len ( onsets ) : print ( "NeuroKit Warning: eeg_create_events(): conditions parameter of different length than onsets. Aborting." ) return ( ) event_names ... | Create MNE compatible events . |
10,695 | def eeg_add_events ( raw , events_channel , conditions = None , treshold = "auto" , cut = "higher" , time_index = None , number = "all" , after = 0 , before = None , min_duration = 1 ) : if isinstance ( events_channel , str ) : try : events_channel = eeg_select_channels ( raw , events_channel ) except : print ( "NeuroK... | Find events on a channel convert them into an MNE compatible format and add them to the raw data . |
10,696 | def eeg_to_all_evokeds ( all_epochs , conditions = None ) : if conditions is None : conditions = { } for participant , epochs in all_epochs . items ( ) : conditions . update ( epochs . event_id ) all_evokeds = { } for participant , epochs in all_epochs . items ( ) : evokeds = { } for cond in conditions : try : evokeds ... | Convert all_epochs to all_evokeds . |
10,697 | def eeg_to_df ( eeg , index = None , include = "all" , exclude = None , hemisphere = "both" , central = True ) : if isinstance ( eeg , mne . Epochs ) : data = { } if index is None : index = range ( len ( eeg ) ) for epoch_index , epoch in zip ( index , eeg . get_data ( ) ) : epoch = pd . DataFrame ( epoch . T ) epoch .... | Convert mne Raw or Epochs object to dataframe or dict of dataframes . |
10,698 | def plot_polarbar ( scores , labels = None , labels_size = 15 , colors = "default" , distribution_means = None , distribution_sds = None , treshold = 1.28 , fig_size = ( 15 , 15 ) ) : if isinstance ( scores , dict ) : if labels is None : labels = list ( scores . keys ( ) ) try : scores = [ scores [ key ] for key in lab... | Polar bar chart . |
10,699 | def feature_reduction ( data , method , n_features ) : if method == "PCA" : feature_red_method = sklearn . decomposition . PCA ( n_components = n_features ) data_processed = feature_red_method . fit_transform ( data ) elif method == "agglom" : feature_red_method = sklearn . cluster . FeatureAgglomeration ( n_clusters =... | Feature reduction . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.