idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
49,600 | def read_config ( file_name ) : config = { } for name in ( [ '.pyspelling.yml' , '.spelling.yml' ] if not file_name else [ file_name ] ) : if os . path . exists ( name ) : if not file_name and name == '.spelling.yml' : warn_deprecated ( "Using '.spelling.yml' as the default is deprecated. Default config is now '.pyspelling.yml'" ) with codecs . open ( name , 'r' , encoding = 'utf-8' ) as f : config = yaml_load ( f . read ( ) ) break return config | Read configuration . |
49,601 | def fix_timedelta_repr ( func ) : if version < ( 3 , 7 ) : return func def fix_timedelta ( match ) : values = match . group ( 1 ) . split ( ", " ) param_repr = ", " . join ( "{}={}" . format ( param , value ) for param , value in zip ( ( "days" , "seconds" , "microseconds" ) , values ) if value != "0" ) if not param_repr : param_repr = "0" return "timedelta({})" . format ( param_repr ) func . __doc__ = re . sub ( r"timedelta\(([^)]+)\)" , fix_timedelta , func . __doc__ ) return func | Account repr change for timedelta in Python 3 . 7 and above in docstrings . |
49,602 | def include_codemirror ( self ) : contents = [ ] js = self . _get_tag ( 'codemirror.js' , 'script' ) css = self . _get_tag ( 'codemirror.css' , 'stylesheet' ) if js and css : contents . append ( js ) contents . append ( css ) for language in self . languages : url = self . __class__ . LANGUAGE_REL_URL . format ( language ) js = self . _get_tag ( url , 'script' ) if js : contents . append ( js ) if self . theme : url = self . __class__ . THEME_REL_URL . format ( self . theme ) css = self . _get_tag ( url , 'stylesheet' ) if css : contents . append ( css ) if self . addons : for addon_type , name in self . addons : url = self . __class__ . ADDON_REL_URL . format ( addon_type , name ) js = self . _get_tag ( url , 'script' ) if js : contents . append ( js ) url = self . __class__ . ADDON_CSS_REL_URL . format ( addon_type , name ) css = self . _get_tag ( url , 'stylesheet' , False ) if css : contents . append ( css ) return Markup ( '\n' . join ( contents ) ) | Include resources in pages |
49,603 | def rst_preprocess ( file ) : with open ( file ) as fp : return re . sub ( "^\.\.\s+include:: (.*?)$" , lambda x : ( rst_preprocess ( x . group ( 1 ) ) or "" ) . rstrip ( ) , fp . read ( ) , flags = re . MULTILINE ) | Preprocess reST file to support Sphinx like include directive . Includes are relative to the current working directory . |
49,604 | def num2varint ( num ) : if num < 0xfd : return num2hexstring ( num ) elif num <= 0xffff : return 'fd' + num2hexstring ( number = num , size = 2 , little_endian = True ) elif num <= 0xffffffff : return 'fe' + num2hexstring ( number = num , size = 4 , little_endian = True ) else : return 'ff' + num2hexstring ( number = num , size = 8 , little_endian = True ) | Converts a number to a variable length Int . Used for array length header |
49,605 | def get ( self , path , params = None ) : r = requests . get ( url = self . url + path , params = params , timeout = self . timeout ) r . raise_for_status ( ) return r . json ( ) | Perform GET request |
49,606 | def post ( self , path , data = None , json_data = None , params = None ) : r = requests . post ( url = self . url + path , data = data , json = json_data , params = params , timeout = self . timeout ) try : r . raise_for_status ( ) except requests . exceptions . HTTPError : raise SwitcheoApiException ( r . json ( ) [ 'error_code' ] , r . json ( ) [ 'error_message' ] , r . json ( ) [ 'error' ] ) return r . json ( ) | Perform POST request |
49,607 | def header_check ( self , content ) : encode = None m = RE_HTML_ENCODE . search ( content ) if m : enc = m . group ( 1 ) . decode ( 'ascii' ) try : codecs . getencoder ( enc ) encode = enc except LookupError : pass else : encode = self . _has_xml_encode ( content ) return encode | Special HTML encoding check . |
49,608 | def has_bom ( self , filestream ) : content = filestream . read ( 4 ) if content == b'PK\x03\x04' : return filters . BINARY_ENCODE return None | Check if has BOM . |
49,609 | def soft_break ( self , el , text ) : if self . type == 'docx' and el . namespace == self . namespaces [ 'w' ] and el . name == 'p' : text . append ( '\n' ) if self . type == 'pptx' and el . namespace == self . namespaces [ 'a' ] and el . name == 'p' : text . append ( '\n' ) | Apply soft break . |
49,610 | def copy ( self ) : other = ProtoFeed ( ) for key in cs . PROTOFEED_ATTRS : value = getattr ( self , key ) if isinstance ( value , pd . DataFrame ) : value = value . copy ( ) setattr ( other , key , value ) return other | Return a copy of this ProtoFeed that is a feed with all the same attributes . |
49,611 | def clone ( root , jsontreecls = jsontree , datetimeencoder = _datetimeencoder , datetimedecoder = _datetimedecoder ) : return json . loads ( json . dumps ( root , cls = JSONTreeEncoder , datetimeencoder = datetimeencoder ) , cls = JSONTreeDecoder , jsontreecls = jsontreecls , datetimedecoder = datetimedecoder ) | Clone an object by first serializing out and then loading it back in . |
49,612 | def load ( fp , encoding = None , cls = JSONTreeDecoder , object_hook = None , parse_float = None , parse_int = None , parse_constant = None , object_pairs_hook = None , ** kargs ) : return json . load ( fp , encoding , cls , object_hook , parse_float , parse_int , parse_constant , object_pairs_hook , ** kargs ) | JSON load from file function that defaults the loading class to be JSONTreeDecoder |
49,613 | def loads ( s , encoding = None , cls = JSONTreeDecoder , object_hook = None , parse_float = None , parse_int = None , parse_constant = None , object_pairs_hook = None , ** kargs ) : return json . loads ( s , encoding , cls , object_hook , parse_float , parse_int , parse_constant , object_pairs_hook , ** kargs ) | JSON load from string function that defaults the loading class to be JSONTreeDecoder |
49,614 | def header_check ( self , content ) : encode = None m = RE_PY_ENCODE . match ( content ) if m : if m . group ( 1 ) : encode = m . group ( 1 ) . decode ( 'ascii' ) elif m . group ( 2 ) : encode = m . group ( 2 ) . decode ( 'ascii' ) if encode is None : encode = 'utf-8' return encode | Special Python encoding check . |
49,615 | def eval_string_type ( self , text , is_string = False ) : stype = set ( ) wstype = set ( ) for m in RE_ITER_STRING_TYPES . finditer ( text ) : value = m . group ( 0 ) if value == '*' : wstype . add ( 'u' ) wstype . add ( 'f' ) wstype . add ( 'r' ) wstype . add ( 'b' ) elif value . endswith ( '*' ) : wstype . add ( value [ 0 ] . lower ( ) ) else : stype . add ( value . lower ( ) ) if is_string and 'b' not in stype and 'f' not in stype : stype . add ( 'u' ) return stype , wstype | Evaluate string type . |
49,616 | def process_strings ( self , string , docstrings = False ) : m = RE_STRING_TYPE . match ( string ) stype = self . get_string_type ( m . group ( 1 ) if m . group ( 1 ) else '' ) if not self . match_string ( stype ) and not docstrings : return '' , False is_bytes = 'b' in stype is_raw = 'r' in stype is_format = 'f' in stype content = m . group ( 3 ) if is_raw and ( not is_format or not self . decode_escapes ) : string = self . norm_nl ( content ) elif is_raw and is_format : string = self . norm_nl ( FE_RFESC . sub ( self . replace_unicode , content ) ) elif is_bytes : string = self . norm_nl ( RE_BESC . sub ( self . replace_bytes , content ) ) elif is_format : string = self . norm_nl ( RE_FESC . sub ( self . replace_unicode , content ) ) else : string = self . norm_nl ( RE_ESC . sub ( self . replace_unicode , content ) ) return textwrap . dedent ( RE_NON_PRINTABLE . sub ( '\n' , string ) if is_bytes else string ) , is_bytes | Process escapes . |
49,617 | def find_slots ( cls ) : slots = set ( ) for c in cls . __mro__ : cslots = getattr ( c , "__slots__" , tuple ( ) ) if not cslots : continue elif isinstance ( cslots , ( bstr , ustr ) ) : cslots = ( cslots , ) slots . update ( cslots ) return slots | Return a set of all slots for a given class and its parents |
49,618 | def _is_valid_date ( obj , accept_none = True ) : if accept_none and obj is None : return True return isinstance ( obj , date ) and not isinstance ( obj , datetime ) | Check if an object is an instance of or a subclass deriving from a date . However it does not consider datetime or subclasses thereof as valid dates . |
49,619 | def empty ( cls ) : self = cls . __new__ ( cls ) self . _range = _empty_internal_range return self | Returns an empty set . An empty set is unbounded and only contains the empty set . |
49,620 | def contains ( self , other ) : if self . is_valid_range ( other ) : if not self : return not other elif not other or other . startsafter ( self ) and other . endsbefore ( self ) : return True else : return False elif self . is_valid_scalar ( other ) : is_within_lower = True if not self . lower_inf : lower_cmp = operator . le if self . lower_inc else operator . lt is_within_lower = lower_cmp ( self . lower , other ) is_within_upper = True if not self . upper_inf : upper_cmp = operator . ge if self . upper_inc else operator . gt is_within_upper = upper_cmp ( self . upper , other ) return is_within_lower and is_within_upper else : raise TypeError ( "Unsupported type to test for inclusion '{0.__class__.__name__}'" . format ( other ) ) | Return True if this contains other . Other may be either range of same type or scalar of same type as the boundaries . |
49,621 | def within ( self , other ) : if not self . is_valid_range ( other ) : raise TypeError ( "Unsupported type to test for inclusion '{0.__class__.__name__}'" . format ( other ) ) return other . contains ( self ) | Tests if this range is within other . |
49,622 | def overlap ( self , other ) : if not self or not other : return False if self < other : a , b = self , other else : a , b = other , self if a . upper_inf or b . lower_inf : return True return a . upper > b . lower or a . upper == b . lower and a . upper_inc and b . lower_inc | Returns True if both ranges share any points . |
49,623 | def adjacent ( self , other ) : if not self . is_valid_range ( other ) : raise TypeError ( "Unsupported type to test for inclusion '{0.__class__.__name__}'" . format ( other ) ) elif not self or not other : return False return ( ( self . lower == other . upper and self . lower_inc != other . upper_inc ) or ( self . upper == other . lower and self . upper_inc != other . lower_inc ) ) | Returns True if ranges are directly next to each other but does not overlap . |
49,624 | def union ( self , other ) : if not self . is_valid_range ( other ) : msg = "Unsupported type to test for union '{.__class__.__name__}'" raise TypeError ( msg . format ( other ) ) if not self : return other elif not other : return self if self < other : a , b = self , other else : a , b = other , self if ( a . upper < b . lower or a . upper == b . lower and not a . upper_inc and not b . lower_inc ) and not a . adjacent ( b ) : raise ValueError ( "Ranges must be either adjacent or overlapping" ) if a . upper == b . upper : upper = a . upper upper_inc = a . upper_inc or b . upper_inc elif a . upper < b . upper : upper = b . upper upper_inc = b . upper_inc else : upper = a . upper upper_inc = a . upper_inc return self . __class__ ( a . lower , upper , a . lower_inc , upper_inc ) | Merges this range with a given range . |
49,625 | def difference ( self , other ) : if not self . is_valid_range ( other ) : msg = "Unsupported type to test for difference '{.__class__.__name__}'" raise TypeError ( msg . format ( other ) ) if not self or not other or not self . overlap ( other ) : return self elif self in other : return self . empty ( ) elif other in self and not ( self . startswith ( other ) or self . endswith ( other ) ) : raise ValueError ( "Other range must not be within this range" ) elif self . endsbefore ( other ) : return self . replace ( upper = other . lower , upper_inc = not other . lower_inc ) elif self . startsafter ( other ) : return self . replace ( lower = other . upper , lower_inc = not other . upper_inc ) else : return self . empty ( ) | Compute the difference between this and a given range . |
49,626 | def intersection ( self , other ) : if not self . is_valid_range ( other ) : msg = "Unsupported type to test for intersection '{.__class__.__name__}'" raise TypeError ( msg . format ( other ) ) if not self or not other or not self . overlap ( other ) : return self . empty ( ) lower_end_span = self if self . startsafter ( other ) else other upper_end_span = self if self . endsbefore ( other ) else other return lower_end_span . replace ( upper = upper_end_span . upper , upper_inc = upper_end_span . upper_inc ) | Returns a new range containing all points shared by both ranges . If no points are shared an empty range is returned . |
49,627 | def startswith ( self , other ) : if self . is_valid_range ( other ) : if self . lower_inc == other . lower_inc : return self . lower == other . lower else : return False elif self . is_valid_scalar ( other ) : if self . lower_inc : return self . lower == other else : return False else : raise TypeError ( "Unsupported type to test for starts with '{}'" . format ( other . __class__ . __name__ ) ) | Test if this range starts with other . other may be either range or scalar . |
49,628 | def endswith ( self , other ) : if self . is_valid_range ( other ) : if self . upper_inc == other . upper_inc : return self . upper == other . upper else : return False elif self . is_valid_scalar ( other ) : if self . upper_inc : return self . upper == other else : return False else : raise TypeError ( "Unsupported type to test for ends with '{}'" . format ( other . __class__ . __name__ ) ) | Test if this range ends with other . other may be either range or scalar . |
49,629 | def startsafter ( self , other ) : if self . is_valid_range ( other ) : if self . lower == other . lower : return other . lower_inc or not self . lower_inc elif self . lower_inf : return False elif other . lower_inf : return True else : return self . lower > other . lower elif self . is_valid_scalar ( other ) : return self . lower >= other else : raise TypeError ( "Unsupported type to test for starts after '{}'" . format ( other . __class__ . __name__ ) ) | Test if this range starts after other . other may be either range or scalar . This only takes the lower end of the ranges into consideration . If the scalar or the lower end of the given range is greater than or equal to this range s lower end True is returned . |
49,630 | def endsbefore ( self , other ) : if self . is_valid_range ( other ) : if self . upper == other . upper : return not self . upper_inc or other . upper_inc elif self . upper_inf : return False elif other . upper_inf : return True else : return self . upper <= other . upper elif self . is_valid_scalar ( other ) : return self . upper <= other else : raise TypeError ( "Unsupported type to test for ends before '{}'" . format ( other . __class__ . __name__ ) ) | Test if this range ends before other . other may be either range or scalar . This only takes the upper end of the ranges into consideration . If the scalar or the upper end of the given range is less than or equal to this range s upper end True is returned . |
49,631 | def left_of ( self , other ) : if not self . is_valid_range ( other ) : msg = ( "Left of is not supported for '{}', provide a proper range " "class" ) . format ( other . __class__ . __name__ ) raise TypeError ( msg ) return self < other and not self . overlap ( other ) | Test if this range other is strictly left of other . |
49,632 | def offset ( self , offset ) : if not self : return self offset_type = self . type if self . offset_type is None else self . offset_type if offset is not None and not isinstance ( offset , offset_type ) : raise TypeError ( ( "Invalid type for offset '{offset_type.__name__}'" " expected '{expected_type.__name__}'" ) . format ( expected_type = offset_type , offset_type = offset . __class__ ) ) lower = None if self . lower is None else self . lower + offset upper = None if self . upper is None else self . upper + offset return self . replace ( lower = lower , upper = upper ) | Shift the range to the left or right with the given offset |
49,633 | def from_date ( cls , date , period = None ) : if period is None or period == "day" : return cls ( date , date , upper_inc = True ) elif period == "week" : start = date - timedelta ( date . weekday ( ) ) return cls ( start , start + timedelta ( 7 ) ) elif period == "american_week" : start = date - timedelta ( ( date . weekday ( ) + 1 ) % 7 ) return cls ( start , start + timedelta ( 7 ) ) elif period == "month" : start = date . replace ( day = 1 ) return cls ( start , ( start + timedelta ( 31 ) ) . replace ( day = 1 ) ) elif period == "quarter" : start = date . replace ( month = ( date . month - 1 ) // 3 * 3 + 1 , day = 1 ) return cls ( start , ( start + timedelta ( 93 ) ) . replace ( day = 1 ) ) elif period == "year" : start = date . replace ( month = 1 , day = 1 ) return cls ( start , ( start + timedelta ( 366 ) ) . replace ( day = 1 ) ) else : raise ValueError ( "Unexpected period, got {!r}" . format ( period ) ) | Create a day long daterange for the given date . |
49,634 | def from_week ( cls , year , iso_week ) : first_day = date_from_iso_week ( year , iso_week ) return cls . from_date ( first_day , period = "week" ) | Create daterange based on a year and an ISO week |
49,635 | def from_month ( cls , year , month ) : first_day = date ( year , month , 1 ) return cls . from_date ( first_day , period = "month" ) | Create daterange based on a year and a month |
49,636 | def from_quarter ( cls , year , quarter ) : quarter_months = { 1 : 1 , 2 : 4 , 3 : 7 , 4 : 10 , } if quarter not in quarter_months : error_msg = ( "quarter is not a valid quarter. Expected a value between 1 " "and 4 got {!r}" ) raise ValueError ( error_msg . format ( quarter ) ) first_day = date ( year , quarter_months [ quarter ] , 1 ) return cls . from_date ( first_day , period = "quarter" ) | Create daterange based on a year and quarter . |
49,637 | def from_year ( cls , year ) : first_day = date ( year , 1 , 1 ) return cls . from_date ( first_day , period = "year" ) | Create daterange based on a year |
49,638 | def offset ( self , offset ) : span = self if offset > 0 : for i in iter_range ( offset ) : span = span . next_period ( ) elif offset < 0 : for i in iter_range ( - offset ) : span = span . prev_period ( ) return span | Offset the date range by the given amount of periods . |
49,639 | def prev_period ( self ) : return self . from_date ( self . prev ( self . lower ) , period = self . period ) | The period before this range . |
49,640 | def get_package_hashes ( filename ) : log . debug ( 'Getting package hashes' ) filename = os . path . abspath ( filename ) with open ( filename , 'rb' ) as f : data = f . read ( ) _hash = hashlib . sha256 ( data ) . hexdigest ( ) log . debug ( 'Hash for file %s: %s' , filename , _hash ) return _hash | Provides hash of given filename . |
49,641 | def extend_src ( self , content , context ) : self . extend_src_text ( content , context , self . block_comments , 'block-comment' ) self . extend_src_text ( content , context , self . line_comments , 'line-comment' ) | Extend source list . |
49,642 | def _filter ( self , text , context , encoding ) : content = [ ] self . current_encoding = encoding self . line_num = 1 self . prev_line = - 1 self . leading = '' self . block_comments = [ ] self . line_comments = [ ] self . find_content ( text ) self . extend_src ( content , context ) return content | Filter JavaScript comments . |
49,643 | def savefits ( cube , fitsname , ** kwargs ) : dropdeg = kwargs . pop ( 'dropdeg' , False ) ndim = len ( cube . dims ) FITSINFO = get_data ( 'decode' , 'data/fitsinfo.yaml' ) hdrdata = yaml . load ( FITSINFO , dc . utils . OrderedLoader ) if ndim == 2 : header = fits . Header ( hdrdata [ 'dcube_2d' ] ) data = cube . values . T elif ndim == 3 : if dropdeg : header = fits . Header ( hdrdata [ 'dcube_2d' ] ) data = cube . values [ : , : , 0 ] . T else : header = fits . Header ( hdrdata [ 'dcube_3d' ] ) kidfq = cube . kidfq . values freqrange = ~ np . isnan ( kidfq ) orderedfq = np . argsort ( kidfq [ freqrange ] ) newcube = cube [ : , : , orderedfq ] data = newcube . values . T else : raise TypeError ( ndim ) if cube . coordsys == 'AZEL' : header . update ( { 'CTYPE1' : 'dAZ' , 'CTYPE2' : 'dEL' } ) elif cube . coordsys == 'RADEC' : header . update ( { 'OBSRA' : float ( cube . xref ) , 'OBSDEC' : float ( cube . yref ) } ) else : pass header . update ( { 'CRVAL1' : float ( cube . x [ 0 ] ) , 'CDELT1' : float ( cube . x [ 1 ] - cube . x [ 0 ] ) , 'CRVAL2' : float ( cube . y [ 0 ] ) , 'CDELT2' : float ( cube . y [ 1 ] - cube . y [ 0 ] ) , 'DATE' : datetime . now ( timezone ( 'UTC' ) ) . isoformat ( ) } ) if ( ndim == 3 ) and ( not dropdeg ) : header . update ( { 'CRVAL3' : float ( newcube . kidfq [ 0 ] ) , 'CDELT3' : float ( newcube . kidfq [ 1 ] - newcube . kidfq [ 0 ] ) } ) fitsname = str ( Path ( fitsname ) . expanduser ( ) ) fits . writeto ( fitsname , data , header , ** kwargs ) logger . info ( '{} has been created.' . format ( fitsname ) ) | Save a cube to a 3D - cube FITS file . |
49,644 | def loadnetcdf ( filename , copy = True ) : filename = str ( Path ( filename ) . expanduser ( ) ) if copy : dataarray = xr . open_dataarray ( filename ) . copy ( ) else : dataarray = xr . open_dataarray ( filename , chunks = { } ) if dataarray . name is None : dataarray . name = filename . rstrip ( '.nc' ) for key , val in dataarray . coords . items ( ) : if val . dtype . kind == 'S' : dataarray [ key ] = val . astype ( 'U' ) elif val . dtype == np . int32 : dataarray [ key ] = val . astype ( 'i8' ) return dataarray | Load a dataarray from a NetCDF file . |
49,645 | def savenetcdf ( dataarray , filename = None ) : if filename is None : if dataarray . name is not None : filename = dataarray . name else : filename = uuid4 ( ) . hex [ : 8 ] else : filename = str ( Path ( filename ) . expanduser ( ) ) if not filename . endswith ( '.nc' ) : filename += '.nc' dataarray . to_netcdf ( filename ) logger . info ( '{} has been created.' . format ( filename ) ) | Save a dataarray to a NetCDF file . |
49,646 | def convert ( self , text , encoding ) : if self . normalize in ( 'NFC' , 'NFKC' , 'NFD' , 'NFKD' ) : text = unicodedata . normalize ( self . normalize , text ) if self . convert_encoding : text = text . encode ( self . convert_encoding , self . errors ) . decode ( self . convert_encoding ) encoding = self . convert_encoding return text , encoding | Convert the text . |
49,647 | def get_zip_content ( self , filename ) : with zipfile . ZipFile ( filename , 'r' ) as z : self . determine_file_type ( z ) for item in z . infolist ( ) : if glob . globmatch ( item . filename , self . filepattern , flags = self . FLAGS ) : yield z . read ( item . filename ) , item . filename | Get zip content . |
49,648 | def get_content ( self , zipbundle ) : for content , filename in self . get_zip_content ( zipbundle ) : with io . BytesIO ( content ) as b : encoding = self . _analyze_file ( b ) if encoding is None : encoding = self . default_encoding b . seek ( 0 ) text = b . read ( ) . decode ( encoding ) yield text , filename , encoding | Get content . |
49,649 | def content_break ( self , el ) : should_break = False if self . type == 'odp' : if el . name == 'page' and el . namespace and el . namespace == self . namespaces [ 'draw' ] : should_break = True return should_break | Break on specified boundaries . |
49,650 | def soft_break ( self , el , text ) : if el . name == 'p' and el . namespace and el . namespace == self . namespaces [ "text" ] : text . append ( '\n' ) | Apply soft break if needed . |
49,651 | def extract_tag_metadata ( self , el ) : if self . type == 'odp' : if el . namespace and el . namespace == self . namespaces [ 'draw' ] and el . name == 'page-thumbnail' : name = el . attrs . get ( 'draw:page-number' , '' ) self . additional_context = 'slide{}:' . format ( name ) super ( ) . extract_tag_metadata ( el ) | Extract meta data . |
49,652 | def get_sub_node ( self , node ) : subnode = node . find ( 'office:document' ) if subnode : mimetype = subnode . attrs [ 'office:mimetype' ] self . type = MIMEMAP [ mimetype ] node = node . find ( 'office:body' ) return node | Extract node from document if desired . |
49,653 | def array ( data , tcoords = None , chcoords = None , scalarcoords = None , datacoords = None , attrs = None , name = None ) : array = xr . DataArray ( data , dims = ( 't' , 'ch' ) , attrs = attrs , name = name ) array . dca . _initcoords ( ) if tcoords is not None : array . coords . update ( { key : ( 't' , tcoords [ key ] ) for key in tcoords } ) if chcoords is not None : array . coords . update ( { key : ( 'ch' , chcoords [ key ] ) for key in chcoords } ) if scalarcoords is not None : array . coords . update ( scalarcoords ) if datacoords is not None : array . coords . update ( { key : ( ( 't' , 'ch' ) , datacoords [ key ] ) for key in datacoords } ) return array | Create an array as an instance of xarray . DataArray with Decode accessor . |
49,654 | def zeros ( shape , dtype = None , ** kwargs ) : data = np . zeros ( shape , dtype ) return dc . array ( data , ** kwargs ) | Create an array of given shape and type filled with zeros . |
49,655 | def ones ( shape , dtype = None , ** kwargs ) : data = np . ones ( shape , dtype ) return dc . array ( data , ** kwargs ) | Create an array of given shape and type filled with ones . |
49,656 | def full ( shape , fill_value , dtype = None , ** kwargs ) : return ( dc . zeros ( shape , ** kwargs ) + fill_value ) . astype ( dtype ) | Create an array of given shape and type filled with fill_value . |
49,657 | def empty ( shape , dtype = None , ** kwargs ) : data = np . empty ( shape , dtype ) return dc . array ( data , ** kwargs ) | Create an array of given shape and type without initializing entries . |
49,658 | def zeros_like ( array , dtype = None , keepmeta = True ) : if keepmeta : return xr . zeros_like ( array , dtype ) else : return dc . zeros ( array . shape , dtype ) | Create an array of zeros with the same shape and type as the input array . |
49,659 | def ones_like ( array , dtype = None , keepmeta = True ) : if keepmeta : return xr . ones_like ( array , dtype ) else : return dc . ones ( array . shape , dtype ) | Create an array of ones with the same shape and type as the input array . |
49,660 | def full_like ( array , fill_value , reverse = False , dtype = None , keepmeta = True ) : if keepmeta : return ( dc . zeros_like ( array ) + fill_value ) . astype ( dtype ) else : return dc . full ( array . shape , fill_value , dtype ) | Create an array of fill_value with the same shape and type as the input array . |
49,661 | def empty_like ( array , dtype = None , keepmeta = True ) : if keepmeta : return dc . empty ( array . shape , dtype , tcoords = array . dca . tcoords , chcoords = array . dca . chcoords , scalarcoords = array . dca . scalarcoords , attrs = array . attrs , name = array . name ) else : return dc . empty ( array . shape , dtype ) | Create an array of empty with the same shape and type as the input array . |
49,662 | def iter_tasks ( matrix , names , groups ) : name_index = dict ( [ ( task . get ( 'name' , '' ) , index ) for index , task in enumerate ( matrix ) ] ) for index , task in enumerate ( matrix ) : name = task . get ( 'name' , '' ) group = task . get ( 'group' , '' ) hidden = task . get ( 'hidden' , False ) if names and name in names and index == name_index [ name ] : yield task elif groups and group in groups and not hidden : yield task elif not names and not groups and not hidden : yield task | Iterate tasks . |
49,663 | def spellcheck ( config_file , names = None , groups = None , binary = '' , checker = '' , sources = None , verbose = 0 , debug = False ) : hunspell = None aspell = None spellchecker = None config = util . read_config ( config_file ) if sources is None : sources = [ ] matrix = config . get ( 'matrix' , [ ] ) preferred_checker = config . get ( 'spellchecker' , 'aspell' ) if not matrix : matrix = config . get ( 'documents' , [ ] ) if matrix : util . warn_deprecated ( "'documents' key in config is deprecated. 'matrix' should be used going forward." ) groups = set ( ) if groups is None else set ( groups ) names = set ( ) if names is None else set ( names ) if ( len ( names ) != 1 and len ( sources ) ) : sources = [ ] for task in iter_tasks ( matrix , names , groups ) : if not checker : checker = preferred_checker if checker == "hunspell" : if hunspell is None : hunspell = Hunspell ( config , binary , verbose , debug ) spellchecker = hunspell elif checker == "aspell" : if aspell is None : aspell = Aspell ( config , binary , verbose , debug ) spellchecker = aspell else : raise ValueError ( '%s is not a valid spellchecker!' % checker ) spellchecker . log ( 'Using %s to spellcheck %s' % ( checker , task . get ( 'name' , '' ) ) , 1 ) for result in spellchecker . run_task ( task , source_patterns = sources ) : spellchecker . log ( 'Context: %s' % result . context , 2 ) yield result spellchecker . log ( "" , 1 ) | Spell check . |
49,664 | def get_error ( self , e ) : import traceback return traceback . format_exc ( ) if self . debug else str ( e ) | Get the error . |
49,665 | def _pipeline_step ( self , sources , options , personal_dict , filter_index = 1 , flow_status = flow_control . ALLOW ) : for source in sources : if source . _has_error ( ) : yield source elif not source . _is_bytes ( ) and filter_index < len ( self . pipeline_steps ) : f = self . pipeline_steps [ filter_index ] if isinstance ( f , flow_control . FlowControl ) : err = '' try : status = f . _run ( source . category ) except Exception as e : err = self . get_error ( e ) yield filters . SourceText ( '' , source . context , '' , '' , err ) if not err : if filter_index < len ( self . pipeline_steps ) : yield from self . _pipeline_step ( [ source ] , options , personal_dict , filter_index + 1 , status ) else : if flow_status == flow_control . ALLOW : err = '' try : srcs = f . _run ( source ) except Exception as e : err = self . get_error ( e ) yield filters . SourceText ( '' , source . context , '' , '' , err ) if not err : yield from self . _pipeline_step ( srcs , options , personal_dict , filter_index + 1 ) elif flow_status == flow_control . SKIP : yield from self . _pipeline_step ( [ source ] , options , personal_dict , filter_index + 1 ) else : yield source else : yield source | Recursively run text objects through the pipeline steps . |
49,666 | def _spelling_pipeline ( self , sources , options , personal_dict ) : for source in self . _pipeline_step ( sources , options , personal_dict ) : if source . _has_error ( ) : yield Results ( [ ] , source . context , source . category , source . error ) elif not source . text or source . text . isspace ( ) : continue else : encoding = source . encoding if source . _is_bytes ( ) : text = source . text else : if encoding . startswith ( ( 'utf-16' , 'utf-32' ) ) : encoding = 'utf-8' text = source . text . encode ( encoding ) self . log ( '' , 3 ) self . log ( text , 3 ) cmd = self . setup_command ( encoding , options , personal_dict ) self . log ( "Command: " + str ( cmd ) , 4 ) try : wordlist = util . call_spellchecker ( cmd , input_text = text , encoding = encoding ) yield Results ( [ w for w in sorted ( set ( wordlist . replace ( '\r' , '' ) . split ( '\n' ) ) ) if w ] , source . context , source . category ) except Exception as e : err = self . get_error ( e ) yield Results ( [ ] , source . context , source . category , err ) | Check spelling pipeline . |
49,667 | def _walk_src ( self , targets , flags , pipeline ) : for target in targets : for f in glob . iglob ( target , flags = flags | glob . S ) : if not os . path . isdir ( f ) : self . log ( '' , 2 ) self . log ( '> Processing: %s' % f , 1 ) if pipeline : try : yield pipeline [ 0 ] . _run_first ( f ) except Exception as e : err = self . get_error ( e ) yield [ filters . SourceText ( '' , f , '' , '' , err ) ] else : try : if self . default_encoding : encoding = filters . PYTHON_ENCODING_NAMES . get ( self . default_encoding , self . default_encoding ) . lower ( ) encoding = codecs . lookup ( encoding ) . name else : encoding = self . default_encoding yield [ filters . SourceText ( '' , f , encoding , 'file' ) ] except Exception as e : err = self . get_error ( e ) yield [ filters . SourceText ( '' , f , '' , '' , err ) ] | Walk source and parse files . |
49,668 | def _build_pipeline ( self , task ) : self . pipeline_steps = [ ] kwargs = { } if self . default_encoding : kwargs [ "default_encoding" ] = self . default_encoding steps = task . get ( 'pipeline' , [ ] ) if steps is None : self . pipeline_steps = None else : if not steps : steps = task . get ( 'filters' , [ ] ) if steps : util . warn_deprecated ( "'filters' key in config is deprecated. 'pipeline' should be used going forward." ) if not steps : steps . append ( 'pyspelling.filters.text' ) for step in steps : if isinstance ( step , dict ) : name , options = list ( step . items ( ) ) [ 0 ] else : name = step options = { } if options is None : options = { } module = self . _get_module ( name ) if issubclass ( module , filters . Filter ) : self . pipeline_steps . append ( module ( options , ** kwargs ) ) elif issubclass ( module , flow_control . FlowControl ) : if self . pipeline_steps : self . pipeline_steps . append ( module ( options ) ) else : raise ValueError ( "Pipeline cannot start with a 'Flow Control' plugin!" ) else : raise ValueError ( "'%s' is not a valid plugin!" % name ) | Build up the pipeline . |
49,669 | def _get_module ( self , module ) : if isinstance ( module , str ) : mod = importlib . import_module ( module ) for name in ( 'get_plugin' , 'get_filter' ) : attr = getattr ( mod , name , None ) if attr is not None : break if name == 'get_filter' : util . warn_deprecated ( "'get_filter' is deprecated. Plugins should use 'get_plugin'." ) if not attr : raise ValueError ( "Could not find the 'get_plugin' function in module '%s'!" % module ) return attr ( ) | Get module . |
49,670 | def _to_flags ( self , text ) : flags = 0 for x in text . split ( '|' ) : value = x . strip ( ) . upper ( ) if value : flags |= self . GLOB_FLAG_MAP . get ( value , 0 ) return flags | Convert text representation of flags to actual flags . |
49,671 | def run_task ( self , task , source_patterns = None ) : self . log ( 'Running Task: %s...' % task . get ( 'name' , '' ) , 1 ) self . default_encoding = task . get ( 'default_encoding' , '' ) options = self . setup_spellchecker ( task ) personal_dict = self . setup_dictionary ( task ) glob_flags = self . _to_flags ( task . get ( 'glob_flags' , "N|B|G" ) ) self . _build_pipeline ( task ) if not source_patterns : source_patterns = task . get ( 'sources' , [ ] ) for sources in self . _walk_src ( source_patterns , glob_flags , self . pipeline_steps ) : if self . pipeline_steps is not None : yield from self . _spelling_pipeline ( sources , options , personal_dict ) else : yield from self . spell_check_no_pipeline ( sources , options , personal_dict ) | Walk source and initiate spell check . |
49,672 | def setup_dictionary ( self , task ) : dictionary_options = task . get ( 'dictionary' , { } ) output = os . path . abspath ( dictionary_options . get ( 'output' , self . dict_bin ) ) lang = dictionary_options . get ( 'lang' , 'en_US' ) wordlists = dictionary_options . get ( 'wordlists' , [ ] ) if lang and wordlists : self . compile_dictionary ( lang , dictionary_options . get ( 'wordlists' , [ ] ) , None , output ) else : output = None return output | Setup dictionary . |
49,673 | def spell_check_no_pipeline ( self , sources , options , personal_dict ) : for source in sources : if source . _has_error ( ) : yield Results ( [ ] , source . context , source . category , source . error ) cmd = self . setup_command ( source . encoding , options , personal_dict , source . context ) self . log ( '' , 3 ) self . log ( "Command: " + str ( cmd ) , 4 ) try : wordlist = util . call_spellchecker ( cmd , input_text = None , encoding = source . encoding ) yield Results ( [ w for w in sorted ( set ( wordlist . replace ( '\r' , '' ) . split ( '\n' ) ) ) if w ] , source . context , source . category ) except Exception as e : err = self . get_error ( e ) yield Results ( [ ] , source . context , source . category , err ) | Spell check without the pipeline . |
49,674 | def setup_command ( self , encoding , options , personal_dict , file_name = None ) : cmd = [ self . binary , '-l' ] if encoding : cmd . extend ( [ '-i' , encoding ] ) if personal_dict : cmd . extend ( [ '-p' , personal_dict ] ) allowed = { 'check-apostrophe' , 'check-url' , 'd' , 'H' , 'i' , 'n' , 'O' , 'r' , 't' , 'X' } for k , v in options . items ( ) : if k in allowed : key = ( '-%s' if len ( k ) == 1 else '--%s' ) % k if isinstance ( v , bool ) and v is True : cmd . append ( key ) elif isinstance ( v , str ) : cmd . extend ( [ key , v ] ) elif isinstance ( v , int ) : cmd . extend ( [ key , str ( v ) ] ) elif isinstance ( v , list ) : for value in v : cmd . extend ( [ key , str ( value ) ] ) if file_name is not None : cmd . append ( file_name ) return cmd | Setup command . |
49,675 | def setup ( self ) : self . allow = self . config [ 'allow' ] self . halt = self . config [ 'halt' ] self . skip = self . config [ 'skip' ] | Get default configuration . |
49,676 | def match ( self , category , pattern ) : return fnmatch . fnmatch ( category , pattern , flags = self . FNMATCH_FLAGS ) | Match the category . |
49,677 | def adjust_flow ( self , category ) : status = flow_control . SKIP for allow in self . allow : if self . match ( category , allow ) : status = flow_control . ALLOW for skip in self . skip : if self . match ( category , skip ) : status = flow_control . SKIP for halt in self . halt : if self . match ( category , halt ) : status = flow_control . HALT if status != flow_control . ALLOW : break return status | Adjust the flow of source control objects . |
49,678 | def plot_tcoords ( array , coords , scantypes = None , ax = None , ** kwargs ) : if ax is None : ax = plt . gca ( ) if scantypes is None : ax . plot ( array [ coords [ 0 ] ] , array [ coords [ 1 ] ] , label = 'ALL' , ** kwargs ) else : for scantype in scantypes : ax . plot ( array [ coords [ 0 ] ] [ array . scantype == scantype ] , array [ coords [ 1 ] ] [ array . scantype == scantype ] , label = scantype , ** kwargs ) ax . set_xlabel ( coords [ 0 ] ) ax . set_ylabel ( coords [ 1 ] ) ax . set_title ( '{} vs {}' . format ( coords [ 1 ] , coords [ 0 ] ) ) ax . legend ( ) logger . info ( '{} vs {} has been plotted.' . format ( coords [ 1 ] , coords [ 0 ] ) ) | Plot coordinates related to the time axis . |
49,679 | def plot_timestream ( array , kidid , xtick = 'time' , scantypes = None , ax = None , ** kwargs ) : if ax is None : ax = plt . gca ( ) index = np . where ( array . kidid == kidid ) [ 0 ] if len ( index ) == 0 : raise KeyError ( 'Such a kidid does not exist.' ) index = int ( index ) if scantypes is None : if xtick == 'time' : ax . plot ( array . time , array [ : , index ] , label = 'ALL' , ** kwargs ) elif xtick == 'index' : ax . plot ( np . ogrid [ : len ( array . time ) ] , array [ : , index ] , label = 'ALL' , ** kwargs ) else : for scantype in scantypes : if xtick == 'time' : ax . plot ( array . time [ array . scantype == scantype ] , array [ : , index ] [ array . scantype == scantype ] , label = scantype , ** kwargs ) elif xtick == 'index' : ax . plot ( np . ogrid [ : len ( array . time [ array . scantype == scantype ] ) ] , array [ : , index ] [ array . scantype == scantype ] , label = scantype , ** kwargs ) ax . set_xlabel ( '{}' . format ( xtick ) ) ax . set_ylabel ( str ( array . datatype . values ) ) ax . legend ( ) kidtpdict = { 0 : 'wideband' , 1 : 'filter' , 2 : 'blind' } try : kidtp = kidtpdict [ int ( array . kidtp [ index ] ) ] except KeyError : kidtp = 'filter' ax . set_title ( 'ch #{} ({})' . format ( kidid , kidtp ) ) logger . info ( 'timestream data (ch={}) has been plotted.' . format ( kidid ) ) | Plot timestream data . |
49,680 | def plot_chmap ( cube , kidid , ax = None , ** kwargs ) : if ax is None : ax = plt . gca ( ) index = np . where ( cube . kidid == kidid ) [ 0 ] if len ( index ) == 0 : raise KeyError ( 'Such a kidid does not exist.' ) index = int ( index ) im = ax . pcolormesh ( cube . x , cube . y , cube [ : , : , index ] . T , ** kwargs ) ax . set_xlabel ( 'x' ) ax . set_ylabel ( 'y' ) ax . set_title ( 'intensity map ch #{}' . format ( kidid ) ) return im | Plot an intensity map . |
49,681 | def plotallanvar ( data , dt , tmax = 10 , ax = None , ** kwargs ) : if ax is None : ax = plt . gca ( ) tk , allanvar = allan_variance ( data , dt , tmax ) ax . loglog ( tk , allanvar , ** kwargs ) ax . set_xlabel ( 'Time [s]' ) ax . set_ylabel ( 'Allan Variance' ) ax . legend ( ) | Plot Allan variance . |
49,682 | def _filter ( self , text ) : if self . urls : text = RE_LINK . sub ( '' , text ) if self . emails : text = RE_MAIL . sub ( '' , text ) return text | Filter out the URL and email addresses . |
49,683 | def get_mac_dot_app_dir ( directory ) : return os . path . dirname ( os . path . dirname ( os . path . dirname ( directory ) ) ) | Returns parent directory of mac . app |
49,684 | def copy_function ( func , name = None ) : code = func . __code__ newname = name or func . __name__ newcode = CodeType ( code . co_argcount , code . co_kwonlyargcount , code . co_nlocals , code . co_stacksize , code . co_flags , code . co_code , code . co_consts , code . co_names , code . co_varnames , code . co_filename , newname , code . co_firstlineno , code . co_lnotab , code . co_freevars , code . co_cellvars , ) newfunc = FunctionType ( newcode , func . __globals__ , newname , func . __defaults__ , func . __closure__ , ) newfunc . __dict__ . update ( func . __dict__ ) return newfunc | Copy a function object with different name . |
49,685 | def one_thread_per_process ( ) : try : import mkl is_mkl = True except ImportError : is_mkl = False if is_mkl : n_threads = mkl . get_max_threads ( ) mkl . set_num_threads ( 1 ) try : yield finally : mkl . set_num_threads ( n_threads ) else : yield | Return a context manager where only one thread is allocated to a process . |
49,686 | def sample_upper_hull ( upper_hull , random_stream ) : cdf = cumsum ( [ node . pr for node in upper_hull ] ) U = random_stream . rand ( ) node = next ( ( node for node , cdf_value in zip ( upper_hull , cdf ) if U < cdf_value ) , upper_hull [ - 1 ] ) U = random_stream . rand ( ) m , left , right = node . m , node . left , node . right M = max ( m * right , m * left ) x = ( log ( U * ( exp ( m * right - M ) - exp ( m * left - M ) ) + exp ( m * left - M ) ) + M ) / m assert ( x >= left and x <= right ) if isinf ( x ) or isnan ( x ) : raise ValueError ( "sampled an infinite or 'nan' x" ) return x | Return a single value randomly sampled from the given upper_hull . |
49,687 | def check_for_required_columns ( problems , table , df ) : r = cs . PROTOFEED_REF req_columns = r . loc [ ( r [ 'table' ] == table ) & r [ 'column_required' ] , 'column' ] . values for col in req_columns : if col not in df . columns : problems . append ( [ 'error' , 'Missing column {!s}' . format ( col ) , table , [ ] ] ) return problems | Check that the given ProtoFeed table has the required columns . |
49,688 | def check_for_invalid_columns ( problems , table , df ) : r = cs . PROTOFEED_REF valid_columns = r . loc [ r [ 'table' ] == table , 'column' ] . values for col in df . columns : if col not in valid_columns : problems . append ( [ 'warning' , 'Unrecognized column {!s}' . format ( col ) , table , [ ] ] ) return problems | Check for invalid columns in the given ProtoFeed DataFrame . |
49,689 | def validate ( pfeed , * , as_df = True , include_warnings = True ) : problems = [ ] checkers = [ 'check_frequencies' , 'check_meta' , 'check_service_windows' , 'check_shapes' , 'check_stops' , ] for checker in checkers : problems . extend ( globals ( ) [ checker ] ( pfeed , include_warnings = include_warnings ) ) return gt . format_problems ( problems , as_df = as_df ) | Check whether the given pfeed satisfies the ProtoFeed spec . |
49,690 | def youtube ( keyword = None ) : if keyword is None : web . open ( 'https://www.youtube.com/watch?v=L_mBVT2jBFw' ) else : web . open ( quote ( 'https://www.youtube.com/results?search_query={}' . format ( keyword ) , RESERVED ) ) | Open youtube . |
49,691 | def replace_surrogates ( self , m ) : high , low = ord ( m . group ( 1 ) ) , ord ( m . group ( 2 ) ) return chr ( ( high - 0xD800 ) * 0x400 + low - 0xDC00 + 0x10000 ) | Replace surrogates . |
49,692 | def override_config ( self , options ) : for k , v in options . items ( ) : if k not in self . config : raise KeyError ( "'{}' is not a valid option for '{}'" . format ( k , self . __class__ . __name__ ) ) self . validate_options ( k , v ) self . config [ k ] = v | Override the default configuration . |
49,693 | def build_stop_ids ( shape_id ) : return [ cs . SEP . join ( [ 'stp' , shape_id , str ( i ) ] ) for i in range ( 2 ) ] | Create a pair of stop IDs based on the given shape ID . |
49,694 | def build_agency ( pfeed ) : return pd . DataFrame ( { 'agency_name' : pfeed . meta [ 'agency_name' ] . iat [ 0 ] , 'agency_url' : pfeed . meta [ 'agency_url' ] . iat [ 0 ] , 'agency_timezone' : pfeed . meta [ 'agency_timezone' ] . iat [ 0 ] , } , index = [ 0 ] ) | Given a ProtoFeed return a DataFrame representing agency . txt |
49,695 | def build_routes ( pfeed ) : f = pfeed . frequencies [ [ 'route_short_name' , 'route_long_name' , 'route_type' , 'shape_id' ] ] . drop_duplicates ( ) . copy ( ) f [ 'route_id' ] = 'r' + f [ 'route_short_name' ] . map ( str ) del f [ 'shape_id' ] return f | Given a ProtoFeed return a DataFrame representing routes . txt . |
49,696 | def build_shapes ( pfeed ) : rows = [ ] for shape , geom in pfeed . shapes [ [ 'shape_id' , 'geometry' ] ] . itertuples ( index = False ) : if shape not in pfeed . shapes_extra : continue if pfeed . shapes_extra [ shape ] == 2 : shid = shape + '-1' new_rows = [ [ shid , i , lon , lat ] for i , ( lon , lat ) in enumerate ( geom . coords ) ] rows . extend ( new_rows ) shid = shape + '-0' new_rows = [ [ shid , i , lon , lat ] for i , ( lon , lat ) in enumerate ( reversed ( geom . coords ) ) ] rows . extend ( new_rows ) else : shid = '{}{}{}' . format ( shape , cs . SEP , pfeed . shapes_extra [ shape ] ) new_rows = [ [ shid , i , lon , lat ] for i , ( lon , lat ) in enumerate ( geom . coords ) ] rows . extend ( new_rows ) return pd . DataFrame ( rows , columns = [ 'shape_id' , 'shape_pt_sequence' , 'shape_pt_lon' , 'shape_pt_lat' ] ) | Given a ProtoFeed return DataFrame representing shapes . txt . Only use shape IDs that occur in both pfeed . shapes and pfeed . frequencies . Create reversed shapes where routes traverse shapes in both directions . |
49,697 | def _generate_content ( self ) : dic = self . config dic [ 'mode' ] = self . language if self . theme : dic [ 'theme' ] = self . theme return json . dumps ( dic , indent = 8 , separators = ( ',' , ': ' ) ) | Dumps content using JSON to send to CodeMirror |
49,698 | def ignore_file_extension ( self , extension ) : logger . info ( 'Ignoring file extension: {}' . format ( extension ) ) self . watcher . ignore_file_extension ( extension ) | Configure a file extension to be ignored . |
49,699 | def should_ignore ( self , filename ) : _ , ext = os . path . splitext ( filename ) return ext in self . ignored_file_extensions | Should ignore a given filename? |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.