| idx (int64, 0–63k) | question (string, 53–5.28k chars) | target (string, 5–805 chars) |
|---|---|---|
| 51,100 | def contrast_rms(data, *kwargs): av = np.average(data, *kwargs) mal = 1 / (data.shape[0] * data.shape[1]) return np.sqrt(mal * np.sum(np.square(data - av))) | Compute RMS contrast norm of an image |
| 51,101 | def spectral(data, lambd, *kwargs): fftdata = np.fft.fftn(data) fftdata[0, 0] = 0 kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1) ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1) kmax = (2 * np.pi) / (2 * lambd) fftdata[np. ... | Compute spectral contrast of image |
| 51,102 | def write(s, path, encoding="utf-8"): is_gzip = is_gzip_file(path) with open(path, "wb") as f: if is_gzip: f.write(zlib.compress(s.encode(encoding))) else: f.write(s.encode(encoding)) | Write string to text file. |
| 51,103 | def read(path, encoding="utf-8"): is_gzip = is_gzip_file(path) with open(path, "rb") as f: if is_gzip: return zlib.decompress(f.read()).decode(encoding) else: return f.read().decode(encoding) | Read string from text file. |
| 51,104 | def smartread(path): with open(path, "rb") as f: content = f.read() result = chardet.detect(content) return content.decode(result["encoding"]) | Read text from file, automatically detecting the encoding; chardet is required. |
| 51,105 | def to_utf8(path, output_path=None): if output_path is None: basename, ext = os.path.splitext(path) output_path = basename + "-UTF8Encode" + ext text = smartread(path) write(text, output_path) | Convert any text file to UTF-8 encoding. |
| 51,106 | def load_cache(self): with open(self.cache_path, "rb") as f: print("Loading cached Zotero data...") cache = pickle.load(f) self._references = cache[self.CACHE_REFERENCE_LIST] self.reference_types = cache[self.CACHE_REFERENCE_TYPES] self.reference_templates = cache[self.CACHE_REFER... | Load the cached Zotero data. |
| 51,107 | def load_distant(self): print("Loading distant Zotero data...") self._references = self.get_references() self.reference_types = self.get_reference_types() self.reference_templates = self.get_reference_templates(self.reference_types) print("Distant Zotero data loaded.") self.cache() | Load the distant Zotero data. |
| 51,108 | def cache(self): with open(self.cache_path, "wb") as f: cache = {self.CACHE_REFERENCE_LIST: self._references, self.CACHE_REFERENCE_TYPES: self.reference_types, self.CACHE_REFERENCE_TEMPLATES: self.reference_templates} pickle.dump(cache, f) | Cache the Zotero data. |
| 51,109 | def create_distant_reference(self, ref_data): self.validate_reference_data(ref_data) creation_status = self._zotero_lib.create_items([ref_data]) try: created_item = creation_status["successful"]["0"] return created_item except KeyError as e: print(creation_status) raise CreateZoteroItemErr... | Validate and create the reference in Zotero and return the created item. |
| 51,110 | def update_local_reference(self, index, ref): self._references[index] = ref self.cache() | Replace the reference in the reference list and cache it. |
| 51,111 | def update_distant_reference(self, ref): self.validate_reference_data(ref["data"]) self._zotero_lib.update_item(ref) | Validate and update the reference in Zotero. |
| 51,112 | def validate_reference_data(self, ref_data): try: self._zotero_lib.check_items([ref_data]) except InvalidItemFields as e: raise InvalidZoteroItemError from e | Validate the reference data. |
| 51,113 | def get_reference_types(self): item_types = self._zotero_lib.item_types() return sorted([x["itemType"] for x in item_types]) | Return the reference types. |
| 51,114 | def get_reference_templates(self, ref_types): return OrderedDict([(x, self.get_reference_template(x)) for x in ref_types]) | Return the reference templates for the types as an ordered dictionary. |
| 51,115 | def get_reference_template(self, ref_type): template = self._zotero_lib.item_template(ref_type) return OrderedDict(sorted(template.items(), key=lambda x: x[0])) | Return the reference template for the type as an ordered dictionary. |
| 51,116 | def reference_extra_field(self, field, index): ref_data = self.reference_data(index) extra_fields = ref_data["extra"].split("\n") field_id = field + ":" matched = next((x for x in extra_fields if x.startswith(field_id)), None) if matched: return matched.replace(field_id, "", 1).s... | Return the value of the field in extra, None otherwise. |
| 51,117 | def reference_doi(self, index): return self.reference_data(index).get("DOI", self.reference_extra_field("DOI", index)) | Return the reference DOI. |
| 51,118 | def reference_year(self, index): ref_date = self.reference_date(index) try: return parse(ref_date).year except ValueError: matched = re.search(r"\d{4}", ref_date) if matched: return int(matched.group()) else: return "" | Return the reference publication year. |
| 51,119 | def reference_journal(self, index): ref_type = self.reference_type(index) if ref_type == "journalArticle": return self.reference_data(index)["publicationTitle"] else: return "({})".format(ref_type) | Return the reference journal name. |
| 51,120 | def reference_index(self, ref_id): try: indexes = range(self.reference_count()) return next(i for i in indexes if self.reference_id(i) == ref_id) except StopIteration as e: raise ReferenceNotFoundError("ID: " + ref_id) from e | Return the index of the first reference with this ID. |
| 51,121 | def computePDFSimilarity(paperId, userPDF): if not isPDFInDb(paperId): return None userPDF.save("temp.pdf") check_call(['pdftotext', '-enc', 'UTF-8', "temp.pdf", "temp.txt"]) os.remove("temp.pdf") a = open("temp.txt", 'r').read() b = open(join(dbPath, paperId) + ".txt", 'r') ... | Remove punctuation, lowercase, stem. |
| 51,122 | def install_plugins(plugins, app, install_type, config): try: disable_plugins = config.disable_plugins if not disable_plugins: disable_plugins = [] except AttributeError: disable_plugins = [] for plugin_name in plugins: plugin_group = f'backendai_{plugin_name}_v10' registry = PluginRegistry(plugin_name... | Automatically install plugins to the app. |
| 51,123 | def load_cell(fname="HL60_field.zip"): "Load zip file and return complex field" here = op.dirname(op.abspath(__file__)) data = op.join(here, "data") arc = zipfile.ZipFile(op.join(data, fname)) for f in arc.filelist: with arc.open(f) as fd: if f.filename.count("imag"): i... | Load zip file and return complex field |
| 51,124 | def bootstrap(nside, rand, nbar, *data): def split(data, indices, axis): s = [] s.append(slice(0, indices[0])) for i in range(len(indices) - 1): s.append(slice(indices[i], indices[i + 1])) s.append(slice(indices[-1], None)) rt = [] for ss in s: ind = [sli... | This function will bootstrap data based on the sky coverage of rand. It differs from bootstrapping in the traditional sense, but for correlation functions it gives the correct answer with less computation. |
| 51,125 | def load_project_flag_list_file(self, project_exceptions, project): if self.loaded: return exception_file = None for item in project_exceptions: if project in item: exception_file = item.get(project) if exception_file is not None: try: with open(exception_file, 'r') as f: ex = yaml.safe_load(... | Loads project-specific lists |
| 51,126 | def binary_hash(self, project, patch_file): global il exception_file = None try: project_exceptions = il.get('project_exceptions') except KeyError: logger.info('project_exceptions missing in %s for %s', ignore_list, project) for project_files in project_exceptions: if project in project_files: exc... | Gathers sha256 hashes from binary lists |
| 51,127 | def file_audit_list(self, project): project_list = False self.load_project_flag_list_file(il.get('project_exceptions'), project) try: default_list = set((fl['file_audits']['file_names'])) except KeyError: logger.error('Key Error processing file_names list values') try: project_list = ... | Gathers file name lists |
| 51,128 | def file_content_list(self, project): project_list = False self.load_project_flag_list_file(il.get('project_exceptions'), project) try: flag_list = (fl['file_audits']['file_contents']) except KeyError: logger.error('Key Error processing file_contents list values') try: ignore_list = il[... | Gathers content strings |
| 51,129 | def ignore_directories(self, project): project_list = False try: ignore_directories = il['ignore_directories'] except KeyError: logger.error('Key Error processing ignore_directories list values') try: project_exceptions = il.get('project_exceptions') for item in project_exceptions: if project in i... | Gathers a list of directories to ignore |
| 51,130 | def download_from_github(fname, path): base_url = 'https://github.com/ornlneutronimaging/ImagingReso/blob/master/ImagingReso/reference_data/' f = fname + '?raw=true' url = base_url + f block_size = 16384 req = urlopen(url) if sys.version_info[0] < 3: file_size = int(req.info().getheaders('Conten... | Download database from GitHub |
| 51,131 | def get_list_element_from_database(database='ENDF_VII'): _file_path = os.path.abspath(os.path.dirname(__file__)) _ref_data_folder = os.path.join(_file_path, 'reference_data') _database_folder = os.path.join(_ref_data_folder, database) if not os.path.exists(_ref_data_folder): ... | Return a string array of all the elements in the database |
| 51,132 | def get_sigma(database_file_name='', e_min=np.NaN, e_max=np.NaN, e_step=np.NaN, t_kelvin=None): file_extension = os.path.splitext(database_file_name)[1] if t_kelvin is None: if file_extension != '.csv': raise IOError("Cross-section File type must be '.csv'") else: _df = get_datab... | Retrieve the energy and sigma axes for the given isotope |
| 51,133 | def temp_repo(url, branch, commit=''): tmp_folder = tempfile.mkdtemp() git.Repo.clone_from(url, tmp_folder, branch=branch) if commit: git_cmd = git.Git(tmp_folder) git_cmd.checkout(commit) yield tmp_folder shutil.rmtree(tmp_folder) | Clone a git repository inside a temporary folder, yield the folder, then delete the folder. |
| 51,134 | def force_move(source, destination): if not os.path.exists(destination): raise RuntimeError('The code could not be moved to {destination} ' 'because the folder does not exist'.format(destination=destination)) destination_folder = os.path.join(destination, os.path.split(source)[-1... | Force the move of the source inside the destination, even if the destination already contains a folder with the same name; in that case the folder will be replaced. |
| 51,135 | def _run_command_inside_folder(command, folder): logger.debug("command: %s", command) process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, cwd=folder) stream_data = process.communicate()[0] logger.debug("%s stdout: %s (RC %s)", command, stream_data, process.ret... | Run a command inside the given folder. |
| 51,136 | def parse_url(url): try: url = unicode(url) except NameError: url = url parsed = pystache.parse(url) variables = (element.key for element in parsed._parse_tree if isinstance(element, _EscapeNode)) return pystache.render(url, {variable: os.environ[variable] for variable in variables}... | Parse the given url and update it with environment values if required. |
| 51,137 | def _move_modules(self, temp_repo, destination): folders = self._get_module_folders(temp_repo) for folder in folders: force_move(folder, destination) | Move modules from the temp directory to the destination. |
| 51,138 | def _get_module_folders(self, temp_repo): paths = (os.path.join(temp_repo, path) for path in os.listdir(temp_repo) if self._is_module_included(path)) return (path for path in paths if os.path.isdir(path)) | Get a list of module paths contained in a temp directory. |
| 51,139 | def _is_module_included(self, module): if module in self.exclude_modules: return False if self.include_modules is None: return True return module in self.include_modules | Evaluate if the module must be included in the Odoo addons. |
| 51,140 | def _move_modules(self, temp_repo, destination): tmp_addons = os.path.join(temp_repo, 'addons') tmp_odoo_addons = os.path.join(temp_repo, 'odoo/addons') folders = self._get_module_folders(tmp_addons) for folder in folders: force_move(folder, tmp_odoo_addons) tmp_odoo = os.path.join(... | Move Odoo modules from the temp directory to the destination. |
| 51,141 | def apply(self, folder): logger.info("Apply Patch %s@%s (commit %s)", self.url, self.branch, self.commit) remote_name = 'patch' commands = ["git remote add {} {}".format(remote_name, self.url), "git fetch {} {}".format(remote_name, self.branch), 'git merge {} -m "patch"'.format(... | Merge code from the given repo url into the git repo contained in the given folder. |
| 51,142 | def apply(self, folder): logger.info("Apply Patch File %s", self.file_path) command = "git apply {}".format(self.file_path) return_code, stream_data = _run_command_inside_folder(command, folder) if return_code: msg = "Could not apply patch file at {}. Error: {}".format(self.file_path, s... | Apply a patch from a git patch file. |
| 51,143 | def _set_up_context(cls): cls.context = AttributeDict() cls.context.new_meta = {} cls.context.new_transitions = {} cls.context.new_methods = {} | Create a context to keep all needed variables in. |
| 51,144 | def _check_states_enum(cls): states_enum_name = cls.context.get_config('states_enum_name') try: cls.context['states_enum'] = getattr(cls.context.new_class, states_enum_name) except AttributeError: raise ValueError('No states enum given!') proper = True try: if not issubclass(cls.contex... | Check if the states enum exists and is a proper one. |
| 51,145 | def _check_if_states_are_strings(cls): for item in list(cls.context.states_enum): if not isinstance(item.value, six.string_types): raise ValueError('Item {name} is not string. Only strings are allowed.'.format(name=item.name)) | Check if all states are strings. |
| 51,146 | def _check_state_value(cls): state_value = cls.context.get_config('initial_state', None) state_value = state_value or getattr(cls.context.new_class, cls.context.state_name, None) if not state_value: raise ValueError("Empty state is disallowed, yet no initial state is given!") state_value = ... | Check that the initial state value is proper and translate it. |
| 51,147 | def _add_standard_attributes(cls): setattr(cls.context.new_class, cls.context.new_meta['state_attribute_name'], cls.context.state_value) setattr(cls.context.new_class, cls.context.state_name, utils.state_property) setattr(cls.context.new_class, 'is_', utils.is_) setattr... | Add attributes common to all state machines. |
| 51,148 | def _generate_standard_transitions(cls): allowed_transitions = cls.context.get_config('transitions', {}) for key, transitions in allowed_transitions.items(): key = cls.context.new_meta['translator'].translate(key) new_transitions = set() for trans in transitions: if not isinstance(tr... | Generate methods used for transitions. |
| 51,149 | def _generate_standard_methods(cls): for state in cls.context.states_enum: getter_name = 'is_{name}'.format(name=state.value) cls.context.new_methods[getter_name] = utils.generate_getter(state) setter_name = 'set_{name}'.format(name=state.value) cls.context.new_methods[sette... | Generate standard setters, getters, and checkers. |
| 51,150 | def _add_new_methods(cls): for name, method in cls.context.new_methods.items(): if hasattr(cls.context.new_class, name): raise ValueError("Name collision in state machine class - '{name}'.".format(name=name)) setattr(cls.context.new_class, name, method) | Add all generated methods to the result class. |
| 51,151 | def _set_complete_option(cls): get_config = cls.context.get_config complete = get_config('complete', None) if complete is None: conditions = [get_config('transitions', False), get_config('named_transitions', False)] complete = not any(conditions) cls.context.new_meta['complete'] = co... | Check and set the complete option. |
| 51,152 | def data_directory(): package_directory = os.path.abspath(os.path.dirname(__file__)) return os.path.join(package_directory, "data") | Return the absolute path to the directory containing the package data. |
| 51,153 | def filterMapAttrs(records=getIndex(), **tags): if len(tags) == 0: return records ret = [] for record in records: if matchRecordAttrs(record, tags): ret.append(record) return ret | Matches available maps if their attributes match as specified |
| 51,154 | def matchRecordAttrs(mapobj, attrs): for k, v in iteritems(attrs): try: val = getattr(mapobj, k) except AttributeError: if bool(v): return False else: continue if val != v: return False return True | Attempt to match the given attributes against a single map object's attributes |
| 51,155 | def to_boolean(value, ctx): if isinstance(value, bool): return value elif isinstance(value, int): return value != 0 elif isinstance(value, Decimal): return value != Decimal(0) elif isinstance(value, str): value = value.lower() if value == 'true': return True elif value == 'false': retu... | Tries conversion of any value to a boolean |
| 51,156 | def to_integer(value, ctx): if isinstance(value, bool): return 1 if value else 0 elif isinstance(value, int): return value elif isinstance(value, Decimal): try: val = int(value.to_integral_exact(ROUND_HALF_UP)) if isinstance(val, int): return val except ArithmeticError: pass elif isin... | Tries conversion of any value to an integer |
| 51,157 | def to_decimal(value, ctx): if isinstance(value, bool): return Decimal(1) if value else Decimal(0) elif isinstance(value, int): return Decimal(value) elif isinstance(value, Decimal): return value elif isinstance(value, str): try: return Decimal(value) except Exception: pass raise ... | Tries conversion of any value to a decimal |
| 51,158 | def to_string(value, ctx): if isinstance(value, bool): return "TRUE" if value else "FALSE" elif isinstance(value, int): return str(value) elif isinstance(value, Decimal): return format_decimal(value) elif isinstance(value, str): return value elif type(value) == datetime.date: return... | Tries conversion of any value to a string |
| 51,159 | def to_date(value, ctx): if isinstance(value, str): temporal = ctx.get_date_parser().auto(value) if temporal is not None: return to_date(temporal, ctx) elif type(value) == datetime.date: return value elif isinstance(value, datetime.datetime): return value.date() raise Evaluation... | Tries conversion of any value to a date |
| 51,160 | def to_datetime(value, ctx): if isinstance(value, str): temporal = ctx.get_date_parser().auto(value) if temporal is not None: return to_datetime(temporal, ctx) elif type(value) == datetime.date: return ctx.timezone.localize(datetime.datetime.combine(value, datetime.time(0... | Tries conversion of any value to a datetime |
| 51,161 | def to_date_or_datetime(value, ctx): if isinstance(value, str): temporal = ctx.get_date_parser().auto(value) if temporal is not None: return temporal elif type(value) == datetime.date: return value elif isinstance(value, datetime.datetime): return value.astimezone(ctx.timezone) r... | Tries conversion of any value to a date or datetime |
| 51,162 | def to_time(value, ctx): if isinstance(value, str): time = ctx.get_date_parser().time(value) if time is not None: return time elif isinstance(value, datetime.time): return value elif isinstance(value, datetime.datetime): return value.astimezone(ctx.timezone).time() raise Eva... | Tries conversion of any value to a time |
| 51,163 | def to_same(value1, value2, ctx): if type(value1) == type(value2): return value1, value2 try: return to_decimal(value1, ctx), to_decimal(value2, ctx) except EvaluationError: pass try: d1, d2 = to_date_or_datetime(value1, ctx), to_date_or_datetime(value2, ctx) if type(value1) != ty... | Converts a pair of arguments to their most-likely types. This deviates from Excel, which doesn't auto-convert values, but is necessary for us to intuitively handle contact fields, which don't use the correct value type |
| 51,164 | def is_containerized() -> bool: try: cginfo = Path('/proc/self/cgroup').read_text() if '/docker/' in cginfo or '/lxc/' in cginfo: return True except IOError: return False | Check if I am running inside a Linux container. |
| 51,165 | def detect_cloud() -> str: if sys.platform.startswith('linux'): try: mb = Path('/sys/devices/virtual/dmi/id/board_vendor').read_text().lower() if 'amazon' in mb: return 'amazon' except IOError: pass try: bios = Path('/sys/devices/virtual/dmi/id/bios_version').read_text().lower() if... | Detect the cloud provider I am running on. |
| 51,166 | def refocus(field, d, nm, res, method="helmholtz", num_cpus=1, padding=True): fshape = len(field.shape) assert fshape in [1, 2], "Dimension of `field` must be 1 or 2." func = fft_propagate names = func.__code__.co_varnames[:func.__code__.co_argcount] loc = locals() vardict = dict... | Refocus a 1D or 2D field |
| 51,167 | def refocus_stack(fieldstack, d, nm, res, method="helmholtz", num_cpus=_cpu_count, copy=True, padding=True): func = refocus names = func.__code__.co_varnames[:func.__code__.co_argcount] loc = locals() vardict = dict() for name in names: if name in loc.keys(): vardict[name] = ... | Refocus a stack of 1D or 2D fields |
| 51,168 | def fft_propagate(fftfield, d, nm, res, method="helmholtz", ret_fft=False): fshape = len(fftfield.shape) assert fshape in [1, 2], "Dimension of `fftfield` must be 1 or 2." if fshape == 1: func = fft_propagate_2d else: func = fft_propagate_3d names = func.__code__.co_varnames[:func.__cod... | Propagates a 1D or 2D Fourier transformed field |
| 51,169 | def fft_propagate_2d(fftfield, d, nm, res, method="helmholtz", ret_fft=False): assert len(fftfield.shape) == 1, "Dimension of `fftfield` must be 1." km = (2 * np.pi * nm) / res kx = np.fft.fftfreq(len(fftfield)) * 2 * np.pi if method == "helmholtz": root_km = km ** 2 - kx ** 2 rt0 = ... | Propagate a 1D Fourier transformed field in 2D |
| 51,170 | def fft_propagate_3d(fftfield, d, nm, res, method="helmholtz", ret_fft=False): assert len(fftfield.shape) == 2, "Dimension of `fftfield` must be 2." km = (2 * np.pi * nm) / res kx = (np.fft.fftfreq(fftfield.shape[0]) * 2 * np.pi).reshape(-1, 1) ky = (np.fft.fftfreq(f... | Propagate a 2D Fourier transformed field in 3D |
| 51,171 | def autofocus(field, nm, res, ival, roi=None, metric="average gradient", padding=True, ret_d=False, ret_grad=False, num_cpus=1): if metric == "average gradient": def metric_func(x): return metrics.average_gradient(np.abs(x)) elif metric == "rms contrast": def metric_func(x):... | Numerical autofocusing of a field using the Helmholtz equation. |
| 51,172 | def autofocus_stack(fieldstack, nm, res, ival, roi=None, metric="average gradient", padding=True, same_dist=False, ret_ds=False, ret_grads=False, num_cpus=_cpu_count, copy=True): dopt = list() grad = list() M = fieldstack.shape[0] stackargs = list() for s in range(M): stack... | Numerical autofocusing of a stack using the Helmholtz equation. |
| 51,173 | def getIndex(folderPath=None): try: return cache.structure except AttributeError: pass if folderPath == None: from sc2maptool.startup import setup folderPath = setup() def folderSearch(path, attrList=[]): ret = [] for item in glob(os.path.join(path, '*')): if item == os.sep: conti... | Parse the Maps subfolder directory, divining criteria for valid maps |
| 51,174 | def _stringify_number(v): if isinstance(v, (float, Decimal)): if math.isinf(v) and v > 0: v = 'Infinity' elif math.isinf(v) and v < 0: v = '-Infinity' else: v = '{:f}'.format(v) elif isinstance(v, BinarySize): v = '{:d}'.format(int(v)) elif isinstance(v, int): v = '{:d}'... | Stringify a number, preventing unwanted scientific notation. |
| 51,175 | async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd): alias_target = None repeats = 0 while repeats < 8: prev_alias_key = alias_key alias_key = await etcd.get(f'images/_aliases/{alias_key}') if alias_key is None: alias_target = prev_alias_key break repeats += 1 else: raise AliasResolutio... | Resolve the tag using etcd so that the current instance indicates a concrete latest image. |
| 51,176 | def _init_logging(anteater_log): LOG.setLevel(logging.DEBUG) ch = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(name)s - ' '%(levelname)s - %(message)s') ch.setFormatter(formatter) ch.setLevel(logging.DEBUG) path = os.path.dirname(anteater_log) try: os. ... | Set up the root logger for the package |
| 51,177 | def check_dir(): try: os.makedirs(reports_dir) logger.info('Creating reports directory: %s', reports_dir) except OSError as e: if e.errno != errno.EEXIST: raise | Creates a directory for scan reports |
| 51,178 | def main(): _init_logging(config.get('config', 'anteater_log')) check_dir() arguments = docopt(__doc__, version=__version__) if arguments['<patchset>']: prepare_patchset(arguments['<project>'], arguments['<patchset>'], arguments['--binaries'], arguments['--ips'], arguments['-... | Main function mostly for passing arguments |
| 51,179 | def fit(model_code, *args, **kwargs): kwargs = dict(kwargs) kwargs['model_code'] = model_code if 'n_jobs' not in kwargs: kwargs['n_jobs'] = -1 if model_code in FIT_CACHE: print("Reusing model.") kwargs['fit'] = FIT_CACHE[model_code] else: print("NOT reusing model.") start = time.time... | Fit a Stan model. Caches the compiled model. |
| 51,180 | def count(self, other, r, attrs=None, info={}): r = numpy.array(r, dtype='f8') return _core.KDNode.count(self, other, r, attrs, info=info) | Gray & Moore based fast dual tree counting. |
| 51,181 | def fof(self, linkinglength, out=None, method='splay'): if out is None: out = numpy.empty(self.size, dtype='intp') return _core.KDNode.fof(self, linkinglength, out, method) | Friend-of-Friend clustering with linking length. |
| 51,182 | def integrate(self, min, max, attr=None, info={}): if numpy.isscalar(min): min = [min for i in range(self.ndims)] if numpy.isscalar(max): max = [max for i in range(self.ndims)] min = numpy.array(min, dtype='f8', order='C') max = numpy.array(max, dtype='f8', orde... | Calculate the total number of points in [min, max). |
| 51,183 | def make_forest(self, chunksize): heap = [] heappush(heap, (-self.size, self)) while True: w, x = heappop(heap) if w == 0: heappush(heap, (0, x)) break if x.less is None or (x.size < chunksize): heappush(heap, (0, x)) continue heappush(heap, (x.less.size, x.less))... | Divide a tree branch into a forest, with each subtree of size at most chunksize |
| 51,184 | def _install_all(destination='', conf_file=''): dir_path = os.path.dirname(os.path.realpath(__file__)) destination = destination or os.path.join(dir_path, '..', '3rd') conf_file = conf_file or os.path.join(dir_path, '..', "third_party_addons.yaml") work_directory = os.path.d... | Use the conf file to list all the third-party Odoo add-ons that will be installed and the patches that should be applied. |
| 51,185 | def find_lt(a, x): i = bisect.bisect_left(a, x) if i: return a[i - 1] raise ValueError | Find rightmost value less than x |
| 51,186 | def parse(isatab_ref): if os.path.isdir(isatab_ref): fnames = glob.glob(os.path.join(isatab_ref, "i_*.txt")) + glob.glob(os.path.join(isatab_ref, "*.idf.txt")) assert len(fnames) == 1 isatab_ref = fnames[0] assert os.path.exists(isatab_ref), "Did not find investigatio... | Entry point to parse an ISA-Tab directory. isatab_ref can point to a directory of ISA-Tab data, in which case we search for the investigator file, or be a reference to the high-level investigation file. |
| 51,187 | def _parse_region(self, rec, line_iter): had_info = False keyvals, section = self._parse_keyvals(line_iter) if keyvals: rec.metadata = keyvals[0] while section and section[0] != "STUDY": had_info = True keyvals, next_section = self._parse_keyvals(line_iter) attr_name = self._sections[sec... | Parse a section of an ISA-Tab, assigning information to a supplied record. |
| 51,188 | def _line_iter(self, in_handle): reader = csv.reader(in_handle, dialect="excel-tab") for line in reader: if len(line) > 0 and line[0]: if line[0].upper() == line[0] and "".join(line[1:]) == "": line = [line[0]] yield line | Read a tab-delimited file, handling ISA-Tab special-case headers. |
| 51,189 | def parse(self, rec): final_studies = [] for study in rec.studies: source_data = self._parse_study(study.metadata["Study File Name"], ["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"]) if source_data: study.nodes = source_data final_assays = [] for assay in study.assays: cur_assay = I... | Retrieve row data from files associated with the ISATabRecord. |
| 51,190 | def _parse_study(self, fname, node_types): if not os.path.exists(os.path.join(self._dir, fname)): return None nodes = {} with open(os.path.join(self._dir, fname), "rU") as in_handle: reader = csv.reader(in_handle, dialect="excel-tab") header = self._swap_synonyms(next... | Parse a study or assay row-oriented file around the supplied base node. |
| 51,191 | def _finalize_metadata(self, node): final = {} for key, val in iter(node.metadata.items()): final[key] = list(val) node.metadata = final return node | Convert node metadata back into a standard dictionary and list. |
| 51,192 | def _line_by_type(self, line, header, hgroups, htypes, out, want_type, collapse_quals_fn=None): for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type): col = hgroups[index][0] key = header[col] if collapse_quals_fn: val = collapse_quals_fn(line, header, hgroup... | Parse out key-value pairs for line information based on a group of values. |
| 51,193 | def _collapse_attributes(self, line, header, indexes): names = [] vals = [] pat = re.compile("[\W]+") for i in indexes: names.append(pat.sub("_", self._clean_header(header[i]))) vals.append(line[i]) Attrs = collections.namedtuple('Attrs', names) return Attrs(*vals) | Combine attributes in multiple columns into a single named tuple. |
| 51,194 | def _characterize_header(self, header, hgroups): out = [] for h in [header[g[0]] for g in hgroups]: this_ctype = None for ctype, names in self._col_types.items(): if h.startswith(names): this_ctype = ctype break out.append(this_ctype) return out | Characterize header groups into different data types. |
| 51,195 | def _collapse_header(self, header): out = [] for i, h in enumerate(header): if h.startswith(self._col_quals): out[-1].append(i) else: out.append([i]) return out | Combine header columns into related groups. |
| 51,196 | def load(abspath, default=None, enable_verbose=True): if default is None: default = dict() prt("\nLoad from '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_pickle = is_pickle_file(abspath) if not os.path.exists(abspath): prt(" File not found, use default... | Load pickle from file. If the file does not exist, return the default. |
| 51,197 | def dump(data, abspath, pk_protocol=py23.pk_protocol, overwrite=False, enable_verbose=True): prt("\nDump to '%s' ..." % abspath, enable_verbose) abspath = lower_ext(str(abspath)) is_pickle = is_pickle_file(abspath) if os.path.exists(abspath): if not overwrite: prt(" Stop! File... | Dump a picklable object to file. Provides multiple options to customize the behavior. |
| 51,198 | def obj2bytes(obj, pk_protocol=py23.pk_protocol): return pickle.dumps(obj, protocol=pk_protocol) | Convert an arbitrary picklable Python object to bytes. |
| 51,199 | def obj2str(obj, pk_protocol=py23.pk_protocol): return base64.urlsafe_b64encode(pickle.dumps(obj, protocol=pk_protocol)).decode("utf-8") | Convert an arbitrary object to a base64-encoded string. |
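
Each `question` cell above stores the function source with a single space between tokens and with the original line breaks removed, so a cell cannot be pasted back as runnable Python directly. Below is a minimal sketch, my own assumption rather than part of the dataset or its tooling, of collapsing that spacing back toward readable source for one complete row (idx 51,152). The `detokenize` helper and its regex rules are hypothetical; note it also fuses keywords such as `return` to a following `(` (which Python tolerates but a formatter would not emit), and the result stays on one line because the line breaks are unrecoverable.

```python
import re

def detokenize(code: str) -> str:
    """Collapse the single spaces inserted between Python tokens.

    A heuristic sketch (assumption): it does not recover line breaks or
    indentation, and it treats any word followed by '(' as a call.
    """
    code = re.sub(r"\s+([,:)\].])", r"\1", code)  # no space before , : ) ] .
    code = re.sub(r"([(\[.])\s+", r"\1", code)    # no space after ( [ .
    code = re.sub(r"(\w)\s+\(", r"\1(", code)     # name ( -> name(
    return code

# Row text copied from idx 51,152 above.
row = ('def data_directory ( ) : package_directory = os . path . abspath '
       '( os . path . dirname ( __file__ ) ) return os . path . join '
       '( package_directory , "data" )')
print(detokenize(row))
# def data_directory(): package_directory = os.path.abspath(os.path.dirname(__file__)) return os.path.join(package_directory, "data")
```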