Columns: idx (int64, 0 to 63k), question (string, 61 to 4.03k chars), target (string, 6 to 1.23k chars)
9,200
def _build_wells(self) -> List[Well]:
    return [Well(self._well_definition[well],
                 Location(self._calibrated_offset, self),
                 "{} of {}".format(well, self._display_name),
                 self.is_tiprack)
            for well in self._ordering]
This function is used to create one instance of wells to be used by all accessor functions. It is only called again if a new offset needs to be applied.
9,201
def _create_indexed_dictionary(self, group=0):
    dict_list = defaultdict(list)
    for index, well_obj in zip(self._ordering, self._wells):
        dict_list[self._pattern.match(index).group(group)].append(well_obj)
    return dict_list
Creates a dict of lists of Wells. Which way the labware is segmented determines whether this is a dict of rows or a dict of columns. If group is 1, it will collect wells that share the same alphabetic prefix and are therefore considered to be in the same row. If group is 2, it will collect wells that share the same numeric suffix and are therefore considered to be in the same column. A minimal sketch of the grouping idea follows.
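The sketch below shows the grouping on plain strings, assuming well names shaped like A1, B1, A2 and a regex whose group 1 is the letter prefix and group 2 is the numeric suffix; the regex is illustrative, not the class's actual _pattern.

import re
from collections import defaultdict

pattern = re.compile(r'^([A-Z]+)(\d+)$')  # assumed shape of well names
ordering = ['A1', 'B1', 'A2', 'B2']

rows = defaultdict(list)
for name in ordering:
    # group(1) is the row letter; group(2) would give columns instead
    rows[pattern.match(name).group(1)].append(name)
# rows == {'A': ['A1', 'A2'], 'B': ['B1', 'B2']}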
9,202
def set_calibration(self, delta: Point):
    self._calibrated_offset = Point(x=self._offset.x + delta.x,
                                    y=self._offset.y + delta.y,
                                    z=self._offset.z + delta.z)
    self._wells = self._build_wells()
Called by save calibration in order to update the offset on the object.
9,203
def wells_by_index(self) -> Dict[str, Well]:
    return {well: wellObj
            for well, wellObj in zip(self._ordering, self._wells)}
Accessor function used to create a look-up table of Wells by name.
9,204
def rows(self, *args) -> List[List[Well]]:
    row_dict = self._create_indexed_dictionary(group=1)
    keys = sorted(row_dict)
    if not args:
        res = [row_dict[key] for key in keys]
    elif isinstance(args[0], int):
        res = [row_dict[keys[idx]] for idx in args]
    elif isinstance(args[0], str):
        res = [row_dict[idx] for idx in args]
    else:
        raise TypeError
    return res
Accessor function used to navigate through a labware by row.
9,205
def rows_by_index(self) -> Dict[str, List[Well]]:
    row_dict = self._create_indexed_dictionary(group=1)
    return row_dict
Accessor function used to navigate through a labware by row name.
9,206
def columns(self, *args) -> List[List[Well]]:
    col_dict = self._create_indexed_dictionary(group=2)
    keys = sorted(col_dict, key=lambda x: int(x))
    if not args:
        res = [col_dict[key] for key in keys]
    elif isinstance(args[0], int):
        res = [col_dict[keys[idx]] for idx in args]
    elif isinstance(args[0], str):
        res = [col_dict[idx] for idx in args]
    else:
        raise TypeError
    return res
Accessor function used to navigate through a labware by column.
9,207
def columns_by_index(self) -> Dict[str, List[Well]]:
    col_dict = self._create_indexed_dictionary(group=2)
    return col_dict
Accessor function used to navigate through a labware by column name.
9,208
def next_tip(self, num_tips: int = 1) -> Optional[Well]:
    assert num_tips > 0
    columns: List[List[Well]] = self.columns()
    drop_leading_empties = [
        list(dropwhile(lambda x: not x.has_tip, column))
        for column in columns]
    drop_at_first_gap = [
        list(takewhile(lambda x: x.has_tip, column))
        for column in drop_leading_empties]
    long_enough = [
        column for column in drop_at_first_gap if len(column) >= num_tips]
    try:
        first_long_enough = long_enough[0]
        result: Optional[Well] = first_long_enough[0]
    except IndexError:
        result = None
    return result
Find the next valid well for pick-up.
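The dropwhile/takewhile pairing is easiest to see on plain booleans; a minimal sketch of the same contiguous-run search, with illustrative has_tip flags:

from itertools import dropwhile, takewhile

column = [False, False, True, True, False, True]  # has_tip per well
# Skip wells at the top of the column that have no tip
no_leading_empties = list(dropwhile(lambda t: not t, column))
# Keep the run of consecutive tips, stopping at the first gap
contiguous_run = list(takewhile(lambda t: t, no_leading_empties))
# contiguous_run == [True, True]; a column qualifies when
# len(contiguous_run) >= num_tips.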
9,209
def use_tips(self, start_well: Well, num_channels: int = 1):
    assert num_channels > 0, 'Bad call to use_tips: num_channels==0'
    target_column: List[Well] = [
        col for col in self.columns() if start_well in col][0]
    well_idx = target_column.index(start_well)
    num_tips = min(len(target_column) - well_idx, num_channels)
    target_wells = target_column[well_idx:well_idx + num_tips]
    assert all([well.has_tip for well in target_wells]), \
        '{} is out of tips'.format(str(self))
    for well in target_wells:
        well.has_tip = False
Removes tips from the tip tracker.
9,210
def require_linklocal(handler):
    @functools.wraps(handler)
    async def decorated(request: web.Request) -> web.Response:
        ipaddr_str = request.headers.get('x-host-ip')
        invalid_req_data = {
            'error': 'bad-interface',
            'message': f'The endpoint {request.url} can only be used from '
                       'local connections'
        }
        if not ipaddr_str:
            return web.json_response(data=invalid_req_data, status=403)
        try:
            addr = ipaddress.ip_address(ipaddr_str)
        except ValueError:
            LOG.exception(f"Couldn't parse host ip address {ipaddr_str}")
            raise
        if not addr.is_link_local:
            return web.json_response(data=invalid_req_data, status=403)
        return await handler(request)
    return decorated
Ensure the decorated handler is only called if the request came over a link-local connection.
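Typical usage would be wrapping an aiohttp handler; a minimal sketch, where the handler and its route are hypothetical:

@require_linklocal
async def restart_endpoint(request: web.Request) -> web.Response:
    # Only reachable from link-local addresses; other callers get a 403.
    return web.json_response({'message': 'ok'})

# app.router.add_post('/restart', restart_endpoint)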
9,211
from contextlib import contextmanager

@contextmanager
def authorized_keys(mode='r'):
    # Context manager so callers can use `with authorized_keys(...) as ak:`
    path = '/var/home/.ssh/authorized_keys'
    if not os.path.exists(path):
        os.makedirs(os.path.dirname(path))
        open(path, 'w').close()
    with open(path, mode) as ak:
        yield ak
Open the authorized_keys file. Separate function for mocking.
9,212
def remove_by_hash(hashval: str):
    key_details = get_keys()
    found = False
    with authorized_keys('w') as ak:
        for keyhash, key in key_details:
            if keyhash != hashval:
                # Keep every key that does not match the requested hash
                ak.write(f'{key}\n')
            else:
                found = True
    if not found:
        raise KeyError(hashval)
Remove the key whose md5 sum matches hashval.
9,213
async def list_keys(request: web.Request) -> web.Response:
    return web.json_response(
        {'public_keys': [{'key_md5': details[0], 'key': details[1]}
                         for details in get_keys()]},
        status=200)
List keys in the authorized_keys file.
9,214
async def add(request: web.Request) -> web.Response:
    body = await request.json()
    if 'key' not in body or not isinstance(body['key'], str):
        return web.json_response(
            data={'error': 'no-key', 'message': 'No "key" element in body'},
            status=400)
    pubkey = body['key']
    alg = pubkey.split()[0]
    if alg != 'ssh-rsa' and not alg.startswith('ecdsa'):
        LOG.warning(f"weird keyfile uploaded: starts with {alg}")
        return web.json_response(
            data={'error': 'bad-key',
                  'message': f'Key starts with invalid algorithm {alg}'},
            status=400)
    if '\n' in pubkey[:-1]:
        LOG.warning("Newlines in keyfile that shouldn't be there")
        return web.json_response(
            data={'error': 'bad-key', 'message': 'Key has a newline'},
            status=400)
    if '\n' == pubkey[-1]:
        pubkey = pubkey[:-1]
    hashval = hashlib.new('md5', pubkey.encode()).hexdigest()
    if not key_present(hashval):
        with authorized_keys('a') as ak:
            ak.write(f'{pubkey}\n')
    return web.json_response(
        data={'message': f'Added key {hashval}', 'key_md5': hashval},
        status=201)
Add a public key to the authorized_keys file.
9,215
async def remove(request: web.Request) -> web.Response:
    requested_hash = request.match_info['key_md5']
    new_keys: List[str] = []
    found = False
    for keyhash, key in get_keys():
        if keyhash == requested_hash:
            found = True
        else:
            new_keys.append(key)
    if not found:
        return web.json_response(
            data={'error': 'invalid-key-hash',
                  'message': f'No such key md5 {requested_hash}'},
            status=404)
    with authorized_keys('w') as ak:
        ak.write('\n'.join(new_keys) + '\n')
    return web.json_response(
        data={'message': f'Key {requested_hash} deleted. '
                         'Restart robot to take effect',
              'restart_url': '/server/restart'},
        status=200)
Remove a public key from authorized_keys.
9,216
def log_init():
    fallback_log_level = 'INFO'
    ot_log_level = hardware.config.log_level
    if ot_log_level not in logging._nameToLevel:
        log.info("OT Log Level {} not found. Defaulting to {}".format(
            ot_log_level, fallback_log_level))
        ot_log_level = fallback_log_level
    level_value = logging._nameToLevel[ot_log_level]
    serial_log_filename = CONFIG['serial_log_file']
    api_log_filename = CONFIG['api_log_file']
    logging_config = dict(
        version=1,
        formatters={
            'basic': {
                'format':
                '%(asctime)s %(name)s %(levelname)s [Line %(lineno)s] %(message)s'
            }
        },
        handlers={
            'debug': {
                'class': 'logging.StreamHandler',
                'formatter': 'basic',
                'level': level_value
            },
            'serial': {
                'class': 'logging.handlers.RotatingFileHandler',
                'formatter': 'basic',
                'filename': serial_log_filename,
                'maxBytes': 5000000,
                'level': logging.DEBUG,
                'backupCount': 3
            },
            'api': {
                'class': 'logging.handlers.RotatingFileHandler',
                'formatter': 'basic',
                'filename': api_log_filename,
                'maxBytes': 1000000,
                'level': logging.DEBUG,
                'backupCount': 5
            }
        },
        loggers={
            '__main__': {
                'handlers': ['debug', 'api'], 'level': logging.INFO},
            'opentrons.server': {
                'handlers': ['debug', 'api'], 'level': level_value},
            'opentrons.api': {
                'handlers': ['debug', 'api'], 'level': level_value},
            'opentrons.instruments': {
                'handlers': ['debug', 'api'], 'level': level_value},
            'opentrons.config': {
                'handlers': ['debug', 'api'], 'level': level_value},
            'opentrons.drivers.smoothie_drivers.driver_3_0': {
                'handlers': ['debug', 'api'], 'level': level_value},
            'opentrons.drivers.serial_communication': {
                'handlers': ['serial'], 'level': logging.DEBUG},
            'opentrons.drivers.thermocycler.driver': {
                'handlers': ['serial'], 'level': logging.DEBUG},
            'opentrons.protocol_api': {
                'handlers': ['api', 'debug'], 'level': level_value},
            'opentrons.hardware_control': {
                'handlers': ['api', 'debug'], 'level': level_value},
            'opentrons.legacy_api.containers': {
                'handlers': ['api'], 'level': level_value}
        }
    )
    dictConfig(logging_config)
Function that sets log levels and format strings. Checks for the OT_API_LOG_LEVEL environment variable, otherwise defaults to INFO.
9,217
def main():
    arg_parser = ArgumentParser(
        description="Opentrons robot software",
        parents=[build_arg_parser()])
    args = arg_parser.parse_args()
    run(**vars(args))
    arg_parser.exit(message="Stopped\n")
The main entrypoint for the Opentrons robot API server stack.
9,218
def setup_rules_file():
    import shutil
    import subprocess
    rules_file = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        '..', 'config', 'modules', '95-opentrons-modules.rules')
    shutil.copy2(
        rules_file,
        '/data/user_storage/opentrons_data/95-opentrons-modules.rules')
    res0 = subprocess.run(
        'udevadm control --reload-rules',
        shell=True, stdout=subprocess.PIPE).stdout.decode()
    if res0:
        log.warning(res0.strip())
    res1 = subprocess.run(
        'udevadm trigger',
        shell=True, stdout=subprocess.PIPE).stdout.decode()
    if res1:
        log.warning(res1.strip())
Copy the udev rules file for Opentrons Modules to the opentrons_data directory and trigger the new rules. The rules file in opentrons_data is symlinked into the udev rules directory.
9,219
async def restart(request: web.Request) -> web.Response:
    async with request.app[RESTART_LOCK_NAME]:
        asyncio.get_event_loop().call_later(1, _do_restart)
        return web.json_response({'message': 'Restarting in 1s'}, status=200)
Restart the robot.
9,220
async def create_virtual_environment(loop=None):
    tmp_dir = tempfile.mkdtemp()
    venv_dir = os.path.join(tmp_dir, VENV_NAME)
    proc1 = await asyncio.create_subprocess_shell(
        'virtualenv {}'.format(venv_dir), loop=loop)
    await proc1.communicate()
    if sys.platform == 'win32':
        python = os.path.join(venv_dir, 'Scripts', 'python.exe')
    else:
        python = os.path.join(venv_dir, 'bin', 'python')
    venv_site_pkgs = install_dependencies(python)
    log.info("Created virtual environment at {}".format(venv_dir))
    return venv_dir, python, venv_site_pkgs
Create a virtual environment and return the path to the virtual env directory, which should contain a bin directory with the python and pip binaries that can be used to do a test install of a software package.
9,221
def install_dependencies(python) -> str:
    import aiohttp
    import virtualenv_support
    import async_timeout
    import chardet
    import multidict
    import yarl
    import idna
    import pip
    import setuptools
    import virtualenv
    tmpdirname = python.split(VENV_NAME)[0]
    paths_raw = sp.check_output(
        '{} -c "import sys; [print(p) for p in sys.path]"'.format(python),
        shell=True)
    paths = paths_raw.decode().split()
    venv_site_pkgs = list(filter(
        lambda x: tmpdirname in x and 'site-packages' in x, paths))[-1]
    dependencies = [
        ('aiohttp', aiohttp),
        ('virtualenv_support', virtualenv_support),
        ('async_timeout', async_timeout),
        ('chardet', chardet),
        ('multidict', multidict),
        ('yarl', yarl),
        ('idna', idna),
        ('pip', pip),
        ('setuptools', setuptools),
        ('virtualenv.py', virtualenv)]
    for dep_name, dep in dependencies:
        src_dir = os.path.abspath(os.path.dirname(dep.__file__))
        dst = os.path.join(venv_site_pkgs, dep_name)
        if os.path.exists(dst):
            log.debug('{} already exists--skipping'.format(dst))
        else:
            log.debug('Copying {} to {}'.format(dep_name, dst))
            if dep_name.endswith('.py'):
                shutil.copy2(os.path.join(src_dir, dep_name), dst)
            else:
                shutil.copytree(src_dir, dst)
    return venv_site_pkgs
Copy aiohttp and virtualenv install locations (and their transitive dependencies) into the new virtualenv, so that the update server can install without access to the full system site-packages or a connection to the internet. Full access to system site-packages causes the install inside the virtualenv to fail quietly, because it does not have permission to overwrite a package by the same name, and it then picks up the system version of otupdate. Also, we have to do a copy rather than a symlink because a non-admin Windows account does not have permission to create symlinks.
9,222
async def _start_server(python, port, venv_site_pkgs=None, cwd=None) -> sp.Popen:
    log.info("Starting sandboxed update server on port {}".format(port))
    if venv_site_pkgs:
        python = 'PYTHONPATH={} {}'.format(venv_site_pkgs, python)
    cmd = [python, '-m', 'otupdate', '--debug', '--test', '--port', str(port)]
    log.debug('cmd: {}'.format(' '.join(cmd)))
    proc = sp.Popen(' '.join(cmd), shell=True, cwd=cwd)
    atexit.register(lambda: _stop_server(proc))
    n_retries = 3
    async with aiohttp.ClientSession() as session:
        test_status, detail = await selftest.health_check(
            session=session, port=port, retries=n_retries)
    if test_status == 'failure':
        log.debug("Test server failed to start after {} retries. "
                  "Stopping.".format(n_retries))
        _stop_server(proc)
    return proc
Starts an update server sandboxed in the virtual env, and attempts to read the health endpoint with retries to determine when the server is available. If the number of retries is exceeded, the returned server process will already be terminated.
9,223
async def install_update(filename, loop):
    log.info("Installing update server into system environment")
    log.debug('File {} exists? {}'.format(filename, os.path.exists(filename)))
    out, err, returncode = await _install(sys.executable, filename, loop)
    if returncode == 0:
        msg = out
    else:
        msg = err
    res = {'message': msg, 'filename': filename}
    return res, returncode
Install the update into the system environment.
9,224
def solve(expected: List[Tuple[float, float]],
          actual: List[Tuple[float, float]]) -> np.ndarray:
    ex = np.array([list(point) + [1] for point in expected]).transpose()
    ac = np.array([list(point) + [1] for point in actual]).transpose()
    transform = np.dot(ac, inv(ex))
    return transform
Take two lists of three (x, y) points each and calculate the matrix representing the transformation from one space to the other.
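A quick numeric check of the idea, assuming a pure translation of (+1, +2) between the two spaces:

import numpy as np
from numpy.linalg import inv

expected = [(0, 0), (1, 0), (0, 1)]
actual = [(1, 2), (2, 2), (1, 3)]  # expected shifted by (+1, +2)

# Homogeneous coordinates: one column per point, last row all ones
ex = np.array([list(p) + [1] for p in expected]).transpose()
ac = np.array([list(p) + [1] for p in actual]).transpose()
transform = ac.dot(inv(ex))
# transform is the homogeneous translation matrix
# [[1, 0, 1], [0, 1, 2], [0, 0, 1]]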
9,225
def apply_transform(t: Union[List[List[float]], np.ndarray],
                    pos: Tuple[float, float, float],
                    with_offsets=True) -> Tuple[float, float, float]:
    extended = 1 if with_offsets else 0
    return tuple(dot(t, list(pos) + [extended])[:3])
Change of base using a transform matrix. Primarily used to render a point in space in a way that is more readable for the user.
9,226
def apply_reverse(t: Union[List[List[float]], np.ndarray],
                  pos: Tuple[float, float, float],
                  with_offsets=True) -> Tuple[float, float, float]:
    return apply_transform(inv(t), pos, with_offsets)
Like apply_transform, but inverts the transform first.
9,227
async def _resin_supervisor_restart():
    supervisor = os.environ.get('RESIN_SUPERVISOR_ADDRESS',
                                'http://127.0.0.1:48484')
    restart_url = supervisor + '/v1/restart'
    api = os.environ.get('RESIN_SUPERVISOR_API_KEY', 'unknown')
    app_id = os.environ.get('RESIN_APP_ID', 'unknown')
    async with aiohttp.ClientSession() as session:
        async with session.post(restart_url,
                                params={'apikey': api},
                                json={'appId': app_id,
                                      'force': True}) as resp:
            body = await resp.read()
            if resp.status != 202:
                log.error("Could not shut down: {}: {}".format(
                    resp.status, body))
Execute a container restart by requesting it from the supervisor.
9,228
def connect(self, port: str = None, options: Any = None):
    self._hardware.connect(port)
Connect to the robot hardware.
9,229
def _load_instr(ctx, name: str, mount: str,
                *args, **kwargs) -> InstrumentContext:
    return ctx.load_instrument(name, Mount[mount.upper()])
Build an instrument in a backwards-compatible way.
9,230
def load(self, container_name, slot, label=None, share=False):
    if share:
        raise NotImplementedError("Sharing not supported")
    try:
        name = self.LW_TRANSLATION[container_name]
    except KeyError:
        if container_name in self.LW_NO_EQUIVALENT:
            raise NotImplementedError(
                "Labware {} is not supported".format(container_name))
        elif container_name in ('magdeck', 'tempdeck'):
            raise NotImplementedError("Module load not yet implemented")
        else:
            name = container_name
    return self._ctx.load_labware_by_name(name, slot, label)
Load a piece of labware by specifying its name and position.
9,231
def build(cls, builder, *args, build_loop=None, **kwargs):
    loop = asyncio.new_event_loop()
    kwargs['loop'] = loop
    args = [arg for arg in args
            if not isinstance(arg, asyncio.AbstractEventLoop)]
    if asyncio.iscoroutinefunction(builder):
        checked_loop = build_loop or asyncio.get_event_loop()
        api = checked_loop.run_until_complete(builder(*args, **kwargs))
    else:
        api = builder(*args, **kwargs)
    return cls(api, loop)
Build a hardware control API and initialize the adapter in one call.
9,232
def connect(self, port: str = None, force: bool = False):
    old_api = object.__getattribute__(self, '_api')
    loop = old_api._loop
    new_api = loop.run_until_complete(API.build_hardware_controller(
        loop=loop, port=port,
        config=copy.copy(old_api.config), force=force))
    old_api._loop.run_until_complete(new_api.cache_instruments())
    setattr(self, '_api', new_api)
Connect to hardware.
9,233
def disconnect(self):
    old_api = object.__getattribute__(self, '_api')
    new_api = API.build_hardware_simulator(
        loop=old_api._loop, config=copy.copy(old_api.config))
    setattr(self, '_api', new_api)
Disconnect from connected hardware.
9,234
def get_attached_pipettes(self):
    api = object.__getattribute__(self, '_api')
    instrs = {}
    for mount, data in api.attached_instruments.items():
        instrs[mount.name.lower()] = {
            'model': data.get('name', None),
            'id': data.get('pipette_id', None),
            'mount_axis': Axis.by_mount(mount),
            'plunger_axis': Axis.of_plunger(mount)
        }
        if data.get('name'):
            instrs[mount.name.lower()]['tip_length'] = \
                data.get('tip_length', None)
    return instrs
Mimic the behavior of robot.get_attached_pipettes.
9,235
def unzip_update(filepath: str,
                 progress_callback: Callable[[float], None],
                 acceptable_files: Sequence[str],
                 mandatory_files: Sequence[str],
                 chunk_size: int = 1024
                 ) -> Tuple[Mapping[str, Optional[str]], Mapping[str, int]]:
    assert chunk_size
    total_size = 0
    written_size = 0
    to_unzip: List[zipfile.ZipInfo] = []
    file_paths: Dict[str, Optional[str]] = {
        fn: None for fn in acceptable_files}
    file_sizes: Dict[str, int] = {fn: 0 for fn in acceptable_files}
    LOG.info(f"Unzipping {filepath}")
    with zipfile.ZipFile(filepath, 'r') as zf:
        files = zf.infolist()
        remaining_filenames = [fn for fn in acceptable_files]
        for fi in files:
            if fi.filename in acceptable_files:
                to_unzip.append(fi)
                total_size += fi.file_size
                remaining_filenames.remove(fi.filename)
                LOG.debug(f"Found {fi.filename} ({fi.file_size}B)")
            else:
                LOG.debug(f"Ignoring {fi.filename}")
        for name in remaining_filenames:
            if name in mandatory_files:
                raise FileMissing(f'File {name} missing from zip')
        for fi in to_unzip:
            uncomp_path = os.path.join(os.path.dirname(filepath), fi.filename)
            with zf.open(fi) as zipped, open(uncomp_path, 'wb') as unzipped:
                LOG.debug(f"Beginning unzip of {fi.filename} to {uncomp_path}")
                while True:
                    chunk = zipped.read(chunk_size)
                    unzipped.write(chunk)
                    written_size += len(chunk)
                    progress_callback(written_size / total_size)
                    if len(chunk) != chunk_size:
                        break
                file_paths[fi.filename] = uncomp_path
                file_sizes[fi.filename] = fi.file_size
                LOG.debug(f"Unzipped {fi.filename} to {uncomp_path}")
    LOG.info(f"Unzipped {filepath}, results: \n\t" + '\n\t'.join(
        [f'{k}: {file_paths[k]} ({file_sizes[k]}B)'
         for k in file_paths.keys()]))
    return file_paths, file_sizes
Unzip an update file.
9,236
def hash_file(path: str,
              progress_callback: Callable[[float], None],
              chunk_size: int = 1024,
              file_size: int = None,
              algo: str = 'sha256') -> bytes:
    hasher = hashlib.new(algo)
    have_read = 0
    if not chunk_size:
        chunk_size = 1024
    with open(path, 'rb') as to_hash:
        if not file_size:
            file_size = to_hash.seek(0, 2)
            to_hash.seek(0)
        while True:
            chunk = to_hash.read(chunk_size)
            hasher.update(chunk)
            have_read += len(chunk)
            progress_callback(have_read / file_size)
            if len(chunk) != chunk_size:
                break
    return binascii.hexlify(hasher.digest())
Hash a file and return the hash, providing progress callbacks.
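A minimal sketch of how a caller might drive it; the file path is hypothetical:

def report(fraction: float) -> None:
    # Called after every chunk with the completed fraction (0.0 to 1.0)
    print(f'hashing: {fraction:.0%}')

digest = hash_file('/tmp/rootfs.img', report, chunk_size=4096)
print(digest)  # hex-encoded digest as bytes, e.g. b'9f86d0...'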
9,237
def _find_unused_partition() -> RootPartitions:
    which = subprocess.check_output(['ot-unused-partition']).strip()
    return {b'2': RootPartitions.TWO, b'3': RootPartitions.THREE}[which]
Find the currently-unused root partition to write to.
9,238
def write_file(infile: str,
               outfile: str,
               progress_callback: Callable[[float], None],
               chunk_size: int = 1024,
               file_size: int = None):
    total_written = 0
    with open(infile, 'rb') as img, open(outfile, 'wb') as part:
        if None is file_size:
            file_size = img.seek(0, 2)
            img.seek(0)
            LOG.info(f'write_file: file size calculated as {file_size}B')
        LOG.info(f'write_file: writing {infile} ({file_size}B)'
                 f' to {outfile} in {chunk_size}B chunks')
        while True:
            chunk = img.read(chunk_size)
            part.write(chunk)
            total_written += len(chunk)
            progress_callback(total_written / file_size)
            if len(chunk) != chunk_size:
                break
Write a file to another file with progress callbacks.
9,239
def write_update(rootfs_filepath: str,
                 progress_callback: Callable[[float], None],
                 chunk_size: int = 1024,
                 file_size: int = None) -> RootPartitions:
    unused = _find_unused_partition()
    part_path = unused.value.path
    write_file(rootfs_filepath, part_path, progress_callback,
               chunk_size, file_size)
    return unused
Write the new rootfs to the next root partition.
9,240
def _switch_partition() -> RootPartitions:
    res = subprocess.check_output(['ot-switch-partitions'])
    for line in res.split(b'\n'):
        matches = re.match(
            b'Current boot partition: ([23]), setting to ([23])', line)
        if matches:
            return {b'2': RootPartitions.TWO,
                    b'3': RootPartitions.THREE}[matches.group(2)]
    else:
        raise RuntimeError(f'Bad output from ot-switch-partitions: {res}')
Switch the active boot partition using the switch script.
9,241
def commit_update():
    unused = _find_unused_partition()
    new = _switch_partition()
    if new != unused:
        msg = f"Bad switch: switched to {new} when {unused} was unused"
        LOG.error(msg)
        raise RuntimeError(msg)
    else:
        LOG.info(f'commit_update: committed to booting {new}')
Switch the target boot partition.
9,242
def get_module(self):
    for md in SUPPORTED_MODULES:
        maybe_module = self.get_child_by_name(md)
        if maybe_module:
            return maybe_module
    return None
Returns the module placeable, if present.
9,243
def get_children_from_slice(self, s):
    if isinstance(s.start, str):
        s = slice(self.get_index_from_name(s.start), s.stop, s.step)
    if isinstance(s.stop, str):
        s = slice(s.start, self.get_index_from_name(s.stop), s.step)
    return WellSeries(self.get_children_list()[s])
Retrieve the list of children within a slice.
9,244
def get_all_children(self):
    my_children = self.get_children_list()
    children = []
    children.extend(my_children)
    for child in my_children:
        children.extend(child.get_all_children())
    return children
Returns all children recursively.
9,245
def containers(self) -> list:
    all_containers: List = list()
    for slot in self:
        all_containers += slot.get_children_list()
    for container in all_containers:
        if getattr(container, 'stackable', False):
            all_containers += container.get_children_list()
    return all_containers
Returns all containers on a deck as a list.
9,246
def calculate_grid(self):
    if self.grid is None:
        self.grid = self.get_wellseries(self.get_grid())
    if self.grid_transposed is None:
        self.grid_transposed = self.get_wellseries(
            self.transpose(self.get_grid()))
Calculates and stores the grid structure.
9,247
def transpose(self, rows):
    res = OrderedDict()
    for row, cols in rows.items():
        for col, cell in cols.items():
            if col not in res:
                res[col] = OrderedDict()
            res[col][row] = cell
    return res
Transpose the grid to allow for column-wise access.
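A small worked example of the transpose, using plain strings for cells:

from collections import OrderedDict

rows = OrderedDict([
    ('A', OrderedDict([('1', 'A1'), ('2', 'A2')])),
    ('B', OrderedDict([('1', 'B1'), ('2', 'B2')])),
])

res = OrderedDict()
for row, cols in rows.items():
    for col, cell in cols.items():
        if col not in res:
            res[col] = OrderedDict()
        res[col][row] = cell
# res == {'1': {'A': 'A1', 'B': 'B1'}, '2': {'A': 'A2', 'B': 'B2'}}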
9,248
def get_wellseries(self, matrix):
    res = OrderedDict()
    for col, cells in matrix.items():
        if col not in res:
            res[col] = OrderedDict()
        for row, cell in cells.items():
            res[col][row] = self.children_by_name[''.join(cell)]
        res[col] = WellSeries(res[col], name=col)
    return WellSeries(res)
Returns the grid as a WellSeries of WellSeries.
9,249
def wells(self, *args, **kwargs):
    if len(args) and isinstance(args[0], list):
        args = args[0]
    new_wells = None
    if not args and not kwargs:
        new_wells = WellSeries(self.get_children_list())
    elif len(args) > 1:
        new_wells = WellSeries([self.well(n) for n in args])
    elif 'x' in kwargs or 'y' in kwargs:
        new_wells = self._parse_wells_x_y(*args, **kwargs)
    else:
        new_wells = self._parse_wells_to_and_length(*args, **kwargs)
    if len(new_wells) == 1:
        return new_wells[0]
    return new_wells
Returns a child Well or a list of child Wells.
9,250
def parse_device_information(device_info_string: str) -> Mapping[str, str]:
    error_msg = 'Unexpected argument to parse_device_information: {}'.format(
        device_info_string)
    if not device_info_string or not isinstance(device_info_string, str):
        raise ParseError(error_msg)
    parsed_values = device_info_string.strip().split(' ')
    if len(parsed_values) < 3:
        log.error(error_msg)
        raise ParseError(error_msg)
    res = {parse_key_from_substring(s): parse_string_value_from_substring(s)
           for s in parsed_values[:3]}
    for key in ['model', 'version', 'serial']:
        if key not in res:
            raise ParseError(error_msg)
    return res
Parse the module's device information response.
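Judging from the required keys, the response is three space-separated key/value pairs; a sketch of the expected shape, where the raw string and the separator inside each pair are assumptions:

# Hypothetical raw response from a module:
raw = 'model:temp_deck version:v1.0.4 serial:TDV01P20180101A01'

# parse_device_information(raw) would then return something like:
# {'model': 'temp_deck', 'version': 'v1.0.4',
#  'serial': 'TDV01P20180101A01'}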
9,251
def simulate(protocol_file,
             propagate_logs=False,
             log_level='warning') -> List[Mapping[str, Any]]:
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs
    contents = protocol_file.read()
    if opentrons.config.feature_flags.use_protocol_api_v2():
        try:
            execute_args = {'protocol_json': json.loads(contents)}
        except json.JSONDecodeError:
            execute_args = {'protocol_code': contents}
        context = opentrons.protocol_api.contexts.ProtocolContext()
        context.home()
        scraper = CommandScraper(stack_logger, log_level, context.broker)
        execute_args.update({'simulate': True, 'context': context})
        opentrons.protocol_api.execute.run_protocol(**execute_args)
    else:
        try:
            proto = json.loads(contents)
        except json.JSONDecodeError:
            proto = contents
        opentrons.robot.disconnect()
        scraper = CommandScraper(stack_logger, log_level,
                                 opentrons.robot.broker)
        if isinstance(proto, dict):
            opentrons.protocols.execute_protocol(proto)
        else:
            exec(proto, {})
    return scraper.commands
Simulate the protocol itself.
9,252
def main():
    parser = argparse.ArgumentParser(prog='opentrons_simulate',
                                     description=__doc__)
    parser.add_argument(
        'protocol', metavar='PROTOCOL_FILE', type=argparse.FileType('r'),
        help='The protocol file to simulate (specify - to read from stdin).')
    parser.add_argument(
        '-v', '--version', action='version',
        version=f'%(prog)s {opentrons.__version__}',
        help='Print the opentrons package version and exit')
    parser.add_argument(
        '-o', '--output', action='store',
        help='What to output during simulations',
        choices=['runlog', 'nothing'], default='runlog')
    parser.add_argument(
        '-l', '--log-level', action='store',
        help=('Log level for the opentrons stack. Anything below warning '
              'can be chatty'),
        choices=['error', 'warning', 'info', 'debug'], default='warning')
    args = parser.parse_args()
    runlog = simulate(args.protocol, log_level=args.log_level)
    if args.output == 'runlog':
        print(format_runlog(runlog))
    return 0
Run the simulation.
9,253
def _command_callback(self, message):
    payload = message['payload']
    if message['$'] == 'before':
        self._commands.append({'level': self._depth,
                               'payload': payload,
                               'logs': []})
        self._depth += 1
    else:
        while not self._queue.empty():
            self._commands[-1]['logs'].append(self._queue.get())
        self._depth = max(self._depth - 1, 0)
The callback subscribed to the broker.
9,254
async def _update_firmware(filename, loop):
    try:
        from opentrons import robot
    except ModuleNotFoundError:
        res = "Unable to find module `opentrons`--not updating firmware"
        rc = 1
        log.error(res)
    else:
        if not robot.is_connected():
            robot.connect()
        port = str(robot._driver.port)
        robot._driver._smoothie_programming_mode()
        robot._driver._connection.close()
        update_cmd = 'lpc21isp -wipe -donotstart {0} {1} {2} 12000'.format(
            filename, port, robot.config.serial_speed)
        proc = await asyncio.create_subprocess_shell(
            update_cmd, stdout=asyncio.subprocess.PIPE, loop=loop)
        rd = await proc.stdout.read()
        res = rd.decode().strip()
        await proc.communicate()
        rc = proc.returncode
        if rc == 0:
            robot._driver._connection.open()
            robot._driver._smoothie_reset()
            robot._driver._setup()
    return res, rc
Currently uses the robot singleton from the API server to connect to Smoothie. Those calls should be separated out from the singleton so it can be used directly, without requiring a full initialization of the API robot.
9,255
async def execute_module_command(request):
    hw = hw_from_req(request)
    requested_serial = request.match_info['serial']
    data = await request.json()
    command_type = data.get('command_type')
    args = data.get('args')
    if ff.use_protocol_api_v2():
        hw_mods = await hw.discover_modules()
    else:
        hw_mods = hw.attached_modules.values()
    if len(hw_mods) == 0:
        return web.json_response({"message": "No connected modules"},
                                 status=404)
    matching_mod = next(
        (mod for mod in hw_mods
         if mod.device_info.get('serial') == requested_serial),
        None)
    if not matching_mod:
        return web.json_response({"message": "Specified module not found"},
                                 status=404)
    if hasattr(matching_mod, command_type):
        clean_args = args or []
        method = getattr(matching_mod, command_type)
        if asyncio.iscoroutinefunction(method):
            val = await method(*clean_args)
        else:
            val = method(*clean_args)
        return web.json_response({'message': 'Success', 'returnValue': val},
                                 status=200)
    else:
        return web.json_response(
            {'message': f'Module does not have command: {command_type}'},
            status=400)
Execute a command on a given module by its serial number.
9,256
async def get_engaged_axes(request):
    hw = hw_from_req(request)
    return web.json_response({str(k).lower(): {'enabled': v}
                              for k, v in hw.engaged_axes.items()})
Query the driver for engaged state by axis. Response keys will be the axes XYZABC, and values will be True for engaged and False for disengaged. Axes must be manually disengaged, and are automatically re-engaged whenever a move or home command is called on that axis.
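Given the handler above, the response body would look roughly like this Python dict (the per-axis values are illustrative):

# Illustrative response body:
{
    'x': {'enabled': True},
    'y': {'enabled': True},
    'z': {'enabled': False},
    'a': {'enabled': False},
    'b': {'enabled': True},
    'c': {'enabled': True},
}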
9,257
async def move(request):
    hw = hw_from_req(request)
    req = await request.text()
    data = json.loads(req)
    target, point, mount, model, message, error = _validate_move_data(data)
    if error:
        status = 400
    else:
        status = 200
        if ff.use_protocol_api_v2():
            await hw.cache_instruments()
            if target == 'mount':
                critical_point = CriticalPoint.MOUNT
            else:
                critical_point = None
            mount = Mount[mount.upper()]
            target = Point(*point)
            await hw.home_z()
            pos = await hw.gantry_position(mount, critical_point)
            await hw.move_to(mount, target._replace(z=pos.z),
                             critical_point=critical_point)
            await hw.move_to(mount, target, critical_point=critical_point)
            pos = await hw.gantry_position(mount)
            message = 'Move complete. New position: {}'.format(pos)
        else:
            if target == 'mount':
                message = _move_mount(hw, mount, point)
            elif target == 'pipette':
                message = _move_pipette(hw, mount, model, point)
    return web.json_response({"message": message}, status=status)
Moves the robot to the specified position, as provided by the control.info endpoint response.
9,258
def _move_mount(robot, mount, point):
    carriage = robot._actuators[mount]['carriage']
    robot.poses = carriage.home(robot.poses)
    other_mount = 'left' if mount == 'right' else 'right'
    robot.poses = robot._actuators[other_mount]['carriage'].home(robot.poses)
    robot.gantry.move(robot.poses, x=point[0], y=point[1])
    robot.poses = carriage.move(robot.poses, z=point[2])
    x, y, _ = tuple(pose_tracker.absolute(
        robot.poses, robot._actuators[mount]['carriage']))
    _, _, z = tuple(pose_tracker.absolute(robot.poses, robot.gantry))
    new_position = (x, y, z)
    return "Move complete. New position: {}".format(new_position)
The carriage moves the mount in the Z axis, and the gantry moves in X and Y.
9,259
def _eap_check_config(eap_config: Dict[str, Any]) -> Dict[str, Any]:
    eap_type = eap_config.get('eapType')
    for method in EAP_CONFIG_SHAPE['options']:
        if method['name'] == eap_type:
            options = method['options']
            break
    else:
        raise ConfigureArgsError(
            'EAP method {} is not valid'.format(eap_type))
    _eap_check_no_extra_args(eap_config, options)
    for opt in options:
        _eap_check_option_ok(opt, eap_config)
        if opt['type'] == 'file' and opt['name'] in eap_config:
            eap_config[opt['name']] = _get_key_file(eap_config[opt['name']])
    return eap_config
Check the EAP-specific args and replace values where needed.
9,260
def _deduce_security(kwargs) -> nmcli.SECURITY_TYPES:
    sec_translation = {
        'wpa-psk': nmcli.SECURITY_TYPES.WPA_PSK,
        'none': nmcli.SECURITY_TYPES.NONE,
        'wpa-eap': nmcli.SECURITY_TYPES.WPA_EAP,
    }
    if not kwargs.get('securityType'):
        if kwargs.get('psk') and kwargs.get('eapConfig'):
            raise ConfigureArgsError(
                'Cannot deduce security type: psk and eap both passed')
        elif kwargs.get('psk'):
            kwargs['securityType'] = 'wpa-psk'
        elif kwargs.get('eapConfig'):
            kwargs['securityType'] = 'wpa-eap'
        else:
            kwargs['securityType'] = 'none'
    try:
        return sec_translation[kwargs['securityType']]
    except KeyError:
        raise ConfigureArgsError('securityType must be one of {}'.format(
            ','.join(sec_translation.keys())))
Make sure that the security_type is known, or throw.
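The deduction rules in action; a minimal sketch where the kwargs dicts are illustrative:

# psk present, no explicit securityType -> deduced as wpa-psk
_deduce_security({'ssid': 'lab', 'psk': 'hunter2'})
# eapConfig present -> deduced as wpa-eap
_deduce_security({'ssid': 'lab', 'eapConfig': {'eapType': 'peap'}})
# neither psk nor eapConfig -> 'none';
# both at once -> ConfigureArgsError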
9,261
def _check_configure_args(configure_args: Dict[str, Any]) -> Dict[str, Any]:
    if not configure_args.get('ssid') \
            or not isinstance(configure_args['ssid'], str):
        raise ConfigureArgsError("SSID must be specified")
    if not configure_args.get('hidden'):
        configure_args['hidden'] = False
    elif not isinstance(configure_args['hidden'], bool):
        raise ConfigureArgsError('If specified, hidden must be a bool')
    configure_args['securityType'] = _deduce_security(configure_args)
    if configure_args['securityType'] == nmcli.SECURITY_TYPES.WPA_PSK:
        if not configure_args.get('psk'):
            raise ConfigureArgsError(
                'If securityType is wpa-psk, psk must be specified')
        return configure_args
    if configure_args['securityType'] == nmcli.SECURITY_TYPES.WPA_EAP:
        if not configure_args.get('eapConfig'):
            raise ConfigureArgsError(
                'If securityType is wpa-eap, eapConfig must be specified')
        configure_args['eapConfig'] = _eap_check_config(
            configure_args['eapConfig'])
        return configure_args
    return configure_args
Check the arguments passed to configure.
9,262
async def status(request: web.Request) -> web.Response:
    connectivity = {'status': 'none', 'interfaces': {}}
    try:
        connectivity['status'] = await nmcli.is_connected()
        connectivity['interfaces'] = {
            i.value: await nmcli.iface_info(i)
            for i in nmcli.NETWORK_IFACES}
        log.debug("Connectivity: {}".format(connectivity['status']))
        log.debug("Interfaces: {}".format(connectivity['interfaces']))
        status = 200
    except subprocess.CalledProcessError as e:
        log.error("CalledProcessError: {}".format(e.stdout))
        status = 500
    except FileNotFoundError as e:
        log.error("FileNotFoundError: {}".format(e))
        status = 500
    return web.json_response(connectivity, status=status)
A GET request will return the status of the machine's connection to the internet, as well as the status of its network interfaces.
9,263
async def list_keys(request: web.Request) -> web.Response:
    keys_dir = CONFIG['wifi_keys_dir']
    keys: List[Dict[str, str]] = []
    for path in os.listdir(keys_dir):
        full_path = os.path.join(keys_dir, path)
        if os.path.isdir(full_path):
            in_path = os.listdir(full_path)
            if len(in_path) > 1:
                log.warning("Garbage in key dir for key {}".format(path))
            keys.append({
                'uri': '/wifi/keys/{}'.format(path),
                'id': path,
                'name': os.path.basename(in_path[0])})
        else:
            log.warning("Garbage in wifi keys dir: {}".format(full_path))
    return web.json_response({'keys': keys}, status=200)
List the key files installed in the system.
9,264
async def remove_key(request: web.Request) -> web.Response:
    keys_dir = CONFIG['wifi_keys_dir']
    available_keys = os.listdir(keys_dir)
    requested_hash = request.match_info['key_uuid']
    if requested_hash not in available_keys:
        return web.json_response(
            {'message': 'No such key file {}'.format(requested_hash)},
            status=404)
    key_path = os.path.join(keys_dir, requested_hash)
    name = os.listdir(key_path)[0]
    shutil.rmtree(key_path)
    return web.json_response(
        {'message': 'Key file {} deleted'.format(name)},
        status=200)
Remove a key.
9,265
async def eap_options(request: web.Request) -> web.Response:
    return web.json_response(EAP_CONFIG_SHAPE, status=200)
A GET request returns the available configuration options for WPA-EAP.
9,266
def do_publish(broker, cmd, f, when, res, meta, *args, **kwargs):
    publish_command = functools.partial(broker.publish,
                                        topic=command_types.COMMAND)
    call_args = _get_args(f, args, kwargs)
    if when == 'before':
        broker.logger.info("{}: {}".format(
            f.__qualname__,
            {k: v for k, v in call_args.items() if str(k) != 'self'}))
    command_args = dict(zip(
        reversed(inspect.getfullargspec(cmd).args),
        reversed(inspect.getfullargspec(cmd).defaults or [])))
    if 'instrument' in inspect.getfullargspec(cmd).args:
        if 'self' in call_args and 'instrument' not in call_args:
            call_args['instrument'] = call_args['self']
    command_args.update({
        key: call_args[key]
        for key in (set(inspect.getfullargspec(cmd).args)
                    & call_args.keys())})
    if meta:
        command_args['meta'] = meta
    payload = cmd(**command_args)
    message = {**payload, '$': when}
    if when == 'after':
        message['return'] = res
    publish_command(message=message)
Implement the publish so it can be called outside the decorator.
9,267
def position(axis, hardware, cp=None):
    if not ff.use_protocol_api_v2():
        p = hardware._driver.position
        return (p['X'], p['Y'], p[axis])
    else:
        p = hardware.gantry_position(axis, critical_point=cp)
        return (p.x, p.y, p.z)
Read the position from the driver into a tuple, and map the third value to the axis of the pipette currently used.
9,268
async def setup_hostname() -> str:
    machine_id = open('/etc/machine-id').read().strip()
    hostname = machine_id[:6]
    with open('/etc/hostname', 'w') as ehn:
        ehn.write(f'{hostname}\n')
    LOG.debug("Setting hostname")
    proc = await asyncio.create_subprocess_exec(
        'hostname', '-F', '/etc/hostname',
        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    ret = proc.returncode
    if ret != 0:
        LOG.error(f'Error starting hostname: {ret} '
                  f'stdout: {stdout} stderr: {stderr}')
        raise RuntimeError("Couldn't run hostname")
    LOG.debug("Restarting avahi")
    proc = await asyncio.create_subprocess_exec(
        'systemctl', 'restart', 'avahi-daemon',
        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    ret = proc.returncode
    if ret != 0:
        LOG.error(f'Error restarting avahi-daemon: {ret} '
                  f'stdout: {stdout} stderr: {stderr}')
        raise RuntimeError("Error restarting avahi")
    LOG.debug("Updated hostname and restarted avahi OK")
    return hostname
Intended to be run when the server starts. Sets the machine hostname.
9,269
def _update_pretty_hostname(new_val: str):
    try:
        with open('/etc/machine-info') as emi:
            contents = emi.read()
    except OSError:
        LOG.exception("Couldn't read /etc/machine-info")
        contents = ''
    new_contents = ''
    for line in contents.split('\n'):
        if not line.startswith('PRETTY_HOSTNAME'):
            new_contents += f'{line}\n'
    new_contents += f'PRETTY_HOSTNAME={new_val}\n'
    with open('/etc/machine-info', 'w') as emi:
        emi.write(new_contents)
Write a new value for the pretty hostname.
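For a sense of the file being rewritten, a sketch assuming /etc/machine-info already holds one other line (the DEPLOYMENT line is illustrative):

# Before: /etc/machine-info contains
#   DEPLOYMENT=production
#   PRETTY_HOSTNAME=old-name
_update_pretty_hostname('OT2-demo')
# After: the non-PRETTY_HOSTNAME lines are kept and the
# PRETTY_HOSTNAME line is replaced:
#   DEPLOYMENT=production
#   PRETTY_HOSTNAME=OT2-demo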
9,270
def get_name(default: str = 'no name set'):
    try:
        with open('/etc/machine-info') as emi:
            contents = emi.read()
    except OSError:
        LOG.exception("Couldn't read /etc/machine-info")
        contents = ''
    for line in contents.split('\n'):
        if line.startswith('PRETTY_HOSTNAME='):
            return '='.join(line.split('=')[1:])
    LOG.warning(f"No PRETTY_HOSTNAME in {contents}, defaulting to {default}")
    try:
        _update_pretty_hostname(default)
    except OSError:
        LOG.exception("Could not write new pretty hostname!")
    return default
Get the currently-configured name of the machine.
9,271
async def set_name_endpoint(request: web.Request) -> web.Response:
    def build_400(msg: str) -> web.Response:
        return web.json_response(data={'message': msg}, status=400)

    body = await request.json()
    if 'name' not in body or not isinstance(body['name'], str):
        return build_400('Body has no "name" key with a string')
    new_name = await set_name(body['name'])
    request.app[DEVICE_NAME_VARNAME] = new_name
    return web.json_response(data={'name': new_name}, status=200)
Set the name of the robot.
9,272
async def get_name_endpoint(request: web.Request) -> web.Response:
    return web.json_response(
        data={'name': request.app[DEVICE_NAME_VARNAME]}, status=200)
Get the name of the robot.
9,273
def move(self, point: Point) -> 'Location':
    return self._replace(point=self.point + point)
Alter the point stored in the location while preserving the labware.
9,274
def _load_weird_container(container_name):
    old_container_loading.load_all_containers_from_disk()
    container = old_container_loading.get_persisted_container(container_name)
    rotated_container = database_migration.rotate_container_for_alpha(
        container)
    database.save_new_container(rotated_container, container_name)
    return container
Load a container from persisted containers, whatever that is.
9,275
def _setup_container(container_name):
    for meth in (database.load_container,
                 load_new_labware,
                 _load_weird_container):
        log.debug(
            f"Trying to load container {container_name} via {meth.__name__}")
        try:
            container = meth(container_name)
            if meth == _load_weird_container:
                container.properties['type'] = container_name
            log.info(f"Loaded {container_name} from {meth.__name__}")
            break
        except (ValueError, KeyError):
            log.info(f"{container_name} not in {meth.__name__}")
    else:
        raise KeyError(f"Unknown labware {container_name}")
    container_x, container_y, container_z = container._coordinates
    if container_z == 0 and 'height' in container[0].properties:
        container_z = container[0].properties['height']
    from opentrons.util.vector import Vector
    container._coordinates = Vector(container_x, container_y, container_z)
    return container
Try to find a container via a variety of methods.
9,276
def clear_tips(self):
    for instrument in self._instruments.values():
        if instrument.tip_attached:
            instrument._remove_tip(instrument._tip_length)
If reset is called with a tip attached, the tip must be removed before the poses and _instruments members are cleared. If the tip is not removed, the effective length of the pipette remains increased by the length of the tip, and subsequent _add_tip calls will increase the length further. This should be fixed by changing pose tracking so that it tracks the tip as a separate node, rather than adding and subtracting the tip length to the pipette length.
9,277
def identify(self, seconds):
    from time import sleep
    for i in range(seconds):
        self.turn_off_button_light()
        sleep(0.25)
        self.turn_on_button_light()
        sleep(0.25)
Identify a robot by flashing the light around the frame button for the given number of seconds.
9,278
def add_instrument(self, mount, instrument):
    if mount in self._instruments:
        prev_instr = self._instruments[mount]
        raise RuntimeError('Instrument {0} already on {1} mount'.format(
            prev_instr.name, mount))
    self._instruments[mount] = instrument
    instrument.instrument_actuator = self._actuators[mount]['plunger']
    instrument.instrument_mover = self._actuators[mount]['carriage']
    cx, cy, _ = self.config.instrument_offset[mount][instrument.type]
    mx, my, mz = instrument.model_offset
    _x, _y, _z = (mx + cx, my + cy, mz)
    if mount == 'left':
        _x, _y, _z = (_x + self.config.mount_offset[0],
                      _y + self.config.mount_offset[1],
                      _z + self.config.mount_offset[2])
    self.poses = pose_tracker.add(
        self.poses, instrument, parent=mount, point=(_x, _y, _z))
Adds an instrument to the robot.
9,279
def connect(self, port=None, options=None):
    self._driver.connect(port=port)
    self.fw_version = self._driver.get_fw_version()
    self.cache_instrument_models()
Connects the robot to a serial port.
9,280
def home(self, *args, **kwargs):
    self.poses = self.gantry.home(self.poses)
    self.poses = self._actuators['left']['plunger'].home(self.poses)
    self.poses = self._actuators['right']['plunger'].home(self.poses)
    self._previous_instrument = None
    self._prev_container = None
    for a in self._actuators.values():
        self.poses = a['carriage'].update_pose_from_driver(self.poses)
Home the robot's head and plunger motors.
9,281
def move_to(self, location, instrument, strategy='arc', **kwargs):
    placeable, coordinates = containers.unpack_location(location)
    offset = subtract(coordinates, placeable.top()[1])
    if isinstance(placeable, containers.WellSeries):
        placeable = placeable[0]
    target = add(pose_tracker.absolute(self.poses, placeable),
                 offset.coordinates)
    if self._previous_instrument:
        if self._previous_instrument != instrument:
            self._previous_instrument.retract()
            self._prev_container = None
    self._previous_instrument = instrument
    if strategy == 'arc':
        arc_coords = self._create_arc(instrument, target, placeable)
        for coord in arc_coords:
            self.poses = instrument._move(self.poses, **coord)
    elif strategy == 'direct':
        position = {'x': target[0], 'y': target[1], 'z': target[2]}
        self.poses = instrument._move(self.poses, **position)
    else:
        raise RuntimeError('Unknown move strategy: {}'.format(strategy))
Move an instrument to a coordinate, a container, or a coordinate within a container.
9,282
def _create_arc(self, inst, destination, placeable=None):
    this_container = None
    if isinstance(placeable, containers.Well):
        this_container = placeable.get_parent()
    elif isinstance(placeable, containers.WellSeries):
        this_container = placeable.get_parent()
    elif isinstance(placeable, containers.Container):
        this_container = placeable
    if this_container and self._prev_container == this_container:
        arc_top = self.max_placeable_height_on_deck(this_container)
        arc_top += TIP_CLEARANCE_LABWARE
    elif self._use_safest_height:
        arc_top = inst._max_deck_height()
    else:
        arc_top = self.max_deck_height() + TIP_CLEARANCE_DECK
    self._prev_container = this_container
    _, _, pip_z = pose_tracker.absolute(self.poses, inst)
    arc_top = max(arc_top, destination[2], pip_z)
    arc_top = min(arc_top, inst._max_deck_height())
    strategy = [
        {'z': arc_top},
        {'x': destination[0], 'y': destination[1]},
        {'z': destination[2]}]
    return strategy
Return a list of coordinates for arriving at the destination coordinate.
9,283
def disconnect(self):
    if self._driver:
        self._driver.disconnect()
    self.axis_homed = {
        'x': False, 'y': False, 'z': False, 'a': False, 'b': False}
Disconnects from the robot.
9,284
def get_slot_offsets(self):
    SLOT_OFFSETS = {
        'slots': {
            'col_offset': 132.50,
            'row_offset': 90.5
        }
    }
    slot_settings = SLOT_OFFSETS.get(self.get_deck_slot_types())
    row_offset = slot_settings.get('row_offset')
    col_offset = slot_settings.get('col_offset')
    return (row_offset, col_offset)
col_offset is measured from the bottom-left corner of slot 1 to the bottom-left corner of slot 2.
9,285
def get_attached_pipettes(self):
    left_data = {
        'mount_axis': 'z',
        'plunger_axis': 'b',
        'model': self.model_by_mount['left']['model'],
        'id': self.model_by_mount['left']['id']
    }
    left_model = left_data.get('model')
    if left_model:
        tip_length = pipette_config.load(
            left_model, left_data['id']).tip_length
        left_data.update({'tip_length': tip_length})
    right_data = {
        'mount_axis': 'a',
        'plunger_axis': 'c',
        'model': self.model_by_mount['right']['model'],
        'id': self.model_by_mount['right']['id']
    }
    right_model = right_data.get('model')
    if right_model:
        tip_length = pipette_config.load(
            right_model, right_data['id']).tip_length
        right_data.update({'tip_length': tip_length})
    return {'left': left_data, 'right': right_data}
Gets the model names of the attached pipettes.
9,286
def calibrate_container_with_instrument(self,
                                        container: Container,
                                        instrument,
                                        save: bool):
    well = container[0]
    delta = pose_tracker.change_base(self.poses, src=instrument, dst=well)
    if fflags.calibrate_to_bottom():
        delta_x = delta[0]
        delta_y = delta[1]
        if 'tiprack' in container.get_type():
            delta_z = delta[2]
        else:
            delta_z = delta[2] + well.z_size()
    else:
        delta_x = delta[0]
        delta_y = delta[1]
        delta_z = delta[2]
    self.poses = self._calibrate_container_with_delta(
        self.poses, container, delta_x, delta_y, delta_z, save)
    self.max_deck_height.cache_clear()
Calibrates a container using the bottom of the first well.
9,287
def _begin_write(session: UpdateSession,
                 loop: asyncio.AbstractEventLoop,
                 rootfs_file_path: str):
    session.set_progress(0)
    session.set_stage(Stages.WRITING)
    write_future = asyncio.ensure_future(loop.run_in_executor(
        None, file_actions.write_update,
        rootfs_file_path, session.set_progress))

    def write_done(fut):
        exc = fut.exception()
        if exc:
            session.set_error(getattr(exc, 'short', str(type(exc))),
                              str(exc))
        else:
            session.set_stage(Stages.DONE)

    write_future.add_done_callback(write_done)
Start the write process.
9,288
def calibrate(self):
    if self._driver and self._driver.is_connected():
        self._driver.probe_plate()
    self._engaged = False
Calibration involves probing for the top plate to get the plate height.
9,289
def disengage(self):
    if self._driver and self._driver.is_connected():
        self._driver.home()
    self._engaged = False
Home the magnet.
9,290
def connect(self):
    if self._port:
        self._driver = MagDeckDriver()
        self._driver.connect(self._port)
        self._device_info = self._driver.get_device_info()
    else:
        raise MissingDevicePortError(
            "MagDeck couldn't connect to port {}".format(self._port))
Connect to the serial port.
9,291
def reset_globals(version=None, loop=None):
    global containers
    global instruments
    global labware
    global robot
    global reset
    global modules
    global hardware
    robot, reset, instruments, containers, labware, modules, hardware = \
        build_globals(version, loop)
Reinitialize the global singletons with a given API version.
9,292
def _find_protocol_error(tb, proto_name):
    tb_info = traceback.extract_tb(tb)
    for frame in reversed(tb_info):
        if frame.filename == proto_name:
            return frame
    else:
        raise KeyError
Return the FrameInfo for the lowest frame in the traceback from the protocol.
9,293
def run_protocol(protocol_code: Any = None,
                 protocol_json: Dict[Any, Any] = None,
                 simulate: bool = False,
                 context: ProtocolContext = None):
    if not config.IS_ROBOT:
        simulate = True
    if None is context and simulate:
        true_context = ProtocolContext()
        true_context.home()
        MODULE_LOG.info("Generating blank protocol context for simulate")
    elif context:
        true_context = context
    else:
        raise RuntimeError(
            'Will not automatically generate hardware controller')
    if None is not protocol_code:
        _run_python(protocol_code, true_context)
    elif None is not protocol_json:
        protocol_version = get_protocol_schema_version(protocol_json)
        if protocol_version > 3:
            raise RuntimeError(
                f'JSON Protocol version {protocol_version} is not yet ' +
                'supported in this version of the API')
        validate_protocol(protocol_json)
        if protocol_version >= 3:
            ins = execute_v3.load_pipettes_from_json(
                true_context, protocol_json)
            lw = execute_v3.load_labware_from_json_defs(
                true_context, protocol_json)
            execute_v3.dispatch_json(true_context, protocol_json, ins, lw)
        else:
            ins = execute_v1.load_pipettes_from_json(
                true_context, protocol_json)
            lw = execute_v1.load_labware_from_json_loadnames(
                true_context, protocol_json)
            execute_v1.dispatch_json(true_context, protocol_json, ins, lw)
    else:
        raise RuntimeError("run_protocol must have either code or json")
Create a ProtocolRunner instance from one of a variety of protocol sources.
9,294
def get_ports_by_name(device_name):
    filtered_devices = filter(
        lambda device: device_name in device[1],
        list_ports.comports())
    device_ports = [device[0] for device in filtered_devices]
    return device_ports
Returns all serial devices with a given name.
9,295
from contextlib import contextmanager

@contextmanager
def serial_with_temp_timeout(serial_connection, timeout):
    # Context manager: temporarily override the connection's timeout,
    # then restore the saved value on exit
    saved_timeout = serial_connection.timeout
    if timeout is not None:
        serial_connection.timeout = timeout
    yield serial_connection
    serial_connection.timeout = saved_timeout
Implements a temporary timeout for a serial connection.
9,296
def _write_to_device_and_return(cmd, ack, device_connection):
    log.debug('Write -> {}'.format(cmd.encode()))
    device_connection.write(cmd.encode())
    response = device_connection.read_until(ack.encode())
    log.debug('Read <- {}'.format(response))
    if ack.encode() not in response:
        raise SerialNoResponse(
            'No response from serial port after {} second(s)'.format(
                device_connection.timeout))
    clean_response = _parse_serial_response(response, ack.encode())
    if clean_response:
        return clean_response.decode()
    return ''
Write to a serial device: format the command, wait for the ack to return, and return the parsed response.
9,297
def write_and_return(command, ack, serial_connection,
                     timeout=DEFAULT_WRITE_TIMEOUT):
    clear_buffer(serial_connection)
    with serial_with_temp_timeout(serial_connection,
                                  timeout) as device_connection:
        response = _write_to_device_and_return(command, ack,
                                               device_connection)
    return response
Write a command and return the response.
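A sketch of the request/ack exchange these helpers implement, assuming a device whose replies end with 'ok'; the command string, ack token, and connection object are all illustrative:

# Hypothetical: send a version query that the device acks with 'ok'
version = write_and_return('version\r\n', 'ok', connection, timeout=2)
print(version)  # parsed response text with the ack stripped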
9,298
async def get_advanced_settings(request: web.Request) -> web.Response:
    res = _get_adv_settings()
    return web.json_response(res)
Handle a GET request and return a json body with the key 'settings', whose value is a list of objects, each with keys 'id', 'title', 'description', and 'value'.
9,299
async def set_advanced_setting(request: web.Request) -> web.Response:
    data = await request.json()
    key = data.get('id')
    value = data.get('value')
    if key and key in advs.settings_by_id.keys():
        advs.set_adv_setting(key, value)
        res = _get_adv_settings()
        status = 200
    else:
        res = {'message': 'ID {} not found in settings list'.format(key)}
        status = 400
    return web.json_response(res, status=status)
Handle a POST request with a json body that has keys 'id' and 'value', where the value of 'id' must correspond to the id field of a setting in opentrons.config.advanced_settings.settings. Save the given 'value' for the setting that matches the supplied id.