Dataset columns: idx (int64, 0 to 63k), question (string, 61 to 4.03k characters), target (string, 6 to 1.23k characters).
55,900
def build_routename ( cls , name , routename_prefix = None ) : if routename_prefix is None : routename_prefix = 'api_{}' . format ( cls . __name__ . replace ( 'Resource' , '' ) . lower ( ) ) routename_prefix = routename_prefix . rstrip ( '_' ) return '_' . join ( [ routename_prefix , name ] )
Given a name and an optional routename_prefix, this generates a route name for a URL.
55,901
def add_views ( cls , config , rule_prefix , routename_prefix = None ) : methods = ( 'GET' , 'POST' , 'PUT' , 'DELETE' ) config . add_route ( cls . build_routename ( 'list' , routename_prefix ) , rule_prefix ) config . add_view ( cls . as_list ( ) , route_name = cls . build_routename ( 'list' , routename_prefix ) , request_method = methods ) config . add_route ( cls . build_routename ( 'detail' , routename_prefix ) , rule_prefix + '{name}/' ) config . add_view ( cls . as_detail ( ) , route_name = cls . build_routename ( 'detail' , routename_prefix ) , request_method = methods ) return config
A convenience method for registering the routes and views in Pyramid.
55,902
def deserialize ( self , body ) : try : if isinstance ( body , bytes ) : return json . loads ( body . decode ( 'utf-8' ) ) return json . loads ( body ) except ValueError : raise BadRequest ( 'Request body is not valid JSON' )
The low-level deserialization.
55,903
def convert_mnist ( directory , output_directory , output_filename = None , dtype = None ) : if not output_filename : if dtype : output_filename = 'mnist_{}.hdf5' . format ( dtype ) else : output_filename = 'mnist.hdf5' output_path = os . path . join ( output_directory , output_filename ) h5file = h5py . File ( output_path , mode = 'w' ) train_feat_path = os . path . join ( directory , TRAIN_IMAGES ) train_features = read_mnist_images ( train_feat_path , dtype ) train_lab_path = os . path . join ( directory , TRAIN_LABELS ) train_labels = read_mnist_labels ( train_lab_path ) test_feat_path = os . path . join ( directory , TEST_IMAGES ) test_features = read_mnist_images ( test_feat_path , dtype ) test_lab_path = os . path . join ( directory , TEST_LABELS ) test_labels = read_mnist_labels ( test_lab_path ) data = ( ( 'train' , 'features' , train_features ) , ( 'train' , 'targets' , train_labels ) , ( 'test' , 'features' , test_features ) , ( 'test' , 'targets' , test_labels ) ) fill_hdf5_file ( h5file , data ) h5file [ 'features' ] . dims [ 0 ] . label = 'batch' h5file [ 'features' ] . dims [ 1 ] . label = 'channel' h5file [ 'features' ] . dims [ 2 ] . label = 'height' h5file [ 'features' ] . dims [ 3 ] . label = 'width' h5file [ 'targets' ] . dims [ 0 ] . label = 'batch' h5file [ 'targets' ] . dims [ 1 ] . label = 'index' h5file . flush ( ) h5file . close ( ) return ( output_path , )
Converts the MNIST dataset to HDF5 .
55,904
def fill_subparser ( subparser ) : subparser . add_argument ( "--dtype" , help = "dtype to save to; by default, images will be " + "returned in their original unsigned byte format" , choices = ( 'float32' , 'float64' , 'bool' ) , type = str , default = None ) return convert_mnist
Sets up a subparser to convert the MNIST dataset files .
55,905
def read_mnist_images ( filename , dtype = None ) : with gzip . open ( filename , 'rb' ) as f : magic , number , rows , cols = struct . unpack ( '>iiii' , f . read ( 16 ) ) if magic != MNIST_IMAGE_MAGIC : raise ValueError ( "Wrong magic number reading MNIST image file" ) array = numpy . frombuffer ( f . read ( ) , dtype = 'uint8' ) array = array . reshape ( ( number , 1 , rows , cols ) ) if dtype : dtype = numpy . dtype ( dtype ) if dtype . kind == 'b' : array = array >= 128 elif dtype . kind == 'f' : array = array . astype ( dtype ) array /= 255. else : raise ValueError ( "Unknown dtype to convert MNIST to" ) return array
Read MNIST images from the original ubyte file format .
55,906
def read_mnist_labels ( filename ) : with gzip . open ( filename , 'rb' ) as f : magic , _ = struct . unpack ( '>ii' , f . read ( 8 ) ) if magic != MNIST_LABEL_MAGIC : raise ValueError ( "Wrong magic number reading MNIST label file" ) array = numpy . frombuffer ( f . read ( ) , dtype = 'uint8' ) array = array . reshape ( array . size , 1 ) return array
Read MNIST labels from the original ubyte file format .
55,907
def prepare_hdf5_file ( hdf5_file , n_train , n_valid , n_test ) : n_total = n_train + n_valid + n_test splits = create_splits ( n_train , n_valid , n_test ) hdf5_file . attrs [ 'split' ] = H5PYDataset . create_split_array ( splits ) vlen_dtype = h5py . special_dtype ( vlen = numpy . dtype ( 'uint8' ) ) hdf5_file . create_dataset ( 'encoded_images' , shape = ( n_total , ) , dtype = vlen_dtype ) hdf5_file . create_dataset ( 'targets' , shape = ( n_total , 1 ) , dtype = numpy . int16 ) hdf5_file . create_dataset ( 'filenames' , shape = ( n_total , 1 ) , dtype = 'S32' )
Create datasets within a given HDF5 file .
55,908
def process_train_set ( hdf5_file , train_archive , patch_archive , n_train , wnid_map , shuffle_seed = None ) : producer = partial ( train_set_producer , train_archive = train_archive , patch_archive = patch_archive , wnid_map = wnid_map ) consumer = partial ( image_consumer , hdf5_file = hdf5_file , num_expected = n_train , shuffle_seed = shuffle_seed ) producer_consumer ( producer , consumer )
Process the ILSVRC2010 training set .
55,909
def image_consumer ( socket , hdf5_file , num_expected , shuffle_seed = None , offset = 0 ) : with progress_bar ( 'images' , maxval = num_expected ) as pb : if shuffle_seed is None : index_gen = iter ( xrange ( num_expected ) ) else : rng = numpy . random . RandomState ( shuffle_seed ) index_gen = iter ( rng . permutation ( num_expected ) ) for i , num in enumerate ( index_gen ) : image_filename , class_index = socket . recv_pyobj ( zmq . SNDMORE ) image_data = numpy . fromstring ( socket . recv ( ) , dtype = 'uint8' ) _write_to_hdf5 ( hdf5_file , num + offset , image_filename , image_data , class_index ) pb . update ( i + 1 )
Fill an HDF5 file with incoming images from a socket .
55,910
def process_other_set ( hdf5_file , which_set , image_archive , patch_archive , groundtruth , offset ) : producer = partial ( other_set_producer , image_archive = image_archive , patch_archive = patch_archive , groundtruth = groundtruth , which_set = which_set ) consumer = partial ( image_consumer , hdf5_file = hdf5_file , num_expected = len ( groundtruth ) , offset = offset ) producer_consumer ( producer , consumer )
Process the validation or test set .
55,911
def load_from_tar_or_patch ( tar , image_filename , patch_images ) : patched = True image_bytes = patch_images . get ( os . path . basename ( image_filename ) , None ) if image_bytes is None : patched = False try : image_bytes = tar . extractfile ( image_filename ) . read ( ) numpy . array ( Image . open ( io . BytesIO ( image_bytes ) ) ) except ( IOError , OSError ) : with gzip . GzipFile ( fileobj = tar . extractfile ( image_filename ) ) as gz : image_bytes = gz . read ( ) numpy . array ( Image . open ( io . BytesIO ( image_bytes ) ) ) return image_bytes , patched
Do everything necessary to process an image inside a TAR .
55,912
def read_devkit ( f ) : with tar_open ( f ) as tar : meta_mat = tar . extractfile ( DEVKIT_META_PATH ) synsets , cost_matrix = read_metadata_mat_file ( meta_mat ) raw_valid_groundtruth = numpy . loadtxt ( tar . extractfile ( DEVKIT_VALID_GROUNDTRUTH_PATH ) , dtype = numpy . int16 ) return synsets , cost_matrix , raw_valid_groundtruth
Read relevant information from the development kit archive .
55,913
def extract_patch_images ( f , which_set ) : if which_set not in ( 'train' , 'valid' , 'test' ) : raise ValueError ( 'which_set must be one of train, valid, or test' ) which_set = 'val' if which_set == 'valid' else which_set patch_images = { } with tar_open ( f ) as tar : for info_obj in tar : if not info_obj . name . endswith ( '.JPEG' ) : continue tokens = info_obj . name . split ( '/' ) file_which_set = tokens [ - 2 ] if file_which_set != which_set : continue filename = tokens [ - 1 ] patch_images [ filename ] = tar . extractfile ( info_obj . name ) . read ( ) return patch_images
Extracts a dict of the patch images for ILSVRC2010 .
55,914
def convert_cifar10 ( directory , output_directory , output_filename = 'cifar10.hdf5' ) : output_path = os . path . join ( output_directory , output_filename ) h5file = h5py . File ( output_path , mode = 'w' ) input_file = os . path . join ( directory , DISTRIBUTION_FILE ) tar_file = tarfile . open ( input_file , 'r:gz' ) train_batches = [ ] for batch in range ( 1 , 6 ) : file = tar_file . extractfile ( 'cifar-10-batches-py/data_batch_%d' % batch ) try : if six . PY3 : array = cPickle . load ( file , encoding = 'latin1' ) else : array = cPickle . load ( file ) train_batches . append ( array ) finally : file . close ( ) train_features = numpy . concatenate ( [ batch [ 'data' ] . reshape ( batch [ 'data' ] . shape [ 0 ] , 3 , 32 , 32 ) for batch in train_batches ] ) train_labels = numpy . concatenate ( [ numpy . array ( batch [ 'labels' ] , dtype = numpy . uint8 ) for batch in train_batches ] ) train_labels = numpy . expand_dims ( train_labels , 1 ) file = tar_file . extractfile ( 'cifar-10-batches-py/test_batch' ) try : if six . PY3 : test = cPickle . load ( file , encoding = 'latin1' ) else : test = cPickle . load ( file ) finally : file . close ( ) test_features = test [ 'data' ] . reshape ( test [ 'data' ] . shape [ 0 ] , 3 , 32 , 32 ) test_labels = numpy . array ( test [ 'labels' ] , dtype = numpy . uint8 ) test_labels = numpy . expand_dims ( test_labels , 1 ) data = ( ( 'train' , 'features' , train_features ) , ( 'train' , 'targets' , train_labels ) , ( 'test' , 'features' , test_features ) , ( 'test' , 'targets' , test_labels ) ) fill_hdf5_file ( h5file , data ) h5file [ 'features' ] . dims [ 0 ] . label = 'batch' h5file [ 'features' ] . dims [ 1 ] . label = 'channel' h5file [ 'features' ] . dims [ 2 ] . label = 'height' h5file [ 'features' ] . dims [ 3 ] . label = 'width' h5file [ 'targets' ] . dims [ 0 ] . label = 'batch' h5file [ 'targets' ] . dims [ 1 ] . label = 'index' h5file . flush ( ) h5file . close ( ) return ( output_path , )
Converts the CIFAR-10 dataset to HDF5.
55,915
def check_exists ( required_files ) : def function_wrapper ( f ) : @ wraps ( f ) def wrapped ( directory , * args , ** kwargs ) : missing = [ ] for filename in required_files : if not os . path . isfile ( os . path . join ( directory , filename ) ) : missing . append ( filename ) if len ( missing ) > 0 : raise MissingInputFiles ( 'Required files missing' , missing ) return f ( directory , * args , ** kwargs ) return wrapped return function_wrapper
Decorator that checks if required files exist before running .
55,916
def fill_hdf5_file ( h5file , data ) : split_names = set ( split_tuple [ 0 ] for split_tuple in data ) for name in split_names : lengths = [ len ( split_tuple [ 2 ] ) for split_tuple in data if split_tuple [ 0 ] == name ] if not all ( le == lengths [ 0 ] for le in lengths ) : raise ValueError ( "split '{}' has sources that " . format ( name ) + "vary in length" ) split_dict = dict ( [ ( split_name , { } ) for split_name in split_names ] ) source_names = set ( split_tuple [ 1 ] for split_tuple in data ) for name in source_names : splits = [ s for s in data if s [ 1 ] == name ] indices = numpy . cumsum ( [ 0 ] + [ len ( s [ 2 ] ) for s in splits ] ) if not all ( s [ 2 ] . dtype == splits [ 0 ] [ 2 ] . dtype for s in splits ) : raise ValueError ( "source '{}' has splits that " . format ( name ) + "vary in dtype" ) if not all ( s [ 2 ] . shape [ 1 : ] == splits [ 0 ] [ 2 ] . shape [ 1 : ] for s in splits ) : raise ValueError ( "source '{}' has splits that " . format ( name ) + "vary in shapes" ) dataset = h5file . create_dataset ( name , ( sum ( len ( s [ 2 ] ) for s in splits ) , ) + splits [ 0 ] [ 2 ] . shape [ 1 : ] , dtype = splits [ 0 ] [ 2 ] . dtype ) dataset [ ... ] = numpy . concatenate ( [ s [ 2 ] for s in splits ] , axis = 0 ) for i , j , s in zip ( indices [ : - 1 ] , indices [ 1 : ] , splits ) : if len ( s ) == 4 : split_dict [ s [ 0 ] ] [ name ] = ( i , j , None , s [ 3 ] ) else : split_dict [ s [ 0 ] ] [ name ] = ( i , j ) h5file . attrs [ 'split' ] = H5PYDataset . create_split_array ( split_dict )
Fills an HDF5 file in an H5PYDataset-compatible manner.
55,917
def progress_bar ( name , maxval , prefix = 'Converting' ) : widgets = [ '{} {}: ' . format ( prefix , name ) , Percentage ( ) , ' ' , Bar ( marker = '=' , left = '[' , right = ']' ) , ' ' , ETA ( ) ] bar = ProgressBar ( widgets = widgets , max_value = maxval , fd = sys . stdout ) . start ( ) try : yield bar finally : bar . update ( maxval ) bar . finish ( )
Manages a progress bar for a conversion .
55,918
def convert_iris ( directory , output_directory , output_filename = 'iris.hdf5' ) : classes = { b'Iris-setosa' : 0 , b'Iris-versicolor' : 1 , b'Iris-virginica' : 2 } data = numpy . loadtxt ( os . path . join ( directory , 'iris.data' ) , converters = { 4 : lambda x : classes [ x ] } , delimiter = ',' ) features = data [ : , : - 1 ] . astype ( 'float32' ) targets = data [ : , - 1 ] . astype ( 'uint8' ) . reshape ( ( - 1 , 1 ) ) data = ( ( 'all' , 'features' , features ) , ( 'all' , 'targets' , targets ) ) output_path = os . path . join ( output_directory , output_filename ) h5file = h5py . File ( output_path , mode = 'w' ) fill_hdf5_file ( h5file , data ) h5file [ 'features' ] . dims [ 0 ] . label = 'batch' h5file [ 'features' ] . dims [ 1 ] . label = 'feature' h5file [ 'targets' ] . dims [ 0 ] . label = 'batch' h5file [ 'targets' ] . dims [ 1 ] . label = 'index' h5file . flush ( ) h5file . close ( ) return ( output_path , )
Convert the Iris dataset to HDF5 .
55,919
def fill_subparser ( subparser ) : urls = ( [ None ] * len ( ALL_FILES ) ) filenames = list ( ALL_FILES ) subparser . set_defaults ( urls = urls , filenames = filenames ) subparser . add_argument ( '-P' , '--url-prefix' , type = str , default = None , help = "URL prefix to prepend to the filenames of " "non-public files, in order to download them. " "Be sure to include the trailing slash." ) return default_downloader
Sets up a subparser to download the ILSVRC2012 dataset files .
55,920
def _get_target_index ( self ) : return ( self . index + self . source_window * ( not self . overlapping ) + self . offset )
Return the index where the target window starts .
55,921
def _get_end_index ( self ) : return max ( self . index + self . source_window , self . _get_target_index ( ) + self . target_window )
Return the end of both windows .
55,922
def convert_svhn ( which_format , directory , output_directory , output_filename = None ) : if which_format not in ( 1 , 2 ) : raise ValueError ( "SVHN format needs to be either 1 or 2." ) if not output_filename : output_filename = 'svhn_format_{}.hdf5' . format ( which_format ) if which_format == 1 : return convert_svhn_format_1 ( directory , output_directory , output_filename ) else : return convert_svhn_format_2 ( directory , output_directory , output_filename )
Converts the SVHN dataset to HDF5 .
55,923
def open_ ( filename , mode = 'r' , encoding = None ) : if filename . endswith ( '.gz' ) : if six . PY2 : zf = io . BufferedReader ( gzip . open ( filename , mode ) ) if encoding : return codecs . getreader ( encoding ) ( zf ) else : return zf else : return io . BufferedReader ( gzip . open ( filename , mode , encoding = encoding ) ) if six . PY2 : if encoding : return codecs . open ( filename , mode , encoding = encoding ) else : return open ( filename , mode ) else : return open ( filename , mode , encoding = encoding )
Open a text file with encoding and optional gzip compression .
55,924
def tar_open ( f ) : if isinstance ( f , six . string_types ) : return tarfile . open ( name = f ) else : return tarfile . open ( fileobj = f )
Open either a filename or a file-like object as a TarFile.
55,925
def copy_from_server_to_local ( dataset_remote_dir , dataset_local_dir , remote_fname , local_fname ) : log . debug ( "Copying file `{}` to a local directory `{}`." . format ( remote_fname , dataset_local_dir ) ) head , tail = os . path . split ( local_fname ) head += os . path . sep if not os . path . exists ( head ) : os . makedirs ( os . path . dirname ( head ) ) shutil . copyfile ( remote_fname , local_fname ) st = os . stat ( remote_fname ) os . chmod ( local_fname , st . st_mode ) try : os . chown ( local_fname , - 1 , st . st_gid ) except OSError : pass dirs = os . path . dirname ( local_fname ) . replace ( dataset_local_dir , '' ) sep = dirs . split ( os . path . sep ) if sep [ 0 ] == "" : sep = sep [ 1 : ] for i in range ( len ( sep ) ) : orig_p = os . path . join ( dataset_remote_dir , * sep [ : i + 1 ] ) new_p = os . path . join ( dataset_local_dir , * sep [ : i + 1 ] ) orig_st = os . stat ( orig_p ) new_st = os . stat ( new_p ) if not new_st . st_mode & stat . S_IWGRP : os . chmod ( new_p , new_st . st_mode | stat . S_IWGRP ) if orig_st . st_gid != new_st . st_gid : try : os . chown ( new_p , - 1 , orig_st . st_gid ) except OSError : pass
Copies a remote file locally .
55,926
def convert_to_one_hot ( y ) : max_value = max ( y ) min_value = min ( y ) length = len ( y ) one_hot = numpy . zeros ( ( length , ( max_value - min_value + 1 ) ) ) one_hot [ numpy . arange ( length ) , y ] = 1 return one_hot
Converts y into a one-hot representation.
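A minimal runnable sketch of the one-hot conversion above (restating the function from this row so the snippet is self-contained; labels are assumed to start at 0, since the function indexes columns directly with y):

```python
import numpy

def convert_to_one_hot(y):
    # One row per label, one column per class (classes assumed to start at 0).
    max_value, min_value, length = max(y), min(y), len(y)
    one_hot = numpy.zeros((length, max_value - min_value + 1))
    one_hot[numpy.arange(length), y] = 1
    return one_hot

print(convert_to_one_hot([0, 2, 1]))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```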
55,927
def convert_binarized_mnist ( directory , output_directory , output_filename = 'binarized_mnist.hdf5' ) : output_path = os . path . join ( output_directory , output_filename ) h5file = h5py . File ( output_path , mode = 'w' ) train_set = numpy . loadtxt ( os . path . join ( directory , TRAIN_FILE ) ) . reshape ( ( - 1 , 1 , 28 , 28 ) ) . astype ( 'uint8' ) valid_set = numpy . loadtxt ( os . path . join ( directory , VALID_FILE ) ) . reshape ( ( - 1 , 1 , 28 , 28 ) ) . astype ( 'uint8' ) test_set = numpy . loadtxt ( os . path . join ( directory , TEST_FILE ) ) . reshape ( ( - 1 , 1 , 28 , 28 ) ) . astype ( 'uint8' ) data = ( ( 'train' , 'features' , train_set ) , ( 'valid' , 'features' , valid_set ) , ( 'test' , 'features' , test_set ) ) fill_hdf5_file ( h5file , data ) for i , label in enumerate ( ( 'batch' , 'channel' , 'height' , 'width' ) ) : h5file [ 'features' ] . dims [ i ] . label = label h5file . flush ( ) h5file . close ( ) return ( output_path , )
Converts the binarized MNIST dataset to HDF5 .
55,928
def fill_subparser ( subparser ) : url = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' filename = 'cifar-10-python.tar.gz' subparser . set_defaults ( urls = [ url ] , filenames = [ filename ] ) return default_downloader
Sets up a subparser to download the CIFAR-10 dataset file.
55,929
def convert_celeba_aligned_cropped ( directory , output_directory , output_filename = OUTPUT_FILENAME ) : output_path = os . path . join ( output_directory , output_filename ) h5file = _initialize_conversion ( directory , output_path , ( 218 , 178 ) ) features_dataset = h5file [ 'features' ] image_file_path = os . path . join ( directory , IMAGE_FILE ) with zipfile . ZipFile ( image_file_path , 'r' ) as image_file : with progress_bar ( 'images' , NUM_EXAMPLES ) as bar : for i in range ( NUM_EXAMPLES ) : image_name = 'img_align_celeba/{:06d}.jpg' . format ( i + 1 ) features_dataset [ i ] = numpy . asarray ( Image . open ( image_file . open ( image_name , 'r' ) ) ) . transpose ( 2 , 0 , 1 ) bar . update ( i + 1 ) h5file . flush ( ) h5file . close ( ) return ( output_path , )
Converts the aligned and cropped CelebA dataset to HDF5 .
55,930
def convert_celeba ( which_format , directory , output_directory , output_filename = None ) : if which_format not in ( 'aligned_cropped' , '64' ) : raise ValueError ( "CelebA format needs to be either " "'aligned_cropped' or '64'." ) if not output_filename : output_filename = 'celeba_{}.hdf5' . format ( which_format ) if which_format == 'aligned_cropped' : return convert_celeba_aligned_cropped ( directory , output_directory , output_filename ) else : return convert_celeba_64 ( directory , output_directory , output_filename )
Converts the CelebA dataset to HDF5 .
55,931
def disk_usage ( path ) : st = os . statvfs ( path ) total = st . f_blocks * st . f_frsize used = ( st . f_blocks - st . f_bfree ) * st . f_frsize return total , used
Return the total and used disk space for the given path, in bytes.
55,932
def safe_mkdir ( folder_name , force_perm = None ) : if os . path . exists ( folder_name ) : return intermediary_folders = folder_name . split ( os . path . sep ) if intermediary_folders [ - 1 ] == "" : intermediary_folders = intermediary_folders [ : - 1 ] if force_perm : force_perm_path = folder_name . split ( os . path . sep ) if force_perm_path [ - 1 ] == "" : force_perm_path = force_perm_path [ : - 1 ] for i in range ( 1 , len ( intermediary_folders ) ) : folder_to_create = os . path . sep . join ( intermediary_folders [ : i + 1 ] ) if os . path . exists ( folder_to_create ) : continue os . mkdir ( folder_to_create ) if force_perm : os . chmod ( folder_to_create , force_perm )
Create the specified folder .
55,933
def check_enough_space ( dataset_local_dir , remote_fname , local_fname , max_disk_usage = 0.9 ) : storage_need = os . path . getsize ( remote_fname ) storage_total , storage_used = disk_usage ( dataset_local_dir ) return ( ( storage_used + storage_need ) < ( storage_total * max_disk_usage ) )
Check if the given local folder has enough space .
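A small worked illustration of the threshold used above, with made-up sizes; the copy is allowed only if used plus needed space stays below 90% of total capacity:

```python
# Hypothetical numbers: 100 GiB volume, 80 GiB used, 5 GiB file to copy.
storage_total = 100 * 1024 ** 3
storage_used = 80 * 1024 ** 3
storage_need = 5 * 1024 ** 3
max_disk_usage = 0.9

print((storage_used + storage_need) < (storage_total * max_disk_usage))  # True: 85 GiB < 90 GiB
```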
55,934
def convert_cifar100 ( directory , output_directory , output_filename = 'cifar100.hdf5' ) : output_path = os . path . join ( output_directory , output_filename ) h5file = h5py . File ( output_path , mode = "w" ) input_file = os . path . join ( directory , 'cifar-100-python.tar.gz' ) tar_file = tarfile . open ( input_file , 'r:gz' ) file = tar_file . extractfile ( 'cifar-100-python/train' ) try : if six . PY3 : train = cPickle . load ( file , encoding = 'latin1' ) else : train = cPickle . load ( file ) finally : file . close ( ) train_features = train [ 'data' ] . reshape ( train [ 'data' ] . shape [ 0 ] , 3 , 32 , 32 ) train_coarse_labels = numpy . array ( train [ 'coarse_labels' ] , dtype = numpy . uint8 ) train_fine_labels = numpy . array ( train [ 'fine_labels' ] , dtype = numpy . uint8 ) file = tar_file . extractfile ( 'cifar-100-python/test' ) try : if six . PY3 : test = cPickle . load ( file , encoding = 'latin1' ) else : test = cPickle . load ( file ) finally : file . close ( ) test_features = test [ 'data' ] . reshape ( test [ 'data' ] . shape [ 0 ] , 3 , 32 , 32 ) test_coarse_labels = numpy . array ( test [ 'coarse_labels' ] , dtype = numpy . uint8 ) test_fine_labels = numpy . array ( test [ 'fine_labels' ] , dtype = numpy . uint8 ) data = ( ( 'train' , 'features' , train_features ) , ( 'train' , 'coarse_labels' , train_coarse_labels . reshape ( ( - 1 , 1 ) ) ) , ( 'train' , 'fine_labels' , train_fine_labels . reshape ( ( - 1 , 1 ) ) ) , ( 'test' , 'features' , test_features ) , ( 'test' , 'coarse_labels' , test_coarse_labels . reshape ( ( - 1 , 1 ) ) ) , ( 'test' , 'fine_labels' , test_fine_labels . reshape ( ( - 1 , 1 ) ) ) ) fill_hdf5_file ( h5file , data ) h5file [ 'features' ] . dims [ 0 ] . label = 'batch' h5file [ 'features' ] . dims [ 1 ] . label = 'channel' h5file [ 'features' ] . dims [ 2 ] . label = 'height' h5file [ 'features' ] . dims [ 3 ] . label = 'width' h5file [ 'coarse_labels' ] . dims [ 0 ] . label = 'batch' h5file [ 'coarse_labels' ] . dims [ 1 ] . label = 'index' h5file [ 'fine_labels' ] . dims [ 0 ] . label = 'batch' h5file [ 'fine_labels' ] . dims [ 1 ] . label = 'index' h5file . flush ( ) h5file . close ( ) return ( output_path , )
Converts the CIFAR-100 dataset to HDF5.
55,935
def verify_axis_labels ( self , expected , actual , source_name ) : if not getattr ( self , '_checked_axis_labels' , False ) : self . _checked_axis_labels = defaultdict ( bool ) if not self . _checked_axis_labels [ source_name ] : if actual is None : log . warning ( "%s instance could not verify (missing) axis " "expected %s, got None" , self . __class__ . __name__ , expected ) else : if expected != actual : raise AxisLabelsMismatchError ( "{} expected axis labels " "{}, got {} instead" . format ( self . __class__ . __name__ , expected , actual ) ) self . _checked_axis_labels [ source_name ] = True
Verify that axis labels for a given source are as expected .
55,936
def get_data ( self , request = None ) : if request is None : raise ValueError data = [ [ ] for _ in self . sources ] for i in range ( request ) : try : for source_data , example in zip ( data , next ( self . child_epoch_iterator ) ) : source_data . append ( example ) except StopIteration : if not self . strictness and data [ 0 ] : break elif self . strictness > 1 and data [ 0 ] : raise ValueError raise return tuple ( numpy . asarray ( source_data ) for source_data in data )
Get data from the dataset .
55,937
def _producer_wrapper ( f , port , addr = 'tcp://127.0.0.1' ) : try : context = zmq . Context ( ) socket = context . socket ( zmq . PUSH ) socket . connect ( ':' . join ( [ addr , str ( port ) ] ) ) f ( socket ) finally : context . destroy ( )
A shim that sets up a socket and starts the producer callable .
55,938
def _spawn_producer ( f , port , addr = 'tcp://127.0.0.1' ) : process = Process ( target = _producer_wrapper , args = ( f , port , addr ) ) process . start ( ) return process
Start a process that sends results on a PUSH socket .
55,939
def producer_consumer ( producer , consumer , addr = 'tcp://127.0.0.1' , port = None , context = None ) : context_created = False if context is None : context_created = True context = zmq . Context ( ) try : consumer_socket = context . socket ( zmq . PULL ) if port is None : port = consumer_socket . bind_to_random_port ( addr ) try : process = _spawn_producer ( producer , port ) result = consumer ( consumer_socket ) finally : process . terminate ( ) return result finally : if context_created : context . destroy ( )
A producer-consumer pattern.
55,940
def convert_dogs_vs_cats ( directory , output_directory , output_filename = 'dogs_vs_cats.hdf5' ) : output_path = os . path . join ( output_directory , output_filename ) h5file = h5py . File ( output_path , mode = 'w' ) dtype = h5py . special_dtype ( vlen = numpy . dtype ( 'uint8' ) ) hdf_features = h5file . create_dataset ( 'image_features' , ( 37500 , ) , dtype = dtype ) hdf_shapes = h5file . create_dataset ( 'image_features_shapes' , ( 37500 , 3 ) , dtype = 'int32' ) hdf_labels = h5file . create_dataset ( 'targets' , ( 25000 , 1 ) , dtype = 'uint8' ) hdf_features . dims . create_scale ( hdf_shapes , 'shapes' ) hdf_features . dims [ 0 ] . attach_scale ( hdf_shapes ) hdf_shapes_labels = h5file . create_dataset ( 'image_features_shapes_labels' , ( 3 , ) , dtype = 'S7' ) hdf_shapes_labels [ ... ] = [ 'channel' . encode ( 'utf8' ) , 'height' . encode ( 'utf8' ) , 'width' . encode ( 'utf8' ) ] hdf_features . dims . create_scale ( hdf_shapes_labels , 'shape_labels' ) hdf_features . dims [ 0 ] . attach_scale ( hdf_shapes_labels ) hdf_features . dims [ 0 ] . label = 'batch' hdf_labels . dims [ 0 ] . label = 'batch' hdf_labels . dims [ 1 ] . label = 'index' i = 0 for split , split_size in zip ( [ TRAIN , TEST ] , [ 25000 , 12500 ] ) : filename = os . path . join ( directory , split ) zip_file = zipfile . ZipFile ( filename , 'r' ) image_names = zip_file . namelist ( ) [ 1 : ] if split == TRAIN : rng = numpy . random . RandomState ( 123522 ) rng . shuffle ( image_names ) else : image_names . sort ( key = lambda fn : int ( os . path . splitext ( fn [ 6 : ] ) [ 0 ] ) ) with progress_bar ( filename , split_size ) as bar : for image_name in image_names : image = numpy . array ( Image . open ( zip_file . open ( image_name ) ) ) image = image . transpose ( 2 , 0 , 1 ) hdf_features [ i ] = image . flatten ( ) hdf_shapes [ i ] = image . shape if split == TRAIN : hdf_labels [ i ] = 0 if 'cat' in image_name else 1 i += 1 bar . update ( i if split == TRAIN else i - 25000 ) split_dict = { } sources = [ 'image_features' , 'targets' ] split_dict [ 'train' ] = dict ( zip ( sources , [ ( 0 , 25000 ) ] * 2 ) ) split_dict [ 'test' ] = { sources [ 0 ] : ( 25000 , 37500 ) } h5file . attrs [ 'split' ] = H5PYDataset . create_split_array ( split_dict ) h5file . flush ( ) h5file . close ( ) return ( output_path , )
Converts the Dogs vs. Cats dataset to HDF5.
55,941
def main ( args = None ) : built_in_datasets = dict ( downloaders . all_downloaders ) if fuel . config . extra_downloaders : for name in fuel . config . extra_downloaders : extra_datasets = dict ( importlib . import_module ( name ) . all_downloaders ) if any ( key in built_in_datasets for key in extra_datasets . keys ( ) ) : raise ValueError ( 'extra downloaders conflict in name with ' 'built-in downloaders' ) built_in_datasets . update ( extra_datasets ) parser = argparse . ArgumentParser ( description = 'Download script for built-in datasets.' ) parent_parser = argparse . ArgumentParser ( add_help = False ) parent_parser . add_argument ( "-d" , "--directory" , help = "where to save the downloaded files" , type = str , default = os . getcwd ( ) ) parent_parser . add_argument ( "--clear" , help = "clear the downloaded files" , action = 'store_true' ) subparsers = parser . add_subparsers ( ) download_functions = { } for name , fill_subparser in built_in_datasets . items ( ) : subparser = subparsers . add_parser ( name , parents = [ parent_parser ] , help = 'Download the {} dataset' . format ( name ) ) subparser . set_defaults ( which_ = name ) download_functions [ name ] = fill_subparser ( subparser ) args = parser . parse_args ( ) args_dict = vars ( args ) download_function = download_functions [ args_dict . pop ( 'which_' ) ] try : download_function ( ** args_dict ) except NeedURLPrefix : parser . error ( url_prefix_message )
Entry point for the fuel-download script.
55,942
def fill_subparser ( subparser ) : filenames = [ 'train-images-idx3-ubyte.gz' , 'train-labels-idx1-ubyte.gz' , 't10k-images-idx3-ubyte.gz' , 't10k-labels-idx1-ubyte.gz' ] urls = [ 'http://yann.lecun.com/exdb/mnist/' + f for f in filenames ] subparser . set_defaults ( urls = urls , filenames = filenames ) return default_downloader
Sets up a subparser to download the MNIST dataset files .
55,943
def main ( args = None ) : parser = argparse . ArgumentParser ( description = 'Extracts metadata from a Fuel-converted HDF5 file.' ) parser . add_argument ( "filename" , help = "HDF5 file to analyze" ) args = parser . parse_args ( ) with h5py . File ( args . filename , 'r' ) as h5file : interface_version = h5file . attrs . get ( 'h5py_interface_version' , 'N/A' ) fuel_convert_version = h5file . attrs . get ( 'fuel_convert_version' , 'N/A' ) fuel_convert_command = h5file . attrs . get ( 'fuel_convert_command' , 'N/A' ) message_prefix = message_prefix_template . format ( os . path . basename ( args . filename ) ) message_body = message_body_template . format ( fuel_convert_command , interface_version , fuel_convert_version ) message = '' . join ( [ '\n' , message_prefix , '\n' , '=' * len ( message_prefix ) , message_body ] ) print ( message )
Entry point for the fuel-info script.
55,944
def convert_silhouettes ( size , directory , output_directory , output_filename = None ) : if size not in ( 16 , 28 ) : raise ValueError ( 'size must be 16 or 28' ) if output_filename is None : output_filename = 'caltech101_silhouettes{}.hdf5' . format ( size ) output_file = os . path . join ( output_directory , output_filename ) input_file = 'caltech101_silhouettes_{}_split1.mat' . format ( size ) input_file = os . path . join ( directory , input_file ) if not os . path . isfile ( input_file ) : raise MissingInputFiles ( 'Required files missing' , [ input_file ] ) with h5py . File ( output_file , mode = "w" ) as h5file : mat = loadmat ( input_file ) train_features = mat [ 'train_data' ] . reshape ( [ - 1 , 1 , size , size ] ) train_targets = mat [ 'train_labels' ] valid_features = mat [ 'val_data' ] . reshape ( [ - 1 , 1 , size , size ] ) valid_targets = mat [ 'val_labels' ] test_features = mat [ 'test_data' ] . reshape ( [ - 1 , 1 , size , size ] ) test_targets = mat [ 'test_labels' ] data = ( ( 'train' , 'features' , train_features ) , ( 'train' , 'targets' , train_targets ) , ( 'valid' , 'features' , valid_features ) , ( 'valid' , 'targets' , valid_targets ) , ( 'test' , 'features' , test_features ) , ( 'test' , 'targets' , test_targets ) , ) fill_hdf5_file ( h5file , data ) for i , label in enumerate ( ( 'batch' , 'channel' , 'height' , 'width' ) ) : h5file [ 'features' ] . dims [ i ] . label = label for i , label in enumerate ( ( 'batch' , 'index' ) ) : h5file [ 'targets' ] . dims [ i ] . label = label return ( output_file , )
Convert the Caltech 101 Silhouettes dataset.
55,945
def cross_validation ( scheme_class , num_examples , num_folds , strict = True , ** kwargs ) : if strict and num_examples % num_folds != 0 : raise ValueError ( ( "{} examples are not divisible in {} evenly-sized " + "folds. To allow this, have a look at the " + "`strict` argument." ) . format ( num_examples , num_folds ) ) for i in xrange ( num_folds ) : begin = num_examples * i // num_folds end = num_examples * ( i + 1 ) // num_folds train = scheme_class ( list ( chain ( xrange ( 0 , begin ) , xrange ( end , num_examples ) ) ) , ** kwargs ) valid = scheme_class ( xrange ( begin , end ) , ** kwargs ) if strict : yield ( train , valid ) else : yield ( train , valid , end - begin )
Return pairs of schemes to be used for cross-validation.
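A minimal sketch of the fold boundaries computed above, using plain index arithmetic instead of Fuel scheme classes (the numbers are illustrative):

```python
num_examples, num_folds = 10, 5

for i in range(num_folds):
    # Same integer arithmetic as the function above.
    begin = num_examples * i // num_folds
    end = num_examples * (i + 1) // num_folds
    train = list(range(0, begin)) + list(range(end, num_examples))
    valid = list(range(begin, end))
    print(train, valid)
# First fold: train = [2, 3, 4, 5, 6, 7, 8, 9], valid = [0, 1]
```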
55,946
def main ( args = None ) : built_in_datasets = dict ( converters . all_converters ) if fuel . config . extra_converters : for name in fuel . config . extra_converters : extra_datasets = dict ( importlib . import_module ( name ) . all_converters ) if any ( key in built_in_datasets for key in extra_datasets . keys ( ) ) : raise ValueError ( 'extra converters conflict in name with ' 'built-in converters' ) built_in_datasets . update ( extra_datasets ) parser = argparse . ArgumentParser ( description = 'Conversion script for built-in datasets.' ) subparsers = parser . add_subparsers ( ) parent_parser = argparse . ArgumentParser ( add_help = False ) parent_parser . add_argument ( "-d" , "--directory" , help = "directory in which input files reside" , type = str , default = os . getcwd ( ) ) convert_functions = { } for name , fill_subparser in built_in_datasets . items ( ) : subparser = subparsers . add_parser ( name , parents = [ parent_parser ] , help = 'Convert the {} dataset' . format ( name ) ) subparser . add_argument ( "-o" , "--output-directory" , help = "where to save the dataset" , type = str , default = os . getcwd ( ) , action = CheckDirectoryAction ) subparser . add_argument ( "-r" , "--output_filename" , help = "new name of the created dataset" , type = str , default = None ) subparser . set_defaults ( which_ = name ) convert_functions [ name ] = fill_subparser ( subparser ) args = parser . parse_args ( args ) args_dict = vars ( args ) if args_dict [ 'output_filename' ] is not None and os . path . splitext ( args_dict [ 'output_filename' ] ) [ 1 ] not in ( '.hdf5' , '.hdf' , '.h5' ) : args_dict [ 'output_filename' ] += '.hdf5' if args_dict [ 'output_filename' ] is None : args_dict . pop ( 'output_filename' ) convert_function = convert_functions [ args_dict . pop ( 'which_' ) ] try : output_paths = convert_function ( ** args_dict ) except MissingInputFiles as e : intro = "The following required files were not found:\n" message = "\n" . join ( [ intro ] + [ " * " + f for f in e . filenames ] ) message += "\n\nDid you forget to run fuel-download?" parser . error ( message ) for output_path in output_paths : h5file = h5py . File ( output_path , 'a' ) interface_version = H5PYDataset . interface_version . encode ( 'utf-8' ) h5file . attrs [ 'h5py_interface_version' ] = interface_version fuel_convert_version = converters . __version__ . encode ( 'utf-8' ) h5file . attrs [ 'fuel_convert_version' ] = fuel_convert_version command = [ os . path . basename ( sys . argv [ 0 ] ) ] + sys . argv [ 1 : ] h5file . attrs [ 'fuel_convert_command' ] = ( ' ' . join ( command ) . encode ( 'utf-8' ) ) h5file . flush ( ) h5file . close ( )
Entry point for fuel - convert script .
55,947
def refresh_lock ( lock_file ) : unique_id = '%s_%s_%s' % ( os . getpid ( ) , '' . join ( [ str ( random . randint ( 0 , 9 ) ) for i in range ( 10 ) ] ) , hostname ) try : lock_write = open ( lock_file , 'w' ) lock_write . write ( unique_id + '\n' ) lock_write . close ( ) except Exception : while get_lock . n_lock > 0 : release_lock ( ) raise return unique_id
Refresh an existing lock .
55,948
def get_lock ( lock_dir , ** kw ) : if not hasattr ( get_lock , 'n_lock' ) : get_lock . n_lock = 0 if not hasattr ( get_lock , 'lock_is_enabled' ) : get_lock . lock_is_enabled = True get_lock . lock_dir = lock_dir get_lock . unlocker = Unlocker ( get_lock . lock_dir ) else : if lock_dir != get_lock . lock_dir : assert get_lock . n_lock == 0 get_lock . lock_dir = lock_dir get_lock . unlocker = Unlocker ( get_lock . lock_dir ) if get_lock . lock_is_enabled : if get_lock . n_lock == 0 : lock ( get_lock . lock_dir , ** kw ) atexit . register ( Unlocker . unlock , get_lock . unlocker ) get_lock . start_time = time . time ( ) else : if get_lock . start_time is None : while get_lock . n_lock > 0 : release_lock ( ) raise Exception ( "For some unknow reason, the lock was already taken," " but no start time was registered." ) now = time . time ( ) if now - get_lock . start_time > TIMEOUT : lockpath = os . path . join ( get_lock . lock_dir , 'lock' ) logger . info ( 'Refreshing lock %s' , str ( lockpath ) ) refresh_lock ( lockpath ) get_lock . start_time = now get_lock . n_lock += 1
Obtain lock on compilation directory .
55,949
def release_lock ( ) : get_lock . n_lock -= 1 assert get_lock . n_lock >= 0 if get_lock . lock_is_enabled and get_lock . n_lock == 0 : get_lock . start_time = None get_lock . unlocker . unlock ( )
Release lock on compilation directory .
55,950
def release_readlock ( lockdir_name ) : if os . path . exists ( lockdir_name ) and os . path . isdir ( lockdir_name ) : os . rmdir ( lockdir_name )
Release a previously obtained readlock .
55,951
def get_readlock ( pid , path ) : timestamp = int ( time . time ( ) * 1e6 ) lockdir_name = "%s.readlock.%i.%i" % ( path , pid , timestamp ) os . mkdir ( lockdir_name ) atexit . register ( release_readlock , lockdir_name = lockdir_name )
Obtain a readlock on a file .
55,952
def unlock ( self ) : try : self . os . remove ( self . os . path . join ( self . tmp_dir , 'lock' ) ) except Exception : pass try : self . os . rmdir ( self . tmp_dir ) except Exception : pass
Remove current lock .
55,953
def filename_from_url ( url , path = None ) : r = requests . get ( url , stream = True ) if 'Content-Disposition' in r . headers : filename = re . findall ( r'filename=([^;]+)' , r . headers [ 'Content-Disposition' ] ) [ 0 ] . strip ( '"\"' ) else : filename = os . path . basename ( urllib . parse . urlparse ( url ) . path ) return filename
Parses a URL to determine a file name .
55,954
def download ( url , file_handle , chunk_size = 1024 ) : r = requests . get ( url , stream = True ) total_length = r . headers . get ( 'content-length' ) if total_length is None : maxval = UnknownLength else : maxval = int ( total_length ) name = file_handle . name with progress_bar ( name = name , maxval = maxval ) as bar : for i , chunk in enumerate ( r . iter_content ( chunk_size ) ) : if total_length : bar . update ( i * chunk_size ) file_handle . write ( chunk )
Downloads a given URL to a specific file .
55,955
def default_downloader ( directory , urls , filenames , url_prefix = None , clear = False ) : for i , url in enumerate ( urls ) : filename = filenames [ i ] if not filename : filename = filename_from_url ( url ) if not filename : raise ValueError ( "no filename available for URL '{}'" . format ( url ) ) filenames [ i ] = filename files = [ os . path . join ( directory , f ) for f in filenames ] if clear : for f in files : if os . path . isfile ( f ) : os . remove ( f ) else : print ( 'Downloading ' + ', ' . join ( filenames ) + '\n' ) ensure_directory_exists ( directory ) for url , f , n in zip ( urls , files , filenames ) : if not url : if url_prefix is None : raise NeedURLPrefix url = url_prefix + n with open ( f , 'wb' ) as file_handle : download ( url , file_handle )
Downloads or clears files from URLs and filenames .
55,956
def find_in_data_path ( filename ) : for path in config . data_path : path = os . path . expanduser ( os . path . expandvars ( path ) ) file_path = os . path . join ( path , filename ) if os . path . isfile ( file_path ) : return file_path raise IOError ( "{} not found in Fuel's data path" . format ( filename ) )
Searches for a file within Fuel's data path.
55,957
def lazy_property_factory ( lazy_property ) : def lazy_property_getter ( self ) : if not hasattr ( self , '_' + lazy_property ) : self . load ( ) if not hasattr ( self , '_' + lazy_property ) : raise ValueError ( "{} wasn't loaded" . format ( lazy_property ) ) return getattr ( self , '_' + lazy_property ) def lazy_property_setter ( self , value ) : setattr ( self , '_' + lazy_property , value ) return lazy_property_getter , lazy_property_setter
Create properties that perform lazy loading of attributes .
55,958
def do_not_pickle_attributes ( * lazy_properties ) : r def wrap_class ( cls ) : if not hasattr ( cls , 'load' ) : raise ValueError ( "no load method implemented" ) for lazy_property in lazy_properties : setattr ( cls , lazy_property , property ( * lazy_property_factory ( lazy_property ) ) ) if not hasattr ( cls , '__getstate__' ) : def __getstate__ ( self ) : serializable_state = self . __dict__ . copy ( ) for lazy_property in lazy_properties : attr = serializable_state . get ( '_' + lazy_property ) if isinstance ( attr , collections . Iterator ) : raise ValueError ( "Iterators can't be lazy loaded" ) serializable_state . pop ( '_' + lazy_property , None ) return serializable_state setattr ( cls , '__getstate__' , __getstate__ ) return cls return wrap_class
Decorator to assign non-picklable properties.
55,959
def sorted_fancy_indexing ( indexable , request ) : if len ( request ) > 1 : indices = numpy . argsort ( request ) data = numpy . empty ( shape = ( len ( request ) , ) + indexable . shape [ 1 : ] , dtype = indexable . dtype ) data [ indices ] = indexable [ numpy . array ( request ) [ indices ] , ... ] else : data = indexable [ request ] return data
Safe fancy indexing .
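A self-contained sketch of the sorted fancy indexing shown above: gather with the sorted request, then scatter the results back into the originally requested order (useful for backends such as h5py that expect increasing indices; that rationale is an assumption, not stated in this row):

```python
import numpy

def sorted_fancy_indexing(indexable, request):
    # Sort the requested indices, gather in sorted order, then restore the original order.
    indices = numpy.argsort(request)
    data = numpy.empty(shape=(len(request),) + indexable.shape[1:],
                       dtype=indexable.dtype)
    data[indices] = indexable[numpy.array(request)[indices], ...]
    return data

x = numpy.arange(10) * 10
print(sorted_fancy_indexing(x, [7, 2, 5]))  # [70 20 50]
```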
55,960
def slice_to_numerical_args ( slice_ , num_examples ) : start = slice_ . start if slice_ . start is not None else 0 stop = slice_ . stop if slice_ . stop is not None else num_examples step = slice_ . step if slice_ . step is not None else 1 return start , stop , step
Translate a slice's attributes into numerical attributes.
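A tiny usage sketch of the slice normalization above (restating the helper so it runs standalone):

```python
def slice_to_numerical_args(slice_, num_examples):
    # Fill in defaults for missing slice attributes.
    start = slice_.start if slice_.start is not None else 0
    stop = slice_.stop if slice_.stop is not None else num_examples
    step = slice_.step if slice_.step is not None else 1
    return start, stop, step

print(slice_to_numerical_args(slice(None, None, 2), 10))  # (0, 10, 2)
```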
55,961
def get_list_representation ( self ) : if self . is_list : return self . list_or_slice else : return self [ list ( range ( self . num_examples ) ) ]
Returns this subset's representation as a list of indices.
55,962
def index_within_subset ( self , indexable , subset_request , sort_indices = False ) : if isinstance ( subset_request , numbers . Integral ) : request , = self [ [ subset_request ] ] else : request = self [ subset_request ] if isinstance ( request , numbers . Integral ) or hasattr ( request , 'step' ) : return indexable [ request ] if sort_indices : return self . sorted_fancy_indexing ( indexable , request ) if isinstance ( indexable , ( numpy . ndarray , h5py . Dataset ) ) : return indexable [ request ] return iterable_fancy_indexing ( indexable , request )
Index an indexable object within the context of this subset .
55,963
def num_examples ( self ) : if self . is_list : return len ( self . list_or_slice ) else : start , stop , step = self . slice_to_numerical_args ( self . list_or_slice , self . original_num_examples ) return stop - start
The number of examples this subset spans .
55,964
def get_epoch_iterator ( self , ** kwargs ) : if not self . _fresh_state : self . next_epoch ( ) else : self . _fresh_state = False return super ( DataStream , self ) . get_epoch_iterator ( ** kwargs )
Get an epoch iterator for the data stream .
55,965
def fill_subparser ( subparser ) : sets = [ 'train' , 'valid' , 'test' ] urls = [ 'http://www.cs.toronto.edu/~larocheh/public/datasets/' + 'binarized_mnist/binarized_mnist_{}.amat' . format ( s ) for s in sets ] filenames = [ 'binarized_mnist_{}.amat' . format ( s ) for s in sets ] subparser . set_defaults ( urls = urls , filenames = filenames ) return default_downloader
Sets up a subparser to download the binarized MNIST dataset files .
55,966
def download ( directory , youtube_id , clear = False ) : filepath = os . path . join ( directory , '{}.m4a' . format ( youtube_id ) ) if clear : os . remove ( filepath ) return if not PAFY_AVAILABLE : raise ImportError ( "pafy is required to download YouTube videos" ) url = 'https://www.youtube.com/watch?v={}' . format ( youtube_id ) video = pafy . new ( url ) audio = video . getbestaudio ( ) audio . download ( quiet = False , filepath = filepath )
Download the audio of a YouTube video .
55,967
def fill_subparser ( subparser ) : subparser . add_argument ( '--youtube-id' , type = str , required = True , help = ( "The YouTube ID of the video from which to extract audio, " "usually an 11-character string." ) ) return download
Sets up a subparser to download audio of YouTube videos .
55,968
def convert_youtube_audio ( directory , output_directory , youtube_id , channels , sample , output_filename = None ) : input_file = os . path . join ( directory , '{}.m4a' . format ( youtube_id ) ) wav_filename = '{}.wav' . format ( youtube_id ) wav_file = os . path . join ( directory , wav_filename ) ffmpeg_not_available = subprocess . call ( [ 'ffmpeg' , '-version' ] ) if ffmpeg_not_available : raise RuntimeError ( 'conversion requires ffmpeg' ) subprocess . check_call ( [ 'ffmpeg' , '-y' , '-i' , input_file , '-ac' , str ( channels ) , '-ar' , str ( sample ) , wav_file ] , stdout = sys . stdout ) _ , data = scipy . io . wavfile . read ( wav_file ) if data . ndim == 1 : data = data [ : , None ] data = data [ None , : ] if output_filename is None : output_filename = '{}.hdf5' . format ( youtube_id ) output_file = os . path . join ( output_directory , output_filename ) with h5py . File ( output_file , 'w' ) as h5file : fill_hdf5_file ( h5file , ( ( 'train' , 'features' , data ) , ) ) h5file [ 'features' ] . dims [ 0 ] . label = 'batch' h5file [ 'features' ] . dims [ 1 ] . label = 'time' h5file [ 'features' ] . dims [ 2 ] . label = 'feature' return ( output_file , )
Converts downloaded YouTube audio to HDF5 format .
55,969
def fill_subparser ( subparser ) : subparser . add_argument ( '--youtube-id' , type = str , required = True , help = ( "The YouTube ID of the video from which to extract audio, " "usually an 11-character string." ) ) subparser . add_argument ( '--channels' , type = int , default = 1 , help = ( "The number of audio channels to convert to. The default of 1" "means audio is converted to mono." ) ) subparser . add_argument ( '--sample' , type = int , default = 16000 , help = ( "The sampling rate in Hz. The default of 16000 is " "significantly downsampled compared to normal WAVE files; " "pass 44100 for the usual sampling rate." ) ) return convert_youtube_audio
Sets up a subparser to convert YouTube audio files .
55,970
def convert_ilsvrc2012 ( directory , output_directory , output_filename = 'ilsvrc2012.hdf5' , shuffle_seed = config . default_seed ) : devkit_path = os . path . join ( directory , DEVKIT_ARCHIVE ) train , valid , test = [ os . path . join ( directory , fn ) for fn in IMAGE_TARS ] n_train , valid_groundtruth , n_test , wnid_map = prepare_metadata ( devkit_path ) n_valid = len ( valid_groundtruth ) output_path = os . path . join ( output_directory , output_filename ) with h5py . File ( output_path , 'w' ) as f , create_temp_tar ( ) as patch : log . info ( 'Creating HDF5 datasets...' ) prepare_hdf5_file ( f , n_train , n_valid , n_test ) log . info ( 'Processing training set...' ) process_train_set ( f , train , patch , n_train , wnid_map , shuffle_seed ) log . info ( 'Processing validation set...' ) process_other_set ( f , 'valid' , valid , patch , valid_groundtruth , n_train ) log . info ( 'Processing test set...' ) process_other_set ( f , 'test' , test , patch , ( None , ) * n_test , n_train + n_valid ) log . info ( 'Done.' ) return ( output_path , )
Converter for data from the ILSVRC 2012 competition .
55,971
def fill_subparser ( subparser ) : subparser . add_argument ( "--shuffle-seed" , help = "Seed to use for randomizing order of the " "training set on disk." , default = config . default_seed , type = int , required = False ) return convert_ilsvrc2012
Sets up a subparser to convert the ILSVRC2012 dataset files .
55,972
def read_metadata_mat_file ( meta_mat ) : mat = loadmat ( meta_mat , squeeze_me = True ) synsets = mat [ 'synsets' ] new_dtype = numpy . dtype ( [ ( 'ILSVRC2012_ID' , numpy . int16 ) , ( 'WNID' , ( 'S' , max ( map ( len , synsets [ 'WNID' ] ) ) ) ) , ( 'wordnet_height' , numpy . int8 ) , ( 'gloss' , ( 'S' , max ( map ( len , synsets [ 'gloss' ] ) ) ) ) , ( 'num_children' , numpy . int8 ) , ( 'words' , ( 'S' , max ( map ( len , synsets [ 'words' ] ) ) ) ) , ( 'children' , ( numpy . int8 , max ( synsets [ 'num_children' ] ) ) ) , ( 'num_train_images' , numpy . uint16 ) ] ) new_synsets = numpy . empty ( synsets . shape , dtype = new_dtype ) for attr in [ 'ILSVRC2012_ID' , 'WNID' , 'wordnet_height' , 'gloss' , 'num_children' , 'words' , 'num_train_images' ] : new_synsets [ attr ] = synsets [ attr ] children = [ numpy . atleast_1d ( ch ) for ch in synsets [ 'children' ] ] padded_children = [ numpy . concatenate ( ( c , - numpy . ones ( new_dtype [ 'children' ] . shape [ 0 ] - len ( c ) , dtype = numpy . int16 ) ) ) for c in children ] new_synsets [ 'children' ] = padded_children return new_synsets
Read ILSVRC2012 metadata from the distributed MAT file .
55,973
def multiple_paths_parser ( value ) : if isinstance ( value , six . string_types ) : value = value . split ( os . path . pathsep ) return value
Parses data_path argument .
55,974
def add_config ( self , key , type_ , default = NOT_SET , env_var = None ) : self . config [ key ] = { 'type' : type_ } if env_var is not None : self . config [ key ] [ 'env_var' ] = env_var if default is not NOT_SET : self . config [ key ] [ 'default' ] = default
Add a configuration setting .
55,975
def send_arrays ( socket , arrays , stop = False ) : if arrays : arrays = [ numpy . ascontiguousarray ( array ) for array in arrays ] if stop : headers = { 'stop' : True } socket . send_json ( headers ) else : headers = [ header_data_from_array_1_0 ( array ) for array in arrays ] socket . send_json ( headers , zmq . SNDMORE ) for array in arrays [ : - 1 ] : socket . send ( array , zmq . SNDMORE ) socket . send ( arrays [ - 1 ] )
Send NumPy arrays using the buffer interface and some metadata .
55,976
def recv_arrays ( socket ) : headers = socket . recv_json ( ) if 'stop' in headers : raise StopIteration arrays = [ ] for header in headers : data = socket . recv ( copy = False ) buf = buffer_ ( data ) array = numpy . frombuffer ( buf , dtype = numpy . dtype ( header [ 'descr' ] ) ) array . shape = header [ 'shape' ] if header [ 'fortran_order' ] : array . shape = header [ 'shape' ] [ : : - 1 ] array = array . transpose ( ) arrays . append ( array ) return arrays
Receive a list of NumPy arrays .
55,977
def start_server ( data_stream , port = 5557 , hwm = 10 ) : logging . basicConfig ( level = 'INFO' ) context = zmq . Context ( ) socket = context . socket ( zmq . PUSH ) socket . set_hwm ( hwm ) socket . bind ( 'tcp://*:{}' . format ( port ) ) it = data_stream . get_epoch_iterator ( ) logger . info ( 'server started' ) while True : try : data = next ( it ) stop = False logger . debug ( "sending {} arrays" . format ( len ( data ) ) ) except StopIteration : it = data_stream . get_epoch_iterator ( ) data = None stop = True logger . debug ( "sending StopIteration" ) send_arrays ( socket , data , stop = stop )
Start a data processing server .
55,978
def create_images ( raw_data_directory : str , destination_directory : str , stroke_thicknesses : List [ int ] , canvas_width : int = None , canvas_height : int = None , staff_line_spacing : int = 14 , staff_line_vertical_offsets : List [ int ] = None , random_position_on_canvas : bool = False ) -> dict : all_symbol_files = [ y for x in os . walk ( raw_data_directory ) for y in glob ( os . path . join ( x [ 0 ] , '*.txt' ) ) ] staff_line_multiplier = 1 if staff_line_vertical_offsets is not None and staff_line_vertical_offsets : staff_line_multiplier = len ( staff_line_vertical_offsets ) total_number_of_symbols = len ( all_symbol_files ) * len ( stroke_thicknesses ) * staff_line_multiplier output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})" . format ( total_number_of_symbols , len ( all_symbol_files ) , len ( stroke_thicknesses ) , stroke_thicknesses ) if staff_line_vertical_offsets is not None : output += " and with staff-lines with {0} different offsets from the top ({1})" . format ( staff_line_multiplier , staff_line_vertical_offsets ) if canvas_width is not None and canvas_height is not None : if random_position_on_canvas is False : output += "\nRandomly drawn on a fixed canvas of size {0}x{1} (Width x Height)" . format ( canvas_width , canvas_height ) else : output += "\nCentrally drawn on a fixed canvas of size {0}x{1} (Width x Height)" . format ( canvas_width , canvas_height ) print ( output ) print ( "In directory {0}" . format ( os . path . abspath ( destination_directory ) ) , flush = True ) bounding_boxes = dict ( ) progress_bar = tqdm ( total = total_number_of_symbols , mininterval = 0.25 ) for symbol_file in all_symbol_files : with open ( symbol_file ) as file : content = file . read ( ) symbol = HomusSymbol . initialize_from_string ( content ) target_directory = os . path . join ( destination_directory , symbol . symbol_class ) os . makedirs ( target_directory , exist_ok = True ) raw_file_name_without_extension = os . path . splitext ( os . path . basename ( symbol_file ) ) [ 0 ] for stroke_thickness in stroke_thicknesses : export_path = ExportPath ( destination_directory , symbol . symbol_class , raw_file_name_without_extension , 'png' , stroke_thickness ) if canvas_width is None and canvas_height is None : symbol . draw_into_bitmap ( export_path , stroke_thickness , margin = 2 ) else : symbol . draw_onto_canvas ( export_path , stroke_thickness , 0 , canvas_width , canvas_height , staff_line_spacing , staff_line_vertical_offsets , bounding_boxes , random_position_on_canvas ) progress_bar . update ( 1 * staff_line_multiplier ) progress_bar . close ( ) return bounding_boxes
Creates a visual representation of the HOMUS dataset by parsing all text files and rendering the symbols as specified by the parameters, drawing lines that connect the points of each stroke of each symbol.
55,979
def extract_and_render_all_symbol_masks ( self , raw_data_directory : str , destination_directory : str ) : print ( "Extracting Symbols from Muscima++ Dataset..." ) xml_files = self . get_all_xml_file_paths ( raw_data_directory ) crop_objects = self . load_crop_objects_from_xml_files ( xml_files ) self . render_masks_of_crop_objects_into_image ( crop_objects , destination_directory )
Extracts all symbols from the raw XML documents and generates individual symbol images from the masks.
55,980
def invert_images ( self , image_directory : str , image_file_ending : str = "*.bmp" ) : image_paths = [ y for x in os . walk ( image_directory ) for y in glob ( os . path . join ( x [ 0 ] , image_file_ending ) ) ] for image_path in tqdm ( image_paths , desc = "Inverting all images in directory {0}" . format ( image_directory ) ) : white_on_black_image = Image . open ( image_path ) . convert ( "L" ) black_on_white_image = ImageOps . invert ( white_on_black_image ) black_on_white_image . save ( os . path . splitext ( image_path ) [ 0 ] + ".png" )
In-situ converts the white-on-black images of a directory to black-on-white images.
55,981
def create_capitan_images ( self , raw_data_directory : str , destination_directory : str , stroke_thicknesses : List [ int ] ) -> None : symbols = self . load_capitan_symbols ( raw_data_directory ) self . draw_capitan_stroke_images ( symbols , destination_directory , stroke_thicknesses ) self . draw_capitan_score_images ( symbols , destination_directory )
Creates a visual representation of the Capitan strokes by parsing all text files and rendering the symbols as specified by the parameters, drawing lines that connect the points of each stroke of each symbol.
55,982
def draw_capitan_stroke_images ( self , symbols : List [ CapitanSymbol ] , destination_directory : str , stroke_thicknesses : List [ int ] ) -> None : total_number_of_symbols = len ( symbols ) * len ( stroke_thicknesses ) output = "Generating {0} images with {1} symbols in {2} different stroke thicknesses ({3})" . format ( total_number_of_symbols , len ( symbols ) , len ( stroke_thicknesses ) , stroke_thicknesses ) print ( output ) print ( "In directory {0}" . format ( os . path . abspath ( destination_directory ) ) , flush = True ) progress_bar = tqdm ( total = total_number_of_symbols , mininterval = 0.25 , desc = "Rendering strokes" ) capitan_file_name_counter = 0 for symbol in symbols : capitan_file_name_counter += 1 target_directory = os . path . join ( destination_directory , symbol . symbol_class ) os . makedirs ( target_directory , exist_ok = True ) raw_file_name_without_extension = "capitan-{0}-{1}-stroke" . format ( symbol . symbol_class , capitan_file_name_counter ) for stroke_thickness in stroke_thicknesses : export_path = ExportPath ( destination_directory , symbol . symbol_class , raw_file_name_without_extension , 'png' , stroke_thickness ) symbol . draw_capitan_stroke_onto_canvas ( export_path , stroke_thickness , 0 ) progress_bar . update ( 1 ) progress_bar . close ( )
Creates a visual representation of the Capitan strokes by drawing lines that connect the points from each stroke of each symbol .
55,983
def overlap ( r1 : 'Rectangle' , r2 : 'Rectangle' ) : h_overlaps = ( r1 . left <= r2 . right ) and ( r1 . right >= r2 . left ) v_overlaps = ( r1 . bottom >= r2 . top ) and ( r1 . top <= r2 . bottom ) return h_overlaps and v_overlaps
Overlapping rectangles overlap both horizontally & vertically
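A minimal, self-contained sketch of the axis-aligned overlap test used above. The Rect here is a stand-in with explicit left/top/right/bottom edges (image coordinates, so top is numerically smaller than bottom); the dataset's own Rectangle class is built from an origin point plus width and height, so the attribute names are assumptions.

from collections import namedtuple

Rect = namedtuple("Rect", ["left", "top", "right", "bottom"])

def rects_overlap(r1: Rect, r2: Rect) -> bool:
    # Two rectangles overlap only if their projections overlap on both axes
    h_overlaps = r1.left <= r2.right and r1.right >= r2.left
    v_overlaps = r1.top <= r2.bottom and r1.bottom >= r2.top
    return h_overlaps and v_overlaps

print(rects_overlap(Rect(0, 0, 10, 10), Rect(5, 5, 15, 15)))   # True - the corners overlap
print(rects_overlap(Rect(0, 0, 10, 10), Rect(20, 0, 30, 10)))  # False - horizontally disjoint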
55,984
def extract_symbols ( self , raw_data_directory : str , destination_directory : str ) : print ( "Extracting Symbols from Audiveris OMR Dataset..." ) all_xml_files = [ y for x in os . walk ( raw_data_directory ) for y in glob ( os . path . join ( x [ 0 ] , '*.xml' ) ) ] all_image_files = [ y for x in os . walk ( raw_data_directory ) for y in glob ( os . path . join ( x [ 0 ] , '*.png' ) ) ] data_pairs = [ ] for i in range ( len ( all_xml_files ) ) : data_pairs . append ( ( all_xml_files [ i ] , all_image_files [ i ] ) ) for data_pair in data_pairs : self . __extract_symbols ( data_pair [ 0 ] , data_pair [ 1 ] , destination_directory )
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into individual symbols
55,985
def initialize_from_string ( content : str ) -> 'HomusSymbol' : if content is None or content == "" : return None lines = content . splitlines ( ) min_x = sys . maxsize max_x = 0 min_y = sys . maxsize max_y = 0 symbol_name = lines [ 0 ] strokes = [ ] for stroke_string in lines [ 1 : ] : stroke = [ ] for point_string in stroke_string . split ( ";" ) : if point_string == "" : continue point_x , point_y = point_string . split ( "," ) x = int ( point_x ) y = int ( point_y ) stroke . append ( Point2D ( x , y ) ) max_x = max ( max_x , x ) min_x = min ( min_x , x ) max_y = max ( max_y , y ) min_y = min ( min_y , y ) strokes . append ( stroke ) dimensions = Rectangle ( Point2D ( min_x , min_y ) , max_x - min_x + 1 , max_y - min_y + 1 ) return HomusSymbol ( content , strokes , symbol_name , dimensions )
Create and initializes a new symbol from a string
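A self-contained sketch of parsing a HOMUS-style symbol string as done above: the first line is the class name, and every following line is one stroke given as ";"-separated "x,y" points. The sample string and its class name are made up for illustration; the bounding box is the min/max over all points, mirroring the function above.

# Made-up sample in the HOMUS text format: class name, then one line per stroke.
sample = "Quarter-Note\n10,5;11,6;12,8\n30,5;30,40"

lines = sample.splitlines()
symbol_class = lines[0]
strokes = []
xs, ys = [], []
for stroke_line in lines[1:]:
    stroke = []
    for point in stroke_line.split(";"):
        if point == "":
            continue
        x, y = (int(v) for v in point.split(","))
        stroke.append((x, y))
        xs.append(x)
        ys.append(y)
    strokes.append(stroke)

# Bounding box: origin plus inclusive width/height, as in the function above
width = max(xs) - min(xs) + 1
height = max(ys) - min(ys) + 1
print(symbol_class, strokes, (min(xs), min(ys), width, height))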
55,986
def draw_into_bitmap ( self , export_path : ExportPath , stroke_thickness : int , margin : int = 0 ) -> None : self . draw_onto_canvas ( export_path , stroke_thickness , margin , self . dimensions . width + 2 * margin , self . dimensions . height + 2 * margin )
Draws the symbol at its original size plus an optional margin
55,987
def draw_onto_canvas ( self , export_path : ExportPath , stroke_thickness : int , margin : int , destination_width : int , destination_height : int , staff_line_spacing : int = 14 , staff_line_vertical_offsets : List [ int ] = None , bounding_boxes : dict = None , random_position_on_canvas : bool = False ) -> None : width = self . dimensions . width + 2 * margin height = self . dimensions . height + 2 * margin if random_position_on_canvas : random_horizontal_offset = random . randint ( 0 , max ( 0 , destination_width - width ) ) random_vertical_offset = random . randint ( 0 , max ( 0 , destination_height - height ) ) offset = Point2D ( self . dimensions . origin . x - margin - random_horizontal_offset , self . dimensions . origin . y - margin - random_vertical_offset ) else : width_offset_for_centering = ( destination_width - width ) / 2 height_offset_for_centering = ( destination_height - height ) / 2 offset = Point2D ( self . dimensions . origin . x - margin - width_offset_for_centering , self . dimensions . origin . y - margin - height_offset_for_centering ) image_without_staff_lines = Image . new ( 'RGB' , ( destination_width , destination_height ) , "white" ) draw = ImageDraw . Draw ( image_without_staff_lines ) black = ( 0 , 0 , 0 ) for stroke in self . strokes : for i in range ( 0 , len ( stroke ) - 1 ) : start_point = self . __subtract_offset ( stroke [ i ] , offset ) end_point = self . __subtract_offset ( stroke [ i + 1 ] , offset ) draw . line ( ( start_point . x , start_point . y , end_point . x , end_point . y ) , black , stroke_thickness ) location = self . __subtract_offset ( self . dimensions . origin , offset ) bounding_box_in_image = Rectangle ( location , self . dimensions . width , self . dimensions . height ) del draw if staff_line_vertical_offsets is not None and staff_line_vertical_offsets : for staff_line_vertical_offset in staff_line_vertical_offsets : image_with_staff_lines = image_without_staff_lines . copy ( ) self . __draw_staff_lines_into_image ( image_with_staff_lines , stroke_thickness , staff_line_spacing , staff_line_vertical_offset ) file_name_with_offset = export_path . get_full_path ( staff_line_vertical_offset ) image_with_staff_lines . save ( file_name_with_offset ) image_with_staff_lines . close ( ) if bounding_boxes is not None : class_and_file_name = export_path . get_class_name_and_file_path ( staff_line_vertical_offset ) bounding_boxes [ class_and_file_name ] = bounding_box_in_image else : image_without_staff_lines . save ( export_path . get_full_path ( ) ) if bounding_boxes is not None : class_and_file_name = export_path . get_class_name_and_file_path ( ) bounding_boxes [ class_and_file_name ] = bounding_box_in_image image_without_staff_lines . close ( )
Draws the symbol onto a canvas with a fixed size
55,988
def update_locals ( locals_instance , instance_iterator , * args , ** kwargs ) : for instance in instance_iterator ( ) : locals_instance . update ( { type ( instance ) . __name__ : instance . __class__ } )
Imports all of the detector classes into the local namespace , making it easy to do things like import scrubadub . detectors . NameDetector without having to add each new Detector or Filth manually
55,989
def iter_filth_clss ( ) : return iter_subclasses ( os . path . dirname ( os . path . abspath ( __file__ ) ) , Filth , _is_abstract_filth , )
Iterate over all of the filths that are included in this sub - package . This is a convenience method for capturing all new Filth that are added over time .
55,990
def iter_filths ( ) : for filth_cls in iter_filth_clss ( ) : if issubclass ( filth_cls , RegexFilth ) : m = next ( re . finditer ( r"\s+" , "fake pattern string" ) ) yield filth_cls ( m ) else : yield filth_cls ( )
Iterate over all instances of filth
55,991
def _update_content ( self , other_filth ) : if self . end < other_filth . beg or other_filth . end < self . beg : raise exceptions . FilthMergeError ( "a_filth goes from [%s, %s) and b_filth goes from [%s, %s)" % ( self . beg , self . end , other_filth . beg , other_filth . end ) ) if self . beg < other_filth . beg : first = self second = other_filth else : second = self first = other_filth end_offset = second . end - first . end if end_offset > 0 : self . text = first . text + second . text [ - end_offset : ] self . beg = min ( self . beg , other_filth . beg ) self . end = max ( self . end , other_filth . end ) if self . end - self . beg != len ( self . text ) : raise exceptions . FilthMergeError ( "text length isn't consistent" ) self . filths . append ( other_filth ) self . _placeholder = '+' . join ( [ filth . type for filth in self . filths ] )
this updates the bounds , text and placeholder for the merged filth
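A self-contained sketch of the span-merging idea above: two overlapping [beg, end) matches over the same text are combined into one span whose text is stitched from the earlier match plus the non-overlapping tail of the later one. The example text and the two "matches" are hypothetical stand-ins for what detectors would find.

text = "call me at 555-123-4567 ext. 12"
a = {"beg": 11, "end": 23, "text": text[11:23]}   # hypothetical phone-number match
b = {"beg": 19, "end": 31, "text": text[19:31]}   # hypothetical overlapping match

# Order the two spans, then append only the part of the later span that extends past the earlier one
first, second = (a, b) if a["beg"] <= b["beg"] else (b, a)
tail = second["end"] - first["end"]
merged_text = first["text"] + (second["text"][-tail:] if tail > 0 else "")
merged = {"beg": min(a["beg"], b["beg"]), "end": max(a["end"], b["end"]), "text": merged_text}

# Same consistency check as in the method above: the merged text must span the merged bounds
assert merged["end"] - merged["beg"] == len(merged["text"])
print(merged)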
55,992
def add_detector ( self , detector_cls ) : if not issubclass ( detector_cls , detectors . base . Detector ) : raise TypeError ( ( '"%(detector_cls)s" is not a subclass of Detector' ) % locals ( ) ) name = detector_cls . filth_cls . type if name in self . _detectors : raise KeyError ( ( 'can not add Detector "%(name)s"---it already exists. ' 'Try removing it first.' ) % locals ( ) ) self . _detectors [ name ] = detector_cls ( )
Add a Detector to scrubadub
55,993
def clean ( self , text , ** kwargs ) : if sys . version_info < ( 3 , 0 ) : if not isinstance ( text , unicode ) : raise exceptions . UnicodeRequired clean_chunks = [ ] filth = Filth ( ) for next_filth in self . iter_filth ( text ) : clean_chunks . append ( text [ filth . end : next_filth . beg ] ) clean_chunks . append ( next_filth . replace_with ( ** kwargs ) ) filth = next_filth clean_chunks . append ( text [ filth . end : ] ) return u'' . join ( clean_chunks )
This is the master method that cleans all of the filth out of the dirty dirty text . All keyword arguments to this function are passed through to the Filth . replace_with method to fine - tune how the Filth is cleaned .
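A self-contained sketch of the chunk-stitching loop that clean() uses above: walk the non-overlapping matches in order, copy the clean text between them, and substitute a placeholder for each match. The matches are hard-coded stand-ins for what the detectors would normally produce.

text = "email john@example.com or call 555-123-4567"
matches = [(6, 22, "{{EMAIL}}"), (31, 43, "{{PHONE}}")]   # (beg, end, replacement), sorted by beg

chunks = []
previous_end = 0
for beg, end, replacement in matches:
    chunks.append(text[previous_end:beg])   # clean text between the previous match and this one
    chunks.append(replacement)              # placeholder standing in for Filth.replace_with()
    previous_end = end
chunks.append(text[previous_end:])          # trailing clean text after the last match
print("".join(chunks))                      # email {{EMAIL}} or call {{PHONE}}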
55,994
def iter_filth ( self , text ) : all_filths = [ ] for detector in self . _detectors . values ( ) : for filth in detector . iter_filth ( text ) : if not isinstance ( filth , Filth ) : raise TypeError ( 'iter_filth must always yield Filth' ) all_filths . append ( filth ) all_filths . sort ( key = lambda f : ( f . beg , - f . end ) ) if not all_filths : return filth = all_filths [ 0 ] for next_filth in all_filths [ 1 : ] : if filth . end < next_filth . beg : yield filth filth = next_filth else : filth = filth . merge ( next_filth ) yield filth
Iterate over the different types of filth that can exist .
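A self-contained sketch of the ordering trick above: spans are sorted by (beg, -end) so that when two matches start at the same position the longer one comes first, and overlapping spans are then folded together left to right. Plain (beg, end) tuples stand in for Filth objects here.

spans = [(10, 14), (10, 20), (18, 25), (40, 44)]
spans.sort(key=lambda s: (s[0], -s[1]))            # [(10, 20), (10, 14), (18, 25), (40, 44)]

merged = []
current = spans[0]
for nxt in spans[1:]:
    if current[1] < nxt[0]:                        # disjoint: emit the current span and move on
        merged.append(current)
        current = nxt
    else:                                          # overlapping: widen the current span instead
        current = (current[0], max(current[1], nxt[1]))
merged.append(current)
print(merged)                                      # [(10, 25), (40, 44)]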
55,995
async def download_file ( self , Bucket , Key , Filename , ExtraArgs = None , Callback = None , Config = None ) : with open ( Filename , 'wb' ) as open_file : await download_fileobj ( self , Bucket , Key , open_file , ExtraArgs = ExtraArgs , Callback = Callback , Config = Config )
Download an S3 object to a file .
55,996
async def download_fileobj ( self , Bucket , Key , Fileobj , ExtraArgs = None , Callback = None , Config = None ) : try : resp = await self . get_object ( Bucket = Bucket , Key = Key ) except ClientError as err : if err . response [ 'Error' ] [ 'Code' ] == 'NoSuchKey' : raise ClientError ( { 'Error' : { 'Code' : '404' , 'Message' : 'Not Found' } } , 'HeadObject' ) raise body = resp [ 'Body' ] while True : data = await body . read ( 4096 ) if data == b'' : break if Callback : try : Callback ( len ( data ) ) except : pass Fileobj . write ( data ) await asyncio . sleep ( 0.0 )
Download an object from S3 to a file - like object .
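A self-contained sketch of the chunked streaming loop above, with a stub async reader standing in for the S3 response body: read fixed-size chunks until an empty read signals EOF, report progress through an optional callback, and yield control back to the event loop between chunks. The stub class and function names are invented for this example.

import asyncio
import io

class StubBody:
    """Pretends to be a streaming S3 body by serving slices of an in-memory buffer."""
    def __init__(self, payload: bytes):
        self._buffer = io.BytesIO(payload)

    async def read(self, size: int) -> bytes:
        return self._buffer.read(size)

async def stream_to_fileobj(body, fileobj, callback=None, chunk_size: int = 4096):
    while True:
        data = await body.read(chunk_size)
        if data == b"":
            break
        if callback:
            callback(len(data))
        fileobj.write(data)
        await asyncio.sleep(0.0)   # let other tasks run between chunks, as in the method above

async def main():
    destination = io.BytesIO()
    await stream_to_fileobj(StubBody(b"x" * 10000), destination, callback=print)
    print(destination.tell(), "bytes written")

asyncio.run(main())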
55,997
async def upload_fileobj ( self , Fileobj : BinaryIO , Bucket : str , Key : str , ExtraArgs : Optional [ Dict [ str , Any ] ] = None , Callback : Optional [ Callable [ [ int ] , None ] ] = None , Config : Optional [ S3TransferConfig ] = None ) : if not ExtraArgs : ExtraArgs = { } multipart_chunksize = 8388608 if Config is None else Config . multipart_chunksize io_chunksize = 262144 if Config is None else Config . io_chunksize resp = await self . create_multipart_upload ( Bucket = Bucket , Key = Key , ** ExtraArgs ) upload_id = resp [ 'UploadId' ] part = 0 parts = [ ] running = True sent_bytes = 0 try : while running : part += 1 multipart_payload = b'' while len ( multipart_payload ) < multipart_chunksize : if asyncio . iscoroutinefunction ( Fileobj . read ) : data = await Fileobj . read ( io_chunksize ) else : data = Fileobj . read ( io_chunksize ) if data == b'' : running = False break multipart_payload += data resp = await self . upload_part ( Body = multipart_payload , Bucket = Bucket , Key = Key , PartNumber = part , UploadId = upload_id ) parts . append ( { 'ETag' : resp [ 'ETag' ] , 'PartNumber' : part } ) sent_bytes += len ( multipart_payload ) try : Callback ( sent_bytes ) except : pass await self . complete_multipart_upload ( Bucket = Bucket , Key = Key , UploadId = upload_id , MultipartUpload = { 'Parts' : parts } ) except : await self . abort_multipart_upload ( Bucket = Bucket , Key = Key , UploadId = upload_id ) raise
Upload a file - like object to S3 .
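A self-contained sketch of the part-building loop above: small reads of io_chunksize bytes are accumulated until a part reaches multipart_chunksize, each full part is handed to a stubbed upload step, and the loop stops on an empty read. Real uploads would go through create_multipart_upload, upload_part and complete_multipart_upload; here a list of (part number, size) tuples stands in for those calls.

import io

def build_parts(fileobj, multipart_chunksize: int = 1000, io_chunksize: int = 300):
    parts = []
    part_number = 0
    running = True
    while running:
        part_number += 1
        payload = b""
        # Accumulate small reads until the part is large enough or the source is exhausted
        while len(payload) < multipart_chunksize:
            data = fileobj.read(io_chunksize)
            if data == b"":
                running = False
                break
            payload += data
        if payload:
            parts.append((part_number, len(payload)))   # stand-in for upload_part(...)
    return parts

print(build_parts(io.BytesIO(b"x" * 2500)))   # [(1, 1200), (2, 1200), (3, 100)]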
55,998
async def upload_file ( self , Filename , Bucket , Key , ExtraArgs = None , Callback = None , Config = None ) : with open ( Filename , 'rb' ) as open_file : await upload_fileobj ( self , open_file , Bucket , Key , ExtraArgs = ExtraArgs , Callback = Callback , Config = Config )
Upload a file to an S3 object .
55,999
def _create_action ( factory_self , action_model , resource_name , service_context , is_load = False ) : action = AIOServiceAction ( action_model , factory = factory_self , service_context = service_context ) if is_load : async def do_action ( self , * args , ** kwargs ) : response = await action . async_call ( self , * args , ** kwargs ) self . meta . data = response lazy_docstring = docstring . LoadReloadDocstring ( action_name = action_model . name , resource_name = resource_name , event_emitter = factory_self . _emitter , load_model = action_model , service_model = service_context . service_model , include_signature = False ) else : async def do_action ( self , * args , ** kwargs ) : response = await action . async_call ( self , * args , ** kwargs ) if hasattr ( self , 'load' ) : self . meta . data = None return response lazy_docstring = docstring . ActionDocstring ( resource_name = resource_name , event_emitter = factory_self . _emitter , action_model = action_model , service_model = service_context . service_model , include_signature = False ) do_action . __name__ = str ( action_model . name ) do_action . __doc__ = lazy_docstring return do_action
Creates a new method which makes a request to the underlying AWS service .