idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
59,300
def training_set_multiplication(training_set, mult_queue):
    """Multiply the training set by all methods listed in mult_queue.

    Parameters
    ----------
    training_set : list of dicts, each with 'id', 'is_in_testset',
        'formula_id', 'handwriting' and 'formula_in_latex'
    mult_queue : list of data-multiplication algorithms; each maps one
        handwriting recording to a list of (possibly perturbed) samples

    Returns
    -------
    list : the multiplied training set
    """
    logging.info("Multiply data...")
    for algorithm in mult_queue:
        new_training_set = []
        for recording in training_set:
            samples = algorithm(recording['handwriting'])
            for sample in samples:
                new_training_set.append(
                    {'id': recording['id'],
                     'is_in_testset': 0,
                     'formula_id': recording['formula_id'],
                     'handwriting': sample,
                     'formula_in_latex': recording['formula_in_latex']})
        training_set = new_training_set
    # Bugfix: return the accumulated training set. The original returned the
    # loop-local variable, which is undefined when mult_queue is empty.
    return training_set
Multiply the training set by all methods listed in mult_queue .
59,301
def _calculate_feature_stats ( feature_list , prepared , serialization_file ) : feats = [ x for x , _ in prepared ] means = numpy . mean ( feats , 0 ) mins = numpy . min ( feats , 0 ) maxs = numpy . max ( feats , 0 ) start = 0 mode = 'w' arguments = { 'newline' : '' } if sys . version_info . major < 3 : mode += 'b' arguments = { } with open ( serialization_file , mode , ** arguments ) as csvfile : spamwriter = csv . writer ( csvfile , delimiter = str ( ';' ) , quotechar = str ( '"' ) , quoting = csv . QUOTE_MINIMAL ) for feature in feature_list : end = start + feature . get_dimension ( ) feature . mean = numpy . array ( means [ start : end ] ) feature . min = numpy . array ( mins [ start : end ] ) feature . max = numpy . array ( maxs [ start : end ] ) start = end for mean , fmax , fmin in zip ( feature . mean , feature . max , feature . min ) : spamwriter . writerow ( [ mean , fmax - fmin ] )
Calculate min max and mean for each feature . Store it in object .
59,302
def make_hdf5(dataset_name, feature_count, data, output_filename,
              create_learning_curve):
    """Create the hdf5 file.

    Parameters
    ----------
    dataset_name : str - "traindata", "testdata" or similar
    feature_count : int - number of features per sample
    data : list of (feature_string, label) tuples
    output_filename : str
    create_learning_curve : bool - if True (and this is training data),
        write a series of files with a growing per-symbol example cap
    """
    if dataset_name == "traindata" and create_learning_curve:
        max_trainingexamples = 501
        output_filename_save = output_filename
        steps = 10
        for trainingexamples in range(100, max_trainingexamples, steps):
            # Encode the example cap in the filename, e.g. x-100-examples.hdf5
            tmp = output_filename_save.split(".")
            tmp[-2] += "-%i-examples" % trainingexamples
            output_filename = ".".join(map(str, tmp))
            # Keep at most `trainingexamples` examples per symbol.
            seen_symbols = defaultdict(int)
            # Bugfix: accumulate the kept samples in a list. The original
            # re-bound new_data to a single (feature_string, label) tuple and
            # wrote the file once per sample inside the loop.
            new_data = []
            for feature_string, label in data:
                if seen_symbols[label] < trainingexamples:
                    seen_symbols[label] += 1
                    new_data.append((feature_string, label))
            utils.create_hdf5(output_filename, feature_count, new_data)
    else:
        utils.create_hdf5(output_filename, feature_count, data)
Create the hdf5 file .
59,303
def get_dataset():
    """Create a dataset for machine learning of segmentations.

    Returns
    -------
    tuple (X, y, datasets) where X holds feature vectors for stroke pairs,
    y holds 1/0 labels ("same symbol?") and datasets is the raw data
    """
    seg_data = "segmentation-X.npy"
    seg_labels = "segmentation-y.npy"
    # Use the cached dataset if it was built before.
    if os.path.isfile(seg_data) and os.path.isfile(seg_labels):
        X = numpy.load(seg_data)
        y = numpy.load(seg_labels)
        with open('datasets.pickle', 'rb') as f:
            datasets = pickle.load(f)
        return (X, y, datasets)

    datasets = get_segmented_raw_data()
    X, y = [], []
    for i, data in enumerate(datasets):
        if i % 10 == 0:
            logging.info("[Create Dataset] i=%i/%i", i, len(datasets))
        segmentation = json.loads(data['segmentation'])
        recording = json.loads(data['data'])
        X_symbol = [get_median_stroke_distance(recording)]
        # Skip recordings with missing time information.
        if any(p['time'] is None for s in recording for p in s):
            continue
        # One training sample per unordered pair of strokes.
        combis = itertools.combinations(list(range(len(recording))), 2)
        for strokeid1, strokeid2 in combis:
            stroke1 = recording[strokeid1]
            stroke2 = recording[strokeid2]
            if len(stroke1) == 0 or len(stroke2) == 0:
                logging.debug("stroke len 0. Skip.")
                continue
            X.append(get_stroke_features(recording, strokeid1, strokeid2) +
                     X_symbol)
            same_symbol = (_get_symbol_index(strokeid1, segmentation) ==
                           _get_symbol_index(strokeid2, segmentation))
            y.append(int(same_symbol))
    X = numpy.array(X, dtype=numpy.float32)
    y = numpy.array(y, dtype=numpy.int32)
    # Cache for the next call.
    numpy.save(seg_data, X)
    numpy.save(seg_labels, y)
    datasets = filter_recordings(datasets)
    with open('datasets.pickle', 'wb') as f:
        pickle.dump(datasets, f, protocol=pickle.HIGHEST_PROTOCOL)
    return (X, y, datasets)
Create a dataset for machine learning of segmentations .
59,304
def get_segmented_raw_data(top_n=10000):
    """Fetch data from the server.

    Parameters
    ----------
    top_n : int - number of recordings to fetch

    Returns
    -------
    list of dicts with 'id', 'data' and 'segmentation'
    """
    cfg = utils.get_database_configuration()
    mysql = cfg['mysql_online']
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    sql = ("SELECT `id`, `data`, `segmentation` "
           "FROM `wm_raw_draw_data` WHERE "
           "(`segmentation` IS NOT NULL OR `accepted_formula_id` IS NOT NULL) "
           "AND `wild_point_count` = 0 "
           "AND `stroke_segmentable` = 1 "
           "ORDER BY `id` LIMIT 0, %i") % top_n
    logging.info(sql)
    cursor.execute(sql)
    datasets = cursor.fetchall()
    logging.info("Fetched %i recordings. Add missing segmentations.",
                 len(datasets))
    # Recordings without an explicit segmentation were accepted as a single
    # formula: assume all strokes belong to one symbol.
    for i in range(len(datasets)):
        if datasets[i]['segmentation'] is None:
            stroke_count = len(json.loads(datasets[i]['data']))
            if stroke_count > 10:
                print("Massive stroke count! %i" % stroke_count)
            datasets[i]['segmentation'] = \
                str([[s for s in range(stroke_count)]])
    return datasets
Fetch data from the server .
59,305
def get_stroke_features(recording, strokeid1, strokeid2):
    """Get the features used to decide if two strokes belong to the same
    symbol or not.

    Parameters
    ----------
    recording : list of strokes
    strokeid1 : int - index of the first stroke
    strokeid2 : int - index of the second stroke

    Returns
    -------
    list of numbers - the feature vector for the stroke pair
    """
    stroke1 = recording[strokeid1]
    stroke2 = recording[strokeid2]
    assert isinstance(stroke1, list), "stroke1 is a %s" % type(stroke1)
    X_i = []
    # Per-stroke features for both strokes of the pair.
    for single_stroke in [stroke1, stroke2]:
        hw = HandwrittenData(json.dumps([single_stroke]))
        feat1 = features.ConstantPointCoordinates(strokes=1,
                                                 points_per_stroke=20,
                                                 fill_empty_with=0)
        feat2 = features.ReCurvature(strokes=1)
        feat3 = features.Ink()
        X_i += hw.feature_extraction([feat1, feat2, feat3])
    # Pairwise features: spatial distance, temporal distance, index gap.
    X_i += [get_strokes_distance(stroke1, stroke2)]
    X_i += [get_time_distance(stroke1, stroke2)]
    X_i += [abs(strokeid2 - strokeid1)]
    return X_i
Get the features used to decide if two strokes belong to the same symbol or not .
59,306
def get_segmentation(recording, single_clf, single_stroke_clf,
                     stroke_segmented_classifier):
    """Get a list of segmentations of recording with the probability of the
    segmentation being correct.

    Parameters
    ----------
    recording : list of lists of points (strokes)
    single_clf : classifier for single symbols
    single_stroke_clf : classifier which decides if a stroke is a complete
        symbol (currently unused below the early return)
    stroke_segmented_classifier : classifier which decides if two strokes
        belong to one symbol

    Returns
    -------
    list of tuples (normalized segmentation, probability)
    """
    mst_wood = get_mst_wood(recording, single_clf)
    # NOTE(review): this early return short-circuits the beam search below
    # and returns only the MST-based segmentation with probability 1.0. All
    # following code is unreachable — confirm whether this is a deliberate
    # override or a debugging leftover.
    return [(normalize_segmentation([mst['strokes'] for mst in mst_wood]),
             1.0)]

    X_symbol = [get_median_stroke_distance(recording)]
    g_top_segmentations = [([], 1.0)]  # (segmentation, probability)
    for chunk_part in mst_wood:
        chunk = [recording[stroke] for stroke in chunk_part['strokes']]
        # Pairwise "same symbol" probabilities within the chunk.
        prob = [[1.0 for _ in chunk] for _ in chunk]
        for strokeid1, strokeid2 in itertools.product(range(len(chunk)),
                                                      range(len(chunk))):
            if strokeid1 == strokeid2:
                continue
            X = get_stroke_features(chunk, strokeid1, strokeid2)
            X += X_symbol
            X = numpy.array([X], dtype=numpy.float32)
            prob[strokeid1][strokeid2] = stroke_segmented_classifier(X)

        ts = list(partitions.get_top_segmentations(prob, 500))
        # Bugfix: the original reused the loop variable 'i' for both this
        # loop and the symbol loop below, so ts was indexed with the inner
        # counter when updating the segmentation score.
        for ts_i, segmentation in enumerate(ts):
            symbols = apply_segmentation(chunk, segmentation)
            min_top2 = partitions.TopFinder(1, find_min=True)
            for sym_i, symbol in enumerate(symbols):
                predictions = single_clf.predict(symbol)
                min_top2.push("value-%i" % sym_i,
                              predictions[0]['probability'] +
                              predictions[1]['probability'])
            ts[ts_i][1] *= list(min_top2)[0][1]
        g_top_segmentations = merge_segmentations(g_top_segmentations, ts,
                                                  chunk_part['strokes'])
    return [(normalize_segmentation(seg), probability)
            for seg, probability in g_top_segmentations]
Get a list of segmentations of recording with the probability of the segmentation being correct .
59,307
def break_mst ( mst , i ) : for j in range ( len ( mst [ 'mst' ] ) ) : mst [ 'mst' ] [ i ] [ j ] = 0 mst [ 'mst' ] [ j ] [ i ] = 0 _ , components = scipy . sparse . csgraph . connected_components ( mst [ 'mst' ] ) comp_indices = { } for el in set ( components ) : comp_indices [ el ] = { 'strokes' : [ ] , 'strokes_i' : [ ] } for i , comp_nr in enumerate ( components ) : comp_indices [ comp_nr ] [ 'strokes' ] . append ( mst [ 'strokes' ] [ i ] ) comp_indices [ comp_nr ] [ 'strokes_i' ] . append ( i ) mst_wood = [ ] for key in comp_indices : matrix = [ ] for i , line in enumerate ( mst [ 'mst' ] ) : line_add = [ ] if i not in comp_indices [ key ] [ 'strokes_i' ] : continue for j , el in enumerate ( line ) : if j in comp_indices [ key ] [ 'strokes_i' ] : line_add . append ( el ) matrix . append ( line_add ) assert len ( matrix ) > 0 , ( "len(matrix) == 0 (strokes: %s, mst=%s, i=%i)" % ( comp_indices [ key ] [ 'strokes' ] , mst , i ) ) assert len ( matrix ) == len ( matrix [ 0 ] ) , ( "matrix was %i x %i, but should be square" % ( len ( matrix ) , len ( matrix [ 0 ] ) ) ) assert len ( matrix ) == len ( comp_indices [ key ] [ 'strokes' ] ) , ( ( "stroke length was not equal to matrix length " "(strokes=%s, len(matrix)=%i)" ) % ( comp_indices [ key ] [ 'strokes' ] , len ( matrix ) ) ) mst_wood . append ( { 'mst' : matrix , 'strokes' : comp_indices [ key ] [ 'strokes' ] } ) return mst_wood
Break mst into multiple MSTs by removing one node i .
59,308
def _is_out_of_order ( segmentation ) : last_stroke = - 1 for symbol in segmentation : for stroke in symbol : if last_stroke > stroke : return True last_stroke = stroke return False
Check if a given segmentation is out of order .
59,309
def get_bb_intersections(recording):
    """Get all intersections of the bounding boxes of strokes.

    Parameters
    ----------
    recording : list of strokes

    Returns
    -------
    numpy bool array where intersections[i][j] is True iff the (slightly
    grown) bounding boxes of strokes i and j intersect
    """
    stroke_count = len(recording)
    intersections = numpy.zeros((stroke_count, stroke_count), dtype=bool)
    for i in range(stroke_count - 1):
        box_i = geometry.get_bounding_box(recording[i]).grow(0.2)
        for j in range(i + 1, stroke_count):
            box_j = geometry.get_bounding_box(recording[j]).grow(0.2)
            # The relation is symmetric, so fill both triangles at once.
            intersections[i][j] = geometry.do_bb_intersect(box_i, box_j)
            intersections[j][i] = intersections[i][j]
    return intersections
Get all intersections of the bounding boxes of strokes .
59,310
def p_strokes(symbol, count):
    """Get the probability of a written symbol having count strokes.

    Parameters
    ----------
    symbol : str - LaTeX command of the symbol
    count : int - stroke count, must be >= 1

    Returns
    -------
    float - probability; a small epsilon for unseen (symbol, count) pairs
    """
    global stroke_prob
    assert count >= 1
    epsilon = 0.00000001
    # Lazily load the stroke-count statistics shipped with hwrt.
    if stroke_prob is None:
        misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
        stroke_prob_file = os.path.join(misc_path,
                                        'prob_stroke_count_by_symbol.yml')
        with open(stroke_prob_file, 'r') as stream:
            stroke_prob = yaml.load(stream)
    if symbol in stroke_prob and count in stroke_prob[symbol]:
        return stroke_prob[symbol][count]
    return epsilon
Get the probability of a written symbol having count strokes .
59,311
def _add_hypotheses_assuming_new_stroke(self, new_stroke, stroke_nr,
                                        new_beam):
    """Get new guesses by assuming new_stroke is a new symbol.

    Parameters
    ----------
    new_stroke : dict - point data of the added stroke
    stroke_nr : int - index of the new stroke in the recording
    new_beam : Beam - beam object which collects the new hypotheses
    """
    guesses = single_clf.predict({'data': [new_stroke], 'id': None})[:self.m]
    for hyp in self.hypotheses:
        new_geometry = deepcopy(hyp['geometry'])
        most_right = new_geometry
        # NOTE(review): this condition looks inverted — when there are no
        # symbols yet, the geometry is empty and the while loop is a no-op;
        # when symbols exist, the new node is never attached to
        # new_geometry. Confirm the intended branch. Behavior kept as-is.
        if len(hyp['symbols']) == 0:
            while 'right' in most_right:
                most_right = most_right['right']
            most_right['right'] = {'symbol_index': len(hyp['symbols']),
                                   'right': None}
        else:
            most_right = {'symbol_index': len(hyp['symbols']),
                          'right': None}
        for guess in guesses:
            sym = {'symbol': guess['semantics'],
                   'probability': guess['probability']}
            new_seg = deepcopy(hyp['segmentation'])
            new_seg.append([stroke_nr])
            new_sym = deepcopy(hyp['symbols'])
            new_sym.append(sym)
            new_beam.hypotheses.append({'segmentation': new_seg,
                                        'symbols': new_sym,
                                        'geometry': new_geometry,
                                        'probability': None})
Get new guesses by assuming new_stroke is a new symbol .
59,312
def add_stroke(self, new_stroke):
    """Update the beam so that it considers new_stroke.

    Every existing hypothesis is extended either by appending the stroke to
    one of its last n symbols or by starting a new symbol with it; the new
    hypotheses are then scored, pruned to the beam width and renormalized.

    Parameters
    ----------
    new_stroke : dict - point data of the stroke to add
    """
    global single_clf
    if len(self.hypotheses) == 0:  # first stroke ever seen by this beam
        self.hypotheses = [{'segmentation': [],
                            'symbols': [],
                            'geometry': {},
                            'probability': Decimal(1)}]
    stroke_nr = len(self.history['data'])
    new_history = deepcopy(self.history)
    new_history['data'].append(new_stroke)
    new_beam = Beam()
    new_beam.history = new_history

    evaluated_segmentations = []
    had_multisymbol = False
    for hyp in self.hypotheses:
        # Try merging the new stroke into each of the last n symbols.
        for i in range(min(self.n, len(hyp['segmentation']))):
            # Collect the strokes of that symbol plus the new stroke.
            new_strokes = {'data': [], 'id': -1}
            for stroke_index in hyp['segmentation'][-(i + 1)]:
                curr_stroke = self.history['data'][stroke_index]
                new_strokes['data'].append(curr_stroke)
            new_strokes['data'].append(new_stroke)

            new_seg = deepcopy(hyp['segmentation'])
            new_seg[-(i + 1)].append(stroke_nr)

            # Avoid evaluating the same segmentation twice.
            if new_seg in evaluated_segmentations:
                continue
            evaluated_segmentations.append(new_seg)

            guesses = single_clf.predict(new_strokes)[:self.m]
            for guess in guesses:
                if guess['semantics'].split(";")[1] == "::MULTISYMBOL::":
                    # The merged strokes look like several symbols; remember
                    # this so a "new symbol" hypothesis gets added below.
                    had_multisymbol = True
                    continue
                sym = {'symbol': guess['semantics'],
                       'probability': guess['probability']}
                new_sym = deepcopy(hyp['symbols'])
                new_sym[-(i + 1)] = sym
                new_beam.hypotheses.append(
                    {'segmentation': new_seg,
                     'symbols': new_sym,
                     'geometry': deepcopy(hyp['geometry']),
                     'probability': None})

    if len(self.hypotheses) <= 1 or had_multisymbol:
        self._add_hypotheses_assuming_new_stroke(new_stroke, stroke_nr,
                                                 new_beam)

    for hyp in new_beam.hypotheses:
        hyp['probability'] = _calc_hypothesis_probability(hyp)

    self.hypotheses = new_beam.hypotheses
    self.history = new_beam.history
    self._prune()

    # Renormalize so the hypothesis probabilities sum to 1.
    new_probs = softmax([h['probability'] for h in self.hypotheses])
    for hyp, prob in zip(self.hypotheses, new_probs):
        hyp['probability'] = prob
Update the beam so that it considers new_stroke .
59,313
def _prune ( self ) : self . hypotheses = sorted ( self . hypotheses , key = lambda e : e [ 'probability' ] , reverse = True ) [ : self . k ]
Shorten hypotheses to the best k ones .
59,314
def get_matrices():
    """Get the matrices from a pickled file.

    Returns
    -------
    list of dicts, each with a numpy 'storage' array and the layer 'name'
    """
    with open('hwrt/misc/is_one_symbol_classifier.pickle', 'rb') as f:
        classifier = pickle.load(f)
    arrays = []
    for input_el in classifier.input_storage:
        for value in input_el.__dict__['storage']:
            if isinstance(value, cuda.CudaNdarray):
                # Copy GPU-backed data into a regular numpy array.
                arrays.append({'storage': numpy.asarray(value),
                               'name': input_el.name})
            else:
                logging.warning("was type %s. Do nothing." % type(value))
                logging.debug(input_el.name)
    return arrays
Get the matrices from a pickled file.
59,315
def create_model_tar(matrices, tarname="model-cuda-converted.tar"):
    """Create a tar file which contains the model.

    Parameters
    ----------
    matrices : list of dicts with 'name' ('W' or 'b') and 'storage'
        (alternating weight and bias matrices, one pair per layer)
    tarname : str - name of the tar file that gets created
    """
    filenames = []
    for layer in range(len(matrices)):
        # Weights and biases alternate, so the network layer index is
        # layer // 2. Bugfix: the original used '/', which under Python 3
        # is float division and yields filenames like 'W0.5.hdf5'.
        layer_index = layer // 2
        if matrices[layer]['name'] == 'W':
            weights = matrices[layer]['storage']
            weights_file = h5py.File('W%i.hdf5' % layer_index, 'w')
            weights_file.create_dataset(weights_file.id.name, data=weights)
            weights_file.close()
            filenames.append('W%i.hdf5' % layer_index)
        elif matrices[layer]['name'] == 'b':
            b = matrices[layer]['storage']
            bfile = h5py.File('b%i.hdf5' % layer_index, 'w')
            bfile.create_dataset(bfile.id.name, data=b)
            bfile.close()
            filenames.append('b%i.hdf5' % layer_index)
    with tarfile.open(tarname, "w:") as tar:
        for name in filenames:
            tar.add(name)
    # Remove the temporary hdf5 files once they are inside the tar.
    for filename in filenames:
        os.remove(filename)
Create a tar file which contains the model .
59,316
def check_python_version():
    """Check if the currently running Python version is new enough."""
    req_version = (2, 7)
    cur_version = sys.version_info
    if cur_version >= req_version:
        found = str(platform.python_version())
        required = str(req_version[0]) + "." + str(req_version[1])
        print("Python version... %sOK%s (found %s, requires %s)" %
              (Bcolors.OKGREEN, Bcolors.ENDC, found, required))
    else:
        print("Python version... %sFAIL%s (found %s, requires %s)" %
              (Bcolors.FAIL, Bcolors.ENDC, str(cur_version),
               str(req_version)))
Check if the currently running Python version is new enough .
59,317
def main():
    """Execute all checks."""
    check_python_version()
    check_python_modules()
    check_executables()
    home = os.path.expanduser("~")
    print("\033[1mCheck files\033[0m")
    rcfile = os.path.join(home, ".hwrtrc")
    if os.path.isfile(rcfile):
        print("~/.hwrtrc... %sFOUND%s" % (Bcolors.OKGREEN, Bcolors.ENDC))
    else:
        print("~/.hwrtrc... %sNOT FOUND%s" % (Bcolors.FAIL, Bcolors.ENDC))
    misc_path = pkg_resources.resource_filename('hwrt', 'misc/')
    print("misc-path: %s" % misc_path)
Execute all checks .
59,318
def merge(d1, d2):
    """Merge two raw datasets into one.

    Parameters
    ----------
    d1 : dict with 'formula_id2latex' (dict or None) and
        'handwriting_datasets' (list)
    d2 : dict of the same structure

    Returns
    -------
    dict - the merged dataset (d1's dataset list is extended in place)
    """
    if d1['formula_id2latex'] is None:
        combined_latex = {}
    else:
        combined_latex = d1['formula_id2latex'].copy()
    combined_latex.update(d2['formula_id2latex'])
    datasets = d1['handwriting_datasets']
    datasets.extend(d2['handwriting_datasets'])
    return {'formula_id2latex': combined_latex,
            'handwriting_datasets': datasets}
Merge two raw datasets into one .
59,319
def is_file_consistent(local_path_file, md5_hash):
    """Check if file is there and if the md5_hash is correct.

    Parameters
    ----------
    local_path_file : str - path of the file to check
    md5_hash : str - expected hex digest

    Returns
    -------
    bool
    """
    if not os.path.isfile(local_path_file):
        return False
    # Bugfix: read within a context manager so the file handle is always
    # closed; the original leaked the handle returned by open().
    with open(local_path_file, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest() == md5_hash
Check if file is there and if the md5_hash is correct .
59,320
def main():
    """Main part of the download script."""
    project_root = utils.get_project_root()
    # Read the dataset index shipped in the project root.
    infofile = os.path.join(project_root, "raw-datasets/info.yml")
    logging.info("Read '%s'...", infofile)
    with open(infofile, 'r') as ymlfile:
        datasets = yaml.load(ymlfile)
    for dataset in datasets:
        local_path_file = os.path.join(project_root, dataset['online_path'])
        i = 0
        # Retry the download up to 3 times if the md5 check fails.
        while (not is_file_consistent(local_path_file, dataset['md5'])
               and i < 3):
            if os.path.isfile(local_path_file):
                local_file_size = os.path.getsize(local_path_file)
                logging.info("MD5 codes differ. ")
                logging.info("The file size of the downloaded file is %s.",
                             utils.sizeof_fmt(local_file_size))
            logging.info("Download the file '%s'...",
                         dataset['online_path'])
            # NOTE(review): urllib.urlretrieve exists only on Python 2;
            # Python 3 needs urllib.request.urlretrieve — confirm target.
            urllib.urlretrieve(dataset['url'], local_path_file)
            i += 1
        # Bugfix: the original tested 'i < 10', which is always true here
        # (the loop stops at i == 3). Report success only if the file is
        # actually consistent now.
        if is_file_consistent(local_path_file, dataset['md5']):
            logging.info("Found '%s'.", dataset['online_path'])
Main part of the download script .
59,321
def load_model():
    """Load a n-gram language model for mathematics in ARPA format which
    gets shipped with hwrt.

    Returns
    -------
    NgramLanguageModel
    """
    logging.info("Load language model...")
    ngram_arpa_t = pkg_resources.resource_filename(
        'hwrt', 'misc/ngram.arpa.tar.bz2')
    # The ARPA file is shipped compressed; extract it to a temp folder.
    with tarfile.open(ngram_arpa_t, 'r:bz2') as tar:
        tarfolder = tempfile.mkdtemp()
        tar.extractall(path=tarfolder)
    ngram_arpa_f = os.path.join(tarfolder, 'ngram.arpa')
    with open(ngram_arpa_f) as f:
        content = f.read()
    ngram_model = NgramLanguageModel()
    ngram_model.load_from_arpa_str(content)
    return ngram_model
Load a n - gram language model for mathematics in ARPA format which gets shipped with hwrt .
59,322
def load_from_arpa_str(self, arpa_str):
    """Initialize N-gram model by reading an ARPA language model string.

    Parameters
    ----------
    arpa_str : str - language model in ARPA format
    """
    data_found = False   # True once the "\data\" header was seen
    end_found = False    # True once the "\end\" marker was seen
    in_ngram_block = 0   # order of the n-gram section being parsed (0=none)
    for i, line in enumerate(arpa_str.split("\n")):
        if not end_found:
            if not data_found:
                if "\\data\\" in line:
                    data_found = True
            else:
                if in_ngram_block == 0:
                    if line.startswith("ngram"):
                        # Count line, e.g. "ngram 2=4568".
                        ngram_type, count = line.split("=")
                        _, n = ngram_type.split(" ")
                        n = int(n)
                        self.ngrams[n] = {'data': {}, 'count': count}
                    elif line.startswith("\\"):
                        # Section marker, e.g. "\2-grams:".
                        n = int(line.split("-")[0][1:])
                        in_ngram_block = n
                    else:
                        continue
                elif in_ngram_block > 0:
                    if "\\end\\" in line:
                        end_found = True
                    elif line.startswith("\\"):
                        # Start of the next n-gram section.
                        n = int(line.split("-")[0][1:])
                        in_ngram_block = n
                    elif len(line) <= 1:
                        continue
                    else:
                        # Data line: "<log10 prob>\t<token 1> ... <token n>"
                        data = line.split("\t")
                        probability = Decimal(data[0])
                        ngram = data[1:]
                        if len(ngram) != n:
                            raise Exception(("ARPA language file is "
                                             "inconsistant. Line %i has "
                                             "only %i items, but should "
                                             "have %i items.") %
                                            (i, len(ngram), n))
                        # Store the probability in a dict nested one level
                        # per token.
                        rest = ngram
                        append_to = self.ngrams[n]['data']
                        while len(rest) > 1:
                            first, rest = rest[0], rest[1:]
                            if first not in append_to:
                                append_to[first] = {}
                            append_to = append_to[first]
                        if rest[0] in append_to:
                            raise Exception(("Duplicate entry for "
                                             "ngram %s") % ngram)
                        append_to[rest[0]] = probability
        else:
            # After \end\, only informational lines are expected.
            if line.startswith("info: "):
                logging.info(line[6:])
Initialize N - gram model by reading an ARPA language model string .
59,323
def get_probability(self, sentence):
    """Calculate the probability of a sentence given this language model.

    Parameters
    ----------
    sentence : list of tokens

    Returns
    -------
    Decimal - probability of the sentence (10 ** summed log probability)
    """
    if len(sentence) == 1:
        return Decimal(10) ** self.get_unigram_log_prob(sentence)
    if len(sentence) == 2:
        return Decimal(10) ** self.get_bigram_log_prob(sentence)
    # Longer sentences: sum trigram log probabilities over a sliding window.
    log_prob = Decimal(0.0)
    for trigram in zip(sentence, sentence[1:], sentence[2:]):
        log_prob += self.get_trigram_log_prob(trigram)
    return Decimal(10) ** Decimal(log_prob)
Calculate the probability of a sentence given this language model .
59,324
def evaluate_dir(sample_dir):
    """Evaluate all recordings in sample_dir.

    Parameters
    ----------
    sample_dir : str - path to a directory with *.inkml files

    Returns
    -------
    list - one evaluation result per InkML file
    """
    # Bugfix/robustness: rstrip handles any number of trailing slashes and,
    # unlike the original's sample_dir[-1] check, does not raise an
    # IndexError on an empty string.
    sample_dir = sample_dir.rstrip("/")
    results = []
    for filename in glob.glob("%s/*.inkml" % sample_dir):
        results.append(evaluate_inkml(filename))
    return results
Evaluate all recordings in sample_dir .
59,325
def evaluate_inkml(inkml_file_path):
    """Evaluate an InkML file.

    Parameters
    ----------
    inkml_file_path : str - path to the InkML file

    Returns
    -------
    dict with 'filename' and the recognition 'results'
    """
    logging.info("Start evaluating '%s'...", inkml_file_path)
    recording = inkml.read(inkml_file_path)
    results = evaluate(json.dumps(recording.get_sorted_pointlist()),
                       result_format='LaTeX')
    return {'filename': inkml_file_path, 'results': results}
Evaluate an InkML file .
59,326
def generate_output_csv(evaluation_results, filename='results.csv'):
    """Generate the evaluation results in the format

    Parameters
    ----------
    evaluation_results : list of dicts, each with 'filename' and 'results'
        (a list of dicts with 'semantics' and 'probability')
    filename : str - path of the CSV file which gets written
    """
    with open(filename, 'w') as f:
        for result in evaluation_results:
            # Literal commas would break the comma-separated output.
            # Bugfix: the original assigned to result['results']['semantics']
            # (dropping the entry index), which raises a TypeError on a list.
            for entry in result['results']:
                if entry['semantics'] == ',':
                    entry['semantics'] = 'COMMA'
            f.write("%s, " % result['filename'])
            f.write(", ".join([entry['semantics']
                               for entry in result['results']]))
            f.write("\n")
            f.write("%s, " % "scores")
            f.write(", ".join([str(entry['probability'])
                               for entry in result['results']]))
            f.write("\n")
Generate the evaluation results as a CSV file, one line of symbols and one line of scores per recording.
59,327
def get_project_configuration():
    """Get project configuration as dictionary.

    Returns
    -------
    dict - contents of ~/.hwrtrc (created with defaults if missing)
    """
    home = os.path.expanduser("~")
    rcfile = os.path.join(home, ".hwrtrc")
    if not os.path.isfile(rcfile):
        create_project_configuration(rcfile)
    with open(rcfile, 'r') as ymlfile:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe for untrusted input; consider yaml.safe_load.
        cfg = yaml.load(ymlfile)
    return cfg
Get project configuration as dictionary .
59,328
def create_project_configuration(filename):
    """Create a project configuration file which contains a configuration
    that might make sense.

    Parameters
    ----------
    filename : str - path where the YAML configuration gets written
    """
    home = os.path.expanduser("~")
    project_root_folder = os.path.join(home, "hwr-experiments")
    config = {'root': project_root_folder,
              'nntoolkit': None,
              'dropbox_app_key': None,
              'dropbox_app_secret': None,
              'dbconfig': os.path.join(home, "hwrt-config/db.config.yml"),
              'data_analyzation_queue': [{'Creator': None}],
              'worker_api_key': '1234567890abc',
              'environment': 'development'}
    with open(filename, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
Create a project configuration file which contains a configuration that might make sense .
59,329
def get_project_root():
    """Get the project root folder as a string.

    Side effects: creates the expected project directory structure and
    copies default info.yml files from the package if they are missing.
    """
    cfg = get_project_configuration()
    # Make sure the top-level project folders exist.
    for dirname in ["raw-datasets", "preprocessed", "feature-files",
                    "models", "reports"]:
        directory = os.path.join(cfg['root'], dirname)
        if not os.path.exists(directory):
            os.makedirs(directory)

    raw_yml_path = pkg_resources.resource_filename('hwrt', 'misc/')

    # Make sure raw-datasets/info.yml exists.
    raw_data_dst = os.path.join(cfg['root'], "raw-datasets/info.yml")
    if not os.path.isfile(raw_data_dst):
        raw_yml_pkg_src = os.path.join(raw_yml_path, "info.yml")
        shutil.copy(raw_yml_pkg_src, raw_data_dst)

    # Make sure the small-baseline folders exist.
    for dirname in ["models/small-baseline", "feature-files/small-baseline",
                    "preprocessed/small-baseline"]:
        directory = os.path.join(cfg['root'], dirname)
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Make sure the small-baseline info files exist.
    paths = [("preprocessed/small-baseline/", "preprocessing-small-info.yml"),
             ("feature-files/small-baseline/", "feature-small-info.yml"),
             ("models/small-baseline/", "model-small-info.yml")]
    for dest, src in paths:
        raw_data_dst = os.path.join(cfg['root'], "%s/info.yml" % dest)
        if not os.path.isfile(raw_data_dst):
            raw_yml_pkg_src = os.path.join(raw_yml_path, src)
            shutil.copy(raw_yml_pkg_src, raw_data_dst)
    return cfg['root']
Get the project root folder as a string .
59,330
def get_template_folder():
    """Get path to the folder where the HTML templates are."""
    cfg = get_project_configuration()
    if 'templates' not in cfg:
        # Fall back to the templates shipped with the package and persist
        # that choice in ~/.hwrtrc for later calls.
        home = os.path.expanduser("~")
        rcfile = os.path.join(home, ".hwrtrc")
        cfg['templates'] = pkg_resources.resource_filename('hwrt',
                                                           'templates/')
        with open(rcfile, 'w') as f:
            yaml.dump(cfg, f, default_flow_style=False)
    return cfg['templates']
Get path to the folder where the HTML templates are.
59,331
def get_database_config_file():
    """Get the absolute path to the database configuration file.

    Returns
    -------
    str or None - None if no (valid) 'dbconfig' entry is configured
    """
    cfg = get_project_configuration()
    if 'dbconfig' not in cfg:
        logging.info("No database connection file found. "
                     "Specify 'dbconfig' in your ~/.hwrtrc file.")
        return None
    if os.path.isfile(cfg['dbconfig']):
        return cfg['dbconfig']
    logging.info("File '%s' was not found. Adjust 'dbconfig' in your "
                 "~/.hwrtrc file.", cfg['dbconfig'])
    return None
Get the absolute path to the database configuration file .
59,332
def get_database_configuration():
    """Get database configuration as dictionary.

    Returns
    -------
    dict or None - None if no database configuration file exists
    """
    db_config = get_database_config_file()
    if db_config is None:
        return None
    with open(db_config, 'r') as ymlfile:
        return yaml.load(ymlfile)
Get database configuration as dictionary .
59,333
def input_int_default(question="", default=0):
    """A function that works for both Python 2.x and Python 3.x.
    It asks the user for input and returns it as an integer, falling back
    to `default` on an empty or "yes" answer.
    """
    answer = input_string(question)
    if answer in ("", "yes"):
        return default
    return int(answer)
A function that works for both Python 2.x and Python 3.x. It asks the user for input and returns it as an integer (or the default for empty / "yes" answers).
59,334
def create_run_logfile(folder):
    """Create a run.log within folder. This file contains the time of the
    latest successful run.
    """
    logfile_path = os.path.join(folder, "run.log")
    datestring = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
    with open(logfile_path, "w") as f:
        f.write("timestamp: '%s'" % datestring)
Create a run . log within folder . This file contains the time of the latest successful run .
59,335
def choose_raw_dataset(currently=""):
    """Let the user choose a raw dataset. Return the absolute path.

    Parameters
    ----------
    currently : str - path of the currently selected dataset; it gets
        highlighted and used as the default choice
    """
    folder = os.path.join(get_project_root(), "raw-datasets")
    files = [os.path.join(folder, name) for name in os.listdir(folder)
             if name.endswith(".pickle")]
    default = -1
    for i, filename in enumerate(files):
        if os.path.basename(currently) == os.path.basename(filename):
            default = i
        if i != default:
            print("[%i]\t%s" % (i, os.path.basename(filename)))
        else:
            # Highlight the default entry in bold.
            print("\033[1m[%i]\033[0m\t%s" % (i, os.path.basename(filename)))
    i = input_int_default("Choose a dataset by number: ", default)
    return files[i]
Let the user choose a raw dataset . Return the absolute path .
59,336
def get_readable_time(t):
    """Format the time to a readable format.

    Parameters
    ----------
    t : int - time in milliseconds

    Returns
    -------
    str - human-readable duration, e.g. "1 minutes 5s 0ms"
    """
    ms = t % 1000
    t = (t - ms) / 1000
    s = t % 60
    t = (t - s) / 60
    minutes = t % 60
    hours = (t - minutes) / 60
    if hours != 0:
        return "%ih, %i minutes %is %ims" % (hours, minutes, s, ms)
    elif minutes != 0:
        return "%i minutes %is %ims" % (minutes, s, ms)
    elif s != 0:
        return "%is %ims" % (s, ms)
    else:
        return "%ims" % ms
Format the time to a readable format .
59,337
def default_model():
    """Get a path for a default value for the model. Start searching in the
    current directory.
    """
    models_dir = os.path.join(get_project_root(), "models")
    curr_dir = os.getcwd()
    # If we are inside (a subfolder of) the models directory, use that.
    inside_models = (os.path.commonprefix([models_dir, curr_dir]) ==
                     models_dir)
    if inside_models and curr_dir != models_dir:
        return curr_dir
    return get_latest_folder(models_dir)
Get a path for a default value for the model . Start searching in the current directory .
59,338
def create_adjusted_model_for_percentages(model_src, model_use):
    """Replace logreg layer by sigmoid to get probabilities.

    Parameters
    ----------
    model_src : str - path of the source model file
    model_use : str - path where the adjusted copy gets written
    """
    # Copy the model file, then patch the activation in the copy.
    shutil.copyfile(model_src, model_use)
    with open(model_src) as f:
        content = f.read()
    with open(model_use, "w") as f:
        f.write(content.replace("logreg", "sigmoid"))
Replace logreg layer by sigmoid to get probabilities .
59,339
def create_hdf5(output_filename, feature_count, data):
    """Create a HDF5 feature files.

    Parameters
    ----------
    output_filename : str - name of the HDF5 file that will be created
    feature_count : int - expected dimension of each feature vector
    data : list of (feature_vector, label) tuples
    """
    import h5py
    logging.info("Start creating of %s hdf file", output_filename)
    x, y = [], []
    for feature_vector, label in data:
        assert len(feature_vector) == feature_count, \
            "Expected %i features, got %i features" % \
            (feature_count, len(feature_vector))
        x.append(feature_vector)
        y.append(int(label))
    Wfile = h5py.File(output_filename, 'w')
    Wfile.create_dataset("data", data=x, dtype='float32')
    Wfile.create_dataset("labels", data=y, dtype='int32')
    Wfile.close()
Create a HDF5 feature files .
59,340
def load_model(model_file):
    """Load a model by its file. This includes the model itself, but also
    the preprocessing queue, the feature list and the output semantics.

    Parameters
    ----------
    model_file : str - path to the model tar file

    Returns
    -------
    tuple (preprocessing_queue, feature_list, model, output_semantics)
    """
    # Extract the model tar into a temporary folder.
    with tarfile.open(model_file) as tar:
        tarfolder = tempfile.mkdtemp()
        tar.extractall(path=tarfolder)

    from . import features
    from . import preprocessing

    # Preprocessing queue.
    with open(os.path.join(tarfolder, "preprocessing.yml"), 'r') as ymlfile:
        preprocessing_description = yaml.load(ymlfile)
    preprocessing_queue = preprocessing.get_preprocessing_queue(
        preprocessing_description['queue'])

    # Feature list.
    with open(os.path.join(tarfolder, "features.yml"), 'r') as ymlfile:
        feature_description = yaml.load(ymlfile)
    feature_list = features.get_features(feature_description['features'])

    # Model and output semantics.
    import nntoolkit.utils
    model = nntoolkit.utils.get_model(model_file)
    output_semantics_file = os.path.join(tarfolder, 'output_semantics.csv')
    output_semantics = nntoolkit.utils.get_outputs(output_semantics_file)

    # Clean up the temporary folder.
    shutil.rmtree(tarfolder)
    return (preprocessing_queue, feature_list, model, output_semantics)
Load a model by its file . This includes the model itself but also the preprocessing queue the feature list and the output semantics .
59,341
def evaluate_model_single_recording_preloaded(preprocessing_queue,
                                              feature_list,
                                              model,
                                              output_semantics,
                                              recording,
                                              recording_id=None):
    """Evaluate a model for a single recording after everything has been
    loaded.

    Parameters
    ----------
    preprocessing_queue : list of preprocessing objects
    feature_list : list of feature objects
    model : model as loaded by load_model
    output_semantics : list mapping model outputs to symbols
    recording : the handwritten recording (JSON string)
    recording_id : int or None - raw data id, if known
    """
    handwriting = handwritten_data.HandwrittenData(recording,
                                                   raw_data_id=recording_id)
    handwriting.preprocessing(preprocessing_queue)
    x = handwriting.feature_extraction(feature_list)
    import nntoolkit.evaluate
    model_output = nntoolkit.evaluate.get_model_output(model, [x])
    return nntoolkit.evaluate.get_results(model_output, output_semantics)
Evaluate a model for a single recording after everything has been loaded .
59,342
def evaluate_model_single_recording_preloaded_multisymbol(preprocessing_queue,
                                                          feature_list,
                                                          model,
                                                          output_semantics,
                                                          recording):
    """Evaluate a model for a single recording after everything has been
    loaded. Multiple symbols are recognized.

    Parameters
    ----------
    preprocessing_queue : list of preprocessing objects
    feature_list : list of feature objects
    model : model as loaded by load_model
    output_semantics : list mapping model outputs to symbols
    recording : JSON string with the recording
    """
    import json
    import nntoolkit.evaluate
    recording = json.loads(recording)
    logging.info(("## start (%i strokes)" % len(recording)) + "#" * 80)
    hypotheses = []  # one entry per (split, symbol combination)
    for split in get_possible_splits(len(recording)):
        recording_segmented = segment_by_split(split, recording)
        cur_split_results = []
        for i, symbol in enumerate(recording_segmented):
            handwriting = handwritten_data.HandwrittenData(
                json.dumps(symbol))
            handwriting.preprocessing(preprocessing_queue)
            x = handwriting.feature_extraction(feature_list)
            model_output = nntoolkit.evaluate.get_model_output(model, [x])
            results = nntoolkit.evaluate.get_results(model_output,
                                                     output_semantics)
            results = results[:10]
            # Keep only plausible candidates per symbol.
            cur_split_results.append([el for el in results
                                      if el['probability'] >= 0.01])
        # Every combination of per-symbol candidates is one hypothesis.
        import itertools
        for hyp in itertools.product(*cur_split_results):
            # NOTE(review): 'reduce' is a builtin only on Python 2; on
            # Python 3 it must come from functools — confirm target version.
            hypotheses.append({
                'score': (reduce(lambda x, y: x * y,
                                 [s['probability'] for s in hyp]) *
                          len(hyp) / len(recording)),
                'symbols': [s['semantics'] for s in hyp],
                'min_part': min([s['probability'] for s in hyp]),
                'segmentation': split})
    hypotheses = sorted(hypotheses,
                        key=lambda n: n['min_part'],
                        reverse=True)[:10]
    for i, hyp in enumerate(hypotheses):
        if hyp['score'] > 0.001:
            logging.info("%0.4f: %s (seg: %s)",
                         hyp['score'], hyp['symbols'], hyp['segmentation'])
    # NOTE(review): this returns the results of only the last evaluated
    # symbol, not the ranked hypotheses computed above — confirm intent.
    return nntoolkit.evaluate.get_results(model_output, output_semantics)
Evaluate a model for a single recording after everything has been loaded . Multiple symbols are recognized .
59,343
def evaluate_model_single_recording_multisymbol(model_file, recording):
    """Evaluate a model for a single recording which may contain multiple
    symbols.

    Parameters
    ----------
    model_file : str
        Model file (.tar).
    recording : str
        The handwritten recording.
    """
    preprocessing_queue, feature_list, model, output_semantics = load_model(model_file)
    logging.info("multiple symbol mode")
    logging.info(recording)
    return evaluate_model_single_recording_preloaded(preprocessing_queue,
                                                     feature_list,
                                                     model,
                                                     output_semantics,
                                                     recording)
Evaluate a model for a single recording where possibly multiple symbols are present .
59,344
def evaluate_model(recording, model_folder, verbose=False):
    """Evaluate a model for a single recording.

    Walks through the recognizer folders (preprocessing, feature files,
    model) of ``model_folder``, transforms ``recording`` accordingly and
    returns the path of the log file written by the model evaluation.

    Parameters
    ----------
    recording : str
        Raw recording data (JSON string).
    model_folder : str
        Folder of the model to evaluate.
    verbose : bool
        Show the recording before/after preprocessing.
    """
    from . import preprocess_dataset
    from . import features
    for target_folder in get_recognizer_folders(model_folder):
        if "preprocessed" in target_folder:
            logging.info("Start applying preprocessing methods...")
            t = target_folder
            _, _, preprocessing_queue = preprocess_dataset.get_parameters(t)
            handwriting = handwritten_data.HandwrittenData(recording)
            if verbose:
                handwriting.show()
            handwriting.preprocessing(preprocessing_queue)
            if verbose:
                logging.debug("After preprocessing: %s",
                              handwriting.get_sorted_pointlist())
                handwriting.show()
        elif "feature-files" in target_folder:
            logging.info("Create feature file...")
            infofile_path = os.path.join(target_folder, "info.yml")
            with open(infofile_path, 'r') as ymlfile:
                feature_description = yaml.load(ymlfile)
            feature_str_list = feature_description['features']
            feature_list = features.get_features(feature_str_list)
            feature_count = sum(map(lambda n: n.get_dimension(), feature_list))
            # NOTE(review): `handwriting` must have been set by an earlier
            # "preprocessed" folder - confirm folder ordering guarantees this.
            x = handwriting.feature_extraction(feature_list)
            _, output_filename = tempfile.mkstemp(suffix='.hdf5', text=True)
            create_hdf5(output_filename, feature_count, [(x, 0)])
        elif "model" in target_folder:
            logfile, model_use = _evaluate_model_single_file(target_folder, output_filename)
            return logfile
        else:
            logging.info("'%s' not found", target_folder)
    # NOTE(review): unreachable when a "model" folder returned above, and
    # `output_filename` / `model_use` may be unbound here - confirm intent.
    os.remove(output_filename)
    os.remove(model_use)
Evaluate model for a single recording .
59,345
def get_index2latex(model_description):
    """Get a dictionary that maps output indices to LaTeX commands.

    The mapping is read from the ``index2formula_id.csv`` file of the
    model's data source.
    """
    translation_csv = os.path.join(get_project_root(),
                                   model_description["data-source"],
                                   "index2formula_id.csv")
    mapping = {}
    with open(translation_csv) as csvfile:
        for row in csv.DictReader(csvfile, delimiter=',', quotechar='"'):
            mapping[int(row['index'])] = row['latex']
    return mapping
Get a dictionary that maps indices to LaTeX commands .
59,346
def get_online_symbol_data(database_id):
    """Fetch the ``wm_formula`` row with the given id from the online
    server database.

    Parameters
    ----------
    database_id : int

    Returns
    -------
    dict or None
        The row as dict, or None if not exactly one row matched.
    """
    import pymysql
    import pymysql.cursors
    cfg = get_database_configuration()
    mysql = cfg['mysql_online']
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    sql = ("SELECT `id`, `formula_in_latex`, `unicode_dec`, `font`, "
           "`font_style` FROM `wm_formula` WHERE `id` =%i") % database_id
    cursor.execute(sql)
    datasets = cursor.fetchall()
    if len(datasets) == 1:
        return datasets[0]
    else:
        return None
Get the formula row with the given id from the server database .
59,347
def classify_single_recording(raw_data_json, model_folder, verbose=False):
    """Classify a single recording.

    Returns
    -------
    list of (str, float)
        (LaTeX code, probability) tuples, sorted by descending
        probability.
    """
    evaluation_file = evaluate_model(raw_data_json, model_folder, verbose)
    with open(os.path.join(model_folder, "info.yml")) as ymlfile:
        model_description = yaml.load(ymlfile)
    index2latex = get_index2latex(model_description)
    with open(evaluation_file) as f:
        probabilities = f.read()
    probabilities = map(float, probabilities.split(" "))
    results = [(index2latex[index], probability)
               for index, probability in enumerate(probabilities)]
    return sorted(results, key=lambda n: n[1], reverse=True)
Get the classification as a list of tuples . The first value is the LaTeX code the second value is the probability .
59,348
def get_objectlist(description, config_key, module):
    """Take a description and return a list of instantiated classes.

    Each entry of ``description`` maps a class name to either None (no
    constructor arguments) or a list of one-entry parameter dicts.
    """
    instances = []
    for entry in description:
        for class_name, params in entry.items():
            cls = get_class(class_name, config_key, module)
            if params is None:
                instances.append(cls())
            else:
                kwargs = {}
                for param_dict in params:
                    kwargs.update(param_dict)
                instances.append(cls(**kwargs))
    return instances
Take a description and return a list of classes .
59,349
def get_class(name, config_key, module):
    """Get a class by its name as a string.

    First look in ``module``; if the class is not found there, look in
    the user-defined plugin file referenced by ``cfg[config_key]``.

    Returns
    -------
    type or None
        The class, or None when it could not be found.
    """
    clsmembers = inspect.getmembers(module, inspect.isclass)
    for string_name, act_class in clsmembers:
        if string_name == name:
            return act_class
    # Fall back to the user-defined plugin file, if configured.
    cfg = get_project_configuration()
    if config_key in cfg:
        modname = os.path.splitext(os.path.basename(cfg[config_key]))[0]
        if os.path.isfile(cfg[config_key]):
            usermodule = imp.load_source(modname, cfg[config_key])
            clsmembers = inspect.getmembers(usermodule, inspect.isclass)
            for string_name, act_class in clsmembers:
                if string_name == name:
                    return act_class
        else:
            # Bug fix: previously this always reported
            # cfg['data_analyzation_plugins'] regardless of which
            # config_key was being looked up (KeyError hazard).
            logging.warning("File '%s' does not exist. Adjust ~/.hwrtrc.",
                            cfg[config_key])
    logging.debug("Unknown class '%s'.", name)
    return None
Get the class by its name as a string .
59,350
def get_mysql_cfg():
    """Return the MySQL configuration appropriate for the current
    environment (production vs. development)."""
    environment = get_project_configuration()['environment']
    cfg = get_database_configuration()
    if environment == 'production':
        return cfg['mysql_online']
    return cfg['mysql_dev']
Get the appropriate MySQL configuration
59,351
def softmax(w, t=1.0):
    """Calculate the softmax of a list of numbers ``w``.

    The computation is done with ``decimal.Decimal`` values to avoid
    overflow; the result is a numpy array of Decimals summing to 1.

    Parameters
    ----------
    w : list of numbers
    t : float
        Temperature.
    """
    decimals = numpy.array([Decimal(value) for value in w])
    exponentials = numpy.exp(decimals / Decimal(t))
    return exponentials / numpy.sum(exponentials)
Calculate the softmax of a list of numbers w .
59,352
def get_beam_cache_directory():
    """Return the directory where pickled beam data is cached, creating
    it if necessary.

    Returns
    -------
    str
        Path to the cache directory.
    """
    directory = os.path.join(os.path.expanduser("~"), '.hwrt-beam-cache')
    if not os.path.exists(directory):
        os.makedirs(directory)
    return directory
Get a directory where pickled Beam Data can be stored .
59,353
def get_beam(secret_uuid):
    """Load the pickled beam of the session ``secret_uuid``.

    Returns
    -------
    The unpickled beam, or None when no beam has been stored for that
    session.
    """
    path = os.path.join(get_beam_cache_directory(), secret_uuid)
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as handle:
        return pickle.load(handle)
Get a beam from the session with secret_uuid .
59,354
def is_valid_uuid(uuid_to_test, version=4):
    """Check whether ``uuid_to_test`` is a valid, canonically formatted
    UUID string of the given version."""
    try:
        # str(UUID(...)) is the canonical lowercase form; comparing to the
        # input rejects non-canonical spellings.
        return str(UUID(uuid_to_test, version=version)) == uuid_to_test
    except ValueError:
        return False
Check if uuid_to_test is a valid UUID .
59,355
def prepare_table(table):
    """Make ``table`` symmetric in place: the lower-left half becomes the
    reverse probability (1 - p) of the upper-right half, and the diagonal
    is zeroed. The table is also returned."""
    size = len(table)
    for row_idx, row in enumerate(table):
        assert len(row) == size
        for col_idx in range(len(row)):
            if row_idx == col_idx:
                table[row_idx][col_idx] = 0.0
            elif row_idx > col_idx:
                table[row_idx][col_idx] = 1 - table[col_idx][row_idx]
    return table
Make the table symmetric where the lower left part of the matrix is the reverse probability
59,356
def neclusters(l, K):
    """Partition list ``l`` into ``K`` partitions, yielding only those
    without empty parts."""
    for candidate in clusters(l, K):
        if all(candidate):
            yield candidate
Partition list l in K partitions without empty parts .
59,357
def all_segmentations(l):
    """Yield every segmentation of list ``l`` (all partition sizes, no
    empty parts)."""
    for part_count in range(1, len(l) + 1):
        for segmentation in neclusters(l, part_count):
            yield segmentation
Get all segmentations of a list l .
59,358
def q(segmentation, s1, s2):
    """Return True iff strokes ``s1`` and ``s2`` belong to the same symbol
    under ``segmentation``."""
    return find_index(segmentation, s1) == find_index(segmentation, s2)
Test if s1 and s2 are in the same symbol given the segmentation .
59,359
def score_segmentation(segmentation, table):
    """Compute the score of ``segmentation`` as the product of pairwise
    same-symbol probabilities from ``table`` (or their complements, via
    the transposed entry, for pairs in different symbols)."""
    stroke_nr = sum(len(symbol) for symbol in segmentation)
    score = 1
    for i in range(stroke_nr):
        for j in range(i + 1, stroke_nr):
            if q(segmentation, i, j):
                score *= table[i][j]
            else:
                score *= table[j][i]
    return score
Get the score of a segmentation .
59,360
def push(self, element, value):
    """Push ``element`` with ``value`` into the data structure; only keep
    it if it currently is one of the top ``n`` elements.

    ``self.tops`` is kept sorted by value (descending, or ascending when
    ``self.find_min`` is set); the insertion position is found by a
    linear scan.
    """
    insert_pos = 0
    for index, el in enumerate(self.tops):
        if not self.find_min and el[1] >= value:
            insert_pos = index + 1
        elif self.find_min and el[1] <= value:
            insert_pos = index + 1
    self.tops.insert(insert_pos, [element, value])
    # Drop everything beyond the top n elements.
    self.tops = self.tops[:self.n]
Push an element into the data structure together with its value and only save it if it is currently one of the top n elements .
59,361
def _array2cstr ( arr ) : out = StringIO ( ) np . save ( out , arr ) return b64encode ( out . getvalue ( ) )
Serializes a numpy array to a compressed base64 string
59,362
def _str2array ( d ) : if type ( d ) == list : return np . asarray ( [ _str2array ( s ) for s in d ] ) ins = StringIO ( d ) return np . loadtxt ( ins )
Reconstructs a numpy array from a plain - text string
59,363
def create_output_semantics(model_folder, outputs):
    """Create an ``output_semantics.csv`` file which describes what each
    output neuron of the model means.

    Parameters
    ----------
    model_folder : str
        Folder containing the model description ``info.yml``.
    outputs : int
        Number of output neurons.
    """
    # NOTE(review): 'wb' + csv.writer is Python 2 style; under Python 3
    # the csv module expects a text-mode file - confirm target interpreter.
    with open('output_semantics.csv', 'wb') as csvfile:
        model_description_file = os.path.join(model_folder, "info.yml")
        with open(model_description_file, 'r') as ymlfile:
            model_description = yaml.load(ymlfile)
        logging.info("Start fetching translation dict...")
        translation_dict = utils.get_index2data(model_description)
        spamwriter = csv.writer(csvfile, delimiter=';', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for output_index in range(outputs):
            if output_index in translation_dict:
                spamwriter.writerow(translation_dict[output_index])
            else:
                print("No data for %i." % output_index)
                spamwriter.writerow(["output %i" % output_index])
Create a output_semantics . csv file which contains information what the output of the single output neurons mean .
59,364
def elementtree_to_dict(element):
    """Convert an XML ElementTree element into a (possibly nested)
    dictionary.

    Attributes become keys, the element text is stored under ``'text'``,
    and repeated child tags are collected into lists.
    """
    result = dict()
    if hasattr(element, 'text') and element.text is not None:
        result['text'] = element.text
    result.update(element.items())  # element attributes
    for child in list(element):
        converted = elementtree_to_dict(child)
        if child.tag not in result:
            result[child.tag] = converted
        elif isinstance(result[child.tag], list):
            result[child.tag].append(converted)
        else:
            # Second occurrence of this tag: promote to a list.
            result[child.tag] = [result[child.tag], converted]
    return result
Convert an xml ElementTree to a dictionary .
59,365
def strip_end(text, suffix):
    """Strip ``suffix`` from the end of ``text`` if ``text`` has that
    suffix; otherwise return ``text`` unchanged."""
    if text.endswith(suffix):
        return text[:len(text) - len(suffix)]
    return text
Strip suffix from the end of text if text has that suffix .
59,366
def formula_to_dbid(formula_str, backslash_fix=False):
    """Convert a LaTeX formula to its ``wm_formula`` database id.

    The whole table is fetched on first use and cached in the
    module-level ``__formula_to_dbid_cache``. If the formula is unknown
    (and shorter than 20 characters) it is inserted into the database.

    Parameters
    ----------
    formula_str : str
        The formula as LaTeX code.
    backslash_fix : bool
        If True, also try ``'\\' + formula_str`` as a lookup key.

    Returns
    -------
    int
        The database id of the formula.
    """
    global __formula_to_dbid_cache
    if __formula_to_dbid_cache is None:
        # First call: fetch all formulas and build the lookup cache.
        mysql = utils.get_mysql_cfg()
        connection = pymysql.connect(host=mysql['host'],
                                     user=mysql['user'],
                                     passwd=mysql['passwd'],
                                     db=mysql['db'],
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        cursor = connection.cursor()
        sql = ("SELECT `id`, `formula_in_latex` FROM `wm_formula` ")
        cursor.execute(sql)
        formulas = cursor.fetchall()
        __formula_to_dbid_cache = {}
        for fm in formulas:
            __formula_to_dbid_cache[fm['formula_in_latex']] = fm['id']
    if formula_str in __formula_to_dbid_cache:
        return __formula_to_dbid_cache[formula_str]
    elif backslash_fix and ('\\%s' % formula_str) in __formula_to_dbid_cache:
        return __formula_to_dbid_cache['\\%s' % formula_str]
    else:
        # Unknown formula: insert it into the database.
        logging.info("Symbol '%s' was not found. Add it to write-math.com.",
                     formula_str)
        mysql = utils.get_mysql_cfg()
        connection = pymysql.connect(host=mysql['host'],
                                     user=mysql['user'],
                                     passwd=mysql['passwd'],
                                     db=mysql['db'],
                                     charset='utf8mb4',
                                     cursorclass=pymysql.cursors.DictCursor)
        cursor = connection.cursor()
        sql = ("INSERT INTO `wm_formula` (`user_id`, `formula_name`, "
               "`formula_in_latex`, "
               "`mode`, `package`) VALUES ("
               "'10', %s, %s, 'bothmodes', NULL);")
        if len(formula_str) < 20:
            logging.info("Insert formula %s.", formula_str)
            cursor.execute(sql, (formula_str, formula_str))
            connection.commit()
            __formula_to_dbid_cache[formula_str] = connection.insert_id()
        # NOTE(review): for len(formula_str) >= 20 nothing was inserted and
        # the following lookup raises KeyError - confirm intended behavior.
        return __formula_to_dbid_cache[formula_str]
Convert a LaTeX formula to the database index .
59,367
def insert_recording(hw):
    """Insert recording ``hw`` into the ``wm_raw_draw_data`` table.

    The per-symbol segmentation is stored via ``insert_symbol_mapping``.
    Duplicate recordings (same MD5 of the data) raise an IntegrityError
    which is reported and ignored.
    """
    mysql = utils.get_mysql_cfg()
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    try:
        cursor = connection.cursor()
        sql = ("INSERT INTO `wm_raw_draw_data` ("
               "`user_id`, "
               "`data`, "
               "`md5data`, "
               "`creation_date`, "
               "`device_type`, "
               "`accepted_formula_id`, "
               "`secret`, "
               "`ip`, "
               "`segmentation`, "
               "`internal_id`, "
               "`description` "
               ") VALUES (%s, %s, MD5(data), "
               "%s, %s, %s, %s, %s, %s, %s, %s);")
        # Optional attributes fall back to sensible defaults.
        data = (hw.user_id,
                hw.raw_data_json,
                getattr(hw, 'creation_date', None),
                getattr(hw, 'device_type', ''),
                getattr(hw, 'formula_id', None),
                getattr(hw, 'secret', ''),
                getattr(hw, 'ip', None),
                str(getattr(hw, 'segmentation', '')),
                getattr(hw, 'internal_id', ''),
                getattr(hw, 'description', ''))
        cursor.execute(sql, data)
        connection.commit()
        # Store which strokes belong to which symbol.
        for symbol_id, strokes in zip(hw.symbol_stream, hw.segmentation):
            insert_symbol_mapping(cursor.lastrowid,
                                  symbol_id,
                                  hw.user_id,
                                  strokes)
        logging.info("Insert raw data.")
    except pymysql.err.IntegrityError as e:
        print("Error: {} (can probably be ignored)".format(e))
Insert recording hw into database .
59,368
def insert_symbol_mapping(raw_data_id, symbol_id, user_id, strokes):
    """Insert one accepted symbol/strokes mapping into
    ``wm_partial_answer``.

    Parameters
    ----------
    raw_data_id : int
    symbol_id : int
        ID of the symbol in the database.
    user_id : int
    strokes : list of int
        Stroke indices belonging to this symbol.
    """
    mysql = utils.get_mysql_cfg()
    connection = pymysql.connect(host=mysql['host'],
                                 user=mysql['user'],
                                 passwd=mysql['passwd'],
                                 db=mysql['db'],
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    sql = ("INSERT INTO `wm_partial_answer` "
           "(`recording_id`, `symbol_id`, `strokes`, `user_id`, "
           "`is_accepted`) "
           "VALUES (%s, %s, %s, %s, 1);")
    data = (raw_data_id,
            symbol_id,
            ",".join([str(stroke) for stroke in strokes]),
            user_id)
    cursor.execute(sql, data)
    connection.commit()
Insert data into wm_strokes_to_symbol .
59,369
def filter_label(label, replace_by_similar=True):
    """Replace labels that clash with existing LaTeX names by plain
    strings (or, when ``replace_by_similar``, by a similar command)."""
    bad_names = ['celsius', 'degree', 'ohm', 'venus', 'mars', 'astrosun',
                 'fullmoon', 'leftmoon', 'female', 'male', 'checked',
                 'diameter', 'sun', 'Bowtie', 'sqrt', 'cong', 'copyright',
                 'dag', 'parr', 'notin', 'dotsc', 'mathds', 'mathfrak']
    if not any(label[1:].startswith(bad) for bad in bad_names):
        return label
    if replace_by_similar and label == '\\dag':
        return '\\dagger'
    if replace_by_similar and label == '\\diameter':
        return '\\O'
    # Strip the leading backslash so the label becomes a plain string.
    return label[1:]
Some labels currently don t work together because of LaTeX naming clashes . Those will be replaced by simple strings .
59,370
def analyze_feature(raw_datasets, feature, basename="aspect_ratios"):
    """Apply ``feature`` to all recordings in ``raw_datasets`` and store
    the results in two files.

    One file (``basename + '.raw'``) stores the raw per-recording values;
    the other (``basename + '.csv'``) groups the results by symbol and
    stores mean, standard deviation and the symbol name as CSV.

    Parameters
    ----------
    raw_datasets : list of dictionaries
        Each dictionary has the keys 'is_in_testset', 'formula_id',
        'handwriting', 'formula_in_latex' and 'id'.
    feature : callable
        Feature algorithm; ``feature(data)[0]`` is the analyzed value.
    basename : str
        Basename for the two output files.
    """
    csv_file = dam.prepare_file(basename + '.csv')
    raw_file = dam.prepare_file(basename + '.raw')
    csv_file = open(csv_file, 'a')
    raw_file = open(raw_file, 'a')
    csv_file.write("label,mean,std\n")  # header
    raw_file.write("latex,raw_data_id,value\n")  # header
    print_data = []
    for _, datasets in dam.sort_by_formula_id(raw_datasets).items():
        values = []
        for data in datasets:
            value = feature(data)[0]
            values.append(value)
            raw_file.write("%s,%i,%0.2f\n" % (datasets[0].formula_in_latex,
                                              data.raw_data_id,
                                              value))
        label = filter_label(datasets[0].formula_in_latex)
        print_data.append((label, numpy.mean(values), numpy.std(values)))
    # Sort the symbols by mean feature value, descending.
    print_data = sorted(print_data, key=lambda n: n[1], reverse=True)
    for label, mean, std in print_data:
        csv_file.write("%s,%0.2f,%0.2f\n" % (label, mean, std))
    csv_file.close()
    raw_file.close()  # bug fix: raw_file was never closed
Apply feature to all recordings in raw_datasets . Store the results in two files . One file stores the raw result the other one groups the results by symbols and stores the mean standard deviation and the name of the symbol as a csv file .
59,371
def main(handwriting_datasets_file, analyze_features):
    """Start the creation of the wanted metrics.

    Parameters
    ----------
    handwriting_datasets_file : str
        Path to a pickled file with handwriting datasets.
    analyze_features : bool
        Also create the per-feature analyzation files.
    """
    logging.info("Start loading data '%s' ...", handwriting_datasets_file)
    # Bug fix: open the pickle in binary mode and close the handle
    # deterministically (the file object was previously leaked).
    with open(handwriting_datasets_file, 'rb') as handle:
        loaded = pickle.load(handle)
    raw_datasets = loaded['handwriting_datasets']
    logging.info("%i datasets loaded.", len(raw_datasets))
    logging.info("Start analyzing...")
    if analyze_features:
        featurelist = [(features.AspectRatio(), "aspect_ratio.csv"),
                       (features.ReCurvature(1), "re_curvature.csv"),
                       (features.Height(), "height.csv"),
                       (features.Width(), "width.csv"),
                       (features.Time(), "time.csv"),
                       (features.Ink(), "ink.csv"),
                       (features.StrokeCount(), "stroke-count.csv")]
        for feat, filename in featurelist:
            logging.info("create %s...", filename)
            analyze_feature(raw_datasets, feat, filename)
    # Run the user-configured data analyzation metrics, if any.
    cfg = utils.get_project_configuration()
    if 'data_analyzation_queue' in cfg:
        metrics = dam.get_metrics(cfg['data_analyzation_queue'])
        for metric in metrics:
            logging.info("Start metric %s...", str(metric))
            metric(raw_datasets)
    else:
        logging.info("No 'data_analyzation_queue' in ~/.hwrtrc")
Start the creation of the wanted metric .
59,372
def remove_matching_braces(latex):
    """If ``latex`` is surrounded by one pair of matching braces, remove
    them; they are not necessary."""
    if not (latex.startswith('{') and latex.endswith('}')):
        return latex
    depth = 1
    for char in latex[1:-1]:
        if char == '{':
            depth += 1
        elif char == '}':
            depth -= 1
        if depth == 0:
            # The opening brace closed before the end, so the outer pair
            # does not enclose the whole string.
            return latex
    return latex[1:-1]
If latex is surrounded by matching braces remove them . They are not necessary .
59,373
def read_folder(folder):
    """Read all ``*.ink`` files of ``folder`` and return a list of
    HandwrittenData objects."""
    ink_files = glob.glob(os.path.join(folder, '*.ink'))
    return [parse_scg_ink_file(filename) for filename in ink_files]
Read all files of folder and return a list of HandwrittenData objects .
59,374
def _get_colors ( segmentation ) : symbol_count = len ( segmentation ) num_colors = symbol_count color_array = [ "#000000" , "#FFFF00" , "#1CE6FF" , "#FF34FF" , "#FF4A46" , "#008941" , "#006FA6" , "#A30059" , "#FFDBE5" , "#7A4900" , "#0000A6" , "#63FFAC" , "#B79762" , "#004D43" , "#8FB0FF" , "#997D87" , "#5A0007" , "#809693" , "#FEFFE6" , "#1B4400" , "#4FC601" , "#3B5DFF" , "#4A3B53" , "#FF2F80" , "#61615A" , "#BA0900" , "#6B7900" , "#00C2A0" , "#FFAA92" , "#FF90C9" , "#B903AA" , "#D16100" , "#DDEFFF" , "#000035" , "#7B4F4B" , "#A1C299" , "#300018" , "#0AA6D8" , "#013349" , "#00846F" , "#372101" , "#FFB500" , "#C2FFED" , "#A079BF" , "#CC0744" , "#C0B9B2" , "#C2FF99" , "#001E09" , "#00489C" , "#6F0062" , "#0CBD66" , "#EEC3FF" , "#456D75" , "#B77B68" , "#7A87A1" , "#788D66" , "#885578" , "#FAD09F" , "#FF8A9A" , "#D157A0" , "#BEC459" , "#456648" , "#0086ED" , "#886F4C" , "#34362D" , "#B4A8BD" , "#00A6AA" , "#452C2C" , "#636375" , "#A3C8C9" , "#FF913F" , "#938A81" , "#575329" , "#00FECF" , "#B05B6F" , "#8CD0FF" , "#3B9700" , "#04F757" , "#C8A1A1" , "#1E6E00" , "#7900D7" , "#A77500" , "#6367A9" , "#A05837" , "#6B002C" , "#772600" , "#D790FF" , "#9B9700" , "#549E79" , "#FFF69F" , "#201625" , "#72418F" , "#BC23FF" , "#99ADC0" , "#3A2465" , "#922329" , "#5B4534" , "#FDE8DC" , "#404E55" , "#0089A3" , "#CB7E98" , "#A4E804" , "#324E72" , "#6A3A4C" , "#83AB58" , "#001C1E" , "#D1F7CE" , "#004B28" , "#C8D0F6" , "#A3A489" , "#806C66" , "#222800" , "#BF5650" , "#E83000" , "#66796D" , "#DA007C" , "#FF1A59" , "#8ADBB4" , "#1E0200" , "#5B4E51" , "#C895C5" , "#320033" , "#FF6832" , "#66E1D3" , "#CFCDAC" , "#D0AC94" , "#7ED379" , "#012C58" ] new_array = color_array [ : ] while len ( new_array ) <= num_colors : new_array += color_array return new_array [ : num_colors ]
Get a list of colors which is as long as the segmentation .
59,375
def fix_times(self):
    """Some recordings have wrong times. Fix them so that nothing breaks
    after loading a handwritten recording.

    Points with ``time is None`` get the time of the last valid point
    seen (or the overall minimum time for leading Nones).
    """
    pointlist = self.get_pointlist()
    times = [point['time'] for stroke in pointlist for point in stroke]
    # NOTE(review): min() over a list containing None fails on Python 3 -
    # confirm all-None times cannot occur here.
    times_min = max(min(times), 0)  # make sure this is not None
    for i, stroke in enumerate(pointlist):
        for j, point in enumerate(stroke):
            if point['time'] is None:
                pointlist[i][j]['time'] = times_min
            else:
                times_min = point['time']
    self.raw_data_json = json.dumps(pointlist)
Some recordings have wrong times . Fix them so that nothing after loading a handwritten recording breaks .
59,376
def get_pointlist(self):
    """Get a list of strokes from the JSON raw data string; each stroke
    is a list of point dictionaries.

    Raises
    ------
    Exception
        The original parse error when the JSON cannot be decoded.
    """
    try:
        pointlist = json.loads(self.raw_data_json)
    except Exception as inst:
        logging.debug("pointStrokeList: strokelistP")
        logging.debug(self.raw_data_json)
        logging.debug("didn't work")
        raise inst
    if len(pointlist) == 0:
        # An empty pointlist hints at broken data in the database.
        logging.warning("Pointlist was empty. Search for '" +
                        self.raw_data_json + "' in `wm_raw_draw_data`.")
    return pointlist
Get a list of lists of tuples from JSON raw data string . Those lists represent strokes with control points .
59,377
def get_sorted_pointlist(self):
    """Return the pointlist with points sorted by time within each stroke
    and strokes sorted by the time of their first point."""
    strokes = self.get_pointlist()
    strokes = [sorted(stroke, key=lambda p: p['time']) for stroke in strokes]
    return sorted(strokes, key=lambda stroke: stroke[0]['time'])
Make sure that the points and strokes are in order .
59,378
def set_pointlist(self, pointlist):
    """Overwrite the pointlist with ``pointlist`` (a non-empty list of
    strokes) and re-serialize it to ``raw_data_json``."""
    assert type(pointlist) is list, "pointlist is not of type list, but %r" % type(pointlist)
    assert len(pointlist) >= 1, "The pointlist of formula_id %i is %s" % (self.formula_id, self.get_pointlist())
    self.raw_data_json = json.dumps(pointlist)
Overwrite pointlist .
59,379
def get_bounding_box(self):
    """Get the bounding box (in space and time) of the pointlist.

    Returns
    -------
    dict
        With keys 'minx', 'maxx', 'miny', 'maxy', 'mint', 'maxt'.
    """
    pointlist = self.get_pointlist()
    # Initialize with the first point of the first stroke.
    minx, maxx = pointlist[0][0]["x"], pointlist[0][0]["x"]
    miny, maxy = pointlist[0][0]["y"], pointlist[0][0]["y"]
    mint, maxt = pointlist[0][0]["time"], pointlist[0][0]["time"]
    for stroke in pointlist:
        for p in stroke:
            minx, maxx = min(minx, p["x"]), max(maxx, p["x"])
            miny, maxy = min(miny, p["y"]), max(maxy, p["y"])
            mint, maxt = min(mint, p["time"]), max(maxt, p["time"])
    return {"minx": minx, "maxx": maxx,
            "miny": miny, "maxy": maxy,
            "mint": mint, "maxt": maxt}
Get the bounding box of a pointlist .
59,380
def get_bitmap(self, time=None, size=32, store_path=None):
    """Get a bitmap of the recording at a given instance of time.

    Parameters
    ----------
    time : int or None
        Only draw segments whose both endpoints were recorded at or
        before ``time``; None means the complete recording.
    size : int
        Width and height of the bitmap in pixels.
    store_path : str or None
        If given, save the image there.

    Returns
    -------
    numpy array
        Grayscale image of shape (size, size).
    """
    img = Image.new('L', (size, size), 'black')
    draw = ImageDraw.Draw(img, 'L')
    bb = self.get_bounding_box()
    for stroke in self.get_sorted_pointlist():
        for p1, p2 in zip(stroke, stroke[1:]):
            if time is not None and (p1['time'] > time or p2['time'] > time):
                continue
            # Scale coordinates into the size x size raster;
            # max(..., 1) guards against zero-width/height recordings.
            y_from = int((-bb['miny'] + p1['y']) / max(self.get_height(), 1) * size)
            x_from = int((-bb['minx'] + p1['x']) / max(self.get_width(), 1) * size)
            y_to = int((-bb['miny'] + p2['y']) / max(self.get_height(), 1) * size)
            x_to = int((-bb['minx'] + p2['x']) / max(self.get_width(), 1) * size)
            draw.line([x_from, y_from, x_to, y_to], fill='#ffffff', width=1)
    del draw
    if store_path is not None:
        img.save(store_path)
    return numpy.asarray(img)
Get a bitmap of the object at a given instance of time . If time is None then the bitmap is generated for the last point in time .
59,381
def preprocessing(self, algorithms):
    """Apply each preprocessing algorithm in ``algorithms`` to this
    recording, in order.

    Parameters
    ----------
    algorithms : list
        Callables that modify this object in place.
    """
    assert type(algorithms) is list
    for preprocessing_step in algorithms:
        preprocessing_step(self)
Apply preprocessing algorithms .
59,382
def feature_extraction(self, algorithms):
    """Apply each feature algorithm to this recording and return the
    concatenated list of feature values.

    Each algorithm must return exactly ``algorithm.get_dimension()``
    values.
    """
    assert type(algorithms) is list
    features = []
    for algorithm in algorithms:
        new_features = algorithm(self)
        assert len(new_features) == algorithm.get_dimension(), "Expected %i features from algorithm %s, got %i features" % (algorithm.get_dimension(), str(algorithm), len(new_features))
        features += new_features
    return features
Get a list of features .
59,383
def show(self):
    """Show the recording graphically in a new pop-up window."""
    # prevent the following error:
    # '_tkinter.TclError: no display name and no $DISPLAY environment
    #  variable'
    import matplotlib.pyplot as plt
    pointlist = self.get_pointlist()
    if 'pen_down' in pointlist[0][0]:
        # Recordings with explicit pen_down information come as a single
        # point sequence; split it into strokes at pen_down state changes.
        assert len(pointlist) > 1, "Lenght of pointlist was %i. Got: %s" % (len(pointlist), pointlist)
        new_pointlist = []
        last_pendown_state = None
        stroke = []
        for point in pointlist[0]:
            if last_pendown_state is None:
                last_pendown_state = point['pen_down']
            if point['pen_down'] != last_pendown_state:
                new_pointlist.append(stroke)
                last_pendown_state = point['pen_down']
                stroke = []
            else:
                stroke.append(point)
        new_pointlist.append(stroke)  # add the last stroke
        pointlist = new_pointlist
    _, ax = plt.subplots()
    ax.set_title("Raw data id: %s, "
                 "Formula_id: %s" % (str(self.raw_data_id),
                                     str(self.formula_id)))
    # One color per segmented symbol.
    colors = _get_colors(self.segmentation)
    for symbols, color in zip(self.segmentation, colors):
        for stroke_index in symbols:
            stroke = pointlist[stroke_index]
            xs, ys = [], []
            for p in stroke:
                xs.append(p['x'])
                ys.append(p['y'])
            # Pen-up movements are drawn with 'x' markers.
            if "pen_down" in stroke[0] and stroke[0]["pen_down"] is False:
                plt.plot(xs, ys, '-x', color=color)
            else:
                plt.plot(xs, ys, '-o', color=color)
    plt.gca().invert_yaxis()  # image coordinates: y grows downwards
    ax.set_aspect('equal')
    plt.show()
Show the data graphically in a new pop - up window .
59,384
def count_single_dots(self):
    """Count the strokes of this recording that consist of only a single
    point."""
    return sum(1 for stroke in self.get_pointlist() if len(stroke) == 1)
Count all strokes of this recording that have only a single dot .
59,385
def to_single_symbol_list(self):
    """Convert this HandwrittenData object into a list of HandwrittenData
    objects, one per segmented symbol.

    Returns
    -------
    list of HandwrittenData objects
    """
    # symbol_stream gives the label per symbol; default to None labels.
    symbol_stream = getattr(self,
                            'symbol_stream',
                            [None for symbol in self.segmentation])
    single_symbols = []
    pointlist = self.get_sorted_pointlist()
    for stroke_indices, label in zip(self.segmentation, symbol_stream):
        strokes = []
        for stroke_index in stroke_indices:
            strokes.append(pointlist[stroke_index])
        single_symbols.append(HandwrittenData(json.dumps(strokes),
                                              formula_id=label))
    return single_symbols
Convert this HandwrittenData object into a list of HandwrittenData objects . Each element of the list is a single symbol .
59,386
def get_git_postversion(addon_dir):
    """Return the addon version number, with a developmental version
    increment if there were git commits in ``addon_dir`` after the last
    version change.

    Uncommitted changes count as one additional commit. If the manifest
    version changed in the most recent commit, the version is returned
    as-is; otherwise ``.99.devN`` (or ``.dev1`` for uncommitted-only
    changes) is appended.
    """
    addon_dir = os.path.realpath(addon_dir)
    last_version = read_manifest(addon_dir).get('version', '0.0.0')
    last_version_parsed = parse_version(last_version)
    if not is_git_controlled(addon_dir):
        return last_version
    if get_git_uncommitted(addon_dir):
        uncommitted = True
        count = 1
    else:
        uncommitted = False
        count = 0
    last_sha = None
    git_root = get_git_root(addon_dir)
    # Walk the history until the manifest version differs from the
    # current one, counting commits since the version change.
    for sha in git_log_iterator(addon_dir):
        try:
            manifest = read_manifest_from_sha(sha, addon_dir, git_root)
        except NoManifestFound:
            break
        version = manifest.get('version', '0.0.0')
        version_parsed = parse_version(version)
        if version_parsed != last_version_parsed:
            break
        if last_sha is None:
            last_sha = sha
        else:
            count += 1
    if not count:
        return last_version
    if last_sha:
        return last_version + ".99.dev%s" % count
    if uncommitted:
        return last_version + ".dev1"
    return last_version
return the addon version number with a developmental version increment if there were git commits in the addon_dir after the last version change .
59,387
def _get_odoo_version_info(addons_dir, odoo_version_override=None):
    """Detect the Odoo version from an addons directory.

    All installable addons must target the same Odoo version.

    Raises
    ------
    DistutilsSetupError
        If the installable addons disagree on the Odoo version.
    """
    odoo_version_info = None
    addons = os.listdir(addons_dir)
    for addon in addons:
        addon_dir = os.path.join(addons_dir, addon)
        if is_installable_addon(addon_dir):
            manifest = read_manifest(addon_dir)
            _, _, addon_odoo_version_info = _get_version(
                addon_dir, manifest, odoo_version_override,
                git_post_version=False)
            if odoo_version_info is not None and \
                    odoo_version_info != addon_odoo_version_info:
                raise DistutilsSetupError("Not all addons are for the same "
                                          "odoo version in %s (error detected "
                                          "in %s)" % (addons_dir, addon))
            odoo_version_info = addon_odoo_version_info
    return odoo_version_info
Detect Odoo version from an addons directory
59,388
def _get_version(addon_dir, manifest, odoo_version_override=None, git_post_version=True):
    """Get addon version information from an addon directory.

    Returns
    -------
    tuple
        (version, odoo_version, odoo_version_info).

    Raises
    ------
    DistutilsSetupError
        If the version is malformed or targets an unsupported Odoo
        series.
    """
    version = manifest.get('version')
    if not version:
        warn("No version in manifest in %s" % addon_dir)
        version = '0.0.0'
    if not odoo_version_override:
        # The Odoo series is encoded in the first two version components.
        if len(version.split('.')) < 5:
            raise DistutilsSetupError("Version in manifest must have at least "
                                      "5 components and start with "
                                      "the Odoo series number in %s" % addon_dir)
        odoo_version = '.'.join(version.split('.')[:2])
    else:
        odoo_version = odoo_version_override
    if odoo_version not in ODOO_VERSION_INFO:
        raise DistutilsSetupError("Unsupported odoo version '%s' in %s" %
                                  (odoo_version, addon_dir))
    odoo_version_info = ODOO_VERSION_INFO[odoo_version]
    if git_post_version:
        version = get_git_postversion(addon_dir)
    return version, odoo_version, odoo_version_info
Get addon version information from an addon directory
59,389
def get_install_requires_odoo_addon(addon_dir, no_depends=[], depends_override={}, external_dependencies_override={}, odoo_version_override=None):
    """Get the list of requirements for a single addon.

    Parameters
    ----------
    addon_dir : str
    no_depends : list
        Addon names to exclude from the dependencies.
    depends_override : dict
        Maps addon names to requirement strings overriding the default.
    external_dependencies_override : dict
        Maps 'python' external dependencies to requirement overrides.
    odoo_version_override : str or None

    NOTE(review): the mutable default arguments are only passed through
    here; this relies on downstream code not mutating them - confirm.
    """
    manifest = read_manifest(addon_dir)
    _, _, odoo_version_info = _get_version(addon_dir,
                                           manifest,
                                           odoo_version_override,
                                           git_post_version=False)
    return _get_install_requires(odoo_version_info,
                                 manifest,
                                 no_depends,
                                 depends_override,
                                 external_dependencies_override)
Get the list of requirements for an addon
59,390
def get_install_requires_odoo_addons(addons_dir, depends_override={}, external_dependencies_override={}, odoo_version_override=None):
    """Get the sorted list of requirements for a directory containing
    addons.

    Dependencies between addons inside the directory are excluded, since
    they are part of the same distribution.
    """
    addons = os.listdir(addons_dir)
    installable_dirs = [os.path.join(addons_dir, name)
                        for name in addons
                        if is_installable_addon(os.path.join(addons_dir, name))]
    install_requires = set()
    for addon_dir in installable_dirs:
        install_requires.update(get_install_requires_odoo_addon(
            addon_dir,
            no_depends=addons,
            depends_override=depends_override,
            external_dependencies_override=external_dependencies_override,
            odoo_version_override=odoo_version_override,
        ))
    return sorted(install_requires)
Get the list of requirements for a directory containing addons
59,391
def make_declarative_base(self, metadata=None):
    """Override the parent implementation with alchy's
    ``make_declarative_base``."""
    base = make_declarative_base(self.session,
                                 Model=self.Model,
                                 metadata=metadata)
    return base
Override parent function with alchy s
59,392
def prep_doc(self, doc_obj):
    """Validate and prepare ``doc_obj`` for persistence.

    For each declared property this validates the raw value, converts it
    to its Python value, checks unique indexes, and stores the DB value.
    Useful for save and backup functions.

    Returns
    -------
    dict
        The prepared document with ``_doc_type`` set.
    """
    doc = doc_obj._data.copy()
    for key, prop in list(doc_obj._base_properties.items()):
        prop.validate(doc.get(key), key)
        raw_value = prop.get_python_value(doc.get(key))
        if prop.unique:
            self.check_unique(doc_obj, key, raw_value)
        value = prop.get_db_value(raw_value)
        doc[key] = value
    doc['_doc_type'] = get_doc_type(doc_obj.__class__)
    return doc
This method Validates gets the Python value checks unique indexes gets the db value and then returns the prepared doc dict object . Useful for save and backup functions .
59,393
def apply_zappa_settings(zappa_obj, zappa_settings, environment):
    """Load Zappa settings, set defaults if needed, and apply them to the
    Zappa object.

    Parameters
    ----------
    zappa_obj : Zappa instance
    zappa_settings : file-like
        Open file with the JSON settings.
    environment : str
        Which environment's settings to use.

    Returns
    -------
    dict
        The chosen environment's settings with defaults filled in.

    Raises
    ------
    SettingsError
        If the referenced settings_file does not exist.
    """
    settings_all = json.load(zappa_settings)
    settings = settings_all[environment]
    # Fill in any missing keys with the defaults.
    for key, value in DEFAULT_SETTINGS.items():
        settings[key] = settings.get(key, value)
    if '~' in settings['settings_file']:
        settings['settings_file'] = settings['settings_file'].replace('~', os.path.expanduser('~'))
    if not os.path.isfile(settings['settings_file']):
        raise SettingsError("Please make sure your settings_file "
                            "is properly defined in {0}.".format(zappa_settings))
    # Copy the custom settings onto the Zappa object itself.
    for setting in CUSTOM_SETTINGS:
        if setting in settings:
            setattr(zappa_obj, setting, settings[setting])
    return settings
Load Zappa settings set defaults if needed and apply to the Zappa object
59,394
def deploy(environment, zappa_settings):
    """Package, create, and deploy to Lambda.

    :param environment: name of the environment to deploy
    :param zappa_settings: path/handle of the Zappa settings to load
    """
    print(("Deploying " + environment))
    zappa, settings, lambda_name, zip_path = _package(environment,
                                                     zappa_settings)
    s3_bucket_name = settings['s3_bucket']
    try:
        zappa.load_credentials()
        zappa.create_iam_roles()
        zappa.upload_to_s3(zip_path, s3_bucket_name)
        lambda_arn = zappa.create_lambda_function(
            bucket=s3_bucket_name,
            s3_key=zip_path,
            function_name=lambda_name,
            handler='handler.lambda_handler',
            vpc_config=settings['vpc_config'],
            memory_size=settings['memory_size'])
        api_id = zappa.create_api_gateway_routes(lambda_arn, lambda_name)
        endpoint_url = zappa.deploy_api_gateway(api_id, environment)
        zappa.remove_from_s3(zip_path, s3_bucket_name)
        if settings['touch']:
            # Warm the new endpoint so the first real request is fast.
            requests.get(endpoint_url)
    finally:
        # Best-effort cleanup of the local zip. Narrowed from a bare
        # ``except:`` (which also swallowed KeyboardInterrupt/SystemExit)
        # to filesystem errors only.
        try:
            if settings['delete_zip']:
                os.remove(zip_path)
        except OSError:
            print("WARNING: Manual cleanup of the zip might be needed.")
    print(("Your Zappa deployment is live!: " + endpoint_url))
Package , create , and deploy to Lambda .
59,395
def update(environment, zappa_settings):
    """Update an existing deployment.

    :param environment: name of the environment to update
    :param zappa_settings: path/handle of the Zappa settings to load
    """
    print(("Updating " + environment))
    zappa, settings, lambda_name, zip_path = _package(environment,
                                                     zappa_settings)
    s3_bucket_name = settings['s3_bucket']
    try:
        zappa.load_credentials()
        zappa.create_iam_roles()
        zappa.upload_to_s3(zip_path, s3_bucket_name)
        lambda_arn = zappa.update_lambda_function(s3_bucket_name, zip_path,
                                                  lambda_name)
        zappa.remove_from_s3(zip_path, s3_bucket_name)
    finally:
        # Best-effort cleanup of the local zip. Narrowed from a bare
        # ``except:`` (which also swallowed KeyboardInterrupt/SystemExit)
        # to filesystem errors only.
        try:
            if settings['delete_zip']:
                os.remove(zip_path)
        except OSError:
            print("WARNING: Manual cleanup of the zip might be needed.")
    print("Your updated Zappa deployment is live!")
Update an existing deployment .
59,396
def lambda_handler(event, context, settings_name="zappa_settings"):
    """An AWS Lambda function which parses specific API Gateway input into
    a WSGI request, feeds it to Flask, processes the Flask response, and
    returns that back to the API Gateway.

    Returns a dict with ``Content``, response headers, and ``Status`` on
    success; error and redirect statuses are signalled to API Gateway by
    raising (see inline notes). If the event has no ``method`` key, the
    function falls through and implicitly returns None.
    """
    # Resolve the Flask app named in the settings module and wrap it with
    # Zappa's WSGI middleware.
    settings = importlib.import_module(settings_name)
    app_module = importlib.import_module(settings.APP_MODULE)
    app = getattr(app_module, settings.APP_OBJECT)
    app.config.from_object('zappa_settings')
    app.wsgi_app = ZappaWSGIMiddleware(app.wsgi_app)
    if event.get('method', None):
        # Debugging aid: when EVENT_ECHO is enabled and the request carries
        # an 'event_echo' param value, echo the raw event/context back.
        event_echo = getattr(settings, "EVENT_ECHO", True)
        if event_echo:
            if 'event_echo' in list(event['params'].values()):
                return {'Content': str(event) + '\n' + str(context),
                        'Status': 200}
        environ = create_wsgi_request(event,
                                      script_name=settings.SCRIPT_NAME,
                                      trailing_slash=False)
        # API Gateway endpoints are always served over HTTPS.
        environ['wsgi.url_scheme'] = 'https'
        response = Response.from_app(app, environ)
        zappa_returndict = dict()
        if response.data:
            zappa_returndict['Content'] = response.data
        for (header_name, header_value) in response.headers:
            zappa_returndict[header_name] = header_value
        zappa_returndict['Status'] = response.status_code
        if response.status_code in [400, 401, 403, 404, 500]:
            # Error statuses are signalled by raising; the body is
            # base64-encoded so it survives inside the exception message.
            # NOTE(review): base64.b64encode requires bytes on Python 3 —
            # this looks Python-2 era; confirm before running on py3.
            content = ("<!DOCTYPE html>" + str(response.status_code)
                       + response.data)
            b64_content = base64.b64encode(content)
            raise Exception(b64_content)
        elif response.status_code in [301, 302]:
            # Redirects are likewise signalled by raising, with the
            # Location made host-relative when it points at this deployment.
            location = response.location
            hostname = 'https://' + environ['HTTP_HOST']
            if location.startswith(hostname):
                location = location[len(hostname):]
            raise Exception(location)
        else:
            return zappa_returndict
An AWS Lambda function which parses specific API Gateway input into a WSGI request , feeds it to Flask , processes the Flask response , and returns that back to the API Gateway .
59,397
def get_context_data(self, **kwargs):
    """Get the context for this view.

    Pops ``object_list`` (and optionally ``page_template``) from kwargs,
    derives a page template when none is given, and stores it both in the
    returned context and on the view instance.
    """
    object_list = kwargs.pop('object_list')
    template = kwargs.pop('page_template', None)
    alias = self.get_context_object_name(object_list)
    context = {'object_list': object_list, 'view': self}
    context.update(kwargs)
    if alias is not None:
        context[alias] = object_list
    if template is None:
        # Only model-backed querysets can auto-derive a page template.
        if not hasattr(object_list, 'model'):
            raise ImproperlyConfigured('AjaxListView requires a page_template')
        template = self.get_page_template(**kwargs)
    self.page_template = template
    context['page_template'] = template
    return context
Get the context for this view .
59,398
def clean_var(text):
    """Turn text into a valid python classname or variable."""
    # Strip invalid characters everywhere, then invalid leading characters.
    for pattern in (re_invalid_var, re_invalid_start):
        text = pattern.sub('', text)
    return text
Turn text into a valid python classname or variable
59,399
def full_tasktrace(self):
    """List of all failed tasks caused by this and all previous errors."""
    prior = self.prev_error
    # Chain the earlier error's trace in front of this one's, when present.
    return prior.tasktrace + self.tasktrace if prior else self.tasktrace
List of all failed tasks caused by this and all previous errors .