idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
49,600
def _print_MatMul(self, expr):
    """Matrix multiplication printer.

    Unlike the stock sympy printer this type-checks each adjacent pair of
    factors: ``).dot(`` is emitted only between two matrix expressions,
    while scalar factors are joined with ``*``.
    """
    from sympy import MatrixExpr

    # Choose the joining operator for every adjacent (right, left) pair.
    links = [
        ').dot(' if isinstance(right, MatrixExpr) and isinstance(left, MatrixExpr)
        else '*'
        for right, left in zip(expr.args[1:], expr.args[:-1])
    ]
    printouts = [self._print(arg) for arg in expr.args]
    pieces = [printouts[0]]
    for link, printout in zip(links, printouts[1:]):
        pieces.append(link)
        pieces.append(printout)
    return '({0})'.format(''.join(pieces))
Matrix multiplication printer . The sympy one turns everything into a dot product without type - checking .
49,601
def execute(self, **kwargs):
    """Execute the interactive guessing procedure.

    The mandatory ``show`` keyword controls whether the matplotlib window
    is opened; all remaining kwargs are forwarded to ``plt.show``.
    """
    if kwargs.pop('show'):
        plt.show(**kwargs)
Execute the interactive guessing procedure .
49,602
def _set_up_sliders(self):
    """Creates a slider for every parameter of the model."""
    i = 0.05  # vertical position (figure coords) of the first slider axes
    self._sliders = {}
    for param in self.model.params:
        # Fixed parameters get a red background as a visual cue.
        if not param.fixed:
            axbg = 'lightgoldenrodyellow'
        else:
            axbg = 'red'
        ax = self.fig.add_axes((0.162, i, 0.68, 0.03), facecolor=axbg, label=param)
        val = param.value
        # Fall back to a [0, 2 * value] slider range when no bounds are set.
        if not hasattr(param, 'min') or param.min is None:
            minimum = 0
        else:
            minimum = param.min
        if not hasattr(param, 'max') or param.max is None:
            maximum = 2 * val
        else:
            maximum = param.max
        slid = plt.Slider(ax, param, minimum, maximum, valinit=val, valfmt='% 5.4g')
        self._sliders[param] = slid
        slid.on_changed(self._update_plot)
        i += 0.05  # stack the next slider above this one
Creates a slider for every parameter.
49,603
def _update_plot(self, _):
    """Callback: push the current slider values into the model parameters
    and redraw every projection."""
    for param in self.model.params:
        param.value = self._sliders[param].val
    for x_var, y_var in self._projections:
        self._update_specific_plot(x_var, y_var)
Callback to redraw the plot to reflect the new parameter values .
49,604
def _eval_model(self):
    """Evaluate the model on the stored grid with the current parameter
    values."""
    arguments = self._x_grid.copy()
    for param in self.model.params:
        arguments[param] = param.value
    return self.model(**key2str(arguments))
Convenience method for evaluating the model with the current parameters
49,605
def plot_data(self, proj, ax):
    """Scatter-plot the original data for the given (x, y) projection."""
    x_var, y_var = proj
    x_data = self.ig.independent_data[x_var]
    y_data = self.ig.dependent_data[y_var]
    ax.scatter(x_data, y_data, c='b')
Creates and plots a scatter plot of the original data .
49,606
def plot_data(self, proj, ax):
    """Creates and plots the contour plot of the original data.

    Done by evaluating a gaussian kernel density estimate of the
    projected data points on a grid.
    """
    x, y = proj
    x_data = self.ig.independent_data[x]
    y_data = self.ig.dependent_data[y]
    # gaussian_kde expects variables as rows, observations as columns.
    projected_data = np.column_stack((x_data, y_data)).T
    kde = gaussian_kde(projected_data)
    xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y])
    x_grid = xx.flatten()
    y_grid = yy.flatten()
    contour_grid = kde.pdf(np.column_stack((x_grid, y_grid)).T)
    if self.ig.log_contour:
        contour_grid = np.log(contour_grid)
        vmin = -7  # floor of the log-density colour scale
    else:
        vmin = None
    ax.contourf(xx, yy, contour_grid.reshape(xx.shape), 50, vmin=vmin, cmap='Blues')
Creates and plots the contour plot of the original data. This is done by evaluating the density of the projected data points on a grid.
49,607
def BivariateGaussian(x, y, mu_x, mu_y, sig_x, sig_y, rho):
    """Bivariate Gaussian pdf with means ``mu_*``, standard deviations
    ``sig_*`` and correlation coefficient ``rho``, as a sympy expression."""
    dx = x - mu_x
    dy = y - mu_y
    one_minus_rho2 = 1 - rho ** 2
    quad = dx ** 2 / sig_x ** 2 + dy ** 2 / sig_y ** 2 - 2 * rho * dx * dy / (sig_x * sig_y)
    exponent = -quad / (2 * one_minus_rho2)
    normalization = 2 * sympy.pi * sig_x * sig_y * sympy.sqrt(one_minus_rho2)
    return sympy.exp(exponent) / normalization
Bivariate Gaussian pdf .
49,608
def r_squared(model, fit_result, data):
    """Calculates the coefficient of determination, R^2, for the fit.

    :param model: model the fit was performed with.
    :param fit_result: result object whose ``params`` evaluate the model.
    :param data: mapping of variables to data arrays; dependent entries
        may be None, in which case they are skipped.
    """
    # Dependent data, in the model's component order; None entries kept.
    y_is = [data[var] for var in model if var in data]
    # Independent data, selected by matching names in the model signature.
    x_is = [value for var, value in data.items() if var.name in model.__signature__.parameters]
    y_bars = [np.mean(y_i) if y_i is not None else None for y_i in y_is]
    f_is = model(*x_is, **fit_result.params)
    # Sum residuals / total variation over all components with data.
    SS_res = np.sum([np.sum((y_i - f_i) ** 2) for y_i, f_i in zip(y_is, f_is) if y_i is not None])
    SS_tot = np.sum([np.sum((y_i - y_bar) ** 2) for y_i, y_bar in zip(y_is, y_bars) if y_i is not None])
    return 1 - SS_res / SS_tot
Calculates the coefficient of determination R^2 for the fit .
49,609
def _partial_subs(func, func2vars):
    """Substitution that is safe for Derivative expressions.

    Substitutes inside the derivative's expression and variables
    separately, then rebuilds the derivative unevaluated via
    ``_partial_diff``; plain expressions are substituted directly.
    """
    if not isinstance(func, sympy.Derivative):
        return func.xreplace(func2vars)
    new_expr = func.expr.xreplace(func2vars)
    new_vars = tuple(v.xreplace(func2vars) for v in func.variables)
    return _partial_diff(new_expr, *new_vars)
Partial - bug proof substitution . Works by making the substitutions on the expression inside the derivative first and then rebuilding the derivative safely without evaluating it using _partial_diff .
49,610
def _init_from_dict(self, model_dict):
    """Initiate self from a model_dict to make sure attributes such as
    vars and params are available.

    Sorts the model components alphabetically, topologically orders the
    symbols, and partitions them into independent vars, dependent vars,
    interdependent vars and parameters.

    :param model_dict: dict of ``{dependent_var: expression}`` pairs.
    :raises ModelError: if a Parameter ends up in the role of a Variable.
    """
    sort_func = lambda symbol: symbol.name
    self.model_dict = OrderedDict(
        sorted(model_dict.items(), key=lambda i: sort_func(i[0]))
    )
    # Topological sort by symbol interdependency: first group has no
    # incoming edges (independent), last group are the dependent vars.
    ordered = list(toposort(self.connectivity_mapping))
    independent = sorted(ordered.pop(0), key=sort_func)
    self.dependent_vars = sorted(ordered.pop(-1), key=sort_func)
    self.interdependent_vars = sorted(
        [item for items in ordered for item in items], key=sort_func
    )
    self.independent_vars = [
        s for s in independent if not isinstance(s, Parameter) and s not in self
    ]
    self.params = [s for s in independent if isinstance(s, Parameter)]
    # Explicit validation instead of `assert`: asserts are silently
    # stripped when Python runs with -O, which would skip this check.
    if any(isinstance(var, Parameter) for var in self.dependent_vars) or \
            any(isinstance(var, Parameter) for var in self.interdependent_vars):
        raise ModelError('`Parameter`\'s can not feature in the role '
                         'of `Variable`')
    self.sigmas = {
        var: Variable(name='sigma_{}'.format(var.name))
        for var in self.dependent_vars
    }
Initiate self from a model_dict to make sure attributes such as vars and params are available.
49,611
def function_dict(self):
    """Equivalent to ``self.model_dict`` but keyed by the Function
    versions of the variables where applicable; ordered by evaluation
    order, not alphabetically."""
    func_dict = OrderedDict()
    for var, func in self.vars_as_functions.items():
        func_dict[func] = self.model_dict[var].xreplace(self.vars_as_functions)
    return func_dict
Equivalent to self . model_dict but with all variables replaced by functions if applicable . Sorted by the evaluation order according to self . ordered_symbols not alphabetical like self . model_dict !
49,612
def _model_sanity(model):
    """Warn when a model defined with Derivatives probably should have
    been an ODEModel."""
    if isinstance(model, (ODEModel, BaseNumericalModel)):
        return
    for var, expr in model.items():
        involves_derivative = (
            isinstance(var, sympy.Derivative) or expr.has(sympy.Derivative)
        )
        if involves_derivative:
            warnings.warn(RuntimeWarning(
                'The model contains derivatives in its definition. '
                'Are you sure you don\'t mean to use `symfit.ODEModel`?'
            ))
Perform some basic sanity checking on the model to warn users when they might be trying something ill advised .
49,613
def data_shapes(self):
    """Return ``(independent_shapes, dependent_shapes)``: the unique
    shapes of the non-None data arrays, per variable type."""
    independent_shapes = [
        data.shape for data in self.independent_data.values() if data is not None
    ]
    dependent_shapes = [
        data.shape for data in self.dependent_data.values() if data is not None
    ]
    return list(set(independent_shapes)), list(set(dependent_shapes))
Returns the shape of the data. In most cases this will be the same for all variables of the same type; if not, this raises an Exception.
49,614
def execute(self, **minimize_options):
    """Execute the fit.

    :param minimize_options: keyword arguments forwarded to the
        minimizer's ``execute``.
    :return: the minimizer's answer, decorated with a covariance matrix,
        the model, and an ``r_squared`` goodness-of-fit qualifier.
    """
    minimizer_ans = self.minimizer.execute(**minimize_options)
    try:
        # Some minimizers already provide a covariance matrix.
        cov_matrix = minimizer_ans.covariance_matrix
    except AttributeError:
        cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
    else:
        # Attribute exists but may be unset: compute it ourselves.
        if cov_matrix is None:
            cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
    finally:
        minimizer_ans.covariance_matrix = cov_matrix
    # Record the model on the answer so downstream code can evaluate it.
    minimizer_ans.model = self.model
    minimizer_ans.gof_qualifiers['r_squared'] = r_squared(self.model, minimizer_ans, self.data)
    return minimizer_ans
Execute the fit .
49,615
def eval_components(self, *args, **kwargs):
    """Numerically integrate the system of ODEs.

    Integration runs outward from the initial value in both directions,
    so requested times both before and after ``t_initial`` are covered.

    :param args: positional arguments bound against ``self.__signature__``.
    :param kwargs: keyword arguments bound against ``self.__signature__``.
    :return: components evaluated at the requested times (transposed).
    """
    bound_arguments = self.__signature__.bind(*args, **kwargs)
    t_like = bound_arguments.arguments[self.independent_vars[0].name]
    # System of ODEs and its jacobian, in the callables odeint expects.
    f = lambda ys, t, *a: [c(t, *(list(ys) + list(a))) for c in self._ncomponents]
    Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian]
    initial_dependent = [self.initial[var] for var in self.dependent_vars]
    t_initial = self.initial[self.independent_vars[0]]
    # Scalar time input: promote to a 1-element array.
    try:
        t_like[0]
    except (TypeError, IndexError):
        t_like = np.array([t_like])
    # Split the requested times into a forward and a backward leg; each
    # leg must start at t_initial for odeint.
    if t_initial in t_like:
        t_bigger = t_like[t_like >= t_initial]
        t_smaller = t_like[t_like <= t_initial][::-1]
    else:
        # t_initial itself was not requested: prepend it to both legs
        # as an integration anchor (stripped again before returning).
        t_bigger = np.concatenate((np.array([t_initial]), t_like[t_like > t_initial]))
        t_smaller = np.concatenate((np.array([t_initial]), t_like[t_like < t_initial][::-1]))
    # Properly ordered time axis containing t_initial exactly once.
    t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))
    ans_bigger = odeint(f, initial_dependent, t_bigger, args=tuple(bound_arguments.arguments[param.name] for param in self.params), Dfun=Dfun, *self.lsoda_args, **self.lsoda_kwargs)
    ans_smaller = odeint(f, initial_dependent, t_smaller, args=tuple(bound_arguments.arguments[param.name] for param in self.params), Dfun=Dfun, *self.lsoda_args, **self.lsoda_kwargs)
    # Stitch the legs together, dropping the duplicated t_initial row.
    ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger))
    if t_initial in t_like:
        return ans.T
    else:
        # Drop the anchor point that was only added for integration.
        return ans[t_total != t_initial].T
Numerically integrate the system of ODEs .
49,616
def call(self, *values, **named_values):
    """Call an expression to evaluate it at the given point.

    Builds a positional-or-keyword signature from the expression's
    symbols (independent variables first, then parameters), binds the
    provided values against it, and evaluates the lambdified expression.
    Named values that are not arguments of this expression are silently
    ignored.
    """
    independent_vars, params = seperate_symbols(self)
    func = sympy_to_py(self, independent_vars + params)
    parameters = [inspect_sig.Parameter(arg.name, inspect_sig.Parameter.POSITIONAL_OR_KEYWORD) for arg in independent_vars + params]
    arg_names = [arg.name for arg in independent_vars + params]
    # Drop named values that do not belong to this expression.
    relevant_named_values = {name: value for name, value in named_values.items() if name in arg_names}
    signature = inspect_sig.Signature(parameters=parameters)
    bound_arguments = signature.bind(*values, **relevant_named_values)
    return func(**bound_arguments.arguments)
Call an expression to evaluate it at the given point .
49,617
def variance(self, param):
    """Return the variance in ``param`` as found by the fit, or None
    when no covariance matrix is available."""
    index = self.model.params.index(param)
    try:
        return self.covariance_matrix[index, index]
    except TypeError:
        # covariance_matrix is None: the variance is unknown.
        return None
Return the variance in a given parameter as found by the fit .
49,618
def covariance(self, param_1, param_2):
    """Return the covariance between ``param_1`` and ``param_2`` as
    found by the fit."""
    i = self.model.params.index(param_1)
    j = self.model.params.index(param_2)
    return self.covariance_matrix[i, j]
Return the covariance between param_1 and param_2 .
49,619
def _array_safe_dict_eq(one_dict, other_dict):
    """Compare two dicts whose values may be numpy arrays or nested
    dicts.

    Arrays are compared with ``np.allclose``; everything else with
    normal equality.

    Bug fixed: the original raised AssertionError instead of returning
    False when two arrays differed, because the failing
    ``assert np.allclose(...)`` fired *inside* the ``except ValueError``
    handler, where the sibling ``except AssertionError`` clause no
    longer applies.  The assert-based control flow also silently
    vanished under ``python -O``; explicit comparisons are used instead.
    """
    for key in one_dict:
        a = one_dict[key]
        b = other_dict[key]
        if isinstance(a, dict):
            if not FitResults._array_safe_dict_eq(a, b):
                return False
            continue
        try:
            if a == b:
                continue
            return False
        except ValueError:
            # Truth value of an array comparison is ambiguous:
            # fall back to element-wise comparison.
            if not np.allclose(a, b):
                return False
    return True
Dicts containing arrays are hard to compare . This function uses numpy . allclose to compare arrays and does normal comparison for all other types .
49,620
def nonanalytical_func(x, a, b):
    """Piecewise example function: constant ``b`` for x <= b, linear
    ``a * (x - b) + b`` above.

    Demonstrates fitting an arbitrary pythonic function that is not
    easily written as an analytical expression.
    """
    y = np.full_like(x, b)
    above = x > b
    y[above] = (a * (x - b) + b)[above]
    return y
This can be any pythonic function which should be fitted typically one which is not easily written or supported as an analytical expression .
49,621
def get_form(self, request, obj=None, **kwargs):
    """Add the default alternative dropdown with appropriate choices."""
    if obj:
        if obj.alternatives:
            choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
        else:
            # No alternatives recorded yet: only the control group is offered.
            choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]

        # Form class built per-request so `choices` and the initial value
        # reflect this particular experiment object.
        class ExperimentModelForm(forms.ModelForm):
            default_alternative = forms.ChoiceField(choices=choices, initial=obj.default_alternative, required=False)
        kwargs['form'] = ExperimentModelForm
    return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs)
Add the default alternative dropdown with appropriate choices
49,622
def set_alternative_view(self, request):
    """Allow an admin user to change their own assigned alternative."""
    if not request.user.has_perm('experiments.change_experiment'):
        return HttpResponseForbidden()

    experiment_name = request.POST.get("experiment")
    alternative_name = request.POST.get("alternative")
    if not experiment_name or not alternative_name:
        return HttpResponseBadRequest()

    participant(request).set_alternative(experiment_name, alternative_name)
    return JsonResponse({
        'success': True,
        'alternative': participant(request).get_alternative(experiment_name),
    })
Allows the admin user to change their assigned alternative
49,623
def set_state_view(self, request):
    """Changes the experiment state."""
    if not request.user.has_perm('experiments.change_experiment'):
        return HttpResponseForbidden()
    try:
        state = int(request.POST.get("state", ""))
    except ValueError:
        return HttpResponseBadRequest()
    try:
        experiment = Experiment.objects.get(name=request.POST.get("experiment"))
    except Experiment.DoesNotExist:
        return HttpResponseBadRequest()
    experiment.state = state
    # State 0 ends the experiment; any other state clears the end date.
    if state == 0:
        experiment.end_date = timezone.now()
    else:
        experiment.end_date = None
    experiment.save()
    return HttpResponse()
Changes the experiment state
49,624
def get_alternative(self, experiment_name):
    """Get the alternative this user is enrolled in, falling back to the
    experiment's default alternative or the control group."""
    try:
        experiment = experiment_manager[experiment_name]
    except KeyError:
        experiment = None
    if experiment:
        if experiment.is_displaying_alternatives():
            alternative = self._get_enrollment(experiment)
            if alternative is None:
                return experiment.default_alternative
            return alternative
    return conf.CONTROL_GROUP
Get the alternative this user is enrolled in .
49,625
def set_alternative(self, experiment_name, alternative):
    """Explicitly set the alternative the user is enrolled in for the
    specified experiment; a no-op for unknown experiments."""
    experiment = experiment_manager.get_experiment(experiment_name)
    if experiment:
        self._set_enrollment(experiment, alternative)
Explicitly set the alternative the user is enrolled in for the specified experiment .
49,626
def goal(self, goal_name, count=1):
    """Record that this user performed ``goal_name`` for every
    experiment currently displaying alternatives."""
    for enrollment in self._get_all_enrollments():
        experiment = enrollment.experiment
        if experiment.is_displaying_alternatives():
            self._experiment_goal(experiment, enrollment.alternative, goal_name, count)
Record that this user has performed a particular goal
49,627
def incorporate(self, other_user):
    """Incorporate all enrollments and goals performed by the other user.

    Enrollments this user does not already have are copied over along
    with the other user's goal counts (re-recorded under this user's
    identifier); every processed enrollment is cancelled on the other
    user.
    """
    for enrollment in other_user._get_all_enrollments():
        # Never overwrite an existing enrollment for the same experiment.
        if not self._get_enrollment(enrollment.experiment):
            self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
            goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
            for goal_name, count in goals:
                self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
        other_user._cancel_enrollment(enrollment.experiment)
Incorporate all enrollments and goals performed by the other user
49,628
def visit(self):
    """Record that the user has visited the site for the purposes of
    retention tracking."""
    for enrollment in self._get_all_enrollments():
        if enrollment.experiment.is_displaying_alternatives():
            if not enrollment.last_seen:
                # First recorded visit for this enrollment.
                self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
                self._set_last_seen(enrollment.experiment, now())
            elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
                # A new session after the session window: count the visit
                # under both goals.
                self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
                self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
                self._set_last_seen(enrollment.experiment, now())
Record that the user has visited the site for the purposes of retention tracking
49,629
def flatten_pages(self, pages, level=1):
    """Recursively flatten the mkdocs pages structure into a flat list
    of ``{'file', 'title', 'level'}`` dicts."""
    flattened = []
    for page in pages:
        if type(page) is list:
            # Old-style [file, title] pair.
            flattened.append({'file': page[0], 'title': page[1], 'level': level})
        if type(page) is dict:
            title = list(page.keys())[0]
            value = list(page.values())[0]
            if type(value) is str:
                flattened.append({'file': value, 'title': title, 'level': level})
            if type(value) is list:
                # A section: recurse one level deeper.
                flattened.extend(self.flatten_pages(value, level + 1))
    return flattened
Recursively flattens pages data structure into a one - dimensional data structure
49,630
def convert(self):
    """User-facing conversion method. Returns pandoc document as a list
    of lines."""
    lines = []
    pages = self.flatten_pages(self.config['pages'])
    # Document-wide filters, shared by every page.
    f_exclude = mkdocs_pandoc.filters.exclude.ExcludeFilter(exclude=self.exclude)
    f_include = mkdocs_pandoc.filters.include.IncludeFilter(base_path=self.config['docs_dir'], encoding=self.encoding)
    f_headlevel = mkdocs_pandoc.filters.headlevels.HeadlevelFilter(pages)
    for page in pages:
        fname = os.path.join(self.config['docs_dir'], page['file'])
        try:
            p = codecs.open(fname, 'r', self.encoding)
        except IOError as e:
            raise FatalError("Couldn't open %s for reading: %s" % (fname, e.strerror), 1)
        # Per-page filters depend on the page's title, level and path.
        f_chapterhead = mkdocs_pandoc.filters.chapterhead.ChapterheadFilter(headlevel=page['level'], title=page['title'])
        f_image = mkdocs_pandoc.filters.images.ImageFilter(filename=page['file'], image_path=self.config['site_dir'], image_ext=self.image_ext)
        lines_tmp = []
        for line in p.readlines():
            lines_tmp.append(line.rstrip())
        if self.exclude:
            lines_tmp = f_exclude.run(lines_tmp)
        if self.filter_include:
            lines_tmp = f_include.run(lines_tmp)
        lines_tmp = f_headlevel.run(lines_tmp)
        lines_tmp = f_chapterhead.run(lines_tmp)
        lines_tmp = f_image.run(lines_tmp)
        lines.extend(lines_tmp)
        # Blank separator line between pages.
        lines.append('')
    # Document-wide post-processing filters.
    if self.strip_anchors:
        lines = mkdocs_pandoc.filters.anchors.AnchorFilter().run(lines)
    if self.filter_xrefs:
        lines = mkdocs_pandoc.filters.xref.XrefFilter().run(lines)
    if self.filter_toc:
        lines = mkdocs_pandoc.filters.toc.TocFilter().run(lines)
    if self.filter_tables:
        lines = mkdocs_pandoc.filters.tables.TableFilter().run(lines)
    return (lines)
User - facing conversion method . Returns pandoc document as a list of lines .
49,631
def blocks(self, lines):
    """Groups lines into markdown blocks, keeping fenced code spans
    (``` ... ```) together as a single block."""
    state = markdown.blockparser.State()
    blocks = []
    state.set('start')
    currblock = 0
    for line in lines:
        line += '\n'
        if state.isstate('start'):
            # The first line of a block decides its terminator: a fence
            # opener waits for '```', any other line waits for a blank line.
            if line[:3] == '```':
                state.set('```')
            else:
                state.set('\n')
            blocks.append('')
            currblock = len(blocks) - 1
        else:
            # Close the block when its terminating marker is seen.
            marker = line[:3]
            if state.isstate(marker):
                state.reset()
        blocks[currblock] += line
    return blocks
Groups lines into markdown blocks
49,632
def ruler_line(self, widths, linetype='-'):
    """Generate a ruler line separating rows of a grid table.

    :param widths: column content widths; each cell is padded by one
        space either side, hence width + 2 ruler characters per column.
    :param linetype: the ruler character to repeat.
    """
    cells = [linetype * (width + 2) for width in widths]
    return '+{}+'.format('+'.join(cells))
Generates a ruler line for separating rows from each other
49,633
def wrap_row(self, widths, row, width_default=None):
    """Wrap a single-line table row into fixed-width multi-line cells.

    NOTE: ``row`` is modified in place (each cell becomes a list of
    padded lines), matching the original behaviour.
    """
    if not width_default:
        width_default = self.width_default

    def col_width(index):
        # Column width from `widths`, falling back to the default width.
        return widths[index] if index < len(widths) else width_default

    # Wrap every cell and pad each wrapped line to its column width.
    longest = 0
    for i in range(len(row)):
        w = col_width(i)
        wrapper = textwrap.TextWrapper(width=w, break_on_hyphens=False)
        wrapped = wrapper.wrap(textwrap.dedent(row[i]))
        row[i] = [line + ' ' * (w - len(line)) for line in wrapped]
        longest = max(longest, len(row[i]))

    # Pad short cells with blank lines so all cells are equally tall.
    for i in range(len(row)):
        w = col_width(i)
        while len(row[i]) < longest:
            row[i].append(' ' * w)

    # Emit one output line per wrapped row of cells.
    lines = []
    for l in range(longest):
        cells = [row[c][l] for c in range(len(row))]
        lines.append('| ' + ' | '.join(cells) + ' |')
    return lines
Wraps a single line table row into a fixed width multi - line table .
49,634
def render_export_form(self, request, context, form_url=''):
    """Render the form submission export form."""
    extra_context = {
        'has_change_permission': self.has_change_permission(request),
        'form_url': mark_safe(form_url),
        'opts': self.opts,
        'add': True,
        'save_on_top': self.save_on_top,
    }
    context.update(extra_context)
    return TemplateResponse(request, self.export_form_template, context)
Render the from submission export form .
49,635
def clean_form_template(self):
    """Validate that the selected form template actually exists."""
    form_template = self.cleaned_data.get('form_template', '')
    if not form_template:
        return form_template
    try:
        get_template(form_template)
    except TemplateDoesNotExist:
        raise forms.ValidationError(_('Selected Form Template does not exist.'))
    return form_template
Check if template exists
49,636
def _embed(x, order=3, delay=1):
    """Time-delay embedding.

    Returns an ``(N - (order - 1) * delay, order)`` array whose rows are
    the delay vectors of ``x``.
    """
    n_vectors = len(x) - (order - 1) * delay
    embedded = np.empty((order, n_vectors))
    for row in range(order):
        start = row * delay
        embedded[row] = x[start:start + n_vectors]
    return embedded.T
Time - delay embedding .
49,637
def util_pattern_space(time_series, lag, dim):
    """Create the set of delay vectors ("pattern space") of the series.

    :param time_series: 1-D sequence of samples.
    :param lag: delay between successive coordinates (must be >= 1).
    :param dim: embedding dimension.
    :return: ``(n - lag * (dim - 1), dim)`` array of delay vectors.
    :raises Exception: when the parameters are invalid.
    """
    n = len(time_series)
    if lag * dim > n:
        raise Exception('Result matrix exceeded size limit, try to change lag or dim.')
    elif lag < 1:
        raise Exception('Lag should be greater or equal to 1.')
    rows = n - lag * (dim - 1)
    pattern_space = np.empty((rows, dim))
    for i in range(rows):
        pattern_space[i] = [time_series[i + j * lag] for j in range(dim)]
    return pattern_space
Create a set of sequences with given lag and dimension
49,638
def util_granulate_time_series(time_series, scale):
    """Extract the coarse-grained time series: averages over
    non-overlapping windows of length ``scale`` (trailing remainder is
    dropped)."""
    n = len(time_series)
    num_windows = int(np.fix(n / scale))
    windows = np.reshape(time_series[:num_windows * scale], (num_windows, scale))
    return np.mean(windows, axis=1)
Extract coarse - grained time series
49,639
def shannon_entropy(time_series):
    """Return the Shannon entropy (in bits) of the sample data.

    Improvement: symbol frequencies are counted with
    ``collections.Counter`` in a single O(n) pass; the original rescanned
    the whole series once per distinct symbol, i.e. O(n * k).
    """
    from collections import Counter

    # Strings are iterated per character; other iterables are
    # materialised so len() and Counter both work.
    if not isinstance(time_series, str):
        time_series = list(time_series)
    total = len(time_series)
    counts = Counter(time_series)
    ent = 0.0
    for count in counts.values():
        freq = count / total
        ent += freq * np.log2(freq)
    return -ent
Return the Shannon Entropy of the sample data .
49,640
def sample_entropy(time_series, sample_length, tolerance=None):
    """Calculates the sample entropy of degree m of a time_series.

    Template matches are counted incrementally: ``Ntemp[i]`` holds the
    number of matching template pairs of length ``i``.

    :param time_series: 1-D sequence of samples.
    :param sample_length: largest template length (m) to consider.
    :param tolerance: match tolerance; defaults to 0.1 * std of the series.
    :return: vector of sample entropies, one per template length.
    """
    M = sample_length - 1
    time_series = np.array(time_series)
    if tolerance is None:
        tolerance = 0.1 * np.std(time_series)
    n = len(time_series)
    # Ntemp[i]: number of template pairs of length i within tolerance.
    Ntemp = np.zeros(M + 2)
    # Every pair of points trivially matches at length 0.
    Ntemp[0] = n * (n - 1) / 2
    for i in range(n - M - 1):
        template = time_series[i:(i + M + 1)]
        rem_time_series = time_series[i + 1:]
        # Positions where the first template element matches.
        searchlist = np.nonzero(np.abs(rem_time_series - template[0]) < tolerance)[0]
        go = len(searchlist) > 0
        length = 1
        Ntemp[length] += len(searchlist)
        # Extend the surviving matches one element at a time.
        while go:
            length += 1
            nextindxlist = searchlist + 1
            nextindxlist = nextindxlist[nextindxlist < n - 1 - i]  # stay in range
            nextcandidates = rem_time_series[nextindxlist]
            hitlist = np.abs(nextcandidates - template[length - 1]) < tolerance
            searchlist = nextindxlist[hitlist]
            Ntemp[length] += np.sum(hitlist)
            go = any(hitlist) and length < M + 1
    # -log of the conditional probability that matches keep matching.
    sampen = -np.log(Ntemp[1:] / Ntemp[:-1])
    return sampen
Calculates the sample entropy of degree m of a time_series .
49,641
def multiscale_entropy(time_series, sample_length, tolerance=None, maxscale=None):
    """Calculate the multiscale entropy of the given time series: the
    sample entropy of the coarse-grained series at each scale from 1 up
    to ``maxscale``."""
    if tolerance is None:
        # Fixed tolerance, taken from the original (unscaled) series.
        tolerance = 0.1 * np.std(time_series)
    if maxscale is None:
        maxscale = len(time_series)
    mse = np.zeros(maxscale)
    for scale_index in range(maxscale):
        coarse = util_granulate_time_series(time_series, scale_index + 1)
        mse[scale_index] = sample_entropy(coarse, sample_length, tolerance)[-1]
    return mse
Calculate the Multiscale Entropy of the given time series considering different time - scales of the time series .
49,642
def permutation_entropy(time_series, order=3, delay=1, normalize=False):
    """Permutation entropy of the series, in bits.

    Each delay vector is reduced to its ordinal pattern; the Shannon
    entropy of the pattern distribution is returned, optionally
    normalized by log2(order!).
    """
    x = np.array(time_series)
    # A unique integer hash for every possible ordinal pattern.
    hashmult = np.power(order, np.arange(order))
    sorted_idx = _embed(x, order=order, delay=delay).argsort(kind='quicksort')
    hashval = (np.multiply(sorted_idx, hashmult)).sum(1)
    _, counts = np.unique(hashval, return_counts=True)
    p = np.true_divide(counts, counts.sum())
    pe = -np.multiply(p, np.log2(p)).sum()
    if normalize:
        return pe / np.log2(factorial(order))
    return pe
Permutation Entropy .
49,643
def multiscale_permutation_entropy(time_series, m, delay, scale):
    """Calculate the multiscale permutation entropy: the permutation
    entropy of the coarse-grained series at scales 1..scale."""
    return [
        permutation_entropy(
            util_granulate_time_series(time_series, s + 1), order=m, delay=delay
        )
        for s in range(scale)
    ]
Calculate the Multiscale Permutation Entropy
49,644
def composite_multiscale_entropy(time_series, sample_length, scale, tolerance=None):
    """Calculate the Composite Multiscale Entropy (CMSE) of the series.

    For each scale ``s`` (1..scale) the sample entropies of all ``s``
    possible coarse-graining offsets are averaged.

    Bugs fixed relative to the original:
      * the accumulator was ``np.zeros((1, scale))``, so ``cmse[i]``
        indexed rows of a one-row matrix and raised IndexError for any
        scale > 1; it is now a flat array of length ``scale``;
      * the inner offset loop ran ``range(i)``, which is empty at i = 0
        (every entry stayed zero) and always skipped the last offset;
        CMSE averages over all ``i + 1`` offsets, i.e. ``range(i + 1)``.
    """
    cmse = np.zeros(scale)
    for i in range(scale):
        for j in range(i + 1):
            tmp = util_granulate_time_series(time_series[j:], i + 1)
            cmse[i] += sample_entropy(tmp, sample_length, tolerance) / (i + 1)
    return cmse
Calculate the Composite Multiscale Entropy of the given time series .
49,645
def ones_comp_sum16(num1: int, num2: int) -> int:
    """One's-complement addition of two 16-bit numbers: any carry out of
    bit 16 is folded back into the low bit (end-around carry)."""
    total = num1 + num2
    if total < (1 << 16):
        return total
    # End-around carry: wrap the overflow bit back in.
    return total + 1 - (1 << 16)
Calculates the 1 s complement sum for 16 - bit numbers .
49,646
def checksum(source: bytes) -> int:
    """Calculates the Internet (RFC 1071) checksum of the input bytes.

    Odd-length input is zero-padded; 16-bit little-endian words are
    summed with one's-complement (end-around carry) addition, and the
    final one's complement of the sum is returned.
    """
    if len(source) % 2:
        source += b'\x00'
    total = 0
    for offset in range(0, len(source), 2):
        word = (source[offset + 1] << 8) + source[offset]
        # One's-complement addition (inlined ones_comp_sum16): since both
        # operands are < 2**16, a single end-around fold suffices.
        total += word
        if total >= (1 << 16):
            total = total + 1 - (1 << 16)
    return ~total & 0xffff
Calculates the checksum of the input bytes .
49,647
def send_one_ping(sock: socket, dest_addr: str, icmp_id: int, seq: int, size: int):
    """Sends one ICMP echo request to the given destination.

    :param sock: raw ICMP socket to send on.
    :param dest_addr: hostname or IP address of the destination.
    :param icmp_id: ICMP identifier used to match the reply.
    :param seq: ICMP sequence number.
    :param size: payload size (timestamp plus "Q" padding).
    :raises errors.HostUnknown: when the hostname cannot be resolved.
    """
    try:
        dest_addr = socket.gethostbyname(dest_addr)
    except socket.gaierror as e:
        print("Cannot resolve {}: Unknown host".format(dest_addr))
        raise errors.HostUnknown(dest_addr) from e
    # Build the header with a zero checksum first, to compute the real one.
    pseudo_checksum = 0
    icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, pseudo_checksum, icmp_id, seq)
    padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q"
    # Payload carries the send timestamp so the receiver can compute RTT.
    icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode()
    real_checksum = checksum(icmp_header + icmp_payload)
    # Rebuild the header with the checksum in network byte order.
    icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, socket.htons(real_checksum), icmp_id, seq)
    packet = icmp_header + icmp_payload
    sock.sendto(packet, (dest_addr, 0))
Sends one ping to the given destination .
49,648
def receive_one_ping(sock: socket, icmp_id: int, seq: int, timeout: int) -> float or None:
    """Receives the ping reply from the socket.

    Waits up to ``timeout`` seconds for an ICMP echo reply matching
    ``(icmp_id, seq)`` and returns the round-trip delay in seconds.

    :raises errors.Timeout: when nothing arrives within ``timeout``.
    :raises errors.TimeToLiveExpired: on a TTL-expired reply.
    :raises errors.TimeExceeded: on any other time-exceeded reply.
    """
    ip_header_slice = slice(0, struct.calcsize(IP_HEADER_FORMAT))
    icmp_header_slice = slice(ip_header_slice.stop, ip_header_slice.stop + struct.calcsize(ICMP_HEADER_FORMAT))
    ip_header_keys = ('version', 'tos', 'len', 'id', 'flags', 'ttl', 'protocol', 'checksum', 'src_addr', 'dest_addr')
    icmp_header_keys = ('type', 'code', 'checksum', 'id', 'seq')
    # Loop: non-matching packets are skipped until a reply or timeout.
    while True:
        selected = select.select([sock], [], [], timeout)
        if selected[0] == []:  # select timed out with nothing readable
            raise errors.Timeout(timeout)
        time_recv = default_timer()
        recv_data, addr = sock.recvfrom(1024)
        ip_header_raw, icmp_header_raw, icmp_payload_raw = recv_data[ip_header_slice], recv_data[icmp_header_slice], recv_data[icmp_header_slice.stop:]
        ip_header = dict(zip(ip_header_keys, struct.unpack(IP_HEADER_FORMAT, ip_header_raw)))
        _debug("IP HEADER:", ip_header)
        icmp_header = dict(zip(icmp_header_keys, struct.unpack(ICMP_HEADER_FORMAT, icmp_header_raw)))
        _debug("ICMP HEADER:", icmp_header)
        if icmp_header['type'] == IcmpType.TIME_EXCEEDED:
            if icmp_header['code'] == IcmpTimeExceededCode.TTL_EXPIRED:
                raise errors.TimeToLiveExpired()
            raise errors.TimeExceeded()
        if icmp_header['id'] == icmp_id and icmp_header['seq'] == seq:
            # A captured ECHO_REQUEST with our id/seq is our own outgoing
            # packet: skip it and keep waiting.
            if icmp_header['type'] == IcmpType.ECHO_REQUEST:
                _debug("ECHO_REQUEST filtered out.")
                continue
            if icmp_header['type'] == IcmpType.ECHO_REPLY:
                # Payload starts with the send timestamp.
                time_sent = struct.unpack(ICMP_TIME_FORMAT, icmp_payload_raw[0:struct.calcsize(ICMP_TIME_FORMAT)])[0]
                return time_recv - time_sent
Receives the ping from the socket .
49,649
def ping(dest_addr: str, timeout: int = 4, unit: str = "s", src_addr: str = None, ttl: int = 64, seq: int = 0, size: int = 56) -> float or None:
    """Send one ping to the destination address with the given timeout.

    :return: the round-trip delay in seconds (milliseconds when
        ``unit == "ms"``), or None on error when EXCEPTIONS is disabled.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP) as sock:
        sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        if src_addr:
            sock.bind((src_addr, 0))
        # Derive a 16-bit ICMP id from the current thread so concurrent
        # pings can tell their replies apart.
        icmp_id = threading.current_thread().ident % 0xFFFF
        try:
            send_one_ping(sock=sock, dest_addr=dest_addr, icmp_id=icmp_id, seq=seq, size=size)
            delay = receive_one_ping(sock=sock, icmp_id=icmp_id, seq=seq, timeout=timeout)
        except errors.PingError as e:
            _debug(e)
            # Swallow ping errors unless the module is configured to raise.
            if EXCEPTIONS:
                raise e
            return None
        if delay is None:
            return None
        if unit == "ms":
            delay *= 1000
        return delay
Send one ping to destination address with the given timeout .
49,650
def verbose_ping(dest_addr: str, count: int = 4, *args, **kwargs):
    """Ping ``count`` times and print one result line per attempt."""
    timeout = kwargs.get("timeout")
    src = kwargs.get("src")
    # Default the display unit to milliseconds (also forwarded to ping).
    unit = kwargs.setdefault("unit", "ms")
    for attempt in range(count):
        message = "ping '{}'".format(dest_addr)
        if src:
            message += " from '{}'".format(src)
        message += " ... "
        print(message, end="")
        delay = ping(dest_addr, seq=attempt, *args, **kwargs)
        if delay is None:
            print("Timeout > {}s".format(timeout) if timeout else "Timeout")
        else:
            print("{value}{unit}".format(value=int(delay), unit=unit))
Send pings to destination address with the given timeout and display the result .
49,651
def distance(self, val):
    """Set the edit-distance parameter; anything that is not a number in
    (0, 2] falls back to the default of 2."""
    new_distance = 2
    try:
        int(val)  # reject values that cannot be interpreted as a number
        if 0 < val <= 2:
            new_distance = val
    except (ValueError, TypeError):
        pass
    self._distance = new_distance
set the distance parameter
49,652
def export(self, filepath, encoding="utf-8", gzipped=True):
    """Export the word frequency list so it can be imported later."""
    payload = json.dumps(self.word_frequency.dictionary, sort_keys=True)
    write_file(filepath, encoding, gzipped, payload)
Export the word frequency list for import in the future
49,653
def word_probability(self, word, total_words=None):
    """Probability of ``word`` being the desired correct word, based on
    the loaded word frequencies."""
    if total_words is None:
        total_words = self._word_frequency.total_words
    frequency = self._word_frequency.dictionary[word]
    return frequency / total_words
Calculate the probability of the word being the desired correct word
49,654
def correction(self, word):
    """Return the most probable correct spelling for ``word``."""
    possibilities = self.candidates(word)
    return max(possibilities, key=self.word_probability)
The most probable correct spelling for the word
49,655
def candidates(self, word):
    """Generate possible spelling corrections for ``word``: edit
    distance 1 first, distance 2 only if needed and enabled."""
    if self.known([word]):
        # The word itself is known: nothing to correct.
        return {word}
    one_away = [candidate for candidate in self.edit_distance_1(word)]
    known_one = self.known(one_away)
    if known_one:
        return known_one
    if self._distance == 2:
        known_two = self.known([candidate for candidate in self.__edit_distance_alt(one_away)])
        if known_two:
            return known_two
    # Nothing better found: fall back to the original word.
    return {word}
Generate possible spelling corrections for the provided word up to an edit distance of two if and only when needed
49,656
def known(self, words):
    """Return the subset of ``words`` (lowercased) that are in the
    dictionary or exempt from spell checking."""
    lowered = (word.lower() for word in words)
    return {
        word
        for word in lowered
        if word in self._word_frequency.dictionary
        or not self._check_if_should_check(word)
    }
The subset of words that appear in the dictionary of words
49,657
def edit_distance_1(self, word):
    """Return every string one edit (delete / transpose / replace /
    insert) away from ``word``, using only letters seen in the corpus."""
    word = word.lower()
    if self._check_if_should_check(word) is False:
        return {word}
    letters = self._word_frequency.letters
    pairs = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    edits = set()
    for left, right in pairs:
        if right:
            edits.add(left + right[1:])                        # deletion
            for ch in letters:
                edits.add(left + ch + right[1:])               # replacement
        if len(right) > 1:
            edits.add(left + right[1] + right[0] + right[2:])  # transposition
        for ch in letters:
            edits.add(left + ch + right)                       # insertion
    return edits
Compute all strings that are one edit away from word using only the letters in the corpus
49,658
def edit_distance_2(self, word):
    """Return every string two edits away from ``word`` (a list that may
    contain duplicates)."""
    word = word.lower()
    results = []
    for first_edit in self.edit_distance_1(word):
        results.extend(self.edit_distance_1(first_edit))
    return results
Compute all strings that are two edits away from word using only the letters in the corpus
49,659
def __edit_distance_alt(self, words):
    """Return every string one edit away from any of ``words``
    (lowercased); used to reach edit distance two from distance-one
    candidates."""
    lowered = [word.lower() for word in words]
    out = []
    for candidate in lowered:
        out.extend(self.edit_distance_1(candidate))
    return out
Compute all strings that are one edit away from any of the given words, using only the letters in the corpus
49,660
def pop(self, key, default=None):
    """Remove ``key`` (case-insensitively) and return its count, or
    ``default`` when the word is absent."""
    lowered = key.lower()
    return self._dictionary.pop(lowered, default)
Remove the key and return the associated value or default if not found
49,661
def items(self):
    """Yield ``(word, frequency)`` pairs from the dictionary."""
    yield from self._dictionary.items()
Iterator over the words in the dictionary
49,662
def load_dictionary(self, filename, encoding="utf-8"):
    """Load a pre-built word frequency list from a (possibly gzipped) JSON file.

    Args:
        filename: Path to the JSON dictionary file.
        encoding: Text encoding used when reading the file.
    """
    with load_file(filename, encoding) as data:
        # json.loads() dropped its `encoding` keyword in Python 3.9 (it was
        # deprecated since 3.1 and ignored); `data` is already decoded text
        # here, so no keyword is needed.  The whole payload is lowercased to
        # keep dictionary keys case-insensitive.
        self._dictionary.update(json.loads(data.lower()))
    self._update_dictionary()
Load in a pre - built word frequency list
49,663
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
    """Read a text file and feed its contents to ``load_text()``."""
    with load_file(filename, encoding=encoding) as text:
        self.load_text(text, tokenizer)
Load in a text file from which to generate a word frequency list
49,664
def load_text(self, text, tokenizer=None):
    """Tokenize ``text`` (with ``tokenizer`` or the default tokenizer)
    and add the resulting words to the frequency list."""
    if tokenizer is None:
        words = self.tokenize(text)
    else:
        words = [token.lower() for token in tokenizer(text)]
    self._dictionary.update(words)
    self._update_dictionary()
Load text from which to generate a word frequency list
49,665
def load_words(self, words):
    """Add an iterable of words (lowercased) to the frequency list."""
    lowered = [word.lower() for word in words]
    self._dictionary.update(lowered)
    self._update_dictionary()
Load a list of words from which to generate a word frequency list
49,666
def remove_words(self, words):
    """Delete each word in ``words`` from the frequency list.

    Raises KeyError when a word is not present.
    """
    for lowered in (word.lower() for word in words):
        self._dictionary.pop(lowered)
    self._update_dictionary()
Remove a list of words from the word frequency list
49,667
def remove(self, word):
    """Delete a single word from the frequency list.

    Raises KeyError when the word is not present.
    """
    del self._dictionary[word.lower()]
    self._update_dictionary()
Remove a word from the word frequency list
49,668
def remove_by_threshold(self, threshold=5):
    """Delete every word whose count is at or below ``threshold``."""
    # Collect first so we do not mutate the dict while iterating it.
    low_frequency = [word for word, count in self._dictionary.items()
                     if count <= threshold]
    for word in low_frequency:
        self._dictionary.pop(word)
    self._update_dictionary()
Remove all words at or below the provided threshold
49,669
def _update_dictionary(self):
    """Refresh cached totals: word count, unique-word count, and the set
    of letters seen across all words."""
    counts = self._dictionary
    self._total_words = sum(counts.values())
    self._unique_words = len(counts)
    letters = set()
    for word in counts:
        letters.update(word)
    self._letters = letters
Update the word frequency object
49,670
def load_file(filename, encoding):
    """Context manager that yields the full text of ``filename``.

    Attempts a gzip read first; on failure, falls back to a plain text
    read using ``encoding``.  Presumably decorated with
    ``contextlib.contextmanager`` at the definition site -- callers use
    it in ``with`` statements (TODO confirm outside this chunk).
    """
    try:
        # A non-gzip file typically fails at read time, which happens
        # inside this try block, so the fallback below still triggers.
        with gzip.open(filename, mode="rt") as fobj:
            yield fobj.read()
    except (OSError, IOError):
        # Not readable as gzip: read as plain text.  OPEN is a
        # module-level alias (presumably io.open for Python 2
        # compatibility -- confirm at the definition site).
        with OPEN(filename, mode="r", encoding=encoding) as fobj:
            yield fobj.read()
Context manager to handle opening a gzip or text file correctly and reading all the data
49,671
def write_file(filepath, encoding, gzipped, data):
    """Write ``data`` to ``filepath``, gzip-compressed when ``gzipped``.

    Args:
        filepath: Destination path.
        encoding: Text encoding for the output file.
        gzipped: When True, write a gzip-compressed file.
        data: The text to write.
    """
    if gzipped:
        # Pass the encoding through; previously the gzip branch relied on
        # the locale default, so exports were not portable across systems.
        with gzip.open(filepath, "wt", encoding=encoding) as fobj:
            fobj.write(data)
    else:
        with OPEN(filepath, "w", encoding=encoding) as fobj:
            if sys.version_info < (3, 0):
                # Python 2 text-mode writes require unicode input.
                data = data.decode(encoding)
            fobj.write(data)
Write the data to file either as a gzip file or text based on the gzipped parameter
49,672
def preprocess(self, raw_inputs):
    """Convert PIL images into the greyscale MNIST tensor the model expects.

    Args:
        raw_inputs: Iterable of PIL Image objects.

    Returns:
        float32 numpy array of shape (n, MNIST_DIM[0], MNIST_DIM[1], 1),
        scaled to [0, 1].
    """
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
    # algorithm.  getattr keeps this working on older Pillow versions
    # that predate the Resampling enum.
    resample = getattr(Image, "Resampling", Image).LANCZOS
    image_arrays = []
    for raw_im in raw_inputs:
        im = raw_im.convert('L')          # greyscale
        im = im.resize(MNIST_DIM, resample)
        image_arrays.append(np.array(im))
    inputs = np.array(image_arrays)
    return inputs.reshape(len(inputs),
                          MNIST_DIM[0],
                          MNIST_DIM[1], 1).astype('float32') / 255
Convert images into the format required by our model .
49,673
def initialize_new_session():
    """Ensure the Flask session tracks uploaded images and has temp dirs."""
    if 'image_uid_counter' not in session or 'image_list' not in session:
        session['image_uid_counter'] = 0
        session['image_list'] = []
    else:
        logger.debug('images are already being tracked')
    if 'img_input_dir' not in session or 'img_output_dir' not in session:
        session['img_input_dir'] = mkdtemp()
        session['img_output_dir'] = mkdtemp()
    else:
        logger.debug('temporary image directories already exist')
Check session and initialize if necessary
49,674
def images():
    """REST endpoint: upload an image (POST) or list uploads (GET)."""
    if request.method == 'POST':
        file_upload = request.files['file']
        if not file_upload:
            return jsonify(ok="false")
        image = {
            'filename': secure_filename(file_upload.filename),
        }
        file_upload.save(os.path.join(session['img_input_dir'],
                                      image['filename']))
        image['uid'] = session['image_uid_counter']
        session['image_uid_counter'] += 1
        current_app.logger.debug('File %d is saved as %s',
                                 image['uid'], image['filename'])
        session['image_list'].append(image)
        return jsonify(ok="true", file=image['filename'], uid=image['uid'])
    if request.method == 'GET':
        return jsonify(images=session['image_list'])
Upload images via REST interface
49,675
def visualizers():
    """Return the names of all available visualizers as JSON."""
    names = [{'name': vis_name} for vis_name in get_visualizations()]
    return jsonify(visualizers=names)
Get a list of available visualizers
49,676
def visualize():
    """Run the requested visualizer on the requested image and return its output."""
    session['settings'] = {}
    image_uid = request.args.get('image')
    vis_name = request.args.get('visualizer')
    vis = get_visualizations()[vis_name]
    if vis.ALLOWED_SETTINGS:
        for key in vis.ALLOWED_SETTINGS.keys():
            # Use the query-string value when supplied, otherwise the
            # first allowed value acts as the default.
            supplied = request.args.get(key)
            if supplied is not None:
                session['settings'][key] = supplied
            else:
                session['settings'][key] = vis.ALLOWED_SETTINGS[key][0]
    else:
        logger.debug('Selected Visualizer {0} has no settings.'.format(vis_name))
    inputs = []
    for image in session['image_list']:
        if image['uid'] != int(image_uid):
            continue
        full_path = os.path.join(session['img_input_dir'], image['filename'])
        inputs.append({'filename': image['filename'],
                       'data': Image.open(full_path)})
    vis.update_settings(session['settings'])
    output = vis.make_visualization(inputs,
                                    output_dir=session['img_output_dir'])
    return jsonify(output[0])
Trigger a visualization via the REST API
49,677
def reset():
    """Remove the temporary image directories and clear the session."""
    for dir_key in ('img_input_dir', 'img_output_dir'):
        shutil.rmtree(session[dir_key])
    session.clear()
    return jsonify(ok='true')
Delete the session and clear temporary directories
49,678
def update_settings(self, settings):
    """Validate and apply visualization settings.

    Each value must be one of ``ALLOWED_SETTINGS[setting]``; valid values
    are stored as ``_<normalized_name>`` attributes on the instance.

    Args:
        settings: Mapping of setting name to requested value.

    Raises:
        ValueError: If a value is not allowed for its setting.
    """
    def error_string(setting, setting_val):
        return ('{val} is not an acceptable value for '
                'parameter {param} for visualization '
                '{vis}.').format(val=setting_val,
                                 param=setting,
                                 vis=self.__class__.__name__)

    for setting in settings:
        if settings[setting] in self.ALLOWED_SETTINGS[setting]:
            # Normalize the setting name to a valid identifier
            # ("color map" -> "_color_map").
            attr = '_' + re.sub(r'\W|^(?=\d)', '_', setting).lower()
            setattr(self, attr, settings[setting])
        else:
            # BUG FIX: the arguments were previously swapped, producing a
            # message with the value and the parameter name reversed.
            raise ValueError(error_string(setting, settings[setting]))
Update the settings
49,679
def load_model(model_cls_path, model_cls_name, model_load_args):
    """Dynamically import, instantiate, and load the configured model.

    Args:
        model_cls_path: Filesystem path of the module containing the class.
        model_cls_name: Name of the class to instantiate.
        model_load_args: Keyword arguments forwarded to ``model.load()``.

    Returns:
        The loaded model instance.
    """
    spec = importlib.util.spec_from_file_location('active_model',
                                                  model_cls_path)
    model_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(model_module)
    model = getattr(model_module, model_cls_name)()
    if not isinstance(model, BaseModel):
        # Warn but continue: duck typing may still make this work.
        warnings.warn("Loaded model '%s' at '%s' is not an instance of %r"
                      % (model_cls_name, model_cls_path, BaseModel))
    model.load(**model_load_args)
    return model
Get an instance of the described model .
49,680
def decode_prob(self, class_probabilities):
    """Annotate each row of class probabilities with index/name/prob
    entries, keeping only the top ``self.top_probs`` per row (probability
    formatted to three decimals)."""
    results = []
    for row in class_probabilities:
        ranked = sorted(
            ({'index': i, 'name': str(i), 'prob': p}
             for i, p in enumerate(row)),
            key=itemgetter('prob'),
            reverse=True,
        )[:self.top_probs]
        for entry in ranked:
            entry['prob'] = '{:.3f}'.format(entry['prob'])
        results.append(ranked)
    return results
Given predicted class probabilities for a set of examples, annotate each probability with its class name.
49,681
def _get_visualization_classes():
    """Collect every concrete BaseVisualization subclass defined in the
    submodules of ``picasso.visualizations``."""
    visualization_attr = vars(import_module('picasso.visualizations'))
    submodules = [value for value in visualization_attr.values()
                  if isinstance(value, ModuleType)]
    classes = []
    for submodule in submodules:
        for attr in vars(submodule).values():
            is_concrete_vis = (inspect.isclass(attr)
                               and issubclass(attr, BaseVisualization)
                               and attr is not BaseVisualization)
            if is_concrete_vis:
                classes.append(attr)
    return classes
Import visualizations classes dynamically
49,682
def get_model():
    """Return the model under analysis, loading it into the request
    context (``g``) on first use."""
    if not hasattr(g, 'model'):
        cfg = current_app.config
        g.model = load_model(cfg['MODEL_CLS_PATH'],
                             cfg['MODEL_CLS_NAME'],
                             cfg['MODEL_LOAD_ARGS'])
    return g.model
Get the NN model that s being analyzed from the request context . Put the model in the request context if it is not yet there .
49,683
def get_visualizations():
    """Return a {class name: instance} mapping of available
    visualizations, cached on the request context."""
    if not hasattr(g, 'visualizations'):
        # get_model() caches on g, so fetch it once up front.
        model = get_model()
        g.visualizations = {}
        for vis_class in _get_visualization_classes():
            instance = vis_class(model)
            g.visualizations[type(instance).__name__] = instance
    return g.visualizations
Get the available visualizations from the request context . Put the visualizations in the request context if they are not yet there .
49,684
def get_app_state():
    """Return app metadata for the UI, cached on the request context."""
    if not hasattr(g, 'app_state'):
        model = get_model()
        g.app_state = dict(
            app_title=APP_TITLE,
            model_name=type(model).__name__,
            latest_ckpt_name=model.latest_ckpt_name,
            latest_ckpt_time=model.latest_ckpt_time,
        )
    return g.app_state
Get current status of application in context
49,685
async def login(username: str, password: str, brand: str,
                websession: ClientSession = None) -> API:
    """Authenticate against the API and return a ready-to-use client."""
    client = API(brand, websession)
    await client.authenticate(username, password)
    return client
Log in to the API .
49,686
def _create_websession(self):
    """Build our own aiohttp ClientSession and mark it as not user-supplied."""
    from socket import AF_INET
    from aiohttp import ClientTimeout, TCPConnector
    _LOGGER.debug('Creating web session')
    connector = TCPConnector(
        family=AF_INET,
        limit_per_host=5,
        enable_cleanup_closed=True,
    )
    self._websession = ClientSession(
        connector=connector,
        timeout=ClientTimeout(connect=10),
    )
    # We own this session, so close_websession() is allowed to close it.
    self._supplied_websession = False
Create a web session .
49,687
async def close_websession(self):
    """Close the web session, but only when we created it ourselves."""
    if self._supplied_websession or self._websession is None:
        # Either the caller owns the session or there is nothing to close.
        return
    _LOGGER.debug('Closing connections')
    websession, self._websession = self._websession, None
    await websession.close()
    # Yield once so aiohttp's underlying transports can finish closing.
    await asyncio.sleep(0)
    _LOGGER.debug('Connections closed')
Close web session if not already closed and created by us .
49,688
async def authenticate(self, username: str, password: str) -> None:
    """Store the credentials and fetch an initial security token."""
    self._credentials = {'username': username, 'password': password}
    await self._get_security_token()
Authenticate against the API .
49,689
async def _get_security_token(self) -> None:
    """Request a security token from the login endpoint.

    No-op when credentials are missing.  Guarded by a lock so concurrent
    callers do not fire duplicate login requests; only performs the login
    when no token is currently cached.

    Raises:
        MyQError: When the login endpoint reports a non-zero return code.
    """
    _LOGGER.debug('Requesting security token.')
    if self._credentials is None:
        return
    # Serialize concurrent refresh attempts; the re-check below means
    # only the first waiter actually performs the login.
    async with self._security_token_lock:
        if self._security_token is None:
            login_resp = await self._request(
                'post',
                LOGIN_ENDPOINT,
                json=self._credentials,
                login_request=True,
            )
            # Missing ReturnCode is treated as failure (defaults to 1).
            return_code = int(login_resp.get('ReturnCode', 1))
            if return_code != 0:
                if return_code == 203:
                    # 203: invalid credentials -- drop them so later
                    # calls do not retry a login that cannot succeed.
                    _LOGGER.debug('Invalid username or password')
                    self._credentials = None
                raise MyQError(login_resp['ErrorMessage'])
            self._security_token = login_resp['SecurityToken']
Request a security token .
49,690
async def get_devices(self, covers_only: bool = True) -> list:
    """Fetch the account's devices, optionally filtered to supported covers."""
    from .device import MyQDevice
    _LOGGER.debug('Retrieving list of devices')
    devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)
    if devices_resp is None:
        return []
    device_list = []
    for device_json in devices_resp['Devices']:
        wanted = (not covers_only
                  or device_json['MyQDeviceTypeName']
                  in SUPPORTED_DEVICE_TYPE_NAMES)
        if not wanted:
            continue
        record = {'device_id': device_json['MyQDeviceId'],
                  'device_info': device_json}
        # Track the record internally and hand the same object to the
        # device wrapper so state updates stay in sync.
        self._devices.append(record)
        device_list.append(MyQDevice(record, self._brand, self))
    self._store_device_states(devices_resp.get('Devices', []))
    _LOGGER.debug('List of devices retrieved')
    return device_list
Get a list of all devices associated with the account .
49,691
def name(self) -> str:
    """Return the device's display name (its 'desc' attribute)."""
    descriptions = (attr['Value']
                    for attr in self._device_json.get('Attributes', [])
                    if attr.get('AttributeDisplayName') == 'desc')
    return next(descriptions)
Return the device name .
49,692
def available(self) -> bool:
    """Return True when the API is online and the device's 'online'
    attribute is the string "True"."""
    online_values = (attr['Value']
                     for attr in self._device_json.get('Attributes', [])
                     if attr.get('AttributeDisplayName') == 'online')
    # Short-circuit: the attribute is only inspected when the API is up.
    return self.api.online and next(online_values) == "True"
Return if device is online or not .
49,693
def open_allowed(self) -> bool:
    """Return True when unattended opening is enabled for this door."""
    flag = next(attr['Value']
                for attr in self._device_json.get('Attributes', [])
                if attr.get('AttributeDisplayName') == 'isunattendedopenallowed')
    return flag == "1"
Door can be opened unattended .
49,694
def close_allowed(self) -> bool:
    """Return True when unattended closing is enabled for this door."""
    flag = next(attr['Value']
                for attr in self._device_json.get('Attributes', [])
                if attr.get('AttributeDisplayName') == 'isunattendedcloseallowed')
    return flag == "1"
Door can be closed unattended .
49,695
def _update_state(self, value: str) -> None:
    """Optimistically update the cached 'doorstate' attribute while an
    open/close command is pending.

    Args:
        value: New door-state code (as a string) to store locally.
    """
    # Use next() with a default so a missing 'doorstate' attribute yields
    # None instead of raising StopIteration -- previously the
    # `is not None` guard below was unreachable dead code.
    attribute = next(
        (attr for attr in self._device['device_info'].get('Attributes', [])
         if attr.get('AttributeDisplayName') == 'doorstate'),
        None,
    )
    if attribute is not None:
        attribute['Value'] = value
Update state temporary during open or close .
49,696
def _coerce_state_from_string(value: Union[int, str]) -> str:
    """Translate a raw numeric door-state code into a state-name constant,
    logging and returning STATE_UNKNOWN for unmapped codes."""
    code = int(value)
    if code in STATE_MAP:
        return STATE_MAP[code]
    _LOGGER.error('Unknown state: %s', value)
    return STATE_UNKNOWN
Return a proper state from a string input .
49,697
async def _set_state(self, state: int) -> bool:
    """Send a desired-door-state command; return True on success."""
    try:
        response = await self.api._request(
            'put',
            DEVICE_SET_ENDPOINT,
            json={
                'attributeName': 'desireddoorstate',
                'myQDeviceId': self.device_id,
                'AttributeValue': state,
            })
    except RequestError as err:
        _LOGGER.error('%s: Setting state failed (and halting): %s',
                      self.name, err)
        return False
    if response is None:
        return False
    # Missing ReturnCode is treated as failure (defaults to 1).
    if int(response.get('ReturnCode', 1)) == 0:
        return True
    _LOGGER.error('%s: Error setting the device state: %s',
                  self.name,
                  response.get('ErrorMessage', 'Unknown Error'))
    return False
Set the state of the device .
49,698
async def close(self) -> bool:
    """Send a close command, optimistically marking the door as closing."""
    _LOGGER.debug('%s: Sending close command', self.name)
    succeeded = await self._set_state(0)
    if not succeeded:
        return False
    # Throttle updates so the optimistic state is not overwritten
    # immediately by a stale server-side state.
    self.next_allowed_update = datetime.utcnow() + timedelta(seconds=10)
    if self.state not in (STATE_CLOSED, STATE_CLOSING):
        self._update_state('5')
        self._device_json = self._device['device_info']
    _LOGGER.debug('%s: Close command send', self.name)
    return True
Close the device .
49,699
async def update(self) -> None:
    """Refresh this device's cached state, honoring the update throttle."""
    throttle_until = self.next_allowed_update
    if throttle_until is not None and datetime.utcnow() < throttle_until:
        # Still inside the post-command grace period; keep the
        # optimistic local state.
        return
    self.next_allowed_update = None
    await self.api._update_device_state()
    self._device_json = self._device['device_info']
Retrieve updated device state .