code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
with open(indexed_audio_file_abs_path, "wb") as f: pickle.dump(self.get_timestamps(), f, pickle.HIGHEST_PROTOCOL)
def save_indexed_audio(self, indexed_audio_file_abs_path)
Writes the corrected timestamps to a file. Timestamps are a Python dictionary. Parameters ---------- indexed_audio_file_abs_path : str
2.847717
2.752114
1.034738
with open(indexed_audio_file_abs_path, "rb") as f: self.__timestamps = pickle.load(f)
def load_indexed_audio(self, indexed_audio_file_abs_path)
Parameters ---------- indexed_audio_file_abs_path : str
3.519301
4.098563
0.858667
return bool(re.search(".*".join(sub), sup))
def _is_subsequence_of(self, sub, sup)
Parameters ---------- sub : str sup : str Returns ------- bool
12.939256
15.153499
0.853879
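
A minimal standalone sketch of the subsequence test used by `_is_subsequence_of` above: joining the characters of `sub` with `.*` yields a regex that matches whenever those characters appear in `sup` in order, not necessarily contiguously. The helper name below is hypothetical.

import re

def is_subsequence_of(sub, sup):
    # "ace" becomes the pattern "a.*c.*e", which re.search finds inside "abcde"
    return bool(re.search(".*".join(sub), sup))

assert is_subsequence_of("ace", "abcde")
assert not is_subsequence_of("aec", "abcde")
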
def get_all_in(one, another): for element in one: if element in another: yield element def containment_check(sub, sup): return (set(Counter(sub).keys()).issubset( set(Counter(sup).keys()))) def containment_freq_check(sub, sup): return (all([Counter(sub)[element] <= Counter(sup)[element] for element in Counter(sub)])) def extra_freq_check(sub, sup, list_of_tups): # Would be used for matching anagrams, subsequences etc. return (len(list_of_tups) > 0 and all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]] for tup in list_of_tups])) # Regarding containment checking while having extra conditions, # there's no good way to map each anagram or subseuqnece etc. that was # found to the query word, without making it more complicated than # it already is, because a query word can be anagram/subsequence etc. # to multiple words of the timestamps yet finding the one with the # right index would be the problem. # Therefore we just approximate the solution by just counting # the elements. if len(sub) > len(sup): return False for pred, func in set([(anagram, self._is_anagram_of), (subsequence, self._is_subsequence_of), (supersequence, self._is_supersequence_of)]): if pred: pred_seive = [(sub_key, sup_key) for sub_key in set(Counter(sub).keys()) for sup_key in set(Counter(sup).keys()) if func(sub_key, sup_key)] if not extra_freq_check(sub, sup, pred_seive): return False if ( not any([anagram, subsequence, supersequence]) and (not containment_check(sub, sup) or not containment_freq_check(sub, sup)) ): return False for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)): if x1 != x2: return False return True
def _partial_search_validator(self, sub, sup, anagram=False, subsequence=False, supersequence=False)
It's responsible for validating the partial results of the `search` method. If it returns True, the search would return its result. Else, the search method would discard what it found and look for others. First, checks to see if all elements of `sub` are in `sup` with at least the same frequency and then checks to see if every element of `sub` appears in `sup` in the same order (index-wise). If advanced control structures are specified, the containment condition won't be checked. The code for index checking is from [1]_. Parameters ---------- sub : list sup : list anagram : bool, optional Default is `False` subsequence : bool, optional Default is `False` supersequence : bool, optional Default is `False` Returns ------- bool References ---------- .. [1] : ` https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
4.292263
4.108762
1.044661
search_gen_rest_of_kwargs = { "audio_basename": audio_basename, "case_sensitive": case_sensitive, "subsequence": subsequence, "supersequence": supersequence, "timing_error": timing_error, "anagram": anagram, "missing_word_tolerance": missing_word_tolerance} if not isinstance(queries, (list, str)): raise TypeError("Invalid query type.") if type(queries) is not list: queries = [queries] search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list)) for query in queries: search_gen = self.search_gen(query=query, **search_gen_rest_of_kwargs) for search_result in search_gen: search_results[query][ search_result["File Name"]].append(search_result["Result"]) return search_results
def search_all(self, queries, audio_basename=None, case_sensitive=False, subsequence=False, supersequence=False, timing_error=0.0, anagram=False, missing_word_tolerance=0)
Returns a dictionary of all results of all of the queries for all of the audio files. All the specified parameters work per query. Parameters ---------- queries : [str] or str A list of the strings that'll be searched. If type of queries is `str`, it'll be inserted into a list within the body of the method. audio_basename : str, optional Search only within the given audio_basename. Default is `None`. case_sensitive : bool Default is `False` subsequence : bool, optional `True` if it's not needed for the exact word to be detected and larger strings that contain the given one are fine. If the query is a sentence with multiple words, it'll be considered for each word, not the whole sentence. Default is `False`. supersequence : bool, optional `True` if it's not needed for the exact word to be detected and smaller strings that are contained within the given one are fine. If the query is a sentence with multiple words, it'll be considered for each word, not the whole sentence. Default is `False`. anagram : bool, optional `True` if it's acceptable for a complete permutation of the word to be found. e.g. "abcde" would be acceptable for "edbac". If the query is a sentence with multiple words, it'll be considered for each word, not the whole sentence. Default is `False`. timing_error : None or float, optional Sometimes other words (almost always very small) would be detected between the words of the `query`. This parameter defines the timing difference/tolerance of the search. Default is 0.0 i.e. no timing error is tolerated. missing_word_tolerance : int, optional The number of words that can be missed within the result. For example, if the query is "Some random text" and the tolerance value is `1`, then "Some text" would be a valid response. Note that the first and last words cannot be missed. Also, there'll be an error if the value is more than the number of available words. For the example above, any value more than 1 would have given an error (since there's only one word i.e. "random" that can be missed). Default is 0. Returns ------- search_results : {str: {str: [(float, float)]}} A dictionary whose keys are queries and whose values are dictionaries whose keys are all the audio files in which the query is present and whose values are lists of 2-tuples whose first element is the starting second of the query and whose second element is the ending second. e.g. {"apple": {"fruits.wav" : [(1.1, 1.12)]}} Raises ------ TypeError if `queries` is neither a list nor a str
2.442975
2.567037
0.951671
def indexes_in_transcript_to_start_end_second(index_tup, audio_basename): space_indexes = [i for i, x in enumerate( transcription[audio_basename]) if x == " "] space_indexes.sort(reverse=True) index_start, index_end = index_tup # re.finditer returns the ending index by one more index_end -= 1 while transcription[audio_basename][index_start] == " ": index_start += 1 while transcription[audio_basename][index_end] == " ": index_end -= 1 block_number_start = 0 block_number_end = len(space_indexes) for block_cursor, space_index in enumerate(space_indexes): if index_start > space_index: block_number_start = (len(space_indexes) - block_cursor) break for block_cursor, space_index in enumerate(space_indexes): if index_end > space_index: block_number_end = (len(space_indexes) - block_cursor) break return (timestamps[audio_basename][block_number_start].start, timestamps[audio_basename][block_number_end].end) timestamps = self.get_timestamps() if audio_basename is not None: timestamps = {audio_basename: timestamps[audio_basename]} transcription = { audio_basename: ' '.join( [word_block.word for word_block in timestamps[audio_basename]] ) for audio_basename in timestamps} match_map = map( lambda audio_basename: tuple(( audio_basename, re.finditer(pattern, transcription[audio_basename]))), transcription.keys()) search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list)) for audio_basename, match_iter in match_map: for match in match_iter: search_results[match.group()][audio_basename].append( tuple(indexes_in_transcript_to_start_end_second( match.span(), audio_basename))) return search_results
def search_regexp(self, pattern, audio_basename=None)
First joins the words of the word_blocks of timestamps with spaces, per audio_basename. Then matches `pattern` and calculates the index of the word_blocks where the first and last words of the matched result appear. Then presents the output like the `search_all` method. Note that the leading and trailing spaces from the matched results would be removed while determining which word_block they belong to. Parameters ---------- pattern : str A regex pattern. audio_basename : str, optional Search only within the given audio_basename. Default is `None`. Returns ------- search_results : {str: {str: [(float, float)]}} A dictionary whose keys are the matched strings and whose values are dictionaries whose keys are all the audio files in which the match is present and whose values are lists of 2-tuples whose first element is the starting second of the match and whose second element is the ending second. e.g. {"apple": {"fruits.wav" : [(1.1, 1.12)]}}
2.837855
2.648902
1.071333
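
The core of `search_regexp` above is mapping a character span of the space-joined transcript back to word-block indexes. A simplified, self-contained sketch of that mapping (hypothetical helper; it counts the spaces before each end of the span instead of walking a reversed space-index list, and ignores the timestamp lookup):

import re

def span_to_word_indexes(transcript, span):
    start, end = span
    end -= 1  # re spans are half-open; step back to the last matched character
    first_word = transcript.count(" ", 0, start)
    last_word = transcript.count(" ", 0, end)
    return first_word, last_word

transcript = "hello brave new world"
match = re.search(r"brave new", transcript)
assert span_to_word_indexes(transcript, match.span()) == (1, 2)
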
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188 ''' Returns a list of primes < n ''' sieve = [True] * (n/2) for i in xrange(3,int(n**0.5)+1,2): if sieve[i/2]: sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1) return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]]
def rwh_primes1(n)
Returns a list of primes < n
1.65989
1.646753
1.007978
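
rwh_primes1 above is Python 2 code (xrange and true-dividing / used for indexing). A sketch of an equivalent Python 3 port, using floor division so the slice arithmetic stays integral (assumes n >= 3):

def rwh_primes1_py3(n):
    """Returns a list of primes < n (Python 3 port of rwh_primes1)."""
    sieve = [True] * (n // 2)          # sieve[i] represents the odd number 2*i + 1
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [2 * i + 1 for i in range(1, n // 2) if sieve[i]]

assert rwh_primes1_py3(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
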
'''Multiplication in Galois Fields without using a precomputed look-up table (and thus it's slower) by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.''' ### Define bitwise carry-less operations as inner functions ### def cl_mult(x,y): '''Bitwise carry-less multiplication on integers''' z = 0 i = 0 while (y>>i) > 0: if y & (1<<i): z ^= x<<i i += 1 return z def bit_length(n): '''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()''' bits = 0 while n >> bits: bits += 1 return bits def cl_div(dividend, divisor=None): '''Bitwise carry-less long division on integers and returns the remainder''' # Compute the position of the most significant bit for each integers dl1 = bit_length(dividend) dl2 = bit_length(divisor) # If the dividend is smaller than the divisor, just exit if dl1 < dl2: return dividend # Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor) for i in xrange(dl1-dl2,-1,-1): # Check that the dividend is divisible (useless for the first iteration but important for the next ones) if dividend & (1 << i+dl2-1): # If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction) dividend ^= divisor << i return dividend ### Main GF multiplication routine ### # Multiply the gf numbers result = cl_mult(x,y) # Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds if prim > 0: result = cl_div(result, prim) return result
def gf_mult_noLUT_slow(x, y, prim=0)
Multiplication in Galois Fields without using a precomputed look-up table (and thus it's slower) by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.
5.609053
4.525486
1.239436
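
The cl_mult inner function above is plain carry-less (XOR-based) multiplication, i.e. polynomial multiplication over GF(2). Restating it as a standalone helper with a small check: (x^2 + 1)(x + 1) = x^3 + x^2 + x + 1, i.e. 0b101 * 0b11 = 0b1111.

def cl_mult(x, y):
    """Bitwise carry-less multiplication: XOR shifted copies of x for each set bit of y."""
    z = 0
    i = 0
    while (y >> i) > 0:
        if y & (1 << i):
            z ^= x << i
        i += 1
    return z

assert cl_mult(0b101, 0b11) == 0b1111
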
'''Galois Field integer multiplication using Russian Peasant Multiplication algorithm (faster than the standard multiplication + modular reduction). If prim is 0 and carryless=False, then the function produces the result for a standard integers multiplication (no carry-less arithmetics nor modular reduction).''' r = 0 while y: # while y is above 0 if y & 1: r = r ^ x if carryless else r + x # y is odd, then add the corresponding x to r (the sum of all x's corresponding to odd y's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!). y = y >> 1 # equivalent to y // 2 x = x << 1 # equivalent to x*2 if prim > 0 and x & field_charac_full: x = x ^ prim # GF modulo: if x >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR). return r
def gf_mult_noLUT(x, y, prim=0, field_charac_full=256, carryless=True)
Galois Field integer multiplication using the Russian Peasant Multiplication algorithm (faster than the standard multiplication + modular reduction). If prim is 0 and carryless=False, then the function produces the result of a standard integer multiplication (no carry-less arithmetic nor modular reduction).
8.934613
5.246832
1.702859
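
The key step in gf_mult_noLUT above is the modular reduction when x overflows the field: once the shifted x has the field_charac_full bit set, it is XORed with the primitive polynomial. A self-contained check of that step with the common GF(2^8) polynomial 0x11d (so gf_mult_noLUT(0x80, 2, prim=0x11d) would return 0x1d):

x = 0x80 << 1            # multiplying 0x80 by the generator 2 overflows the 8-bit field
if x & 0x100:            # same test as `x & field_charac_full` in gf_mult_noLUT
    x ^= 0x11d           # modular reduction: XOR with the primitive polynomial
assert x == 0x1d
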
'''Multiply two polynomials, inside Galois Field (but the procedure is generic). Optimized function by precomputation of log.''' # Pre-allocate the result array r = bytearray(len(p) + len(q) - 1) # Precompute the logarithm of p lp = [gf_log[p[i]] for i in xrange(len(p))] # Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficients of p with all coefficients of q) for j in xrange(len(q)): qj = q[j] # optimization: load the coefficient once if qj != 0: # log(0) is undefined, we need to check that lq = gf_log[qj] # Optimization: precache the logarithm of the current coefficient of q for i in xrange(len(p)): if p[i] != 0: # log(0) is undefined, need to check that... r[i + j] ^= gf_exp[lp[i] + lq] # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) return r
def gf_poly_mul(p, q)
Multiply two polynomials, inside Galois Field (but the procedure is generic). Optimized function by precomputation of log.
5.15943
3.748172
1.376519
'''Multiply two polynomials, inside Galois Field''' # Pre-allocate the result array r = bytearray(len(p) + len(q) - 1) # Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficients of p with all coefficients of q) for j in xrange(len(q)): for i in xrange(len(p)): r[i + j] ^= gf_mul(p[i], q[j]) # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) -- you can see it's your usual polynomial multiplication return r
def gf_poly_mul_simple(p, q) # simple equivalent way of multiplying two polynomials without precomputation, but thus it's slower
Multiply two polynomials, inside Galois Field
3.737112
3.646304
1.024904
'''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (doesn't work with standard polynomials outside of this galois field).''' # CAUTION: this function expects polynomials to follow the opposite convention at decoding: the terms must go from the biggest to lowest degree (while most other functions here expect a list from lowest to biggest degree). eg: 1 + 2x + 5x^2 = [5, 2, 1], NOT [1, 2, 5] msg_out = bytearray(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed #normalizer = divisor[0] # precomputing for performance for i in xrange(len(dividend) - (len(divisor)-1)): #msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]. For more infos, see http://en.wikipedia.org/wiki/Synthetic_division coef = msg_out[i] # precaching if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization). In fact if you remove it, it should still work because gf_mul() will take care of the condition. But it's still a good practice to put the condition here. for j in xrange(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient if divisor[j] != 0: # log(0) is undefined msg_out[i + j] ^= gf_mul(divisor[j], coef) # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. separator = -(len(divisor)-1) return msg_out[:separator], msg_out[separator:]
def gf_poly_div(dividend, divisor)
Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (doesn't work with standard polynomials outside of this galois field).
10.165225
8.431161
1.205673
'''Linear time implementation of polynomial squaring. For details, see paper: "A fast software implementation for arithmetic operations in GF (2n)". De Win, E., Bosselaers, A., Vandenberghe, S., De Gersem, P., & Vandewalle, J. (1996, January). In Advances in Cryptology - Asiacrypt'96 (pp. 65-76). Springer Berlin Heidelberg.''' length = len(poly) out = bytearray(2*length - 1) for i in xrange(length-1): p = poly[i] k = 2*i if p != 0: #out[k] = gf_exp[(2*gf_log[p]) % field_charac] # not necessary to modulo (2^r)-1 since gf_exp is duplicated up to 510. out[k] = gf_exp[2*gf_log[p]] #else: # not necessary since the output is already initialized to an array of 0 #out[k] = 0 out[2*length-2] = gf_exp[2*gf_log[poly[length-1]]] if out[0] == 0: out[0] = 2*poly[1] - 1 return out
def gf_poly_square(poly)
Linear time implementation of polynomial squaring. For details, see paper: "A fast software implementation for arithmetic operations in GF (2n)". De Win, E., Bosselaers, A., Vandenberghe, S., De Gersem, P., & Vandewalle, J. (1996, January). In Advances in Cryptology - Asiacrypt'96 (pp. 65-76). Springer Berlin Heidelberg.
8.15899
3.022784
2.699164
'''Evaluates a polynomial in GF(2^p) given the value for x. This is based on Horner's scheme for maximum efficiency.''' y = poly[0] for i in xrange(1, len(poly)): y = gf_mul(y, x) ^ poly[i] return y
def gf_poly_eval(poly, x)
Evaluates a polynomial in GF(2^p) given the value for x. This is based on Horner's scheme for maximum efficiency.
4.576447
2.345882
1.950843
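
gf_poly_eval above applies Horner's scheme with GF arithmetic (gf_mul plus XOR). The same structure over ordinary integers, as a self-contained illustration (coefficients ordered from the highest degree down, matching the convention above):

def horner_eval(poly, x):
    # 3x^2 + 2x + 1 at x = 2: ((3)*2 + 2)*2 + 1 = 17
    y = poly[0]
    for coef in poly[1:]:
        y = y * x + coef
    return y

assert horner_eval([3, 2, 1], 2) == 17
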
'''Generate an irreducible generator polynomial (necessary to encode a message into Reed-Solomon)''' g = bytearray([1]) for i in xrange(nsym): g = gf_poly_mul(g, [1, gf_pow(generator, i+fcr)]) return g
def rs_generator_poly(nsym, fcr=0, generator=2)
Generate an irreducible generator polynomial (necessary to encode a message into Reed-Solomon)
6.582948
3.825726
1.720706
'''Generate all irreducible generator polynomials up to max_nsym (usually you can use n, the length of the message+ecc). Very useful to reduce processing time if you want to encode using variable schemes and nsym rates.''' g_all = {} g_all[0] = g_all[1] = [1] for nsym in xrange(max_nsym): g_all[nsym] = rs_generator_poly(nsym, fcr, generator) return g_all
def rs_generator_poly_all(max_nsym, fcr=0, generator=2)
Generate all irreducible generator polynomials up to max_nsym (usually you can use n, the length of the message+ecc). Very useful to reduce processing time if you want to encode using variable schemes and nsym rates.
8.188491
2.173454
3.767502
'''Simple Reed-Solomon encoding (mainly an example for you to understand how it works, because it's slower than the inlined function below)''' global field_charac if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac)) if gen is None: gen = rs_generator_poly(nsym, fcr, generator) # Pad the message, then divide it by the irreducible generator polynomial _, remainder = gf_poly_div(msg_in + bytearray(len(gen)-1), gen) # The remainder is our RS code! Just append it to our original message to get our full codeword (this represents a polynomial of max 256 terms) msg_out = msg_in + remainder # Return the codeword return msg_out
def rs_simple_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None)
Simple Reed-Solomon encoding (mainly an example for you to understand how it works, because it's slower than the inlined function below)
6.929587
5.034453
1.376433
'''Reed-Solomon main encoding function, using polynomial division (Extended Synthetic Division, the fastest algorithm available to my knowledge), better explained at http://research.swtch.com/field''' global field_charac if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac)) if gen is None: gen = rs_generator_poly(nsym, fcr, generator) msg_in = bytearray(msg_in) msg_out = bytearray(msg_in) + bytearray(len(gen)-1) # init msg_out with the values inside msg_in and pad with len(gen)-1 bytes (which is the number of ecc symbols). # Precompute the logarithm of every items in the generator lgen = bytearray([gf_log[gen[j]] for j in xrange(len(gen))]) # Extended synthetic division main loop # Fastest implementation with PyPy (but the Cython version in creedsolo.pyx is about 2x faster) for i in xrange(len(msg_in)): coef = msg_out[i] # Note that it's msg_out here, not msg_in. Thus, we reuse the updated value at each iteration (this is how Synthetic Division works: instead of storing in a temporary register the intermediate values, we directly commit them to the output). # coef = gf_mul(msg_out[i], gf_inverse(gen[0])) # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0] if coef != 0: # log(0) is undefined, so we need to manually check for this case. There's no need to check the divisor here because we know it can't be 0 since we generated it. lcoef = gf_log[coef] # precaching for j in xrange(1, len(gen)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic) #if gen[j] != 0: # log(0) is undefined so we need to check that, but it slow things down in fact and it's useless in our case (reed-solomon encoding) since we know that all coefficients in the generator are not 0 msg_out[i + j] ^= gf_exp[lcoef + lgen[j]] # optimization, equivalent to gf_mul(gen[j], msg_out[i]) and we just substract it to msg_out[i+j] (but since we are in GF256, it's equivalent to an addition and to an XOR). In other words, this is simply a "multiply-accumulate operation" # Recopy the original message bytes (overwrites the part where the quotient was computed) msg_out[:len(msg_in)] = msg_in # equivalent to c = mprime - b, where mprime is msg_in padded with [0]*nsym return msg_out
def rs_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None)
Reed-Solomon main encoding function, using polynomial division (Extended Synthetic Division, the fastest algorithm available to my knowledge), better explained at http://research.swtch.com/field
9.054228
7.596738
1.191857
'''Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial. Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse). ''' # Note the "[0] +" : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions). # This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1. return [0] + [gf_poly_eval(msg, gf_pow(generator, i+fcr)) for i in xrange(nsym)]
def rs_calc_syndromes(msg, nsym, fcr=0, generator=2)
Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial. Mathematically, it's essentially equivalent to a Fourier Transform (Chien search being the inverse).
21.563828
12.147223
1.775206
'''Compute the erasures/errors/errata locator polynomial from the erasures/errors/errata positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument.''' # See: http://ocw.usu.edu/Electrical_and_Computer_Engineering/Error_Control_Coding/lecture7.pdf and Blahut, Richard E. "Transform techniques for error control codes." IBM Journal of Research and development 23.3 (1979): 299-315. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.600&rep=rep1&type=pdf and also a MatLab implementation here: http://www.mathworks.com/matlabcentral/fileexchange/23567-reed-solomon-errors-and-erasures-decoder/content//RS_E_E_DEC.m e_loc = [1] # just to init because we will multiply, so it must be 1 so that the multiplication starts correctly without nulling any term # erasures_loc is very simple to compute: erasures_loc = prod(1 - x*alpha**i) for i in erasures_pos and where alpha is the alpha chosen to evaluate polynomials (here in this library it's gf(3)). To generate c*x where c is a constant, we simply generate a Polynomial([c, 0]) where 0 is the constant and c is positionned to be the coefficient for x^1. for i in e_pos: e_loc = gf_poly_mul( e_loc, gf_poly_add([1], [gf_pow(generator, i), 0]) ) return e_loc
def rs_find_errata_locator(e_pos, generator=2)
Compute the erasures/errors/errata locator polynomial from the erasures/errors/errata positions (the positions must be relative to the x coefficient). E.g., "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9; here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument.
13.388133
5.623987
2.380541
'''Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.''' # Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(n-k+1) _, remainder = gf_poly_div( gf_poly_mul(synd, err_loc), ([1] + [0]*(nsym+1)) ) # first multiply syndromes * errata_locator, then do a polynomial division to truncate the polynomial to the required length # Faster way that is equivalent #remainder = gf_poly_mul(synd, err_loc) # first multiply the syndromes with the errata locator polynomial #remainder = remainder[len(remainder)-(nsym+1):] # then divide by a polynomial of the length we want, which is equivalent to slicing the list (which represents the polynomial) return remainder
def rs_find_error_evaluator(synd, err_loc, nsym)
Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.
13.017222
3.969483
3.279325
'''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).''' # nmess = length of whole codeword (message + ecc symbols) errs = len(err_loc) - 1 err_pos = [] for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error err_pos.append(nmess - 1 - i) # Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial if len(err_pos) != errs: # TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible. raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!") return err_pos
def rs_find_errors(err_loc, nmess, generator=2)
Find the roots (i.e., where the evaluation is zero) of the error polynomial by brute-force trial; this is a sort of Chien's search (but less efficient: Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).
13.581536
9.130706
1.487457
'''Reed-Solomon main decoding function''' global field_charac if len(msg_in) > field_charac: # Note that it is in fact possible to encode/decode messages that are longer than field_charac, but because this will be above the field, this will generate more error positions during Chien Search than it should, because this will generate duplicate values, which should normally be prevented thank's to the prime polynomial reduction (eg, because it can't discriminate between error at position 1 or 256, both being exactly equal under galois field 2^8). So it's really not advised to do it, but it's possible (but then you're not guaranted to be able to correct any error/erasure on symbols with a position above the length of field_charac -- if you really need a bigger message without chunking, then you should better enlarge c_exp so that you get a bigger field). raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac)) msg_out = bytearray(msg_in) # copy of message # erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values) if erase_pos is None: erase_pos = [] else: for e_pos in erase_pos: msg_out[e_pos] = 0 # check if there are too many erasures to correct (beyond the Singleton bound) if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct") # prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions) synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) # check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is. if max(synd) == 0: return msg_out[:-nsym], msg_out[-nsym:] # no errors # Find errors locations if only_erasures: err_pos = [] else: # compute the Forney syndromes, which hide the erasures from the original syndrome (so that BM will just have to deal with errors, not erasures) fsynd = rs_forney_syndromes(synd, erase_pos, len(msg_out), generator) # compute the error locator polynomial using Berlekamp-Massey err_loc = rs_find_error_locator(fsynd, nsym, erase_count=len(erase_pos)) # locate the message errors using Chien search (or bruteforce search) err_pos = rs_find_errors(err_loc[::-1], len(msg_out), generator) if err_pos is None: raise ReedSolomonError("Could not locate error") # Find errors values and apply them to correct the message # compute errata evaluator and errata magnitude polynomials, then correct errors and erasures msg_out = rs_correct_errata(msg_out, synd, (erase_pos + err_pos), fcr, generator) # note that we here use the original syndrome, not the forney syndrome (because we will correct both errors and erasures, so we need the full syndrome) # check if the final message is fully repaired synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) if max(synd) > 0: raise ReedSolomonError("Could not correct message") # return the successfully decoded message return msg_out[:-nsym], msg_out[-nsym:]
def rs_correct_msg(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False)
Reed-Solomon main decoding function
7.257377
7.182334
1.010448
'''Reed-Solomon main decoding function, without using the modified Forney syndromes''' global field_charac if len(msg_in) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac)) msg_out = bytearray(msg_in) # copy of message # erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values) if erase_pos is None: erase_pos = [] else: for e_pos in erase_pos: msg_out[e_pos] = 0 # check if there are too many erasures if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct") # prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions) synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) # check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is. if max(synd) == 0: return msg_out[:-nsym], msg_out[-nsym:] # no errors # prepare erasures locator and evaluator polynomials erase_loc = None #erase_eval = None erase_count = 0 if erase_pos: erase_count = len(erase_pos) erase_pos_reversed = [len(msg_out)-1-eras for eras in erase_pos] erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator) #erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1) # prepare errors/errata locator polynomial if only_erasures: err_loc = erase_loc[::-1] #err_eval = erase_eval[::-1] else: err_loc = rs_find_error_locator(synd, nsym, erase_loc=erase_loc, erase_count=erase_count) err_loc = err_loc[::-1] #err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM) # locate the message errors err_pos = rs_find_errors(err_loc, len(msg_out), generator) # find the roots of the errata locator polynomial (ie: the positions of the errors/errata) if err_pos is None: raise ReedSolomonError("Could not locate error") # compute errata evaluator and errata magnitude polynomials, then correct errors and erasures msg_out = rs_correct_errata(msg_out, synd, err_pos, fcr=fcr, generator=generator) # check if the final message is fully repaired synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) if max(synd) > 0: raise ReedSolomonError("Could not correct message") # return the successfully decoded message return msg_out[:-nsym], msg_out[-nsym:]
def rs_correct_msg_nofsynd(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False)
Reed-Solomon main decoding function, without using the modified Forney syndromes
4.250029
4.033705
1.053629
'''Returns true if the message + ecc has no error of false otherwise (may not always catch a wrong decoding or a wrong message, particularly if there are too many errors -- above the Singleton bound --, but it usually does)''' return ( max(rs_calc_syndromes(msg, nsym, fcr, generator)) == 0 )
def rs_check(msg, nsym, fcr=0, generator=2)
Returns true if the message + ecc has no error, or false otherwise (may not always catch a wrong decoding or a wrong message, particularly if there are too many errors -- above the Singleton bound --, but it usually does)
29.76466
2.585412
11.512542
'''Encode a message (ie, add the ecc symbols) using Reed-Solomon, whatever the length of the message because we use chunking''' if isinstance(data, str): data = bytearray(data, "latin-1") chunk_size = self.nsize - self.nsym enc = bytearray() for i in xrange(0, len(data), chunk_size): chunk = data[i:i+chunk_size] enc.extend(rs_encode_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator)) return enc
def encode(self, data)
Encode a message (ie, add the ecc symbols) using Reed-Solomon, whatever the length of the message because we use chunking
5.892349
3.000943
1.963499
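
The encode method above simply slices the input into chunks of nsize - nsym bytes, so that each chunk plus its nsym ecc symbols fits in one codeword. A self-contained sketch of just that chunking step (names are illustrative):

def iter_chunks(data, nsize=255, nsym=10):
    chunk_size = nsize - nsym
    for i in range(0, len(data), chunk_size):
        yield data[i:i + chunk_size]

chunks = list(iter_chunks(bytearray(600), nsize=255, nsym=10))
assert [len(c) for c in chunks] == [245, 245, 110]
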
'''Repair a message, whatever its size is, by using chunking''' # erase_pos is a list of positions where you know (or greatly suspect at least) there is an erasure (ie, wrong character but you know it's at this position). Just input the list of all positions you know there are errors, and this method will automatically split the erasures positions to attach to the corresponding data chunk. if isinstance(data, str): data = bytearray(data, "latin-1") dec = bytearray() for i in xrange(0, len(data), self.nsize): # Split the long message in a chunk chunk = data[i:i+self.nsize] # Extract the erasures for this chunk e_pos = [] if erase_pos: # First extract the erasures for this chunk (all erasures below the maximum chunk length) e_pos = [x for x in erase_pos if x <= self.nsize] # Then remove the extract erasures from the big list and also decrement all subsequent positions values by nsize (the current chunk's size) so as to prepare the correct alignment for the next iteration erase_pos = [x - (self.nsize+1) for x in erase_pos if x > self.nsize] # Decode/repair this chunk! dec.extend(rs_correct_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator, erase_pos=e_pos, only_erasures=only_erasures)[0]) return dec
def decode(self, data, erase_pos=None, only_erasures=False)
Repair a message, whatever its size is, by using chunking
7.317102
6.501808
1.125395
if already_seen is None: already_seen = set() if record['address'] not in already_seen: already_seen.add(record['address']) if 'refs' in record: for child in children( record, index, stop_types=stop_types ): if child['address'] not in already_seen: for descendant in recurse( child, index, stop_types, already_seen=already_seen, type_group=type_group, ): yield descendant yield record
def recurse( record, index, stop_types=STOP_TYPES,already_seen=None, type_group=False )
Depth-first traversal of a tree; all children are yielded before the parent. record -- dictionary record to be recursed upon index -- mapping 'address' ids to dictionary records stop_types -- types which will *not* recurse already_seen -- set storing already-visited nodes yields the traversed nodes
2.574896
2.447792
1.051926
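
A toy, self-contained run of the traversal pattern used by recurse above: records are dicts keyed by 'address', children are listed in 'refs', and every child is yielded before its parent while already_seen prevents revisiting shared nodes. This sketch drops the stop_types filtering and the children() helper for brevity.

def recurse_simple(record, index, already_seen=None):
    if already_seen is None:
        already_seen = set()
    if record['address'] not in already_seen:
        already_seen.add(record['address'])
        for ref in record.get('refs', []):
            child = index[ref]
            if child['address'] not in already_seen:
                for descendant in recurse_simple(child, index, already_seen):
                    yield descendant
        yield record

index = {
    1: {'address': 1, 'refs': [2, 3]},
    2: {'address': 2, 'refs': []},
    3: {'address': 3, 'refs': [2]},   # 2 is shared, so it is yielded only once
}
assert [r['address'] for r in recurse_simple(index[1], index)] == [2, 3, 1]
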
if open is None: open = [] if seen is None: seen = set() for child in children( record, index, stop_types = stop_types ): if child['type'] in stop_types or child['type'] == LOOP_TYPE: continue if child['address'] in open: # loop has been found start = open.index( child['address'] ) new = frozenset( open[start:] ) if new not in seen: seen.add(new) yield new elif child['address'] in seen: continue else: seen.add( child['address']) open.append( child['address'] ) for loop in find_loops( child, index, stop_types=stop_types, open=open, seen=seen ): yield loop open.pop( -1 )
def find_loops( record, index, stop_types = STOP_TYPES, open=None, seen = None )
Find all loops within the index and replace with loop records
2.491622
2.523758
0.987266
for loop in loops: loop = list(loop) members = [index[addr] for addr in loop] external_parents = list(set([ addr for addr in sum([shared.get(addr,[]) for addr in loop],[]) if addr not in loop ])) if external_parents: if len(external_parents) == 1: # potentially a loop that's been looped... parent = index.get( external_parents[0] ) if parent['type'] == LOOP_TYPE: continue # we haven't already been looped... loop_addr = new_address( index ) shared[loop_addr] = external_parents loop_record = index[loop_addr] = { 'address': loop_addr, 'refs': loop, 'parents': external_parents, 'type': LOOP_TYPE, 'size': 0, } for member in members: # member's references must *not* point to loop... member['refs'] = [ ref for ref in member['refs'] if ref not in loop ] # member's parents are *just* the loop member['parents'][:] = [loop_addr] # each referent to loop holds a single reference to the loop rather than many to children for parent in external_parents: parent = index[parent] for member in members: rewrite_references( parent['refs'], member['address'], None ) parent['refs'].append( loop_addr )
def promote_loops( loops, index, shared )
Turn loops into "objects" that can be processed normally
4.706289
4.711974
0.998793
result = [] for ref in record.get( key,[]): try: record = index[ref] except KeyError, err: #print 'No record for %s address %s in %s'%(key, ref, record['address'] ) pass # happens when an unreachable references a reachable that has been compressed out... else: if record['type'] not in stop_types: result.append( record ) return result
def children( record, index, key='refs', stop_types=STOP_TYPES )
Retrieve children records for given record
7.488065
7.402491
1.01156
types = {} for child in children( record, index, key, stop_types=stop_types ): types.setdefault(child['type'],[]).append( child ) return types
def children_types( record, index, key='refs', stop_types=STOP_TYPES )
Produce dictionary mapping type-key to instances for all children
2.961756
2.710263
1.092793
for record in recurse( overall_record, index, stop_types=stop_types, already_seen=already_seen, type_group=True, ): # anything with a totsize we've already processed... if record.get('totsize') is not None: continue rinfo = record rinfo['module'] = overall_record.get('name',NON_MODULE_REFS ) if not record['refs']: rinfo['rsize'] = 0 rinfo['children'] = [] else: # TODO: provide a flag to coalesce based on e.g. type at each level or throughout... rinfo['children'] = rinfo_children = list ( children( record, index, stop_types=stop_types ) ) rinfo['rsize'] = sum([ ( child.get('totsize',0.0)/float(len(shared.get( child['address'], [])) or 1) ) for child in rinfo_children ], 0.0 ) rinfo['totsize'] = record['size'] + rinfo['rsize'] return None
def recurse_module( overall_record, index, shared, stop_types=STOP_TYPES, already_seen=None, min_size=0 )
Creates a has-a recursive-cost hierarchy. Mutates objects in-place to produce a hierarchy of memory usage based on reference-holding cost assignment.
6.435147
6.580117
0.977968
for parent in targets: if not isinstance( parent, dict ): try: parent = index[parent] except KeyError, err: continue rewrite_references( parent[key], old, new, single_ref=single_ref )
def rewrite_refs( targets, old,new, index, key='refs', single_ref=False )
Rewrite key in all targets (from index if necessary) to replace old with new
4.312271
4.118441
1.047064
old,new = as_id(old),as_id(new) to_delete = [] for i,n in enumerate(sequence): if n == old: if new is None: to_delete.append( i ) else: sequence[i] = new if single_ref: new = None elif n == new and single_ref: new = None if to_delete: to_delete.reverse() for i in to_delete: del sequence[i] return sequence
def rewrite_references( sequence, old, new, single_ref=False )
Rewrite parents to point to new in place of old. sequence -- sequence of id references; old -- old id; new -- new id. Returns the rewritten sequence.
2.54213
2.634668
0.964877
return ( not child.get('refs',()) and ( not shared.get(child['address']) or shared.get(child['address']) == [parent['address']] ) )
def simple( child, shared, parent )
Return True if the child is "simple" in the sense of group_children: it has no refs of its own and no parent other than the given parent.
7.32304
7.155785
1.023374
to_compress = [] for to_simplify in list(iterindex( index )): if not isinstance( to_simplify, dict ): continue for typ,kids in children_types( to_simplify, index, stop_types=stop_types ).items(): kids = [k for k in kids if k and simple(k,shared, to_simplify)] if len(kids) >= min_kids: # we can group and compress out... to_compress.append( (to_simplify,typ,kids)) for to_simplify,typ,kids in to_compress: typ_address = new_address(index) kid_addresses = [k['address'] for k in kids] index[typ_address] = { 'address': typ_address, 'type': MANY_TYPE, 'name': typ, 'size': sum( [k.get('size',0) for k in kids], 0), 'parents': [to_simplify['address']], } shared[typ_address] = index[typ_address]['parents'] to_simplify['refs'][:] = [typ_address] if delete_children: for address in kid_addresses: try: del index[address] except KeyError, err: pass # already compressed out try: del shared[address] except KeyError, err: pass # already compressed out index[typ_address]['refs'] = [] else: index[typ_address]['refs'] = kid_addresses
def group_children( index, shared, min_kids=10, stop_types=STOP_TYPES, delete_children=True )
Collect like-type children into sub-groups of objects for objects with long children-lists Only group if: * there are more than X children of type Y * children are "simple" * individual children have no children themselves * individual children have no other parents...
3.577456
3.572908
1.001273
# things which will have their dictionaries compressed out to_delete = set() for to_simplify in iterindex(index): if to_simplify['address'] in to_delete: continue if to_simplify['type'] in simplify_dicts and not 'compressed' in to_simplify: refs = to_simplify['refs'] for ref in refs: child = index.get( ref ) if child is not None and child['type'] == 'dict': child_referrers = child['parents'][:] if len(child_referrers) == 1 or to_simplify['type'] in always_compress: to_simplify['compressed'] = True to_simplify['refs'] = child['refs'] to_simplify['size'] += child['size'] # rewrite anything *else* that was pointing to child to point to us... while to_simplify['address'] in child_referrers: child_referrers.remove( to_simplify['address'] ) if child_referrers: rewrite_refs( child_referrers, child['address'], to_simplify['address'], index, single_ref=True ) # now rewrite grandchildren to point to root obj instead of dict for grandchild in child['refs']: grandchild = index[grandchild] parent_set = grandchild['parents'] if parent_set: rewrite_references( parent_set, child, to_simplify, single_ref = True, ) assert parent_set to_delete.add( child['address'] ) for item in to_delete: del index[item] del shared[item] return index
def simplify_dicts( index, shared, simplify_dicts=SIMPLIFY_DICTS, always_compress=ALWAYS_COMPRESS_DICTS )
Eliminate "noise" dictionary records from the index index -- overall index of objects (including metadata such as type records) shared -- parent-count mapping for records in index module/type/class dictionaries
4.159259
4.200969
0.990071
reachable = set() already_seen = set() for module in modules: for child in recurse( module, index, stop_types=stop_types, already_seen=already_seen): reachable.add( child['address'] ) return reachable
def find_reachable( modules, index, shared, stop_types=STOP_TYPES )
Find the set of all reachable objects from given root nodes (modules)
3.550405
3.265246
1.087331
for id,shares in shared.iteritems(): if id in reachable: # child is reachable filtered = [ x for x in shares if x in reachable # only those parents which are reachable ] if len(filtered) != len(shares): shares[:] = filtered
def deparent_unreachable( reachable, shared )
Eliminate all parent-links from unreachable objects from reachable objects
6.227453
5.976416
1.042005
for v in iterindex( index ): v['parents'] = shared.get( v['address'], [] )
def bind_parents( index, shared )
Set parents on all items in index
10.933576
9.420609
1.160602
log.warn( '%s disconnected objects in %s total objects', len(disconnected), len(index)) natural_roots = [x for x in disconnected if x.get('refs') and not x.get('parents')] log.warn( '%s objects with no parents at all' ,len(natural_roots)) for natural_root in natural_roots: recurse_module( natural_root, index, shared ) yield natural_root rest = [x for x in disconnected if x.get( 'totsize' ) is None] un_found = { 'type': 'module', 'name': '<disconnected objects>', 'children': rest, 'parents': [ ], 'size': 0, 'totsize': sum([x['size'] for x in rest],0), 'address': new_address( index ), } index[un_found['address']] = un_found yield un_found
def find_roots( disconnected, index, shared )
Find appropriate "root" objects from which to recurse the hierarchies Will generate a synthetic root for anything which doesn't have any parents...
5.25175
5.067362
1.036387
if key not in self.roots: root,self.rows = load( self.filename, include_interpreter = self.include_interpreter ) self.roots[key] = root return self.roots[key]
def get_root( self, key )
Retrieve the given root by type-key
6.216745
5.655668
1.099206
if not self.URL_TEMPLATE: raise NotImplementedError return self.URL_TEMPLATE.format(self=self)
def url(self)
The fetching target URL. The default behavior of this property is to build the URL string with the :const:`~brownant.dinergate.Dinergate.URL_TEMPLATE`. Subclasses could override :const:`~brownant.dinergate.Dinergate.URL_TEMPLATE` or use a different implementation.
6.743523
4.642512
1.452559
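
A minimal sketch of the URL_TEMPLATE mechanism described above, using a stand-in class rather than the real Dinergate (the class name, attribute and template value here are illustrative):

class ItemPage:
    # subclasses of the real Dinergate would override URL_TEMPLATE like this
    URL_TEMPLATE = "http://www.example.com/item/{self.item_id}"

    def __init__(self, item_id):
        self.item_id = item_id

    @property
    def url(self):
        if not self.URL_TEMPLATE:
            raise NotImplementedError
        return self.URL_TEMPLATE.format(self=self)

assert ItemPage(42).url == "http://www.example.com/item/42"
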
self.actions.append((method_name, args, kwargs))
def record_action(self, method_name, *args, **kwargs)
Record the method-calling action. The actions are expected to be played on a target object. :param method_name: the name of the called method. :param args: the positional arguments for the called method. :param kwargs: the keyword arguments for the called method.
3.647785
5.210307
0.700109
for method_name, args, kwargs in self.actions: method = getattr(target, method_name) method(*args, **kwargs)
def play_actions(self, target)
Play recorded actions on the target object. :param target: the target which receives all recorded actions; normally a brown ant app instance. :type target: :class:`~brownant.app.Brownant`
3.036921
3.97896
0.763245
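
The two methods above form a simple record-and-replay pattern: calls are stored as (method_name, args, kwargs) tuples and later re-applied to a target via getattr. A self-contained sketch of the same pattern, using a plain list as the playback target:

class ActionRecorder:
    def __init__(self):
        self.actions = []

    def record_action(self, method_name, *args, **kwargs):
        self.actions.append((method_name, args, kwargs))

    def play_actions(self, target):
        for method_name, args, kwargs in self.actions:
            getattr(target, method_name)(*args, **kwargs)

recorder = ActionRecorder()
recorder.record_action("append", 1)
recorder.record_action("extend", [2, 3])
played = []
recorder.play_actions(played)
assert played == [1, 2, 3]
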
def decorator(func): endpoint = "{func.__module__}:{func.__name__}".format(func=func) self.record_action("add_url_rule", host, rule, endpoint, **options) return func return decorator
def route(self, host, rule, **options)
The decorator to register the wrapped function with the brown ant app. All optional parameters of this method are compatible with :meth:`~brownant.app.Brownant.add_url_rule`. Registered functions or classes must be import-able via their qualified names. It is different from :class:`~flask.Flask`, working more like a lazy-loading mode: registered objects are only loaded before their first use. The right way:: @site.route("www.example.com", "/item/<int:item_id>") def spam(request, item_id): pass The wrong way:: def egg(): # the function could not be imported by its qualified name @site.route("www.example.com", "/item/<int:item_id>") def spam(request, item_id): pass egg() :param host: the limited host name. :param rule: the URL path rule as string. :param options: the options to be forwarded to the :class:`werkzeug.routing.Rule` object.
4.4113
4.809402
0.917224
if not isinstance(text, (bytes, text_type)): raise TypeError("must be string type") if isinstance(text, text_type): return text.encode(encoding) return text
def to_bytes_safe(text, encoding="utf-8")
Convert the input value into bytes type. If the input value is string type and can be encoded as UTF-8 bytes, the encoded value will be returned. Otherwise, or if the encoding fails, the original value will be returned as-is. :param text: the input value which could be string or bytes. :param encoding: the expected encoding to be used while converting the string input into bytes. :rtype: :class:`~__builtin__.bytes`
3.487318
3.326035
1.048491
attr_name = self.attr_names[name] return getattr(obj, attr_name)
def get_attr(self, obj, name)
Get an attribute of the target object with the configured attribute name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names` of this instance. :param obj: the target object. :type obj: :class:`~brownant.dinergate.Dinergate` :param name: the internal name used in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`. (e.g. `"text_attr"`)
3.791746
4.172009
0.908854
rule = Rule(rule_string, host=host, endpoint=endpoint, **options) self.url_map.add(rule)
def add_url_rule(self, host, rule_string, endpoint, **options)
Add a url rule to the app instance. The url rule is the same with Flask apps and other Werkzeug apps. :param host: the matched hostname. e.g. "www.python.org" :param rule_string: the matched path pattern. e.g. "/news/<int:id>" :param endpoint: the endpoint name as a dispatching key such as the qualified name of the object.
2.527413
4.402966
0.574025
url = urllib.parse.urlparse(url_string) url = self.validate_url(url) url_adapter = self.url_map.bind(server_name=url.hostname, url_scheme=url.scheme, path_info=url.path) query_args = url_decode(url.query) return url, url_adapter, query_args
def parse_url(self, url_string)
Parse the URL string with the URL map of this app instance. :param url_string: the original URL string. :returns: the tuple `(url, url_adapter, query_args)`; the url is parsed by the standard library `urlparse`, the url_adapter comes from the bound werkzeug URL map, and the query_args is a werkzeug multidict.
3.142058
2.811693
1.117497
# fix up the non-ascii path url_path = to_bytes_safe(url.path) url_path = urllib.parse.quote(url_path, safe=b"/%") # fix up the non-ascii query url_query = to_bytes_safe(url.query) url_query = urllib.parse.quote(url_query, safe=b"?=&") url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path, url.params, url_query, url.fragment) # validate the components of URL has_hostname = url.hostname is not None and len(url.hostname) > 0 has_http_scheme = url.scheme in ("http", "https") has_path = not len(url.path) or url.path.startswith("/") if not (has_hostname and has_http_scheme and has_path): raise NotSupported("invalid url: %s" % repr(url)) return url
def validate_url(self, url)
Validate the :class:`~urllib.parse.ParseResult` object. This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url` works as expected even when it meets an unexpected URL string. :param url: the parsed url. :type url: :class:`~urllib.parse.ParseResult`
2.57759
2.65108
0.972279
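
The non-ASCII fix-up in validate_url above boils down to encoding the path to UTF-8 bytes and percent-quoting everything except the characters already meaningful in that component. A self-contained sketch of that step (path value is illustrative):

import urllib.parse

path = "/caf\u00e9/menu"
quoted = urllib.parse.quote(path.encode("utf-8"), safe=b"/%")
assert quoted == "/caf%C3%A9/menu"
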
url, url_adapter, query_args = self.parse_url(url_string) try: endpoint, kwargs = url_adapter.match() except NotFound: raise NotSupported(url_string) except RequestRedirect as e: new_url = "{0.new_url}?{1}".format(e, url_encode(query_args)) return self.dispatch_url(new_url) try: handler = import_string(endpoint) request = Request(url=url, args=query_args) return handler(request, **kwargs) except RequestRedirect as e: return self.dispatch_url(e.new_url)
def dispatch_url(self, url_string)
Dispatch the URL string to the target endpoint function. :param url_string: the original URL string. :returns: the return value of calling the dispatched function.
3.208714
3.691072
0.869318
if isinstance(site, string_types): site = import_string(site) site.play_actions(target=self)
def mount_site(self, site)
Mount a supported site to this app instance. :param site: the site instance to be mounted.
10.211308
14.238583
0.717158
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def ping(self, *args, **kwargs)
Ping Server Respond without doing anything. This endpoint is used to check that the service is up. This method is ``stable``
15.555004
20.579929
0.755834
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
def githubWebHookConsumer(self, *args, **kwargs)
Consume GitHub WebHook Capture a GitHub event and publish it via pulse, if it's a push, release or pull request. This method is ``experimental``
12.283296
17.192822
0.714443
return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
def badge(self, *args, **kwargs)
Latest Build Status Badge Checks the status of the latest build of a given branch and returns corresponding badge svg. This method is ``experimental``
17.162397
21.199625
0.809561
return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
def createComment(self, *args, **kwargs)
Post a comment on a given GitHub Issue or Pull Request For a given Issue or Pull Request of a repository, this will write a new message. This method takes input: ``v1/create-comment.json#`` This method is ``experimental``
13.110386
19.620392
0.668202
w = lambda l: l[random.randrange(len(l))]
er = lambda w: w[:-1]+'ier' if w.endswith('y') else (w+'r' if w.endswith('e') else w+'er')
s = lambda w: w+'s'
punc = lambda c, *l: " ".join(l)+c
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
while True:
    yield pick(
        sentence('the',w(adj),w(them),'and the',w(them),w(them_verb)),
        sentence('delivering me to',w(place)),
        sentence('they',w(action),'my',w(me_part),'and',w(me_verb),'with all my',w(feeling)),
        sentence('in the',w(place),'my',w(feeling),'shall',w(me_verb)),
        sentence(punc(',', er(w(adj)),'than the a petty',w(feeling))),
        sentence(er(w(adj)),'than',w(them),'in',w(place)),
        sentence(punc('!','oh my',w(me_part)),punc('!','the',w(feeling))),
        sentence('no one',s(w(angst)),'why the',w(them),w(them_verb + me_verb)))
def lorem_gotham()
Cheesy Gothic Poetry Generator Uses Python generators to yield eternal angst. When you need to generate random verbiage to test your code or typographic design, let's face it... Lorem Ipsum and "the quick brown fox" are old and boring! What you need is something with *flavor*, the kind of thing a depressed teenager with a lot of black makeup would write.
6.055866
6.065129
0.998473
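Since lorem_gotham() is an infinite generator, callers pull a fixed number of lines from it; a short usage sketch, assuming both functions are in scope:

poem = lorem_gotham()
print(lorem_gotham_title().center(50))
for _ in range(4):
    print(next(poem))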
w = lambda l: l[random.randrange(len(l))]
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
return pick(
    sentence('why i',w(me_verb)),
    sentence(w(place)),
    sentence('a',w(adj),w(adj),w(place)),
    sentence('the',w(them)))
def lorem_gotham_title()
Names your poem
7.508852
7.424128
1.011412
print()
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print(lorem_gotham_title().center(50))
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print()
poem = lorem_gotham()
for n in range(16):
    if n in (4, 8, 12):
        print()
    print(next(poem))
print()
def main()
I provide a command-line interface for this module
2.774493
2.7817
0.997409
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
async def listWorkerTypes(self, *args, **kwargs)
See the list of worker types which are known to be managed This method is only for debugging the ec2-manager This method gives output: ``v1/list-worker-types.json#`` This method is ``experimental``
11.76386
14.655767
0.802678
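A sketch of driving the async EC2Manager methods with asyncio. The import path and option names follow the taskcluster.aio client but are assumptions here; the rootUrl is a placeholder.

import asyncio
from taskcluster.aio import EC2Manager   # assumed import path

async def show_worker_types():
    ec2 = EC2Manager({"rootUrl": "https://tc.example.com"})
    # Each generated method returns a coroutine, so it must be awaited.
    print(await ec2.listWorkerTypes())

asyncio.run(show_worker_types())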
return await self._makeApiCall(self.funcinfo["runInstance"], *args, **kwargs)
async def runInstance(self, *args, **kwargs)
Run an instance Request an instance of a worker type This method takes input: ``v1/run-instance-request.json#`` This method is ``experimental``
14.319862
22.150627
0.646477
return await self._makeApiCall(self.funcinfo["workerTypeStats"], *args, **kwargs)
async def workerTypeStats(self, *args, **kwargs)
Look up the resource stats for a workerType Return an object which has a generic state description. This only contains counts of instances This method gives output: ``v1/worker-type-resources.json#`` This method is ``experimental``
11.873431
17.092812
0.694645
return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs)
async def workerTypeHealth(self, *args, **kwargs)
Look up the resource health for a workerType Return a view of the health of a given worker type This method gives output: ``v1/health.json#`` This method is ``experimental``
12.592219
18.226162
0.690887
return await self._makeApiCall(self.funcinfo["workerTypeErrors"], *args, **kwargs)
async def workerTypeErrors(self, *args, **kwargs)
Look up the most recent errors of a workerType Return a list of the most recent errors encountered by a worker type This method gives output: ``v1/errors.json#`` This method is ``experimental``
13.59235
17.631807
0.770899
return await self._makeApiCall(self.funcinfo["workerTypeState"], *args, **kwargs)
async def workerTypeState(self, *args, **kwargs)
Look up the resource state for a workerType Return state information for a given worker type This method gives output: ``v1/worker-type-state.json#`` This method is ``experimental``
12.628479
16.56139
0.762525
return await self._makeApiCall(self.funcinfo["ensureKeyPair"], *args, **kwargs)
async def ensureKeyPair(self, *args, **kwargs)
Ensure a KeyPair for a given worker type exists Idempotently ensure that a keypair of a given name exists This method takes input: ``v1/create-key-pair.json#`` This method is ``experimental``
14.652827
23.665508
0.619164
return await self._makeApiCall(self.funcinfo["removeKeyPair"], *args, **kwargs)
async def removeKeyPair(self, *args, **kwargs)
Ensure a KeyPair for a given worker type does not exist Ensure that a keypair of a given name does not exist. This method is ``experimental``
14.370121
21.72608
0.661423
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs)
async def terminateInstance(self, *args, **kwargs)
Terminate an instance Terminate an instance in a specified region This method is ``experimental``
13.152555
19.271217
0.682497
return await self._makeApiCall(self.funcinfo["getSpecificPrices"], *args, **kwargs)
async def getSpecificPrices(self, *args, **kwargs)
Request prices for EC2 Return a list of possible prices for EC2 This method takes input: ``v1/prices-request.json#`` This method gives output: ``v1/prices.json#`` This method is ``experimental``
10.376702
15.870541
0.653834
return await self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs)
async def getHealth(self, *args, **kwargs)
Get EC2 account health metrics Give some basic stats on the health of our EC2 account This method gives output: ``v1/health.json#`` This method is ``experimental``
12.868346
18.74979
0.686319
return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs)
async def getRecentErrors(self, *args, **kwargs)
Look up the most recent errors in the provisioner across all worker types Return a list of recent errors encountered This method gives output: ``v1/errors.json#`` This method is ``experimental``
11.993319
19.613049
0.611497
return await self._makeApiCall(self.funcinfo["regions"], *args, **kwargs)
async def regions(self, *args, **kwargs)
See the list of regions managed by this ec2-manager This method is only for debugging the ec2-manager This method is ``experimental``
14.847516
17.857334
0.831452
return await self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs)
async def amiUsage(self, *args, **kwargs)
See the list of AMIs and their usage List AMIs and their usage by returning a list of objects in the form: { region: string volumetype: string lastused: timestamp } This method is ``experimental``
12.543196
17.804184
0.704508
return await self._makeApiCall(self.funcinfo["ebsUsage"], *args, **kwargs)
async def ebsUsage(self, *args, **kwargs)
See the current EBS volume usage list Lists current EBS volume usage by returning a list of objects that are uniquely defined by {region, volumetype, state} in the form: { region: string, volumetype: string, state: string, totalcount: integer, totalgb: integer, touched: timestamp (last time that information was updated), } This method is ``experimental``
12.550022
18.930984
0.662936
return await self._makeApiCall(self.funcinfo["dbpoolStats"], *args, **kwargs)
async def dbpoolStats(self, *args, **kwargs)
Statistics on the Database client pool This method is only for debugging the ec2-manager This method is ``experimental``
12.094499
14.63349
0.826494
return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs)
async def sqsStats(self, *args, **kwargs)
Statistics on the sqs queues This method is only for debugging the ec2-manager This method is ``experimental``
13.580398
16.452261
0.825443
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs)
async def purgeQueues(self, *args, **kwargs)
Purge the SQS queues This method is only for debugging the ec2-manager This method is ``experimental``
13.929899
15.53612
0.896614
ref = {
    'exchange': 'pull-request',
    'name': 'pullRequest',
    'routingKey': [
        {'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind'},
        {'multipleWords': False, 'name': 'organization'},
        {'multipleWords': False, 'name': 'repository'},
        {'multipleWords': False, 'name': 'action'},
    ],
    'schema': 'v1/github-pull-request-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def pullRequest(self, *args, **kwargs)
GitHub Pull Request Event When a GitHub pull request event is posted it will be broadcast on this exchange with the designated `organization` and `repository` in the routing-key along with event specific metadata in the payload. This exchange outputs: ``v1/github-pull-request-message.json#``. This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required) * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) * action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required)
4.743968
3.114724
1.523078
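A sketch of turning this exchange helper into a pulse binding. The GithubEvents class name and the routing-key pattern values follow the taskcluster client conventions but are assumptions here.

import taskcluster

events = taskcluster.GithubEvents({"rootUrl": "https://tc.example.com"})
# Constrain the binding to one org/repo; unspecified keys default to '*'.
binding = events.pullRequest(routingKeyPattern={"organization": "mozilla",
                                                "repository": "gecko-dev"})
print(binding["exchange"], binding["routingKeyPattern"])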
ref = {
    'exchange': 'push',
    'name': 'push',
    'routingKey': [
        {'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind'},
        {'multipleWords': False, 'name': 'organization'},
        {'multipleWords': False, 'name': 'repository'},
    ],
    'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def push(self, *args, **kwargs)
GitHub push Event When a GitHub push event is posted it will be broadcast on this exchange with the designated `organization` and `repository` in the routing-key along with event specific metadata in the payload. This exchange outputs: ``v1/github-push-message.json#``. This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required) * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
6.341426
3.62078
1.751398
ref = {
    'exchange': 'release',
    'name': 'release',
    'routingKey': [
        {'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind'},
        {'multipleWords': False, 'name': 'organization'},
        {'multipleWords': False, 'name': 'repository'},
    ],
    'schema': 'v1/github-release-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def release(self, *args, **kwargs)
GitHub release Event When a GitHub release event is posted it will be broadcast on this exchange with the designated `organization` and `repository` in the routing-key along with event specific metadata in the payload. This exchange outputs: ``v1/github-release-message.json#``. This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required) * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
6.421085
3.58011
1.793544
ref = {
    'exchange': 'task-group-creation-requested',
    'name': 'taskGroupCreationRequested',
    'routingKey': [
        {'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind'},
        {'multipleWords': False, 'name': 'organization'},
        {'multipleWords': False, 'name': 'repository'},
    ],
    'schema': 'v1/task-group-creation-requested.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
def taskGroupCreationRequested(self, *args, **kwargs)
tc-gh requested the Queue service to create all the tasks in a group This is supposed to signal that the taskCreate API has been called for every task in the task group for this particular repo and this particular organization; it is currently used for creating initial status indicators in the GitHub UI using the Statuses API. This particular exchange can also be bound to RabbitMQ queues by custom routes - for that, pass in the array of routes as a second argument to the publish method. Currently, we do use the statuses routes to bind the handler that creates the initial status. This exchange outputs: ``v1/task-group-creation-requested.json#``. This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required) * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
5.164251
3.238618
1.594585
degrees = 0
while True:
    t1 = time.time()
    with Frame() as frame:
        oval_width = frame.width
        oval_height = frame.height / 3.0
        cube_height = int(oval_height * 2)
        (p1_x, p1_y) = ellipse_point(degrees, oval_width, oval_height)
        (p2_x, p2_y) = ellipse_point(degrees + 90, oval_width, oval_height)
        (p3_x, p3_y) = ellipse_point(degrees + 180, oval_width, oval_height)
        (p4_x, p4_y) = ellipse_point(degrees + 270, oval_width, oval_height)
        degrees = (degrees + degree_change) % 360
        # connect square thing at top
        frame.line(p1_x, p1_y, p2_x, p2_y)
        frame.line(p2_x, p2_y, p3_x, p3_y)
        frame.line(p3_x, p3_y, p4_x, p4_y)
        frame.line(p4_x, p4_y, p1_x, p1_y)
        # connect top to bottom
        frame.line(p1_x, p1_y, p1_x, p1_y + cube_height)
        frame.line(p2_x, p2_y, p2_x, p2_y + cube_height)
        frame.line(p3_x, p3_y, p3_x, p3_y + cube_height)
        frame.line(p4_x, p4_y, p4_x, p4_y + cube_height)
        # connect square thing at bottom
        frame.line(p1_x, p1_y + cube_height, p2_x, p2_y + cube_height)
        frame.line(p2_x, p2_y + cube_height, p3_x, p3_y + cube_height)
        frame.line(p3_x, p3_y + cube_height, p4_x, p4_y + cube_height)
        frame.line(p4_x, p4_y + cube_height, p1_x, p1_y + cube_height)
    elapsed = (time.time() - t1)
    time.sleep(abs(1.0 / frame_rate - elapsed))
def rotating_cube(degree_change=3, frame_rate=3)
Rotating cube program How it works: 1. Create two imaginary ellipses 2. Sized to fit in the top third and bottom third of screen 3. Create four imaginary points on each ellipse 4. Make those points the top and bottom corners of your cube 5. Connect the lines and render 6. Rotate the points on the ellipses and repeat
1.536965
1.497388
1.026431
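rotating_cube() animates forever; a short usage sketch, assuming the function is in scope, that runs it until the user interrupts:

try:
    rotating_cube(degree_change=5, frame_rate=25)
except KeyboardInterrupt:
    pass   # Ctrl-C ends the animation loop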
steep = abs(y1 - y0) > abs(x1 - x0)
if steep:
    (x0, y0) = (y0, x0)
    (x1, y1) = (y1, x1)
if x0 > x1:
    (x0, x1) = (x1, x0)
    (y0, y1) = (y1, y0)
deltax = x1 - x0
deltay = abs(y1 - y0)
error = deltax / 2
y = y0
if y0 < y1:
    ystep = 1
else:
    ystep = -1
for x in range(x0, x1 - 1):
    if steep:
        self[y, x] = c
    else:
        self[x, y] = c
    error = error - deltay
    if error < 0:
        y = y + ystep
        error = error + deltax
def line(self, x0, y0, x1, y1, c='*')
r"""Draws a line Who would have thought this would be so complicated? Thanks again Wikipedia_ <3 .. _Wikipedia: http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
1.594346
1.535929
1.038034
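To make the Bresenham stepping easier to follow outside the Frame class, here is a self-contained sketch of the same integer line walk on a plain 2-D list; the draw_line name and grid layout are purely illustrative.

def draw_line(grid, x0, y0, x1, y1, c='*'):
    # Work in the coordinate system where the line is shallow (|dx| >= |dy|).
    steep = abs(y1 - y0) > abs(x1 - x0)
    if steep:
        x0, y0, x1, y1 = y0, x0, y1, x1
    if x0 > x1:
        x0, x1, y0, y1 = x1, x0, y1, y0
    deltax, deltay = x1 - x0, abs(y1 - y0)
    error, y = deltax / 2, y0
    ystep = 1 if y0 < y1 else -1
    for x in range(x0, x1 + 1):
        if steep:
            grid[x][y] = c      # un-swap the axes when plotting
        else:
            grid[y][x] = c
        error -= deltay
        if error < 0:
            y += ystep
            error += deltax
    return grid

canvas = [[' '] * 20 for _ in range(10)]
print("\n".join("".join(row) for row in draw_line(canvas, 0, 0, 19, 9)))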
return self._makeApiCall(self.funcinfo["terminateWorkerType"], *args, **kwargs)
def terminateWorkerType(self, *args, **kwargs)
Terminate all resources from a worker type Terminate all instances for this worker type This method is ``experimental``
12.028389
15.930218
0.755067
return self._makeApiCall(self.funcinfo["getPrices"], *args, **kwargs)
def getPrices(self, *args, **kwargs)
Request prices for EC2 Return a list of possible prices for EC2 This method gives output: ``v1/prices.json#`` This method is ``experimental``
12.163607
16.255159
0.748292
return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs)
def allState(self, *args, **kwargs)
List out the entire internal state This method is only for debugging the ec2-manager This method is ``experimental``
13.118561
14.940805
0.878036
return self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
def task(self, *args, **kwargs)
Get Task Definition This end-point will return the task-definition. Notice that the task definition may have been modified by the queue; if an optional property is not specified, the queue may provide a default value. This method gives output: ``v1/task.json#`` This method is ``stable``
19.104469
24.327921
0.78529
return self._makeApiCall(self.funcinfo["defineTask"], *args, **kwargs)
def defineTask(self, *args, **kwargs)
Define Task **Deprecated**, this is the same as `createTask` with a **self-dependency**. This is only present for legacy. This method takes input: ``v1/create-task-request.json#`` This method gives output: ``v1/task-status-response.json#`` This method is ``deprecated``
15.445311
18.036623
0.856331
return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
def scheduleTask(self, *args, **kwargs)
Schedule Defined Task scheduleTask will schedule a task to be executed, even if it has unresolved dependencies. A task would otherwise only be scheduled if its dependencies were resolved. This is useful if you have defined a task that depends on itself or on some other task that has not been resolved, but you wish the task to be scheduled immediately. This will announce the task as pending and workers will be allowed to claim it and resolve the task. **Note** this operation is **idempotent** and will not fail or complain if called with a `taskId` that is already scheduled, or even resolved. To reschedule a task previously resolved, use `rerunTask`. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
16.058744
20.767347
0.773269
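A sketch of forcing a defined-but-blocked task to run, assuming the taskcluster Queue client; the rootUrl and taskId below are placeholders.

import taskcluster

queue = taskcluster.Queue({"rootUrl": "https://tc.example.com"})
# Idempotent: scheduling an already-scheduled task just returns its status.
status = queue.scheduleTask("fN1SbArXTPSVFNUvaOlinQ")   # hypothetical taskId
print(status["status"]["state"])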
return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
def rerunTask(self, *args, **kwargs)
Rerun a Resolved Task This method _reruns_ a previously resolved task, even if it was _completed_. This is useful if your task completes unsuccessfully, and you just want to run it from scratch again. This will also reset the number of `retries` allowed. This method is deprecated in favour of creating a new task with the same task definition (but with a new taskId). Remember that `retries` in the task status counts the number of runs that the queue has started because the worker stopped responding, for example because a spot node died. **Remark** this operation is idempotent: if you try to rerun a task that is not either `failed` or `completed`, this operation will just return the current task status. This method gives output: ``v1/task-status-response.json#`` This method is ``deprecated``
12.936927
19.440821
0.665452
return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
def cancelTask(self, *args, **kwargs)
Cancel Task This method will cancel a task that is either `unscheduled`, `pending` or `running`. It will resolve the current run as `exception` with `reasonResolved` set to `canceled`. If the task isn't scheduled yet, ie. it doesn't have any runs, an initial run will be added and resolved as described above. Hence, after canceling a task, it cannot be scheduled with `queue.scheduleTask`, but a new run can be created with `queue.rerun`. These semantics are equivalent to calling `queue.scheduleTask` immediately followed by `queue.cancelTask`. **Remark** this operation is idempotent: if you try to cancel a task that isn't `unscheduled`, `pending` or `running`, this operation will just return the current task status. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
14.122085
20.1134
0.702123
return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
def reportFailed(self, *args, **kwargs)
Report Run Failed Report a run failed, resolving the run as `failed`. Use this to resolve a run that failed because the task specific code behaved unexpectedly. For example the task exited non-zero, or didn't produce expected output. Do not use this if the task couldn't be run because of a malformed payload, or another unexpected condition. In these cases we have a task exception, which should be reported with `reportException`. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
17.442177
17.939232
0.972292
return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
def reportException(self, *args, **kwargs)
Report Task Exception Resolve a run as _exception_. Generally, you will want to report tasks as failed instead of exception. You should `reportException` if, * The `task.payload` is invalid, * Non-existent resources are referenced, * Declared actions cannot be executed due to unavailable resources, * The worker had to shutdown prematurely, * The worker experienced an unknown error, or, * The task explicitly requested a retry. Do not use this to signal that some user-specified code crashed for any reason specific to this code. If user-specific code hits a resource that is temporarily unavailable, the worker should report the task as _failed_. This method takes input: ``v1/task-exception-request.json#`` This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
17.474182
19.382813
0.90153
return self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs)
def createArtifact(self, *args, **kwargs)
Create Artifact This API end-point creates an artifact for a specific run of a task. This should **only** be used by a worker currently operating on this task, or from a process running within the task (ie. on the worker). All artifacts must specify when they `expires`; the queue will automatically take care of deleting artifacts past their expiration point. This feature makes it feasible to upload large intermediate artifacts from data processing applications, as the artifacts can be set to expire a few days later. We currently support 3 different `storageType`s; each storage type has slightly different features and in some cases different semantics. We also have 2 deprecated `storageType`s which are only maintained for backwards compatibility and should not be used in new implementations. **Blob artifacts** are useful for storing large files. Currently, these are all stored in S3 but there are facilities for adding support for other backends in the future. A call for this type of artifact must provide information about the file which will be uploaded. This includes sha256 sums and sizes. This method will return a list of general form HTTP requests which are signed by AWS S3 credentials managed by the Queue. Once these requests are completed the list of `ETag` values returned by the requests must be passed to the queue `completeArtifact` method. **S3 artifacts**, DEPRECATED, are useful for static files which will be stored on S3. When creating an S3 artifact the queue will return a pre-signed URL to which you can do a `PUT` request to upload your artifact. Note that the `PUT` request **must** specify the `content-length` header and **must** give the `content-type` header the same value as in the request to `createArtifact`. **Azure artifacts**, DEPRECATED, are stored in the _Azure Blob Storage_ service which, given the consistency guarantees and API interface offered by Azure, is more suitable for artifacts that will be modified during the execution of the task. For example docker-worker has a feature that persists the task log to Azure Blob Storage every few seconds creating a somewhat live log. A request to create an Azure artifact will return a URL featuring a [Shared-Access-Signature](http://msdn.microsoft.com/en-us/library/azure/dn140256.aspx); refer to MSDN for further information on how to use these. **Warning: azure artifact is currently an experimental feature subject to changes and data-drops.** **Reference artifacts** only consist of meta-data which the queue will store for you. These artifacts really only have a `url` property and when the artifact is requested the client will be redirected to the URL provided with a `303` (See Other) redirect. Please note that we cannot delete artifacts you upload to other services; we can only delete the reference to the artifact, when it expires. **Error artifacts** only consist of meta-data which the queue will store for you. These artifacts are only meant to indicate that the worker or the task failed to generate a specific artifact that would otherwise have been uploaded. For example docker-worker will upload an error artifact if the file it was supposed to upload doesn't exist or turns out to be a directory. Clients requesting an error artifact will get a `424` (Failed Dependency) response. This is mainly designed to ensure that dependent tasks can distinguish between artifacts that were supposed to be generated and artifacts for which the name is misspelled.
**Artifact immutability**: generally speaking you cannot overwrite an artifact once it is created. But if you repeat the request with the same properties the request will succeed as the operation is idempotent. This is useful if you need to refresh a signed URL while uploading. Do not abuse this to overwrite artifacts created by another entity, such as a worker-host overwriting an artifact created by worker-code. As a special case the `url` property on _reference artifacts_ can be updated. You should only use this to update the `url` property for reference artifacts your process has created. This method takes input: ``v1/post-artifact-request.json#`` This method gives output: ``v1/post-artifact-response.json#`` This method is ``stable``
13.115017
30.167976
0.434733
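A sketch of declaring a reference artifact (the simplest storageType) for a run, assuming the taskcluster Queue client; the rootUrl, taskId and target URL are placeholders.

import taskcluster

queue = taskcluster.Queue({"rootUrl": "https://tc.example.com"})
queue.createArtifact("fN1SbArXTPSVFNUvaOlinQ", 0, "public/logs/build.txt", {
    "storageType": "reference",                 # queue stores only metadata
    "expires": "2025-01-01T00:00:00.000Z",
    "contentType": "text/plain",
    "url": "https://example.com/build.txt",     # target of the 303 redirect
})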
return self._makeApiCall(self.funcinfo["completeArtifact"], *args, **kwargs)
def completeArtifact(self, *args, **kwargs)
Complete Artifact This endpoint finalises an upload done through the blob `storageType`. The queue will ensure that the task/run is still allowing artifacts to be uploaded. For single-part S3 blob artifacts, this endpoint will simply ensure the artifact is present in S3. For multipart S3 artifacts, the endpoint will perform the commit step of the multipart upload flow. As the final step for both multi and single part artifacts, the `present` entity field will be set to `true` to reflect that the artifact is now present and a message published to pulse. NOTE: This endpoint *must* be called for all artifacts of storageType 'blob' This method takes input: ``v1/put-artifact-request.json#`` This method is ``experimental``
13.476502
21.221769
0.635032
return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
def listProvisioners(self, *args, **kwargs)
Get a list of all active provisioners Get all active provisioners. The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity. The response is paged. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as a query-string option. By default this end-point will list up to 1000 provisioners in a single page. You may limit this with the query-string parameter `limit`. This method gives output: ``v1/list-provisioners-response.json#`` This method is ``experimental``
11.052512
13.216747
0.836251
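A sketch of walking the paged listProvisioners results by following continuationToken, assuming the taskcluster Queue client's query keyword; names and the rootUrl are placeholders.

import taskcluster

queue = taskcluster.Queue({"rootUrl": "https://tc.example.com"})
query, provisioners = {"limit": 100}, []
while True:
    page = queue.listProvisioners(query=query)
    provisioners.extend(page["provisioners"])
    if "continuationToken" not in page:
        break                                   # last page reached
    query["continuationToken"] = page["continuationToken"]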
return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
def getProvisioner(self, *args, **kwargs)
Get an active provisioner Get an active provisioner. The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity. This method gives output: ``v1/provisioner-response.json#`` This method is ``experimental``
12.461894
13.479311
0.92452
return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
def declareProvisioner(self, *args, **kwargs)
Update a provisioner Declare a provisioner, supplying some details about it. `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are possessed. For example, a request to update the `aws-provisioner-v1` provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope `queue:declare-provisioner:aws-provisioner-v1#description`. The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity. This method takes input: ``v1/update-provisioner-request.json#`` This method gives output: ``v1/provisioner-response.json#`` This method is ``experimental``
12.993777
16.638958
0.780925