def reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions):
    """reqScannerSubscription(EClientSocketBase self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)"""
    return _swigibpy.EClientSocketBase_reqScannerSubscription(self, tickerId, subscription, scannerSubscriptionOptions)
reqScannerSubscription(EClientSocketBase self, int tickerId, ScannerSubscription subscription, TagValueListSPtr const & scannerSubscriptionOptions)
entailment
def reqFundamentalData(self, reqId, arg3, reportType):
    """reqFundamentalData(EClientSocketBase self, TickerId reqId, Contract arg3, IBString const & reportType)"""
    return _swigibpy.EClientSocketBase_reqFundamentalData(self, reqId, arg3, reportType)
reqFundamentalData(EClientSocketBase self, TickerId reqId, Contract arg3, IBString const & reportType)
entailment
def calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice):
    """calculateImpliedVolatility(EClientSocketBase self, TickerId reqId, Contract contract, double optionPrice, double underPrice)"""
    return _swigibpy.EClientSocketBase_calculateImpliedVolatility(self, reqId, contract, optionPrice, underPrice)
calculateImpliedVolatility(EClientSocketBase self, TickerId reqId, Contract contract, double optionPrice, double underPrice)
entailment
def calculateOptionPrice(self, reqId, contract, volatility, underPrice):
    """calculateOptionPrice(EClientSocketBase self, TickerId reqId, Contract contract, double volatility, double underPrice)"""
    return _swigibpy.EClientSocketBase_calculateOptionPrice(self, reqId, contract, volatility, underPrice)
calculateOptionPrice(EClientSocketBase self, TickerId reqId, Contract contract, double volatility, double underPrice)
entailment
def reqAccountSummary(self, reqId, groupName, tags):
    """reqAccountSummary(EClientSocketBase self, int reqId, IBString const & groupName, IBString const & tags)"""
    return _swigibpy.EClientSocketBase_reqAccountSummary(self, reqId, groupName, tags)
reqAccountSummary(EClientSocketBase self, int reqId, IBString const & groupName, IBString const & tags)
entailment
def _run(self):
    '''Continually poll TWS'''
    stop = self._stop_evt
    connected = self._connected_evt
    tws = self._tws
    fd = tws.fd()
    pollfd = [fd]
    while not stop.is_set():
        while (not connected.is_set() or not tws.isConnected()) and not stop.is_set():
            connected.clear()
            backoff = 0
            retries = 0
            while not connected.is_set() and not stop.is_set():
                if tws.reconnect_auto and not tws.reconnect():
                    # reconnect failed: back off exponentially, capped at MAX_BACKOFF msec
                    if backoff < self.MAX_BACKOFF:
                        retries += 1
                        backoff = min(2 ** (retries + 1), self.MAX_BACKOFF)
                    connected.wait(backoff / 1000.)
                else:
                    connected.wait(1)
            fd = tws.fd()
            pollfd = [fd]
        if fd > 0:
            try:
                evtin, _evtout, evterr = select.select(pollfd, [], pollfd, 1)
            except select.error:
                connected.clear()
                continue
            else:
                if fd in evtin:
                    try:
                        if not tws.checkMessages():
                            tws.eDisconnect(stop_polling=False)
                            continue
                    except (SystemExit, SystemError, KeyboardInterrupt):
                        break
                    except:
                        # report unexpected errors through the wrapper if possible
                        try:
                            self._wrapper.pyError(*sys.exc_info())
                        except:
                            print_exc()
                elif fd in evterr:
                    connected.clear()
                    continue
Continually poll TWS
entailment
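The reconnect loop above waits min(2**(retries + 1), MAX_BACKOFF) milliseconds between failed attempts. A standalone sketch of that backoff schedule; the MAX_BACKOFF cap below is an assumed illustrative value, not the poller's actual constant:

MAX_BACKOFF = 5000  # assumed cap, in milliseconds

backoff = 0
retries = 0
for _ in range(8):
    if backoff < MAX_BACKOFF:
        retries += 1
        backoff = min(2 ** (retries + 1), MAX_BACKOFF)
    print("retry %d: wait %.3f s" % (retries, backoff / 1000.))
    # prints 0.004, 0.008, 0.016, ... then stays at the 5.0 s cap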
def tickPrice(self, tickerId, field, price, canAutoExecute):
    """tickPrice(EWrapper self, TickerId tickerId, TickType field, double price, int canAutoExecute)"""
    return _swigibpy.EWrapper_tickPrice(self, tickerId, field, price, canAutoExecute)
tickPrice(EWrapper self, TickerId tickerId, TickType field, double price, int canAutoExecute)
entailment
def tickSize(self, tickerId, field, size):
    """tickSize(EWrapper self, TickerId tickerId, TickType field, int size)"""
    return _swigibpy.EWrapper_tickSize(self, tickerId, field, size)
tickSize(EWrapper self, TickerId tickerId, TickType field, int size)
entailment
def tickOptionComputation(self, tickerId, tickType, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice):
    """tickOptionComputation(EWrapper self, TickerId tickerId, TickType tickType, double impliedVol, double delta, double optPrice, double pvDividend, double gamma, double vega, double theta, double undPrice)"""
    return _swigibpy.EWrapper_tickOptionComputation(self, tickerId, tickType, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice)
tickOptionComputation(EWrapper self, TickerId tickerId, TickType tickType, double impliedVol, double delta, double optPrice, double pvDividend, double gamma, double vega, double theta, double undPrice)
entailment
def tickGeneric(self, tickerId, tickType, value):
    """tickGeneric(EWrapper self, TickerId tickerId, TickType tickType, double value)"""
    return _swigibpy.EWrapper_tickGeneric(self, tickerId, tickType, value)
tickGeneric(EWrapper self, TickerId tickerId, TickType tickType, double value)
entailment
def tickString(self, tickerId, tickType, value):
    """tickString(EWrapper self, TickerId tickerId, TickType tickType, IBString const & value)"""
    return _swigibpy.EWrapper_tickString(self, tickerId, tickType, value)
tickString(EWrapper self, TickerId tickerId, TickType tickType, IBString const & value)
entailment
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry):
    """tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)"""
    return _swigibpy.EWrapper_tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureExpiry, dividendImpact, dividendsToExpiry)
tickEFP(EWrapper self, TickerId tickerId, TickType tickType, double basisPoints, IBString const & formattedBasisPoints, double totalDividends, int holdDays, IBString const & futureExpiry, double dividendImpact, double dividendsToExpiry)
entailment
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
    """orderStatus(EWrapper self, OrderId orderId, IBString const & status, int filled, int remaining, double avgFillPrice, int permId, int parentId, double lastFillPrice, int clientId, IBString const & whyHeld)"""
    return _swigibpy.EWrapper_orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld)
orderStatus(EWrapper self, OrderId orderId, IBString const & status, int filled, int remaining, double avgFillPrice, int permId, int parentId, double lastFillPrice, int clientId, IBString const & whyHeld)
entailment
def openOrder(self, orderId, arg0, arg1, arg2):
    """openOrder(EWrapper self, OrderId orderId, Contract arg0, Order arg1, OrderState arg2)"""
    return _swigibpy.EWrapper_openOrder(self, orderId, arg0, arg1, arg2)
openOrder(EWrapper self, OrderId orderId, Contract arg0, Order arg1, OrderState arg2)
entailment
def updateAccountValue(self, key, val, currency, accountName):
    """updateAccountValue(EWrapper self, IBString const & key, IBString const & val, IBString const & currency, IBString const & accountName)"""
    return _swigibpy.EWrapper_updateAccountValue(self, key, val, currency, accountName)
updateAccountValue(EWrapper self, IBString const & key, IBString const & val, IBString const & currency, IBString const & accountName)
entailment
def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName):
    """updatePortfolio(EWrapper self, Contract contract, int position, double marketPrice, double marketValue, double averageCost, double unrealizedPNL, double realizedPNL, IBString const & accountName)"""
    return _swigibpy.EWrapper_updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName)
updatePortfolio(EWrapper self, Contract contract, int position, double marketPrice, double marketValue, double averageCost, double unrealizedPNL, double realizedPNL, IBString const & accountName)
entailment
def execDetails(self, reqId, contract, execution):
    """execDetails(EWrapper self, int reqId, Contract contract, Execution execution)"""
    return _swigibpy.EWrapper_execDetails(self, reqId, contract, execution)
execDetails(EWrapper self, int reqId, Contract contract, Execution execution)
entailment
def error(self, id, errorCode, errorString):
    '''Error during communication with TWS'''
    if errorCode == 165:  # Historical data service message
        sys.stderr.write("TWS INFO - %s: %s\n" % (errorCode, errorString))
    elif errorCode >= 501 and errorCode < 600:  # Socket read failed
        sys.stderr.write("TWS CLIENT-ERROR - %s: %s\n" % (errorCode, errorString))
    elif errorCode >= 100 and errorCode < 1100:
        sys.stderr.write("TWS ERROR - %s: %s\n" % (errorCode, errorString))
    elif errorCode >= 1100 and errorCode < 2100:
        sys.stderr.write("TWS SYSTEM-ERROR - %s: %s\n" % (errorCode, errorString))
    elif errorCode in (2104, 2106, 2108):
        sys.stderr.write("TWS INFO - %s: %s\n" % (errorCode, errorString))
    elif errorCode >= 2100 and errorCode <= 2110:
        sys.stderr.write("TWS WARNING - %s: %s\n" % (errorCode, errorString))
    else:
        sys.stderr.write("TWS ERROR - %s: %s\n" % (errorCode, errorString))
Error during communication with TWS
entailment
def updateMktDepth(self, id, position, operation, side, price, size):
    """updateMktDepth(EWrapper self, TickerId id, int position, int operation, int side, double price, int size)"""
    return _swigibpy.EWrapper_updateMktDepth(self, id, position, operation, side, price, size)
updateMktDepth(EWrapper self, TickerId id, int position, int operation, int side, double price, int size)
entailment
def updateMktDepthL2(self, id, position, marketMaker, operation, side, price, size):
    """updateMktDepthL2(EWrapper self, TickerId id, int position, IBString marketMaker, int operation, int side, double price, int size)"""
    return _swigibpy.EWrapper_updateMktDepthL2(self, id, position, marketMaker, operation, side, price, size)
updateMktDepthL2(EWrapper self, TickerId id, int position, IBString marketMaker, int operation, int side, double price, int size)
entailment
def updateNewsBulletin(self, msgId, msgType, newsMessage, originExch):
    """updateNewsBulletin(EWrapper self, int msgId, int msgType, IBString const & newsMessage, IBString const & originExch)"""
    return _swigibpy.EWrapper_updateNewsBulletin(self, msgId, msgType, newsMessage, originExch)
updateNewsBulletin(EWrapper self, int msgId, int msgType, IBString const & newsMessage, IBString const & originExch)
entailment
def historicalData(self, reqId, date, open, high, low, close, volume, barCount, WAP, hasGaps):
    """historicalData(EWrapper self, TickerId reqId, IBString const & date, double open, double high, double low, double close, int volume, int barCount, double WAP, int hasGaps)"""
    return _swigibpy.EWrapper_historicalData(self, reqId, date, open, high, low, close, volume, barCount, WAP, hasGaps)
historicalData(EWrapper self, TickerId reqId, IBString const & date, double open, double high, double low, double close, int volume, int barCount, double WAP, int hasGaps)
entailment
def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
    """scannerData(EWrapper self, int reqId, int rank, ContractDetails contractDetails, IBString const & distance, IBString const & benchmark, IBString const & projection, IBString const & legsStr)"""
    return _swigibpy.EWrapper_scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr)
scannerData(EWrapper self, int reqId, int rank, ContractDetails contractDetails, IBString const & distance, IBString const & benchmark, IBString const & projection, IBString const & legsStr)
entailment
def realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count):
    """realtimeBar(EWrapper self, TickerId reqId, long time, double open, double high, double low, double close, long volume, double wap, int count)"""
    return _swigibpy.EWrapper_realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count)
realtimeBar(EWrapper self, TickerId reqId, long time, double open, double high, double low, double close, long volume, double wap, int count)
entailment
def position(self, account, contract, position, avgCost):
    """position(EWrapper self, IBString const & account, Contract contract, int position, double avgCost)"""
    return _swigibpy.EWrapper_position(self, account, contract, position, avgCost)
position(EWrapper self, IBString const & account, Contract contract, int position, double avgCost)
entailment
def accountSummary(self, reqId, account, tag, value, curency):
    """accountSummary(EWrapper self, int reqId, IBString const & account, IBString const & tag, IBString const & value, IBString const & curency)"""
    return _swigibpy.EWrapper_accountSummary(self, reqId, account, tag, value, curency)
accountSummary(EWrapper self, int reqId, IBString const & account, IBString const & tag, IBString const & value, IBString const & curency)
entailment
def pyError(self, type, value, traceback):
    '''Handles an error thrown during invocation of an EWrapper method.
    Arguments are those provided by sys.exc_info()
    '''
    sys.stderr.write("Exception thrown during EWrapper method dispatch:\n")
    print_exception(type, value, traceback)
Handles an error thrown during invocation of an EWrapper method. Arguments are those provided by sys.exc_info()
entailment
def init_counts(self, counts_len):
    '''Called after instantiating with a compressed payload
    Params:
        counts_len counts size to use based on decoded settings in the header
    '''
    assert self._data and counts_len and self.counts_len == 0
    self.counts_len = counts_len
    self._init_counts()
    results = decode(self._data, payload_header_size,
                     addressof(self.counts), counts_len,
                     self.word_size)
    # no longer needed
    self._data = None
    return results
Called after instantiating with a compressed payload Params: counts_len counts size to use based on decoded settings in the header
entailment
def _decompress(self, compressed_payload):
    '''Decompress a compressed payload into this payload wrapper.
    Note that the decompressed buffer is saved in self._data and the
    counts array is not yet allocated.
    Args:
        compressed_payload (string) a payload in zlib compressed form
    Exception:
        HdrCookieException:
            the compressed payload has an invalid cookie
        HdrLengthException:
            the decompressed size is too small for the HdrPayload structure
            or is not aligned or is too large for the passed payload class
        HdrHistogramSettingsException:
            mismatch in the significant figures, lowest and highest trackable value
    '''
    # make sure this instance is pristine
    if self._data:
        raise RuntimeError('Cannot decompress to an instance with payload')
    # Here it is important to keep a reference to the decompressed
    # string so that it does not get garbage collected
    self._data = zlib.decompress(compressed_payload)
    len_data = len(self._data)
    counts_size = len_data - payload_header_size
    # reject payloads too small for the header or too large overall
    if counts_size < 0 or counts_size > MAX_COUNTS_SIZE:
        raise HdrLengthException('Invalid size:' + str(len_data))
    # copy the first bytes for the header
    self.payload = PayloadHeader.from_buffer_copy(self._data)
    cookie = self.payload.cookie
    if get_cookie_base(cookie) != V2_ENCODING_COOKIE_BASE:
        raise HdrCookieException('Invalid cookie: %x' % cookie)
    word_size = get_word_size_in_bytes_from_cookie(cookie)
    if word_size != V2_MAX_WORD_SIZE_IN_BYTES:
        raise HdrCookieException('Invalid V2 cookie: %x' % cookie)
Decompress a compressed payload into this payload wrapper. Note that the decompressed buffer is saved in self._data and the counts array is not yet allocated. Args: compressed_payload (string) a payload in zlib compressed form Exception: HdrCookieException: the compressed payload has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class HdrHistogramSettingsException: mismatch in the significant figures, lowest and highest trackable value
entailment
def compress(self, counts_limit):
    '''Compress this payload instance
    Args:
        counts_limit how many counters should be encoded
                     starting from index 0 (can be 0)
    Return:
        the compressed payload (python string)
    '''
    if self.payload:
        # worst case varint encoded length is when each counter is at the
        # maximum value; in this case 1 more byte per counter is needed
        # due to the extra bits
        varint_len = counts_limit * (self.word_size + 1)
        # allocate enough space to fit the header and the varint string
        encode_buf = (c_byte * (payload_header_size + varint_len))()
        # encode past the payload header
        varint_len = encode(addressof(self.counts), counts_limit,
                            self.word_size,
                            addressof(encode_buf) + payload_header_size,
                            varint_len)
        # copy the header after updating the varint stream length
        self.payload.payload_len = varint_len
        ctypes.memmove(addressof(encode_buf), addressof(self.payload), payload_header_size)
        cdata = zlib.compress(ctypes.string_at(encode_buf, payload_header_size + varint_len))
        return cdata
    # can't compress if no payload
    raise RuntimeError('No payload to compress')
Compress this payload instance Args: counts_limit how many counters should be encoded starting from index 0 (can be 0), Return: the compressed payload (python string)
entailment
def encode(self):
    '''Compress the associated encodable payload, prepend the header then
    encode with base64 if requested
    Returns:
        the b64 encoded wire encoding of the histogram (as a string)
        or the compressed payload (as a string, if b64 wrapping is disabled)
    '''
    # only compress the first non zero buckets
    # if histogram is empty we do not encode any counter
    if self.histogram.total_count:
        relevant_length = \
            self.histogram.get_counts_array_index(self.histogram.max_value) + 1
    else:
        relevant_length = 0
    cpayload = self.payload.compress(relevant_length)
    if self.b64_wrap:
        self.header.length = len(cpayload)
        header_str = ctypes.string_at(addressof(self.header), ext_header_size)
        return base64.b64encode(header_str + cpayload)
    return cpayload
Compress the associated encodable payload, prepend the header then encode with base64 if requested Returns: the b64 encoded wire encoding of the histogram (as a string) or the compressed payload (as a string, if b64 wrapping is disabled)
entailment
def decode(encoded_histogram, b64_wrap=True):
    '''Decode a wire histogram encoding into a read-only Hdr Payload instance
    Args:
        encoded_histogram a string containing the wire encoding of a histogram
                          such as one returned from encode()
    Returns:
        an hdr_payload instance with all the decoded/uncompressed fields
    Exception:
        TypeError in case of base64 decode error
        HdrCookieException:
            the main header has an invalid cookie
            the compressed payload header has an invalid cookie
        HdrLengthException:
            the decompressed size is too small for the HdrPayload structure
            or is not aligned or is too large for the passed payload class
        HdrHistogramSettingsException:
            mismatch in the significant figures, lowest and highest trackable value
        zlib.error:
            in case of zlib decompression error
    '''
    if b64_wrap:
        b64decode = base64.b64decode(encoded_histogram)
        # this string has 2 parts in it: the header (raw) and the payload (compressed)
        b64dec_len = len(b64decode)
        if b64dec_len < ext_header_size:
            raise HdrLengthException('Base64 decoded message too short')
        header = ExternalHeader.from_buffer_copy(b64decode)
        if get_cookie_base(header.cookie) != V2_COMPRESSION_COOKIE_BASE:
            raise HdrCookieException()
        if header.length != b64dec_len - ext_header_size:
            raise HdrLengthException('Decoded length=%d buffer length=%d' %
                                     (header.length, b64dec_len - ext_header_size))
        # this will result in a copy of the compressed payload part
        # could not find a way to do otherwise since zlib.decompress()
        # expects a string (and does not like a buffer or a memoryview object)
        cpayload = b64decode[ext_header_size:]
    else:
        cpayload = encoded_histogram
    hdr_payload = HdrPayload(8, compressed_payload=cpayload)
    return hdr_payload
Decode a wire histogram encoding into a read-only Hdr Payload instance Args: encoded_histogram a string containing the wire encoding of a histogram such as one returned from encode() Returns: an hdr_payload instance with all the decoded/uncompressed fields Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class HdrHistogramSettingsException: mismatch in the significant figures, lowest and highest trackable value zlib.error: in case of zlib decompression error
entailment
def record_value(self, value, count=1):
    '''Record a new value into the histogram
    Args:
        value: the value to record (must be in the valid range)
        count: incremental count (defaults to 1)
    '''
    if value < 0:
        return False
    counts_index = self._counts_index_for(value)
    if (counts_index < 0) or (self.counts_len <= counts_index):
        return False
    self.counts[counts_index] += count
    self.total_count += count
    self.min_value = min(self.min_value, value)
    self.max_value = max(self.max_value, value)
    return True
Record a new value into the histogram Args: value: the value to record (must be in the valid range) count: incremental count (defaults to 1)
entailment
def record_corrected_value(self, value, expected_interval, count=1):
    '''Record a new value into the histogram and correct for
    coordinated omission if needed
    Args:
        value: the value to record (must be in the valid range)
        expected_interval: the expected interval between 2 value samples
        count: incremental count (defaults to 1)
    '''
    while True:
        if not self.record_value(value, count):
            return False
        if value <= expected_interval or expected_interval <= 0:
            return True
        value -= expected_interval
Record a new value into the histogram and correct for coordinated omission if needed Args: value: the value to record (must be in the valid range) expected_interval: the expected interval between 2 value samples count: incremental count (defaults to 1)
entailment
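A minimal usage sketch of the two recording paths, assuming the HdrHistogram(lowest, highest, significant_figures) constructor shown in decode() further below; the sample values are illustrative:

histogram = HdrHistogram(1, 60 * 60 * 1000, 2)  # track 1 msec .. 1 hour, 2 significant digits

histogram.record_value(10)  # a well-behaved 10 msec sample

# a 3500 msec stall observed while sampling every 100 msec: besides 3500,
# record_corrected_value also back-fills the samples the stall hid
# (3400, 3300, ... down to 100), compensating for coordinated omission
histogram.record_corrected_value(3500, expected_interval=100)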
def get_value_at_percentile(self, percentile):
    '''Get the value for a given percentile
    Args:
        percentile: a float in [0.0..100.0]
    Returns:
        the value for the given percentile
    '''
    count_at_percentile = self.get_target_count_at_percentile(percentile)
    total = 0
    for index in range(self.counts_len):
        total += self.get_count_at_index(index)
        if total >= count_at_percentile:
            value_at_index = self.get_value_from_index(index)
            if percentile:
                return self.get_highest_equivalent_value(value_at_index)
            return self.get_lowest_equivalent_value(value_at_index)
    return 0
Get the value for a given percentile Args: percentile: a float in [0.0..100.0] Returns: the value for the given percentile
entailment
def get_percentile_to_value_dict(self, percentile_list):
    '''A faster alternative to query values for a list of percentiles.
    Args:
        percentile_list: a list of percentiles in any order, dups will be ignored
                         each element in the list must be a float value in [0.0 .. 100.0]
    Returns:
        a dict of percentile values indexed by the percentile
    '''
    result = {}
    total = 0
    percentile_list_index = 0
    count_at_percentile = 0
    # remove dups and sort
    percentile_list = list(set(percentile_list))
    percentile_list.sort()
    for index in range(self.counts_len):
        total += self.get_count_at_index(index)
        while True:
            # recalculate target based on next requested percentile
            if not count_at_percentile:
                if percentile_list_index == len(percentile_list):
                    return result
                percentile = percentile_list[percentile_list_index]
                percentile_list_index += 1
                if percentile > 100:
                    return result
                count_at_percentile = self.get_target_count_at_percentile(percentile)
            if total >= count_at_percentile:
                value_at_index = self.get_value_from_index(index)
                if percentile:
                    result[percentile] = self.get_highest_equivalent_value(value_at_index)
                else:
                    result[percentile] = self.get_lowest_equivalent_value(value_at_index)
                count_at_percentile = 0
            else:
                break
    return result
A faster alternative to query values for a list of percentiles. Args: percentile_list: a list of percentiles in any order, dups will be ignored each element in the list must be a float value in [0.0 .. 100.0] Returns: a dict of percentile values indexed by the percentile
entailment
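Querying the sketch histogram from above, either one percentile at a time or batched in a single pass over the counts array:

p99 = histogram.get_value_at_percentile(99.0)

percentiles = histogram.get_percentile_to_value_dict([50.0, 90.0, 99.0, 99.9])
for pct in sorted(percentiles):
    print("p%s -> %d" % (pct, percentiles[pct]))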
def values_are_equivalent(self, val1, val2):
    '''Check whether 2 values are equivalent (meaning they
    are in the same bucket/range)
    Returns:
        true if the 2 values are equivalent
    '''
    return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2)
Check whether 2 values are equivalent (meaning they are in the same bucket/range) Returns: true if the 2 values are equivalent
entailment
def reset(self):
    '''Reset the histogram to a pristine state
    '''
    for index in range(self.counts_len):
        self.counts[index] = 0
    self.total_count = 0
    self.min_value = sys.maxsize
    self.max_value = 0
    self.start_time_stamp_msec = sys.maxsize
    self.end_time_stamp_msec = 0
Reset the histogram to a pristine state
entailment
def adjust_internal_tacking_values(self, min_non_zero_index, max_index, total_added):
    '''Called during decoding and add to adjust the new min/max value and
    total count
    Args:
        min_non_zero_index min nonzero index of all added counts (-1 if none)
        max_index max index of all added counts (-1 if none)
    '''
    if max_index >= 0:
        max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
        self.max_value = max(self.max_value, max_value)
    if min_non_zero_index >= 0:
        min_value = self.get_value_from_index(min_non_zero_index)
        self.min_value = min(self.min_value, min_value)
    self.total_count += total_added
Called during decoding and add to adjust the new min/max value and total count Args: min_non_zero_index min nonzero index of all added counts (-1 if none) max_index max index of all added counts (-1 if none)
entailment
def set_internal_tacking_values(self, min_non_zero_index, max_index, total_added):
    '''Called during decoding and add to adjust the new min/max value and
    total count
    Args:
        min_non_zero_index min nonzero index of all added counts (-1 if none)
        max_index max index of all added counts (-1 if none)
    '''
    if max_index >= 0:
        self.max_value = self.get_highest_equivalent_value(self.get_value_from_index(max_index))
    if min_non_zero_index >= 0:
        self.min_value = self.get_value_from_index(min_non_zero_index)
    self.total_count = total_added
Called during decoding and add to adjust the new min/max value and total count Args: min_non_zero_index min nonzero index of all added counts (-1 if none) max_index max index of all added counts (-1 if none)
entailment
def get_counts_array_index(self, value):
    '''Return the index in the counts array for a given value
    '''
    if value < 0:
        raise ValueError("Histogram recorded value cannot be negative.")
    bucket_index = self._get_bucket_index(value)
    sub_bucket_index = self._get_sub_bucket_index(value, bucket_index)
    # Calculate the index for the first entry in the bucket:
    # the following is the equivalent of ((bucket_index + 1) * sub_bucket_half_count)
    bucket_base_index = (bucket_index + 1) << self.sub_bucket_half_count_magnitude
    # Calculate the offset in the bucket (can be negative for first bucket):
    offset_in_bucket = sub_bucket_index - self.sub_bucket_half_count
    # The following is the equivalent of
    # ((sub_bucket_index - sub_bucket_half_count) + bucket_base_index)
    return bucket_base_index + offset_in_bucket
Return the index in the counts array for a given value
entailment
def decode_and_add(self, encoded_histogram):
    '''Decode an encoded histogram and add it to this histogram
    Args:
        encoded_histogram (string) an encoded histogram following the V1
                          format, such as one returned by the encode() method
    Exception:
        TypeError in case of base64 decode error
        HdrCookieException:
            the main header has an invalid cookie
            the compressed payload header has an invalid cookie
        HdrLengthException:
            the decompressed size is too small for the HdrPayload structure
            or is not aligned or is too large for the passed payload class
        zlib.error:
            in case of zlib decompression error
    '''
    other_hist = HdrHistogram.decode(encoded_histogram, self.b64_wrap)
    self.add(other_hist)
Decode an encoded histogram and add it to this histogram Args: encoded_histogram (string) an encoded histogram following the V1 format, such as one returned by the encode() method Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class zlib.error: in case of zlib decompression error
entailment
def decode(encoded_histogram, b64_wrap=True):
    '''Decode an encoded histogram and return a new histogram instance that
    has been initialized with the decoded content
    Return:
        a new histogram instance representing the decoded content
    Exception:
        TypeError in case of base64 decode error
        HdrCookieException:
            the main header has an invalid cookie
            the compressed payload header has an invalid cookie
        HdrLengthException:
            the decompressed size is too small for the HdrPayload structure
            or is not aligned or is too large for the passed payload class
        zlib.error:
            in case of zlib decompression error
    '''
    hdr_payload = HdrHistogramEncoder.decode(encoded_histogram, b64_wrap)
    payload = hdr_payload.payload
    histogram = HdrHistogram(payload.lowest_trackable_value,
                             payload.highest_trackable_value,
                             payload.significant_figures,
                             hdr_payload=hdr_payload)
    return histogram
Decode an encoded histogram and return a new histogram instance that has been initialized with the decoded content Return: a new histogram instance representing the decoded content Exception: TypeError in case of base64 decode error HdrCookieException: the main header has an invalid cookie the compressed payload header has an invalid cookie HdrLengthException: the decompressed size is too small for the HdrPayload structure or is not aligned or is too large for the passed payload class zlib.error: in case of zlib decompression error
entailment
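Continuing the same sketch, a round trip through the wire format (the b64-wrapped, zlib-compressed payload described above, which is the decode() default):

encoded = histogram.encode()         # base64 wire encoding of the histogram

copy = HdrHistogram.decode(encoded)  # a new, fully initialized instance

histogram.decode_and_add(encoded)    # or merge the interval into an existing one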
def _validate_json(self, checked_json, schema):
    """
    Validate JSON according to JSONSchema

    *Args*:\n
        _checked_json_: validated JSON.
        _schema_: schema that is used for validation.
    """
    try:
        jsonschema.validate(checked_json, schema)
    except jsonschema.ValidationError as e:
        print("""Failed validating '{0}' in schema {1}:
{2}
On instance {3}:
{4}""".format(e.validator,
              list(e.relative_schema_path)[:-1],
              pprint.pformat(e.schema),
              "[%s]" % "][".join(repr(index) for index in e.absolute_path),
              pprint.pformat(e.instance).encode('utf-8')))
        raise JsonValidatorError("Failed validating json by schema")
    except jsonschema.SchemaError as e:
        raise JsonValidatorError('Json-schema error: {}'.format(e))
Validate JSON according to JSONSchema *Args*:\n _checked_json_: validated JSON. _schema_: schema that is used for validation.
entailment
def validate_jsonschema_from_file(self, json_string, path_to_schema):
    """
    Validate JSON according to schema, loaded from a file.

    *Args:*\n
        _json_string_ - JSON string;\n
        _path_to_schema_ - path to file with JSON schema;

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json |
    """
    with open(path_to_schema) as schema_file:
        schema = schema_file.read()
    load_input_json = self.string_to_json(json_string)
    try:
        load_schema = json.loads(schema)
    except ValueError as e:
        raise JsonValidatorError('Error in schema: {}'.format(e))
    self._validate_json(load_input_json, load_schema)
Validate JSON according to schema, loaded from a file. *Args:*\n _json_string_ - JSON string;\n _path_to_schema_ - path to file with JSON schema; *Raises:*\n JsonValidatorError *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | *Test Cases* | *Action* | *Argument* | *Argument* | | Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json |
entailment
def validate_jsonschema(self, json_string, input_schema):
    """
    Validate JSON according to schema.

    *Args:*\n
        _json_string_ - JSON string;\n
        _input_schema_ - schema in string format;

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Simple | ${schema}= | OperatingSystem.Get File | ${CURDIR}${/}schema_valid.json |
    | | Validate jsonschema | {"foo":bar} | ${schema} |
    """
    load_input_json = self.string_to_json(json_string)
    try:
        load_schema = json.loads(input_schema)
    except ValueError as e:
        raise JsonValidatorError('Error in schema: {}'.format(e))
    self._validate_json(load_input_json, load_schema)
Validate JSON according to schema. *Args:*\n _json_string_ - JSON string;\n _input_schema_ - schema in string format; *Raises:*\n JsonValidatorError *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Simple | ${schema}= | OperatingSystem.Get File | ${CURDIR}${/}schema_valid.json | | | Validate jsonschema | {"foo":bar} | ${schema} |
entailment
def string_to_json(self, source):
    """
    Deserialize string into JSON structure.

    *Args:*\n
        _source_ - JSON string

    *Returns:*\n
        JSON structure

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | String to json | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json}= | String to json | ${json_string} |
    | | Log | ${json["store"]["book"][0]["price"]} |
    =>\n
    8.95
    """
    try:
        load_input_json = json.loads(source)
    except ValueError as e:
        raise JsonValidatorError("Could not parse '%s' as JSON: %s" % (source, e))
    return load_input_json
Deserialize string into JSON structure. *Args:*\n _source_ - JSON string *Returns:*\n JSON structure *Raises:*\n JsonValidatorError *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | String to json | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json}= | String to json | ${json_string} | | | Log | ${json["store"]["book"][0]["price"]} | =>\n 8.95
entailment
def json_to_string(self, source):
    """
    Serialize JSON structure into string.

    *Args:*\n
        _source_ - JSON structure

    *Returns:*\n
        JSON string

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Json to string | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json}= | String to json | ${json_string} |
    | | ${string}= | Json to string | ${json} |
    | | ${pretty_string}= | Pretty print json | ${string} |
    | | Log to console | ${pretty_string} |
    """
    try:
        load_input_json = json.dumps(source)
    except (TypeError, ValueError) as e:
        raise JsonValidatorError("Could not serialize '%s' to JSON: %s" % (source, e))
    return load_input_json
Serialize JSON structure into string. *Args:*\n _source_ - JSON structure *Returns:*\n JSON string *Raises:*\n JsonValidatorError *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Json to string | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json}= | String to json | ${json_string} | | | ${string}= | Json to string | ${json} | | | ${pretty_string}= | Pretty print json | ${string} | | | Log to console | ${pretty_string} |
entailment
def get_elements(self, json_string, expr):
    """
    Get list of elements from _json_string_, matching
    [http://goessner.net/articles/JsonPath/|JSONPath] expression.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONPath expression;

    *Returns:*\n
        List of found elements or ``None`` if no elements were found

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
    =>\n
    | [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
    """
    load_input_json = self.string_to_json(json_string)
    # parsing jsonpath
    jsonpath_expr = parse(expr)
    # list of returned elements
    value_list = []
    for match in jsonpath_expr.find(load_input_json):
        value_list.append(match.value)
    if not value_list:
        return None
    return value_list
Get list of elements from _json_string_, matching [http://goessner.net/articles/JsonPath/|JSONPath] expression. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONPath expression; *Returns:*\n List of found elements or ``None`` if no elements were found *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author | =>\n | [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
entailment
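The same JSONPath query driven from plain Python rather than Robot Framework; the JsonValidator class name is taken from the docstring examples and the inline document is illustrative:

validator = JsonValidator()
json_example = '{"store": {"book": [{"author": "Nigel Rees", "price": 8.95}]}}'
authors = validator.get_elements(json_example, "$.store.book[*].author")
print(authors)  # ['Nigel Rees'] here; None if nothing matched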
def select_elements(self, json_string, expr):
    """
    Return list of elements from _json_string_, matching
    [ http://jsonselect.org/ | JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported
    any more. Use other keywords of this library to query JSON.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONSelect expression;

    *Returns:*\n
        List of found elements or ``None`` if no elements were found

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price |
    =>\n
    | 12.99
    """
    load_input_json = self.string_to_json(json_string)
    # parsing jsonselect
    match = jsonselect.match(sel=expr, obj=load_input_json)
    ret = list(match)
    return ret if ret else None
Return list of elements from _json_string_, matching [ http://jsonselect.org/ | JSONSelect] expression. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression; *Returns:*\n List of found elements or ``None`` if no elements were found *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price | =>\n | 12.99
entailment
def select_objects(self, json_string, expr):
    """
    Return list of elements from _json_string_, matching
    [ http://objectpath.org// | ObjectPath] expression.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - ObjectPath expression;

    *Returns:*\n
        List of found elements. If no elements were found, empty list will be returned

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Select json objects | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_objectss}= | Select objects | ${json_example} | $..book[@.author.name is "Evelyn Waugh"].price |
    =>\n
    | [12.99]
    """
    load_input_json = self.string_to_json(json_string)
    # parsing objectpath
    tree = objectpath.Tree(load_input_json)
    values = tree.execute(expr)
    return list(values)
Return list of elements from _json_string_, matching [ http://objectpath.org// | ObjectPath] expression. *Args:*\n _json_string_ - JSON string;\n _expr_ - ObjectPath expression; *Returns:*\n List of found elements. If no elements were found, empty list will be returned *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Select json objects | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json_objectss}= | Select objects | ${json_example} | $..book[@.author.name is "Evelyn Waugh"].price | =>\n | [12.99]
entailment
def element_should_exist(self, json_string, expr):
    """
    Check the existence of one or more elements, matching
    [ http://jsonselect.org/ | JSONSelect] expression.

    *DEPRECATED* JSON Select query language is outdated and not supported
    any more. Use other keywords of this library to query JSON.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONSelect expression;\n

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
    | | Element should exist | ${json_example} | .store .book .price:expr(x=8.95) |
    """
    value = self.select_elements(json_string, expr)
    if value is None:
        raise JsonValidatorError('Elements %s do not exist' % expr)
Check the existence of one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression;\n *Raises:*\n JsonValidatorError *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") | | | Element should exist | ${json_example} | .store .book .price:expr(x=8.95) |
entailment
def element_should_not_exist(self, json_string, expr):
    """
    Check that one or more elements, matching
    [ http://jsonselect.org/ | JSONSelect] expression, don't exist.

    *DEPRECATED* JSON Select query language is outdated and not supported
    any more. Use other keywords of this library to query JSON.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONSelect expression;\n

    *Raises:*\n
        JsonValidatorError
    """
    value = self.select_elements(json_string, expr)
    if value is not None:
        raise JsonValidatorError('Elements %s exist but should not' % expr)
Check that one or more elements, matching [ http://jsonselect.org/ | JSONSelect] expression, don't exist. *DEPRECATED* JSON Select query language is outdated and not supported any more. Use other keywords of this library to query JSON. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONSelect expression;\n *Raises:*\n JsonValidatorError
entailment
def _json_path_search(self, json_dict, expr):
    """
    Scan JSON dictionary using a json-path passed as a string of the
    format $.element..element1[index] etc.

    *Args:*\n
        _json_dict_ - JSON dictionary;\n
        _expr_ - string of fuzzy search for items within the dictionary;\n

    *Returns:*\n
        List of DatumInContext objects:
        ``[DatumInContext(value=..., path=..., context=[DatumInContext])]``
        - value - found value
        - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields)

    *Raises:*\n
        JsonValidatorError
    """
    path = parse(expr)
    results = path.find(json_dict)
    if not results:
        raise JsonValidatorError("Nothing found in the dictionary {0} using the given path {1}".format(
            str(json_dict), str(expr)))
    return results
Scan JSON dictionary using a json-path passed as a string of the format $.element..element1[index] etc. *Args:*\n _json_dict_ - JSON dictionary;\n _expr_ - string of fuzzy search for items within the dictionary;\n *Returns:*\n List of DatumInContext objects: ``[DatumInContext(value=..., path=..., context=[DatumInContext])]`` - value - found value - path - value selector inside context.value (in implementation of jsonpath-rw: class Index or Fields) *Raises:*\n JsonValidatorError
entailment
def update_json(self, json_string, expr, value, index=0):
    """
    Replace the value in the JSON string.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONPath expression for determining the value to be replaced;\n
        _value_ - the value to be replaced with;\n
        _index_ - index for selecting item within a match list, default value is 0;\n

    *Returns:*\n
        Changed JSON in dictionary format.

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | ${json_update}= | Update_json | ${json_example} | $..color | changed |
    """
    load_input_json = self.string_to_json(json_string)
    matches = self._json_path_search(load_input_json, expr)
    datum_object = matches[int(index)]
    if not isinstance(datum_object, DatumInContext):
        raise JsonValidatorError("Nothing found by the given json-path")
    path = datum_object.path
    # Edit the dictionary using the received data
    # If the user specified a list
    if isinstance(path, Index):
        datum_object.context.value[datum_object.path.index] = value
    # If the user specified a value of type (string, bool, integer or complex)
    elif isinstance(path, Fields):
        datum_object.context.value[datum_object.path.fields[0]] = value
    return load_input_json
Replace the value in the JSON string. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONPath expression for determining the value to be replaced;\n _value_ - the value to be replaced with;\n _index_ - index for selecting item within a match list, default value is 0;\n *Returns:*\n Changed JSON in dictionary format. *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json_update}= | Update_json | ${json_example} | $..color | changed |
entailment
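Chaining update_json with json_to_string on the same validator instance as above, sketched with an illustrative document:

source = '{"bike": {"color": "red", "price": 19.95}}'
updated = validator.update_json(source, "$..color", "blue")  # returns a dict
print(validator.json_to_string(updated))  # {"bike": {"color": "blue", "price": 19.95}}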
def pretty_print_json(self, json_string):
    """
    Return formatted JSON string _json_string_.\n
    Using method json.dumps with settings: _indent=2, ensure_ascii=False_.

    *Args:*\n
        _json_string_ - JSON string.

    *Returns:*\n
        Formatted JSON string.

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
    | | Log | ${pretty_json} |
    =>\n
    | {
    |   "a": 1,
    |   "foo": [
    |     {
    |       "c": 3,
    |       "b": 2
    |     },
    |     {
    |       "e": 4,
    |       "d": "baz"
    |     }
    |   ]
    | }
    """
    return json.dumps(self.string_to_json(json_string), indent=2, ensure_ascii=False)
Return formatted JSON string _json_string_.\n Using method json.dumps with settings: _indent=2, ensure_ascii=False_. *Args:*\n _json_string_ - JSON string. *Returns:*\n Formatted JSON string. *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} | | | Log | ${pretty_json} | =>\n | { | "a": 1, | "foo": [ | { | "c": 3, | "b": 2 | }, | { | "e": 4, | "d": "baz" | } | ] | }
entailment
def output_interval_histogram(self, histogram,
                              start_time_stamp_sec=0,
                              end_time_stamp_sec=0,
                              max_value_unit_ratio=1000000.0):
    '''Output an interval histogram, with the given timestamp and a
    configurable maxValueUnitRatio.
    (note that the specified timestamp will be used, and the timestamp in
    the actual histogram will be ignored)
    The max value reported with the interval line will be scaled by the
    given max_value_unit_ratio.
    The histogram start and end timestamps are assumed to be in msec units.
    Logging will be in seconds, relative to a base time.
    The default base time is 0.
    By convention, histogram start/end times are generally stamped with
    absolute times in msec since the epoch. For logging with absolute time
    stamps, the base time would remain zero. For logging with relative time
    stamps (time since a start point), the base time should be set to the
    start point's time.
    Params:
        histogram The interval histogram to log.
        start_time_stamp_sec The start timestamp to log with the interval
                             histogram, in seconds.
                             default: using the start/end timestamp indicated
                             in the histogram
        end_time_stamp_sec The end timestamp to log with the interval
                           histogram, in seconds.
                           default: using the start/end timestamp indicated
                           in the histogram
        max_value_unit_ratio The ratio by which to divide the histogram's max
                             value when reporting on it.
                             default: 1,000,000 (which is the msec : nsec ratio)
    '''
    if not start_time_stamp_sec:
        start_time_stamp_sec = \
            (histogram.get_start_time_stamp() - self.base_time) / 1000.0
    if not end_time_stamp_sec:
        end_time_stamp_sec = (histogram.get_end_time_stamp() - self.base_time) / 1000.0
    cpayload = histogram.encode()
    self.log.write("%f,%f,%f,%s\n" %
                   (start_time_stamp_sec,
                    end_time_stamp_sec - start_time_stamp_sec,
                    histogram.get_max_value() // max_value_unit_ratio,
                    cpayload.decode('utf-8')))
Output an interval histogram, with the given timestamp and a configurable maxValueUnitRatio. (note that the specified timestamp will be used, and the timestamp in the actual histogram will be ignored). The max value reported with the interval line will be scaled by the given max_value_unit_ratio. The histogram start and end timestamps are assumed to be in msec units. Logging will be in seconds, relative to a base time. The default base time is 0. By convention, histogram start/end times are generally stamped with absolute times in msec since the epoch. For logging with absolute time stamps, the base time would remain zero. For logging with relative time stamps (time since a start point), the base time should be set to the start point's time. Params: histogram The interval histogram to log. start_time_stamp_sec The start timestamp to log with the interval histogram, in seconds. default: using the start/end timestamp indicated in the histogram end_time_stamp_sec The end timestamp to log with the interval histogram, in seconds. default: using the start/end timestamp indicated in the histogram max_value_unit_ratio The ratio by which to divide the histogram's max value when reporting on it. default: 1,000,000 (which is the msec : nsec ratio)
entailment
def output_start_time(self, start_time_msec):
    '''Log a start time in the log.
    Params:
        start_time_msec time (in milliseconds) since the absolute start
                        time (the epoch)
    '''
    self.log.write("#[StartTime: %f (seconds since epoch), %s]\n" %
                   (float(start_time_msec) / 1000.0,
                    datetime.fromtimestamp(start_time_msec / 1000.0).isoformat(' ')))
Log a start time in the log. Params: start_time_msec time (in milliseconds) since the absolute start time (the epoch)
entailment
def _decode_next_interval_histogram(self, dest_histogram,
                                    range_start_time_sec=0.0,
                                    range_end_time_sec=sys.maxsize,
                                    absolute=False):
    '''Read the next interval histogram from the log, if interval falls
    within an absolute or relative time range.
    Timestamps are assumed to appear in order in the log file, and as such
    this method will return None upon encountering a timestamp larger than
    range_end_time_sec.
    Relative time range:
        the range is assumed to be in seconds relative to the actual
        timestamp value found in each interval line in the log
    Absolute time range:
        Absolute timestamps are calculated by adding the timestamp found
        with the recorded interval to the [latest, optional] start time
        found in the log. The start time is indicated in the log with
        a "#[StartTime: " followed by the start time in seconds.
    Params:
        dest_histogram if None, creates a new histogram, else adds the new
                       interval histogram to it
        range_start_time_sec The absolute or relative start of the expected
                             time range, in seconds.
        range_end_time_sec The absolute or relative end of the expected
                           time range, in seconds.
        absolute Defines if the passed range is absolute or relative
    Return:
        Returns a histogram object if an interval line was found with an
        associated start timestamp value that falls between
        start_time_sec and end_time_sec,
        or None if no such interval line is found.
        Upon encountering any unexpected format errors in reading the next
        interval from the file, this method will return None.
        The histogram returned will have its timestamp set to the absolute
        timestamp calculated from adding the interval's indicated timestamp
        value to the latest [optional] start time found in the log.
    Exceptions:
        ValueError if there is a syntax error in one of the float fields
    '''
    while 1:
        line = self.input_file.readline()
        if not line:
            return None
        if line[0] == '#':
            match_res = re_start_time.match(line)
            if match_res:
                self.start_time_sec = float(match_res.group(1))
                self.observed_start_time = True
                continue
            match_res = re_base_time.match(line)
            if match_res:
                self.base_time_sec = float(match_res.group(1))
                self.observed_base_time = True
                continue
        match_res = re_histogram_interval.match(line)
        if not match_res:
            # probably a legend line that starts with "\"StartTimestamp"
            continue
        # Decode: startTimestamp, intervalLength, maxTime, histogramPayload
        # Timestamp is expected to be in seconds
        log_time_stamp_in_sec = float(match_res.group(1))
        interval_length_sec = float(match_res.group(2))
        cpayload = match_res.group(4)
        if not self.observed_start_time:
            # No explicit start time noted. Use 1st observed time:
            self.start_time_sec = log_time_stamp_in_sec
            self.observed_start_time = True
        if not self.observed_base_time:
            # No explicit base time noted.
            # Deduce from 1st observed time (compared to start time):
            if log_time_stamp_in_sec < self.start_time_sec - (365 * 24 * 3600.0):
                # Criteria Note: if log timestamp is more than a year in
                # the past (compared to StartTime),
                # we assume that timestamps in the log are not absolute
                self.base_time_sec = self.start_time_sec
            else:
                # Timestamps are absolute
                self.base_time_sec = 0.0
            self.observed_base_time = True
        absolute_start_time_stamp_sec = \
            log_time_stamp_in_sec + self.base_time_sec
        offset_start_time_stamp_sec = \
            absolute_start_time_stamp_sec - self.start_time_sec
        # Timestamp length is expected to be in seconds
        absolute_end_time_stamp_sec = \
            absolute_start_time_stamp_sec + interval_length_sec
        if absolute:
            start_time_stamp_to_check_range_on = absolute_start_time_stamp_sec
        else:
            start_time_stamp_to_check_range_on = offset_start_time_stamp_sec
        if start_time_stamp_to_check_range_on < range_start_time_sec:
            continue
        if start_time_stamp_to_check_range_on > range_end_time_sec:
            return None
        if dest_histogram:
            # add the interval histogram to the destination histogram
            histogram = dest_histogram
            histogram.decode_and_add(cpayload)
        else:
            histogram = HdrHistogram.decode(cpayload)
            histogram.set_start_time_stamp(absolute_start_time_stamp_sec * 1000.0)
            histogram.set_end_time_stamp(absolute_end_time_stamp_sec * 1000.0)
        return histogram
Read the next interval histogram from the log, if interval falls within an absolute or relative time range. Timestamps are assumed to appear in order in the log file, and as such this method will return None upon encountering a timestamp larger than range_end_time_sec. Relative time range: the range is assumed to be in seconds relative to the actual timestamp value found in each interval line in the log Absolute time range: Absolute timestamps are calculated by adding the timestamp found with the recorded interval to the [latest, optional] start time found in the log. The start time is indicated in the log with a "#[StartTime: " followed by the start time in seconds. Params: dest_histogram if None, creates a new histogram, else adds the new interval histogram to it range_start_time_sec The absolute or relative start of the expected time range, in seconds. range_end_time_sec The absolute or relative end of the expected time range, in seconds. absolute Defines if the passed range is absolute or relative Return: Returns a histogram object if an interval line was found with an associated start timestamp value that falls between start_time_sec and end_time_sec, or None if no such interval line is found. Upon encountering any unexpected format errors in reading the next interval from the file, this method will return None. The histogram returned will have its timestamp set to the absolute timestamp calculated from adding the interval's indicated timestamp value to the latest [optional] start time found in the log. Exceptions: ValueError if there is a syntax error in one of the float fields
entailment
def get_next_interval_histogram(self,
                                range_start_time_sec=0.0,
                                range_end_time_sec=sys.maxsize,
                                absolute=False):
    '''Read the next interval histogram from the log, if interval falls
    within an absolute or relative time range.
    Timestamps are assumed to appear in order in the log file, and as such
    this method will return None upon encountering a timestamp larger than
    range_end_time_sec.
    Relative time range:
        the range is assumed to be in seconds relative to the actual
        timestamp value found in each interval line in the log
    Absolute time range:
        Absolute timestamps are calculated by adding the timestamp found
        with the recorded interval to the [latest, optional] start time
        found in the log. The start time is indicated in the log with
        a "#[StartTime: " followed by the start time in seconds.
    Params:
        range_start_time_sec The absolute or relative start of the expected
                             time range, in seconds.
        range_end_time_sec The absolute or relative end of the expected
                           time range, in seconds.
        absolute Defines if the passed range is absolute or relative
    Return:
        Returns a histogram object if an interval line was found with an
        associated start timestamp value that falls between
        start_time_sec and end_time_sec,
        or None if no such interval line is found.
        Upon encountering any unexpected format errors in reading the next
        interval from the file, this method will return None.
        The histogram returned will have its timestamp set to the absolute
        timestamp calculated from adding the interval's indicated timestamp
        value to the latest [optional] start time found in the log.
    Exceptions:
        ValueError if there is a syntax error in one of the float fields
    '''
    return self._decode_next_interval_histogram(None,
                                                range_start_time_sec,
                                                range_end_time_sec,
                                                absolute)
Read the next interval histogram from the log, if the interval falls within an absolute or relative time range.

Timestamps are assumed to appear in order in the log file, and as such this method will return None upon encountering a timestamp larger than range_end_time_sec.

Relative time range: the range is assumed to be in seconds relative to the actual timestamp value found in each interval line in the log

Absolute time range: Absolute timestamps are calculated by adding the timestamp found with the recorded interval to the [latest, optional] start time found in the log. The start time is indicated in the log with a "#[StartTime: " followed by the start time in seconds.

Params:
    range_start_time_sec The absolute or relative start of the expected time range, in seconds.
    range_end_time_sec The absolute or relative end of the expected time range, in seconds.
    absolute Defines if the passed range is absolute or relative

Return:
    Returns a histogram object if an interval line was found with an associated start timestamp value that falls between range_start_time_sec and range_end_time_sec, or None if no such interval line is found. Upon encountering any unexpected format errors in reading the next interval from the file, this method will return None. The histogram returned will have its timestamp set to the absolute timestamp calculated from adding the interval's indicated timestamp value to the latest [optional] start time found in the log.

Exceptions:
    ValueError if there is a syntax error in one of the float fields
entailment
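For context, a minimal usage sketch of the reader method above. It assumes the surrounding class is HistogramLogReader from the Python hdrhistogram package (hdrh.log); 'perf.hlog' is a hypothetical log file.

from hdrh.histogram import HdrHistogram
from hdrh.log import HistogramLogReader

# Sketch, assuming the hdrh API; drains every interval whose relative
# offset falls in the first 60 seconds of the log.
reference = HdrHistogram(1, 60 * 60 * 1000 * 1000, 3)
reader = HistogramLogReader('perf.hlog', reference)
while True:
    interval = reader.get_next_interval_histogram(range_start_time_sec=0.0,
                                                  range_end_time_sec=60.0)
    if interval is None:
        break
    print(interval.get_total_count())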
def add_next_interval_histogram(self,
                                dest_histogram=None,
                                range_start_time_sec=0.0,
                                range_end_time_sec=sys.maxsize,
                                absolute=False):
    '''Read the next interval histogram from the log, if the interval
    falls within an absolute or relative time range, and add it to the
    destination histogram (or to the reference histogram if
    dest_histogram is None)

    Timestamps are assumed to appear in order in the log file, and as
    such this method will return None upon encountering a timestamp
    larger than range_end_time_sec.

    Relative time range:
        the range is assumed to be in seconds relative to the actual
        timestamp value found in each interval line in the log

    Absolute time range:
        Absolute timestamps are calculated by adding the timestamp
        found with the recorded interval to the [latest, optional]
        start time found in the log. The start time is indicated in
        the log with a "#[StartTime: " followed by the start time in
        seconds.

    Params:
        dest_histogram where to add the next interval histogram, if
                        None the interval histogram will be added to
                        the reference histogram passed in the
                        constructor
        range_start_time_sec The absolute or relative start of the
                        expected time range, in seconds.
        range_end_time_sec The absolute or relative end of the
                        expected time range, in seconds.
        absolute Defines if the passed range is absolute or relative

    Return:
        Returns the destination histogram if an interval line was
        found with an associated start timestamp value that falls
        between range_start_time_sec and range_end_time_sec, or None
        if no such interval line is found.
        Upon encountering any unexpected format errors in reading the
        next interval from the file, this method will return None.
        The histogram returned will have its timestamp set to the
        absolute timestamp calculated from adding the interval's
        indicated timestamp value to the latest [optional] start time
        found in the log.

    Exceptions:
        ValueError if there is a syntax error in one of the float
        fields
    '''
    if not dest_histogram:
        dest_histogram = self.reference_histogram
    return self._decode_next_interval_histogram(dest_histogram,
                                                range_start_time_sec,
                                                range_end_time_sec,
                                                absolute)
Read the next interval histogram from the log, if the interval falls within an absolute or relative time range, and add it to the destination histogram (or to the reference histogram if dest_histogram is None)

Timestamps are assumed to appear in order in the log file, and as such this method will return None upon encountering a timestamp larger than range_end_time_sec.

Relative time range: the range is assumed to be in seconds relative to the actual timestamp value found in each interval line in the log

Absolute time range: Absolute timestamps are calculated by adding the timestamp found with the recorded interval to the [latest, optional] start time found in the log. The start time is indicated in the log with a "#[StartTime: " followed by the start time in seconds.

Params:
    dest_histogram where to add the next interval histogram, if None the interval histogram will be added to the reference histogram passed in the constructor
    range_start_time_sec The absolute or relative start of the expected time range, in seconds.
    range_end_time_sec The absolute or relative end of the expected time range, in seconds.
    absolute Defines if the passed range is absolute or relative

Return:
    Returns the destination histogram if an interval line was found with an associated start timestamp value that falls between range_start_time_sec and range_end_time_sec, or None if no such interval line is found. Upon encountering any unexpected format errors in reading the next interval from the file, this method will return None. The histogram returned will have its timestamp set to the absolute timestamp calculated from adding the interval's indicated timestamp value to the latest [optional] start time found in the log.

Exceptions:
    ValueError if there is a syntax error in one of the float fields
entailment
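A companion sketch for the additive variant: folding every interval into the reference histogram passed to the reader's constructor (same hdrh assumptions and hypothetical file name as in the previous sketch).

from hdrh.histogram import HdrHistogram
from hdrh.log import HistogramLogReader

# Each call adds the next interval into `reference` until the log ends.
reference = HdrHistogram(1, 60 * 60 * 1000 * 1000, 3)
reader = HistogramLogReader('perf.hlog', reference)
while reader.add_next_interval_histogram() is not None:
    pass
print('total samples:', reference.get_total_count())
print('p99:', reference.get_value_at_percentile(99))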
def sanitize(value): ''' Sanitizes strings according to SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in settings. Example usage: {% load sanitizer %} {{ post.content|escape_html }} ''' if isinstance(value, basestring): value = bleach.clean(value, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, styles=ALLOWED_STYLES, strip=False) return value
Sanitizes strings according to SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in settings. Example usage: {% load sanitizer %} {{ post.content|escape_html }}
entailment
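The filter above is a thin wrapper over bleach.clean with strip=False, so disallowed markup is escaped rather than removed. A standalone sketch of that behavior (plain bleach, no Django required; the styles= keyword used by the filter only exists in bleach releases before 5.0):

import bleach

dirty = '<script>alert("x")</script><b>bold</b>'
# strip=False escapes disallowed tags instead of dropping them
print(bleach.clean(dirty, tags=['b'], attributes={}, strip=False))
# -> &lt;script&gt;alert("x")&lt;/script&gt;<b>bold</b>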
def strip_filter(value): ''' Strips HTML tags from strings according to SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in settings. Example usage: {% load sanitizer %} {{ post.content|strip_html }} ''' if isinstance(value, basestring): value = bleach.clean(value, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES, styles=ALLOWED_STYLES, strip=True) return value
Strips HTML tags from strings according to SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES and SANITIZER_ALLOWED_STYLES variables in settings. Example usage: {% load sanitizer %} {{ post.content|strip_html }}
entailment
def sanitize_allow(value, args=''): ''' Strip HTML tags other than provided tags and attributes. Example usage: {% load sanitizer %} {{ post.body|sanitize_allow:'a, strong, img; href, src'}} ''' if isinstance(value, basestring): allowed_tags = [] allowed_attributes = [] allowed_styles = [] args = args.strip().split(';') if len(args) > 0: allowed_tags = [tag.strip() for tag in args[0].split(',')] if len(args) > 1: allowed_attributes = [attr.strip() for attr in args[1].split(',')] value = bleach.clean(value, tags=allowed_tags, attributes=allowed_attributes, strip=True) return value
Strip HTML tags other than provided tags and attributes. Example usage: {% load sanitizer %} {{ post.body|sanitize_allow:'a, strong, img; href, src'}}
entailment
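The single string argument packs tags and attributes into one value: tags before the semicolon, attributes after it, each comma separated. A standalone sketch of the parsing convention used above:

args = 'a, strong, img; href, src'
parts = args.strip().split(';')
allowed_tags = [tag.strip() for tag in parts[0].split(',')]
allowed_attributes = []
if len(parts) > 1:
    allowed_attributes = [attr.strip() for attr in parts[1].split(',')]
print(allowed_tags)        # ['a', 'strong', 'img']
print(allowed_attributes)  # ['href', 'src']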
def escape_html(value, allowed_tags=[], allowed_attributes=[],
                allowed_styles=[]):
    """
    Template tag to sanitize string values. It accepts lists of
    allowed tags, attributes or styles in comma separated string or
    list format.

    For example:

    {% load sanitizer %}
    {% escape_html '<a href="">bar</a> <script>alert('baz')</script>' 'a,img' 'href,src' %}

    Will output:

    <a href="">bar</a> &lt;script&gt;alert('baz')&lt;/script&gt;

    On django 1.4 you could also use keyword arguments:

    {% escape_html '<a href="">bar</a>' allowed_tags='a,img' allowed_attributes='href,src' %}

    """
    if isinstance(value, basestring):
        value = bleach.clean(value, tags=allowed_tags,
                             attributes=allowed_attributes,
                             styles=allowed_styles, strip=False)
    return value
Template tag to sanitize string values. It accepts lists of allowed tags, attributes or styles in comma separated string or list format.

For example:

{% load sanitizer %}
{% escape_html '<a href="">bar</a> <script>alert('baz')</script>' 'a,img' 'href,src' %}

Will output:

<a href="">bar</a> &lt;script&gt;alert('baz')&lt;/script&gt;

On django 1.4 you could also use keyword arguments:

{% escape_html '<a href="">bar</a>' allowed_tags='a,img' allowed_attributes='href,src' %}
entailment
def strip_html(value, allowed_tags=[], allowed_attributes=[],
               allowed_styles=[]):
    """
    Template tag to strip html from string values. It accepts lists of
    allowed tags, attributes or styles in comma separated string or
    list format.

    For example:

    {% load sanitizer %}
    {% strip_html '<a href="">bar</a> <script>alert('baz')</script>' 'a,img' 'href,src' %}

    Will output:

    <a href="">bar</a> alert('baz')

    On django 1.4 you could also use keyword arguments:

    {% strip_html '<a href="">bar</a>' allowed_tags='a,img' allowed_attributes='href,src' %}

    """
    if isinstance(value, basestring):
        value = bleach.clean(value, tags=allowed_tags,
                             attributes=allowed_attributes,
                             styles=allowed_styles, strip=True)
    return value
Template tag to strip html from string values. It accepts lists of allowed tags, attributes or styles in comma separated string or list format.

For example:

{% load sanitizer %}
{% strip_html '<a href="">bar</a> <script>alert('baz')</script>' 'a,img' 'href,src' %}

Will output:

<a href="">bar</a> alert('baz')

On django 1.4 you could also use keyword arguments:

{% strip_html '<a href="">bar</a>' allowed_tags='a,img' allowed_attributes='href,src' %}
entailment
def get_censor_char(): """Plucks a letter out of the censor_pool. If the censor_pool is empty, replenishes it. This is done to ensure all censor chars are used before grabbing more (avoids ugly duplicates). """ global _censor_pool if not _censor_pool: # censor pool is empty. fill it back up. _censor_pool = list(_censor_chars) return _censor_pool.pop(random.randrange(len(_censor_pool)))
Plucks a letter out of the censor_pool. If the censor_pool is empty, replenishes it. This is done to ensure all censor chars are used before grabbing more (avoids ugly duplicates).
entailment
def censor(input_text): """ Returns the input string with profanity replaced with a random string of characters plucked from the censor_characters pool. """ ret = input_text words = get_words() for word in words: curse_word = re.compile(re.escape(word), re.IGNORECASE) cen = "".join(get_censor_char() for i in list(word)) ret = curse_word.sub(cen, ret) return ret
Returns the input string with profanity replaced with a random string of characters plucked from the censor_characters pool.
entailment
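A short usage sketch of the censoring helpers above, assuming they are importable as in the profanity package on PyPI; the word list below is illustrative.

from profanity import profanity

profanity.load_words(['darn'])
print(profanity.censor('That darn cat.'))
# -> e.g. 'That %$&@ cat.' (censor characters are drawn at random)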
def load_words(wordlist=None):
    """
    Loads and caches the profanity word list. Input file (if provided)
    should be a flat text file with one profanity entry per line.
    """
    global words
    if not wordlist:
        # no wordlist was provided, load the wordlist from the local store
        filename = get_data('wordlist.txt')
        with open(filename) as f:
            wordlist = f.readlines()
        wordlist = [w.strip() for w in wordlist if w]
    words = wordlist
Loads and caches the profanity word list. Input file (if provided) should be a flat text file with one profanity entry per line.
entailment
def convert(csv, json, **kwargs): '''Convert csv to json. csv: filename or file-like object json: filename or file-like object if csv is '-' or None: stdin is used for input if json is '-' or None: stdout is used for output ''' csv_local, json_local = None, None try: if csv == '-' or csv is None: csv = sys.stdin elif isinstance(csv, str): csv = csv_local = open(csv, 'r') if json == '-' or json is None: json = sys.stdout elif isinstance(json, str): json = json_local = open(json, 'w') data = load_csv(csv, **kwargs) save_json(data, json, **kwargs) finally: if csv_local is not None: csv_local.close() if json_local is not None: json_local.close()
Convert csv to json. csv: filename or file-like object json: filename or file-like object if csv is '-' or None: stdin is used for input if json is '-' or None: stdout is used for output
entailment
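Typical invocations of the converter above, assuming it is importable from the surrounding module (the import path and file names are hypothetical):

from csvtojson import convert  # hypothetical module path

convert('people.csv', 'people.json')   # file name to file name
convert(None, None)                    # stdin to stdout
with open('people.csv') as src:
    convert(src, 'people.json')        # open file object in, name out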
def update_dicts(self, sentence): """Add new sentence to generate dictionaries. :param sentence: A list of strings representing the sentence. """ self.dict_generator(sentence=sentence) self.word_dict, self.char_dict = None, None
Add new sentence to generate dictionaries. :param sentence: A list of strings representing the sentence.
entailment
def set_dicts(self, word_dict, char_dict): """Set with custom dictionaries. :param word_dict: The word dictionary. :param char_dict: The character dictionary. """ self.word_dict = word_dict self.char_dict = char_dict
Set with custom dictionaries. :param word_dict: The word dictionary. :param char_dict: The character dictionary.
entailment
def get_dicts(self): """Get word and character dictionaries. :return word_dict, char_dict: """ if self.word_dict is None: self.word_dict, self.char_dict, self.max_word_len = self.dict_generator(return_dict=True) return self.word_dict, self.char_dict
Get word and character dictionaries. :return word_dict, char_dict:
entailment
def get_embedding_layer(self, word_embd_dim=300, char_embd_dim=30, char_hidden_dim=150, char_hidden_layer_type='lstm', word_embd_weights=None, word_embd_file_path=None, char_embd_weights=None, char_embd_file_path=None, word_embd_trainable=None, char_embd_trainable=None, word_mask_zero=True, char_mask_zero=True,): """Get the merged embedding layer. :param word_embd_dim: The dimensions of the word embedding. :param char_embd_dim: The dimensions of the character embedding :param char_hidden_dim: The dimensions of the hidden states of RNN in one direction. :param word_embd_weights: A numpy array representing the pre-trained embeddings for words. :param word_embd_file_path: The file that contains the word embeddings. :param char_embd_weights: A numpy array representing the pre-trained embeddings for characters. :param char_embd_file_path: The file that contains the character embeddings. :param word_embd_trainable: Whether the word embedding layer is trainable. :param char_embd_trainable: Whether the character embedding layer is trainable. :param char_hidden_layer_type: The type of the recurrent layer, 'lstm' or 'gru'. :param word_mask_zero: Whether enable the mask for words. :param char_mask_zero: Whether enable the mask for characters. :return inputs, embd_layer: The keras layer. """ if word_embd_file_path is not None: word_embd_weights = get_embedding_weights_from_file(word_dict=self.get_word_dict(), file_path=word_embd_file_path, ignore_case=self.word_ignore_case) if char_embd_file_path is not None: char_embd_weights = get_embedding_weights_from_file(word_dict=self.get_char_dict(), file_path=char_embd_file_path, ignore_case=self.char_ignore_case) return get_embedding_layer(word_dict_len=len(self.get_word_dict()), char_dict_len=len(self.get_char_dict()), max_word_len=self.max_word_len, word_embd_dim=word_embd_dim, char_embd_dim=char_embd_dim, char_hidden_dim=char_hidden_dim, char_hidden_layer_type=char_hidden_layer_type, word_embd_weights=word_embd_weights, char_embd_weights=char_embd_weights, word_embd_trainable=word_embd_trainable, char_embd_trainable=char_embd_trainable, word_mask_zero=word_mask_zero, char_mask_zero=char_mask_zero)
Get the merged embedding layer. :param word_embd_dim: The dimensions of the word embedding. :param char_embd_dim: The dimensions of the character embedding :param char_hidden_dim: The dimensions of the hidden states of RNN in one direction. :param word_embd_weights: A numpy array representing the pre-trained embeddings for words. :param word_embd_file_path: The file that contains the word embeddings. :param char_embd_weights: A numpy array representing the pre-trained embeddings for characters. :param char_embd_file_path: The file that contains the character embeddings. :param word_embd_trainable: Whether the word embedding layer is trainable. :param char_embd_trainable: Whether the character embedding layer is trainable. :param char_hidden_layer_type: The type of the recurrent layer, 'lstm' or 'gru'. :param word_mask_zero: Whether enable the mask for words. :param char_mask_zero: Whether enable the mask for characters. :return inputs, embd_layer: The keras layer.
entailment
def get_batch_input(self, sentences): """Convert sentences to desired input tensors. :param sentences: A list of lists representing the input sentences. :return word_embd_input, char_embd_input: The desired inputs. """ return get_batch_input(sentences, max_word_len=self.max_word_len, word_dict=self.get_word_dict(), char_dict=self.get_char_dict(), word_ignore_case=self.word_ignore_case, char_ignore_case=self.char_ignore_case)
Convert sentences to desired input tensors. :param sentences: A list of lists representing the input sentences. :return word_embd_input, char_embd_input: The desired inputs.
entailment
def get_batch_input(sentences, max_word_len, word_dict, char_dict, word_unknown=1, char_unknown=1, word_ignore_case=False, char_ignore_case=False): """Convert sentences to desired input tensors. :param sentences: A list of lists representing the input sentences. :param max_word_len: The maximum allowed length of word. :param word_dict: Map a word to an integer. (0 and 1 should be preserved) :param char_dict: Map a character to an integer. (0 and 1 should be preserved) :param word_unknown: An integer representing the unknown word. :param char_unknown: An integer representing the unknown character. :param word_ignore_case: Word will be transformed to lower case before mapping. :param char_ignore_case: Character will be transformed to lower case before mapping. :return word_embd_input, char_embd_input: The desired inputs. """ sentence_num = len(sentences) max_sentence_len = max(map(len, sentences)) word_embd_input = [[0] * max_sentence_len for _ in range(sentence_num)] char_embd_input = [[[0] * max_word_len for _ in range(max_sentence_len)] for _ in range(sentence_num)] for sentence_index, sentence in enumerate(sentences): for word_index, word in enumerate(sentence): if word_ignore_case: word_key = word.lower() else: word_key = word word_embd_input[sentence_index][word_index] = word_dict.get(word_key, word_unknown) for char_index, char in enumerate(word): if char_index >= max_word_len: break if char_ignore_case: char = char.lower() char_embd_input[sentence_index][word_index][char_index] = char_dict.get(char, char_unknown) return [numpy.asarray(word_embd_input), numpy.asarray(char_embd_input)]
Convert sentences to desired input tensors. :param sentences: A list of lists representing the input sentences. :param max_word_len: The maximum allowed length of word. :param word_dict: Map a word to an integer. (0 and 1 should be preserved) :param char_dict: Map a character to an integer. (0 and 1 should be preserved) :param word_unknown: An integer representing the unknown word. :param char_unknown: An integer representing the unknown character. :param word_ignore_case: Word will be transformed to lower case before mapping. :param char_ignore_case: Character will be transformed to lower case before mapping. :return word_embd_input, char_embd_input: The desired inputs.
entailment
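A shape-level sketch of the padding behavior above, assuming the function is importable as in the keras-word-char-embd package (keras_wc_embd):

from keras_wc_embd import get_batch_input

word_dict = {'': 0, '<UNK>': 1, 'all': 2, 'works': 3}
char_dict = {'': 0, '<UNK>': 1, 'a': 2, 'l': 3, 'w': 4}
word_input, char_input = get_batch_input(
    sentences=[['All', 'works'], ['all']],
    max_word_len=5,
    word_dict=word_dict,
    char_dict=char_dict,
    word_ignore_case=True,
    char_ignore_case=True)
print(word_input.shape)  # (2, 2)    -> sentences x longest sentence
print(char_input.shape)  # (2, 2, 5) -> sentences x words x max_word_len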
def get_embedding_layer(word_dict_len,
                        char_dict_len,
                        max_word_len,
                        word_embd_dim=300,
                        char_embd_dim=30,
                        char_hidden_dim=150,
                        char_hidden_layer_type='lstm',
                        word_embd_weights=None,
                        char_embd_weights=None,
                        word_embd_trainable=None,
                        char_embd_trainable=None,
                        word_mask_zero=True,
                        char_mask_zero=True):
    """Get the merged embedding layer.

    :param word_dict_len: The number of words in the dictionary including the ones mapped to 0 or 1.
    :param char_dict_len: The number of characters in the dictionary including the ones mapped to 0 or 1.
    :param max_word_len: The maximum allowed length of word.
    :param word_embd_dim: The dimensions of the word embedding.
    :param char_embd_dim: The dimensions of the character embedding
    :param char_hidden_dim: The dimensions of the hidden states of RNN in one direction.
    :param word_embd_weights: A numpy array representing the pre-trained embeddings for words.
    :param char_embd_weights: A numpy array representing the pre-trained embeddings for characters.
    :param word_embd_trainable: Whether the word embedding layer is trainable.
    :param char_embd_trainable: Whether the character embedding layer is trainable.
    :param char_hidden_layer_type: The type of the recurrent layer, 'lstm' or 'gru'.
    :param word_mask_zero: Whether enable the mask for words.
    :param char_mask_zero: Whether enable the mask for characters.
    :return inputs, embd_layer: The keras layer.
    """
    if word_embd_weights is not None:
        word_embd_weights = [word_embd_weights]
    if word_embd_trainable is None:
        word_embd_trainable = word_embd_weights is None
    if char_embd_weights is not None:
        char_embd_weights = [char_embd_weights]
    if char_embd_trainable is None:
        char_embd_trainable = char_embd_weights is None
    word_input_layer = keras.layers.Input(
        shape=(None,),
        name='Input_Word',
    )
    char_input_layer = keras.layers.Input(
        shape=(None, max_word_len),
        name='Input_Char',
    )
    word_embd_layer = keras.layers.Embedding(
        input_dim=word_dict_len,
        output_dim=word_embd_dim,
        mask_zero=word_mask_zero,
        weights=word_embd_weights,
        trainable=word_embd_trainable,
        name='Embedding_Word',
    )(word_input_layer)
    char_embd_layer = keras.layers.Embedding(
        input_dim=char_dict_len,
        output_dim=char_embd_dim,
        mask_zero=char_mask_zero,
        weights=char_embd_weights,
        trainable=char_embd_trainable,
        name='Embedding_Char_Pre',
    )(char_input_layer)
    if char_hidden_layer_type == 'lstm':
        char_hidden_layer = keras.layers.Bidirectional(
            keras.layers.LSTM(
                units=char_hidden_dim,
                input_shape=(max_word_len, char_dict_len),
                return_sequences=False,
                return_state=False,
            ),
            name='Bi-LSTM_Char',
        )
    elif char_hidden_layer_type == 'gru':
        char_hidden_layer = keras.layers.Bidirectional(
            keras.layers.GRU(
                units=char_hidden_dim,
                input_shape=(max_word_len, char_dict_len),
                return_sequences=False,
                return_state=False,
            ),
            name='Bi-GRU_Char',
        )
    elif char_hidden_layer_type == 'cnn':
        char_hidden_layer = [
            MaskedConv1D(
                filters=max(1, char_hidden_dim // 5),
                kernel_size=3,
                activation='relu',
            ),
            MaskedFlatten(),
            keras.layers.Dense(
                units=char_hidden_dim,
                name='Dense_Char',
            ),
        ]
    elif isinstance(char_hidden_layer_type, list) or isinstance(char_hidden_layer_type, keras.layers.Layer):
        char_hidden_layer = char_hidden_layer_type
    else:
        raise NotImplementedError('Unknown character hidden layer type: %s' % char_hidden_layer_type)
    if not isinstance(char_hidden_layer, list):
        char_hidden_layer = [char_hidden_layer]
    for i, layer in enumerate(char_hidden_layer):
        if i == len(char_hidden_layer) - 1:
            name = 'Embedding_Char'
        else:
            name = 'Embedding_Char_Pre_%d' % (i + 1)
        char_embd_layer = keras.layers.TimeDistributed(layer=layer, name=name)(char_embd_layer)
    embd_layer = keras.layers.Concatenate(
        name='Embedding',
    )([word_embd_layer, char_embd_layer])
    return [word_input_layer, char_input_layer], embd_layer
Get the merged embedding layer. :param word_dict_len: The number of words in the dictionary including the ones mapped to 0 or 1. :param char_dict_len: The number of characters in the dictionary including the ones mapped to 0 or 1. :param max_word_len: The maximum allowed length of word. :param word_embd_dim: The dimensions of the word embedding. :param char_embd_dim: The dimensions of the character embedding :param char_hidden_dim: The dimensions of the hidden states of RNN in one direction. :param word_embd_weights: A numpy array representing the pre-trained embeddings for words. :param char_embd_weights: A numpy array representing the pre-trained embeddings for characters. :param word_embd_trainable: Whether the word embedding layer is trainable. :param char_embd_trainable: Whether the character embedding layer is trainable. :param char_hidden_layer_type: The type of the recurrent layer, 'lstm' or 'gru'. :param word_mask_zero: Whether enable the mask for words. :param char_mask_zero: Whether enable the mask for characters. :return inputs, embd_layer: The keras layer.
entailment
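A sketch of wiring the merged embedding into a downstream model, assuming the function is importable as in the keras-word-char-embd package; all dimensions below are illustrative.

import keras
from keras_wc_embd import get_embedding_layer

inputs, embd_layer = get_embedding_layer(
    word_dict_len=100,
    char_dict_len=30,
    max_word_len=10,
    word_embd_dim=50,
    char_embd_dim=16,
    char_hidden_dim=25,
    char_hidden_layer_type='lstm')
hidden = keras.layers.Bidirectional(keras.layers.LSTM(units=32))(embd_layer)
output = keras.layers.Dense(units=2, activation='softmax')(hidden)
model = keras.models.Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='categorical_crossentropy')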
def get_dicts_generator(word_min_freq=4, char_min_freq=2, word_ignore_case=False, char_ignore_case=False): """Get word and character dictionaries from sentences. :param word_min_freq: The minimum frequency of a word. :param char_min_freq: The minimum frequency of a character. :param word_ignore_case: Word will be transformed to lower case before saving to dictionary. :param char_ignore_case: Character will be transformed to lower case before saving to dictionary. :return gen: A closure that accepts sentences and returns the dictionaries. """ word_count, char_count = {}, {} def get_dicts(sentence=None, return_dict=False): """Update and return dictionaries for each sentence. :param sentence: A list of strings representing the sentence. :param return_dict: Returns the dictionaries if it is True. :return word_dict, char_dict, max_word_len: """ if sentence is not None: for word in sentence: if not word: continue if word_ignore_case: word_key = word.lower() else: word_key = word word_count[word_key] = word_count.get(word_key, 0) + 1 for char in word: if char_ignore_case: char_key = char.lower() else: char_key = char char_count[char_key] = char_count.get(char_key, 0) + 1 if not return_dict: return None word_dict, char_dict = {'': 0, '<UNK>': 1}, {'': 0, '<UNK>': 1} max_word_len = 0 for word, count in word_count.items(): if count >= word_min_freq: word_dict[word] = len(word_dict) max_word_len = max(max_word_len, len(word)) for char, count in char_count.items(): if count >= char_min_freq: char_dict[char] = len(char_dict) return word_dict, char_dict, max_word_len return get_dicts
Get word and character dictionaries from sentences. :param word_min_freq: The minimum frequency of a word. :param char_min_freq: The minimum frequency of a character. :param word_ignore_case: Word will be transformed to lower case before saving to dictionary. :param char_ignore_case: Character will be transformed to lower case before saving to dictionary. :return gen: A closure that accepts sentences and returns the dictionaries.
entailment
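The returned closure is fed one sentence per call and only materializes the dictionaries when asked. A minimal sketch, assuming the import path used by keras-word-char-embd:

from keras_wc_embd import get_dicts_generator

gen = get_dicts_generator(word_min_freq=1, char_min_freq=1)
gen(sentence=['All', 'works', 'well'])
gen(sentence=['Works', 'well'])
word_dict, char_dict, max_word_len = gen(return_dict=True)
print(max_word_len)  # 5, the length of 'works' / 'Works'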
def get_word_list_eng(text): """A naive function that extracts English words from raw texts. :param text: The raw text. :return words: A list of strings. """ words, index = [''], 0 while index < len(text): while index < len(text) and ('a' <= text[index] <= 'z' or 'A' <= text[index] <= 'Z'): words[-1] += text[index] index += 1 if words[-1]: words.append('') while index < len(text) and not ('a' <= text[index] <= 'z' or 'A' <= text[index] <= 'Z'): if text[index] != ' ': words[-1] += text[index] index += 1 if words[-1]: words.append('') if not words[-1]: words.pop() return words
A naive function that extracts English words from raw texts. :param text: The raw text. :return words: A list of strings.
entailment
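A quick trace of the tokenizer above: letter runs become words, spaces are dropped, and every other non-letter run is kept as its own token (import path assumed as in keras-word-char-embd):

from keras_wc_embd import get_word_list_eng

print(get_word_list_eng("It's 2x faster!"))
# -> ['It', "'", 's', '2', 'x', 'faster', '!']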
def get_embedding_weights_from_file(word_dict, file_path, ignore_case=False):
    """Load pre-trained embeddings from a text file.

    Each line in the file should look like this:
        word feature_dim_1 feature_dim_2 ... feature_dim_n

    The `feature_dim_i` should be a floating point number.

    :param word_dict: A dict that maps words to indices.
    :param file_path: The location of the text file containing the pre-trained embeddings.
    :param ignore_case: Whether to ignore the case of the words.

    :return weights: A numpy array.
    """
    pre_trained = {}
    with codecs.open(file_path, 'r', 'utf8') as reader:
        for line in reader:
            line = line.strip()
            if not line:
                continue
            parts = line.split()
            if ignore_case:
                parts[0] = parts[0].lower()
            pre_trained[parts[0]] = list(map(float, parts[1:]))
    embd_dim = len(next(iter(pre_trained.values())))
    weights = [[0.0] * embd_dim for _ in range(max(word_dict.values()) + 1)]
    for word, index in word_dict.items():
        if not word:
            continue
        if ignore_case:
            word = word.lower()
        if word in pre_trained:
            weights[index] = pre_trained[word]
        else:
            weights[index] = numpy.random.random((embd_dim,)).tolist()
    return numpy.asarray(weights)
Load pre-trained embeddings from a text file.

Each line in the file should look like this:
    word feature_dim_1 feature_dim_2 ... feature_dim_n

The `feature_dim_i` should be a floating point number.

:param word_dict: A dict that maps words to indices.
:param file_path: The location of the text file containing the pre-trained embeddings.
:param ignore_case: Whether to ignore the case of the words.

:return weights: A numpy array.
entailment
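A sketch of the expected file format and the resulting matrix shape, assuming the import path used by keras-word-char-embd; the file name is hypothetical.

from keras_wc_embd import get_embedding_weights_from_file

# embeddings.txt contains lines such as:
#   all 0.1 0.2 0.3
#   well 0.4 0.5 0.6
word_dict = {'': 0, '<UNK>': 1, 'all': 2, 'well': 3}
weights = get_embedding_weights_from_file(word_dict, 'embeddings.txt')
print(weights.shape)  # (4, 3): one row per index; missing words get random rows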
def init_backends(): """Loads all backends""" global _BACKENDS, _ACTIVE_BACKENDS try: from .cffi_backend import CFFIBackend except ImportError: pass else: _BACKENDS.append(CFFIBackend) from .ctypes_backend import CTypesBackend from .null_backend import NullBackend _BACKENDS.append(CTypesBackend) _ACTIVE_BACKENDS = _BACKENDS[:] # null isn't active by default _BACKENDS.append(NullBackend)
Loads all backends
entailment
def get_backend(name): """Returns the backend by name or raises KeyError""" for backend in _BACKENDS: if backend.NAME == name: return backend raise KeyError("Backend %r not available" % name)
Returns the backend by name or raises KeyError
entailment
def set_backend(name=None):
    """Set a preferred ffi backend (cffi, ctypes).

    set_backend() -- default
    set_backend("cffi") -- cffi first, others as fallback
    set_backend("ctypes") -- ctypes first, others as fallback
    """
    possible = list(_BACKENDS)

    if name is None:
        names = []
    else:
        names = name.split(",")

    for name in reversed(names):
        for backend in list(possible):
            if backend.NAME == name:
                possible.remove(backend)
                possible.insert(0, backend)
                break
        else:
            raise LookupError("Unknown backend: %r" % name)

    # only add null as fallback if explicitly specified
    if "null" not in names:
        possible = [b for b in possible if b.NAME != "null"]

    _ACTIVE_BACKENDS[:] = possible
Set a preferred ffi backend (cffi, ctypes).

set_backend() -- default
set_backend("cffi") -- cffi first, others as fallback
set_backend("ctypes") -- ctypes first, others as fallback
entailment
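A usage sketch, assuming these helpers are exposed at the top level of the pgi package (whose FFI backend machinery this is):

import pgi

pgi.set_backend()             # default ordering, null excluded
pgi.set_backend('ctypes')     # prefer ctypes, others as fallback
pgi.set_backend('cffi,null')  # cffi first; null allowed as last resort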
def pprint(obj, file_=None):
    """Prints debug information for various public objects like methods,
    functions, constructors etc.
    """
    if file_ is None:
        file_ = sys.stdout

    # functions, methods
    if callable(obj) and hasattr(obj, "_code"):
        obj._code.pprint(file_)
        return

    # classes
    if isinstance(obj, type) and hasattr(obj, "_constructors"):
        constructors = obj._constructors
        for names, func in sorted(constructors.items()):
            func._code.pprint(file_)
        return

    raise TypeError("unknown type")
Prints debug information for various public objects like methods, functions, constructors etc.
entailment
def get_field_type(info): """A field python type""" type_ = info.get_type() cls = get_field_class(type_) field = cls(info, type_, None) field.setup() return field.py_type
A field python type
entailment
def from_floats(red, green, blue): """Return a new Color object from red/green/blue values from 0.0 to 1.0.""" return Color(int(red * Color.MAX_VALUE), int(green * Color.MAX_VALUE), int(blue * Color.MAX_VALUE))
Return a new Color object from red/green/blue values from 0.0 to 1.0.
entailment
def _construct_target_list(targets):
    """Create a list of TargetEntry items from a list of tuples in the
    form (target, flags, info).

    The list can also contain existing TargetEntry items, in which case
    the existing entry is re-used in the return list.
    """
    target_entries = []
    for entry in targets:
        if not isinstance(entry, Gtk.TargetEntry):
            entry = Gtk.TargetEntry.new(*entry)
        target_entries.append(entry)
    return target_entries
Create a list of TargetEntry items from a list of tuples in the form (target, flags, info). The list can also contain existing TargetEntry items, in which case the existing entry is re-used in the return list.
entailment
def style_get_property(self, property_name, value=None): """style_get_property(property_name, value=None) :param property_name: the name of a style property :type property_name: :obj:`str` :param value: Either :obj:`None` or a correctly initialized :obj:`GObject.Value` :type value: :obj:`GObject.Value` or :obj:`None` :returns: The Python value of the style property {{ docs }} """ if value is None: prop = self.find_style_property(property_name) if prop is None: raise ValueError('Class "%s" does not contain style property "%s"' % (self, property_name)) value = GObject.Value(prop.value_type) Gtk.Widget.style_get_property(self, property_name, value) return value.get_value()
style_get_property(property_name, value=None) :param property_name: the name of a style property :type property_name: :obj:`str` :param value: Either :obj:`None` or a correctly initialized :obj:`GObject.Value` :type value: :obj:`GObject.Value` or :obj:`None` :returns: The Python value of the style property {{ docs }}
entailment
def child_get_property(self, child, property_name, value=None): """child_get_property(child, property_name, value=None) :param child: a widget which is a child of `self` :type child: :obj:`Gtk.Widget` :param property_name: the name of the property to get :type property_name: :obj:`str` :param value: Either :obj:`None` or a correctly initialized :obj:`GObject.Value` :type value: :obj:`GObject.Value` or :obj:`None` :returns: The Python value of the child property {{ docs }} """ if value is None: prop = self.find_child_property(property_name) if prop is None: raise ValueError('Class "%s" does not contain child property "%s"' % (self, property_name)) value = GObject.Value(prop.value_type) Gtk.Container.child_get_property(self, child, property_name, value) return value.get_value()
child_get_property(child, property_name, value=None) :param child: a widget which is a child of `self` :type child: :obj:`Gtk.Widget` :param property_name: the name of the property to get :type property_name: :obj:`str` :param value: Either :obj:`None` or a correctly initialized :obj:`GObject.Value` :type value: :obj:`GObject.Value` or :obj:`None` :returns: The Python value of the child property {{ docs }}
entailment
def child_get(self, child, *prop_names): """Returns a list of child property values for the given names.""" return [self.child_get_property(child, name) for name in prop_names]
Returns a list of child property values for the given names.
entailment
def child_set(self, child, **kwargs):
    """Set child properties on the given child from keyword/value pairs."""
    for name, value in kwargs.items():
        name = name.replace('_', '-')
        self.child_set_property(child, name, value)
Set child properties on the given child from keyword/value pairs.
entailment
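A round-trip sketch of the two helpers above in a GTK 3 session (PyGObject-style API; requires a GTK environment):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

box = Gtk.Box()
button = Gtk.Button(label='ok')
box.pack_start(button, True, True, 0)
box.child_set(button, expand=False, padding=6)   # underscores become dashes
print(box.child_get(button, 'expand', 'padding'))  # [False, 6]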
def insert_text(self, text, position):
    """insert_text(self, text, position)

    :param text: the text to insert
    :type text: :obj:`str`
    :param position: location of the position text will be inserted at
    :type position: :obj:`int`

    :returns: location of the position text will be inserted at

    :rtype: :obj:`int`

    Inserts `text` into the contents of the widget, at position
    `position`. Note that the position is in characters, not in bytes.
    """
    return super(Editable, self).insert_text(text, -1, position)
insert_text(self, text, position)

:param text: the text to insert
:type text: :obj:`str`
:param position: location of the position text will be inserted at
:type position: :obj:`int`

:returns: location of the position text will be inserted at

:rtype: :obj:`int`

Inserts `text` into the contents of the widget, at position `position`. Note that the position is in characters, not in bytes.
entailment
def add_actions(self, entries, user_data=None): """ The add_actions() method is a convenience method that creates a number of gtk.Action objects based on the information in the list of action entry tuples contained in entries and adds them to the action group. The entry tuples can vary in size from one to six items with the following information: * The name of the action. Must be specified. * The stock id for the action. Optional with a default value of None if a label is specified. * The label for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None if a stock id is specified. * The accelerator for the action, in the format understood by the gtk.accelerator_parse() function. Optional with a default value of None. * The tooltip for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None. * The callback function invoked when the action is activated. Optional with a default value of None. The "activate" signals of the actions are connected to the callbacks and their accel paths are set to <Actions>/group-name/action-name. """ try: iter(entries) except (TypeError): raise TypeError('entries must be iterable') def _process_action(name, stock_id=None, label=None, accelerator=None, tooltip=None, callback=None): action = Action(name=name, label=label, tooltip=tooltip, stock_id=stock_id) if callback is not None: if user_data is None: action.connect('activate', callback) else: action.connect('activate', callback, user_data) self.add_action_with_accel(action, accelerator) for e in entries: # using inner function above since entries can leave out optional arguments _process_action(*e)
The add_actions() method is a convenience method that creates a number of gtk.Action objects based on the information in the list of action entry tuples contained in entries and adds them to the action group. The entry tuples can vary in size from one to six items with the following information: * The name of the action. Must be specified. * The stock id for the action. Optional with a default value of None if a label is specified. * The label for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None if a stock id is specified. * The accelerator for the action, in the format understood by the gtk.accelerator_parse() function. Optional with a default value of None. * The tooltip for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None. * The callback function invoked when the action is activated. Optional with a default value of None. The "activate" signals of the actions are connected to the callbacks and their accel paths are set to <Actions>/group-name/action-name.
entailment
def add_toggle_actions(self, entries, user_data=None): """ The add_toggle_actions() method is a convenience method that creates a number of gtk.ToggleAction objects based on the information in the list of action entry tuples contained in entries and adds them to the action group. The toggle action entry tuples can vary in size from one to seven items with the following information: * The name of the action. Must be specified. * The stock id for the action. Optional with a default value of None if a label is specified. * The label for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None if a stock id is specified. * The accelerator for the action, in the format understood by the gtk.accelerator_parse() function. Optional with a default value of None. * The tooltip for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None. * The callback function invoked when the action is activated. Optional with a default value of None. * A flag indicating whether the toggle action is active. Optional with a default value of False. The "activate" signals of the actions are connected to the callbacks and their accel paths are set to <Actions>/group-name/action-name. """ try: iter(entries) except (TypeError): raise TypeError('entries must be iterable') def _process_action(name, stock_id=None, label=None, accelerator=None, tooltip=None, callback=None, is_active=False): action = Gtk.ToggleAction(name=name, label=label, tooltip=tooltip, stock_id=stock_id) action.set_active(is_active) if callback is not None: if user_data is None: action.connect('activate', callback) else: action.connect('activate', callback, user_data) self.add_action_with_accel(action, accelerator) for e in entries: # using inner function above since entries can leave out optional arguments _process_action(*e)
The add_toggle_actions() method is a convenience method that creates a number of gtk.ToggleAction objects based on the information in the list of action entry tuples contained in entries and adds them to the action group. The toggle action entry tuples can vary in size from one to seven items with the following information: * The name of the action. Must be specified. * The stock id for the action. Optional with a default value of None if a label is specified. * The label for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None if a stock id is specified. * The accelerator for the action, in the format understood by the gtk.accelerator_parse() function. Optional with a default value of None. * The tooltip for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None. * The callback function invoked when the action is activated. Optional with a default value of None. * A flag indicating whether the toggle action is active. Optional with a default value of False. The "activate" signals of the actions are connected to the callbacks and their accel paths are set to <Actions>/group-name/action-name.
entailment
def add_radio_actions(self, entries, value=None, on_change=None, user_data=None): """ The add_radio_actions() method is a convenience method that creates a number of gtk.RadioAction objects based on the information in the list of action entry tuples contained in entries and adds them to the action group. The entry tuples can vary in size from one to six items with the following information: * The name of the action. Must be specified. * The stock id for the action. Optional with a default value of None if a label is specified. * The label for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None if a stock id is specified. * The accelerator for the action, in the format understood by the gtk.accelerator_parse() function. Optional with a default value of None. * The tooltip for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None. * The value to set on the radio action. Optional with a default value of 0. Should be specified in applications. The value parameter specifies the radio action that should be set active. The "changed" signal of the first radio action is connected to the on_change callback (if specified and not None) and the accel paths of the actions are set to <Actions>/group-name/action-name. """ try: iter(entries) except (TypeError): raise TypeError('entries must be iterable') first_action = None def _process_action(group_source, name, stock_id=None, label=None, accelerator=None, tooltip=None, entry_value=0): action = RadioAction(name=name, label=label, tooltip=tooltip, stock_id=stock_id, value=entry_value) # FIXME: join_group is a patch to Gtk+ 3.0 # otherwise we can't effectively add radio actions to a # group. Should we depend on 3.0 and error out here # or should we offer the functionality via a compat # C module? if hasattr(action, 'join_group'): action.join_group(group_source) if value == entry_value: action.set_active(True) self.add_action_with_accel(action, accelerator) return action for e in entries: # using inner function above since entries can leave out optional arguments action = _process_action(first_action, *e) if first_action is None: first_action = action if first_action is not None and on_change is not None: if user_data is None: first_action.connect('changed', on_change) else: first_action.connect('changed', on_change, user_data)
The add_radio_actions() method is a convenience method that creates a number of gtk.RadioAction objects based on the information in the list of action entry tuples contained in entries and adds them to the action group. The entry tuples can vary in size from one to six items with the following information: * The name of the action. Must be specified. * The stock id for the action. Optional with a default value of None if a label is specified. * The label for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None if a stock id is specified. * The accelerator for the action, in the format understood by the gtk.accelerator_parse() function. Optional with a default value of None. * The tooltip for the action. This field should typically be marked for translation, see the set_translation_domain() method. Optional with a default value of None. * The value to set on the radio action. Optional with a default value of 0. Should be specified in applications. The value parameter specifies the radio action that should be set active. The "changed" signal of the first radio action is connected to the on_change callback (if specified and not None) and the accel paths of the actions are set to <Actions>/group-name/action-name.
entailment
def add_ui_from_string(self, buffer, length=-1): """add_ui_from_string(buffer, length=-1) {{ all }} """ return Gtk.UIManager.add_ui_from_string(self, buffer, length)
add_ui_from_string(buffer, length=-1) {{ all }}
entailment
def add_from_string(self, buffer, length=-1): """add_from_string(buffer, length=-1) {{ all }} """ return Gtk.Builder.add_from_string(self, buffer, length)
add_from_string(buffer, length=-1) {{ all }}
entailment
def add_objects_from_string(self, buffer, object_ids): """add_objects_from_string(buffer, object_ids) :param buffer: the string to parse :type buffer: :obj:`str` :param object_ids: array of objects to build :type object_ids: [:obj:`str`] :raises: :class:`GLib.Error` :returns: A positive value on success, 0 if an error occurred :rtype: :obj:`int` {{ docs }} """ length = -1 return Gtk.Builder.add_objects_from_string(self, buffer, length, object_ids)
add_objects_from_string(buffer, object_ids) :param buffer: the string to parse :type buffer: :obj:`str` :param object_ids: array of objects to build :type object_ids: [:obj:`str`] :raises: :class:`GLib.Error` :returns: A positive value on success, 0 if an error occurred :rtype: :obj:`int` {{ docs }}
entailment
def add_buttons(self, *args): """add_buttons(*args) The add_buttons() method adds several buttons to the Gtk.Dialog using the button data passed as arguments to the method. This method is the same as calling the Gtk.Dialog.add_button() repeatedly. The button data pairs - button text (or stock ID) and a response ID integer are passed individually. For example: .. code-block:: python dialog.add_buttons(Gtk.STOCK_OPEN, 42, "Close", Gtk.ResponseType.CLOSE) will add "Open" and "Close" buttons to dialog. """ def _button(b): while b: t, r = b[0:2] b = b[2:] yield t, r try: for text, response in _button(args): self.add_button(text, response) except (IndexError): raise TypeError('Must pass an even number of arguments')
add_buttons(*args) The add_buttons() method adds several buttons to the Gtk.Dialog using the button data passed as arguments to the method. This method is the same as calling the Gtk.Dialog.add_button() repeatedly. The button data pairs - button text (or stock ID) and a response ID integer are passed individually. For example: .. code-block:: python dialog.add_buttons(Gtk.STOCK_OPEN, 42, "Close", Gtk.ResponseType.CLOSE) will add "Open" and "Close" buttons to dialog.
entailment
def create_tag(self, tag_name=None, **properties): """Creates a tag and adds it to the tag table of the TextBuffer. :param str tag_name: Name of the new tag, or None :param **properties: Keyword list of properties and their values :returns: A new tag. This is equivalent to creating a Gtk.TextTag and then adding the tag to the buffer's tag table. The returned tag is owned by the buffer's tag table. If ``tag_name`` is None, the tag is anonymous. If ``tag_name`` is not None, a tag called ``tag_name`` must not already exist in the tag table for this buffer. Properties are passed as a keyword list of names and values (e.g. foreground='DodgerBlue', weight=Pango.Weight.BOLD) """ tag = Gtk.TextTag(name=tag_name, **properties) self._get_or_create_tag_table().add(tag) return tag
Creates a tag and adds it to the tag table of the TextBuffer. :param str tag_name: Name of the new tag, or None :param **properties: Keyword list of properties and their values :returns: A new tag. This is equivalent to creating a Gtk.TextTag and then adding the tag to the buffer's tag table. The returned tag is owned by the buffer's tag table. If ``tag_name`` is None, the tag is anonymous. If ``tag_name`` is not None, a tag called ``tag_name`` must not already exist in the tag table for this buffer. Properties are passed as a keyword list of names and values (e.g. foreground='DodgerBlue', weight=Pango.Weight.BOLD)
entailment
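A small sketch of tagging inserted text with the helper above (GTK 3 / PyGObject environment assumed):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Pango

buf = Gtk.TextBuffer()
bold = buf.create_tag('bold', weight=Pango.Weight.BOLD)
buf.insert_with_tags(buf.get_start_iter(), 'heavy', bold)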