sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def _p2_unicode_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
    """
    Parse a Unicode stream as CSV under Python 2.

    Python 2's `csv` module cannot consume Unicode directly, so the
    stream is temporarily encoded as UTF-8, parsed, and each cell
    decoded back to Unicode.
    Kudos: https://docs.python.org/2/library/csv.html#examples

    :param unicode_csv_data: The Unicode stream to parse.
    :param dialect: The CSV dialect to use.
    :param kwargs: Any other parameters to pass to csv.reader.
    :returns: An iterator of rows of Unicode cells.
    """
    # csv.reader only accepts byte strings on Python 2, so feed it UTF-8.
    byte_stream = _utf_8_encoder(unicode_csv_data)
    parsed_rows = csv.reader(byte_stream, dialect=dialect, **kwargs)
    # Decode each cell lazily as rows are consumed.
    return ([cell.decode('utf-8') for cell in row] for row in parsed_rows)
Encode Unicode as UTF-8 and parse as CSV. This is needed since Python 2's `csv` doesn't do Unicode. Kudos: https://docs.python.org/2/library/csv.html#examples :param unicode_csv_data: The Unicode stream to parse. :param dialect: The CSV dialect to use. :param kwargs: Any other parameters to pass to csv.reader. :returns: An iterator
entailment
def file_exists(filename):
    """
    Return True iff `filename` names an existing regular file.

    Unicode-related errors raised while checking the path are treated
    as "not a file" rather than propagated.
    """
    try:
        found = os.path.isfile(filename)
    except (UnicodeDecodeError, UnicodeEncodeError, ValueError):
        found = False
    return found
Check if a file exists (and don't error out on unicode inputs)
entailment
def data_preprocess(data, size=None, min_axis=None, batch=False):
    """
    Takes data and prepares it for sending to the api including
    resizing and image data/structure standardizing.

    :param data: a file path, a base64/URL/raw string, a PIL image, or
        a numpy ndarray; with batch=True, a list of any of these.
    :param size: target size forwarded to resize_image when set.
    :param min_axis: minimum-axis resize option forwarded to resize_image.
    :param batch: when True, `data` is a list and each element is
        preprocessed individually.
    :returns: base64-encoded PNG data, or the raw string unchanged for
        URL / already-encoded input.
    """
    if batch:
        # Recurse element-wise over the batch.
        return [data_preprocess(el, size=size, min_axis=min_axis, batch=False) for el in data]

    if isinstance(data, string_types):
        if file_exists(data):
            # probably a path to an image
            preprocessed = Image.open(data)
        else:
            # base 64 encoded image data, a url, or raw content
            # send raw data to the server and let the server infer type
            b64_or_url = re.sub('^data:image/.+;base64,', '', data)
            return b64_or_url
    elif isinstance(data, Image.Image):
        # data is image from PIL
        preprocessed = data
    elif type(data).__name__ == "ndarray":
        # data is likely image from numpy/scipy
        # (name check avoids a hard import dependency on numpy)
        if "float" in str(data.dtype) and data.min() >= 0 and data.max() <= 1:
            # Scale 0-1 floats up to the 0-255 byte range (mutates input in place).
            data *= 255.
        try:
            preprocessed = Image.fromarray(data.astype("uint8"))
        except TypeError:
            raise IndicoError(
                "Please ensure the numpy array is in a format by PIL. "
                "Values must be between 0 and 1 or between 0 and 255 in greyscale, rgb, or rgba format."
            )
    else:
        # at this point we are unsure of the type -- it could be malformatted text or image data.
        raise IndicoError(
            "Invalid input datatype: `{}`. "
            "Ensure input data is one of the following types: "
            "`str`, `unicode`, `PIL.Image`, `np.ndarray`.".format(
                data.__class__.__name__
            )
        )

    # Resize only when the caller asked for it.
    if size or min_axis:
        preprocessed = resize_image(preprocessed, size, min_axis)

    # standardize on b64 encoding for sending image data over the wire
    temp_output = BytesIO()
    preprocessed.save(temp_output, format='PNG')
    temp_output.seek(0)
    output_s = temp_output.read()
    return base64.b64encode(output_s).decode('utf-8') if PY3 else base64.b64encode(output_s)
Takes data and prepares it for sending to the api including resizing and image data/structure standardizing.
entailment
def get_list_dimensions(_list):
    """
    Return the size of each dimension of a nested list or tuple.

    The outermost length comes first; deeper dimensions are probed via
    the first element of each level. Non-sequence input yields an
    empty list.

    :param _list: A (possibly nested) list or tuple, or any other value.
    :returns: A list of ints, one per nested dimension.
    """
    if isinstance(_list, (list, tuple)):
        # Guard: an empty sequence has a length but no first element
        # to recurse into (the original raised IndexError here).
        if not _list:
            return [0]
        return [len(_list)] + get_list_dimensions(_list[0])
    return []
Takes a nested list and returns the size of each dimension followed by the element type in the list
entailment
def get_element_type(_list, dimens):
    """
    Given the dimensions of a nested list and the list, returns the
    type of the elements in the inner list.

    :param _list: the (possibly nested) list to inspect.
    :param dimens: the dimension sizes, one entry per nesting level.
    :returns: the type of a scalar element at the innermost level.
    """
    inner = _list
    # Descend one level per dimension to reach a scalar element.
    for _ in dimens:
        inner = inner[0]
    return type(inner)
Given the dimensions of a nested list and the list, returns the type of the elements in the inner list.
entailment
def image_recognition(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given an input image, returns a dictionary of image classifications
    with associated scores.

    * Input can be either grayscale or rgb color and should either be a
      numpy array or nested list format.
    * Input data should be either uint8 0-255 range values or floating
      point between 0 and 1.
    * Large images (i.e. 1024x768+) are much bigger than needed;
      min-axis resizing to 144 is applied internally if needed.
    * Square aspect ratios perform best but are not required.

    Example usage:

    .. code-block:: python

       >>> from indicoio import image_recognition
       >>> features = image_recognition(<filename>)

    :param image: The image to be analyzed.
    :type image: str
    :rtype: dict containing classifications
    """
    # Normalize/resize the image client-side before shipping it off.
    processed = data_preprocess(image, batch=batch, size=144, min_axis=True)
    params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(processed, cloud=cloud, api="imagerecognition", url_params=params, **kwargs)
Given an input image, returns a dictionary of image classifications with associated scores * Input can be either grayscale or rgb color and should either be a numpy array or nested list format. * Input data should be either uint8 0-255 range values or floating point between 0 and 1. * Large images (i.e. 1024x768+) are much bigger than needed, minaxis resizing will be done internally to 144 if needed. * For ideal performance, images should be square aspect ratio but non-square aspect ratios are supported as well. Example usage: .. code-block:: python >>> from indicoio import image_recognition >>> features = image_recognition(<filename>) :param image: The image to be analyzed. :type image: str :rtype: dict containing classifications
entailment
def multi(data, datatype, apis, accepted_apis, batch=False, **kwargs):
    """
    Helper to make multi requests of different types.

    :param data: Data to be sent in API request
    :param datatype: String type of API request
    :param apis: List of apis to use.
    :param accepted_apis: Mapping of valid api names to their functions.
    :param batch: Is this a batch request?
    :rtype: Dictionary of api responses
    """
    # Client side api name checking - strictly only accept func name api
    bad_names = [name for name in apis
                 if name not in accepted_apis or name in MULTIAPI_NOT_SUPPORTED]
    if bad_names:
        raise IndicoError(
            "The following are not valid %s APIs: %s. Please reference the available APIs below:\n%s"
            % (datatype, ", ".join(bad_names), ", ".join(accepted_apis.keys()))
        )

    # Convert client api names to server names before sending request
    cloud = kwargs.pop("cloud", None)
    api_key = kwargs.pop('api_key', None)

    # Fan the requests out concurrently, remembering which future maps
    # to which api name.
    pending = {}
    for name in apis:
        future = EXECUTOR.submit(
            accepted_apis[name], data,
            cloud=cloud, api_key=api_key, batch=batch, **kwargs
        )
        pending[future] = name

    responses = {}
    for done in concurrent.futures.as_completed(pending):
        responses[pending[done]] = done.result()
    return responses
Helper to make multi requests of different types. :param data: Data to be sent in API request :param datatype: String type of API request :param apis: List of apis to use. :param batch: Is this a batch request? :rtype: Dictionary of api responses
entailment
def analyze_text(input_text, apis=DEFAULT_APIS, **kwargs):
    """
    Given input text, returns the results of the specified text apis.
    Possible apis include: ['text_tags', 'political', 'sentiment', 'language']

    Example usage:

    .. code-block:: python

       >>> import indicoio
       >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
       >>> results = indicoio.analyze_text(data=text, apis=["language", "sentiment"])
       >>> language_results = results["language"]
       >>> sentiment_results = results["sentiment"]

    :param input_text: The text to be analyzed.
    :param apis: List of apis to use.
    :type input_text: str or unicode
    :type apis: list of str
    :rtype: Dictionary of api responses
    """
    # Pull routing options out of kwargs before fanning out.
    cloud = kwargs.pop('cloud', None)
    batch = kwargs.pop('batch', False)
    api_key = kwargs.pop('api_key', None)
    return multi(
        input_text,
        "text",
        apis,
        TEXT_APIS,
        cloud=cloud,
        batch=batch,
        api_key=api_key,
        **kwargs
    )
Given input text, returns the results of specified text apis. Possible apis include: [ 'text_tags', 'political', 'sentiment', 'language' ] Example usage: .. code-block:: python >>> import indicoio >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.' >>> results = indicoio.analyze_text(data = text, apis = ["language", "sentiment"]) >>> language_results = results["language"] >>> sentiment_results = results["sentiment"] :param text: The text to be analyzed. :param apis: List of apis to use. :type text: str or unicode :type apis: list of str :rtype: Dictionary of api responses
entailment
def fhp_from_json_dict(
        json_dict  # type: Dict[str, Any]
):
    # type: (...) -> FieldHashingProperties
    """
    Make a :class:`FieldHashingProperties` object from a dictionary.

    :param dict json_dict: The dictionary must have an 'ngram' key
        and one of 'k' or 'numBits'. It may have a 'positional' key;
        if missing a default is used. The encoding is always set to
        the default value.
    :return: A :class:`FieldHashingProperties` instance.
    """
    hash_spec = json_dict.get('hash', {'type': 'blakeHash'})
    num_bits = json_dict.get('numBits')
    k = json_dict.get('k')
    if not num_bits and not k:
        # default for v2 schema
        num_bits = 200

    # A missingValue section is optional in the schema.
    missing = None
    if 'missingValue' in json_dict:
        missing = MissingValueSpec.from_json_dict(json_dict['missingValue'])

    return FieldHashingProperties(
        ngram=json_dict['ngram'],
        positional=json_dict.get(
            'positional', FieldHashingProperties._DEFAULT_POSITIONAL),
        hash_type=hash_spec['type'],
        prevent_singularity=hash_spec.get('prevent_singularity'),
        num_bits=num_bits,
        k=k,
        missing_value=missing
    )
Make a :class:`FieldHashingProperties` object from a dictionary. :param dict json_dict: The dictionary must have have an 'ngram' key and one of k or num_bits. It may have 'positional' key; if missing a default is used. The encoding is always set to the default value. :return: A :class:`FieldHashingProperties` instance.
entailment
def spec_from_json_dict(
        json_dict  # type: Dict[str, Any]
):
    # type: (...) -> FieldSpec
    """
    Turns a dictionary into the appropriate FieldSpec object.

    :param dict json_dict: A dictionary with properties.
    :returns: An initialised instance of the appropriate FieldSpec
        subclass.
    """
    # Ignored fields short-circuit: no format/hashing inspection needed.
    if 'ignored' in json_dict:
        return Ignore(json_dict['identifier'])

    # Dispatch on the declared format type.
    type_name = json_dict['format']['type']
    spec_class = cast(FieldSpec, FIELD_TYPE_MAP[type_name])
    return spec_class.from_json_dict(json_dict)
Turns a dictionary into the appropriate object. :param dict json_dict: A dictionary with properties. :returns: An initialised instance of the appropriate FieldSpec subclass.
entailment
def ks(self, num_ngrams):
    # type (int) -> [int]
    """
    Provide a k for each ngram in the field value.

    :param num_ngrams: number of ngrams in the field value
    :return: [ k, ... ] a k value for each of num_ngrams such that the
        sum is exactly num_bits
    """
    if self.num_bits:
        # Spread num_bits as evenly as possible: the first `extra`
        # ngrams each carry one additional bit.
        base, extra = divmod(self.num_bits, num_ngrams)
        return [base + 1] * extra + [base] * (num_ngrams - extra)
    return [self.k or 0] * num_ngrams
Provide a k for each ngram in the field value. :param num_ngrams: number of ngrams in the field value :return: [ k, ... ] a k value for each of num_ngrams such that the sum is exactly num_bits
entailment
def replace_missing_value(self, str_in):
    # type: (Text) -> Text
    """
    Return `str_in` unless it equals the 'sentinel' defined in the
    missingValue section of the schema, in which case return the
    configured 'replaceWith' value.

    :param str_in: the raw field value
    :return: str_in or the missingValue replacement value
    """
    mv = self.missing_value
    # No missingValue section configured: pass everything through.
    if mv is not None and mv.sentinel == str_in:
        return mv.replace_with
    return str_in
returns 'str_in' if it is not equals to the 'sentinel' as defined in the missingValue section of the schema. Else it will return the 'replaceWith' value. :param str_in: :return: str_in or the missingValue replacement value
entailment
def from_json_dict(cls,
                   json_dict  # type: Dict[str, Any]
                   ):
    # type: (...) -> StringSpec
    """
    Make a StringSpec object from a dictionary containing its
    properties.

    :param dict json_dict: This dictionary must contain an
        `'encoding'` key associated with a Python-conformant
        encoding. It must also contain a `'hashing'` key, whose
        contents are passed to :class:`FieldHashingProperties`.
        Permitted keys also include `'pattern'`, `'case'`,
        `'minLength'`, and `'maxLength'`.
    :return: An initialised :class:`StringSpec` instance.
    :raises InvalidSchemaError: When a regular expression is provided
        but is not a valid pattern.
    """
    # noinspection PyCompatibility
    result = cast(StringSpec,  # Go away, Mypy.
                  super().from_json_dict(json_dict))

    format_ = json_dict['format']
    # The encoding only matters when the field is actually hashed.
    if 'encoding' in format_ and result.hashing_properties:
        result.hashing_properties.encoding = format_['encoding']

    if 'pattern' in format_:
        pattern = format_['pattern']
        try:
            result.regex = re_compile_full(pattern)
        except (SyntaxError, re.error) as e:
            # Surface a bad pattern as a schema error, chaining the cause.
            msg = "Invalid regular expression '{}.'".format(pattern)
            e_new = InvalidSchemaError(msg)
            raise_from(e_new, e)
        result.regex_based = True
    else:
        # No pattern: fall back to case / length constraints.
        result.case = format_.get('case', StringSpec._DEFAULT_CASE)
        result.min_length = format_.get('minLength')
        result.max_length = format_.get('maxLength')
        result.regex_based = False

    return result
Make a StringSpec object from a dictionary containing its properties. :param dict json_dict: This dictionary must contain an `'encoding'` key associated with a Python-conformant encoding. It must also contain a `'hashing'` key, whose contents are passed to :class:`FieldHashingProperties`. Permitted keys also include `'pattern'`, `'case'`, `'minLength'`, and `'maxLength'`. :raises InvalidSchemaError: When a regular expression is provided but is not a valid pattern.
entailment
def validate(self, str_in):
    # type: (Text) -> None
    """
    Validates an entry in the field.

    Raises `InvalidEntryError` iff the entry is invalid.

    An entry is invalid iff (1) a pattern is part of the specification
    of the field and the string does not match it; (2) the string does
    not match the provided casing, minimum length, or maximum length;
    or (3) the specified encoding cannot represent the string.

    :param str str_in: String to validate.
    :raises InvalidEntryError: When entry is invalid.
    :raises ValueError: When self.case is not one of the permitted
        values (`'lower'`, `'upper'`, or `'mixed'`).
    """
    # Missing-value sentinels are always considered valid.
    if self.is_missing_value(str_in):
        return

    # noinspection PyCompatibility
    super().validate(str_in)  # Validate encoding.

    if self.regex_based:
        # Pattern-based spec: the entry must match the compiled regex.
        match = self.regex.match(str_in)
        if match is None:
            e = InvalidEntryError(
                'Expected entry that conforms to regular expression '
                "'{}'. Read '{}'.".format(self.regex.pattern, str_in))
            e.field_spec = self
            raise e
    else:
        # Constraint-based spec: check length bounds, then casing.
        str_len = len(str_in)
        if self.min_length is not None and str_len < self.min_length:
            e = InvalidEntryError(
                "Expected string length of at least {}. Read string '{}' "
                'of length {}.'.format(self.min_length, str_in, str_len))
            e.field_spec = self
            raise e

        if self.max_length is not None and str_len > self.max_length:
            e = InvalidEntryError(
                "Expected string length of at most {}. Read string '{}' "
                'of length {}.'.format(self.max_length, str_in, str_len))
            e.field_spec = self
            raise e

        if self.case == 'upper':
            if str_in.upper() != str_in:
                msg = "Expected upper case string. Read '{}'.".format(
                    str_in)
                e = InvalidEntryError(msg)
                e.field_spec = self
                raise e
        elif self.case == 'lower':
            if str_in.lower() != str_in:
                msg = "Expected lower case string. Read '{}'.".format(
                    str_in)
                e = InvalidEntryError(msg)
                e.field_spec = self
                raise e
        elif self.case == 'mixed':
            # 'mixed' imposes no casing constraint.
            pass
        else:
            # Misconfigured spec (bad `case` value), not a bad entry.
            raise ValueError(
                'Invalid case property {}.'.format(self.case))
Validates an entry in the field. Raises `InvalidEntryError` iff the entry is invalid. An entry is invalid iff (1) a pattern is part of the specification of the field and the string does not match it; (2) the string does not match the provided casing, minimum length, or maximum length; or (3) the specified encoding cannot represent the string. :param str str_in: String to validate. :raises InvalidEntryError: When entry is invalid. :raises ValueError: When self.case is not one of the permitted values (`'lower'`, `'upper'`, or `'mixed'`).
entailment
def from_json_dict(cls,
                   json_dict  # type: Dict[str, Any]
                   ):
    # type: (...) -> IntegerSpec
    """
    Make a IntegerSpec object from a dictionary containing its
    properties.

    :param dict json_dict: This dictionary may contain `'minimum'`
        and `'maximum'` keys. In addition, it must contain a
        `'hashing'` key, whose contents are passed to
        :class:`FieldHashingProperties`.
    :return: An initialised :class:`IntegerSpec` instance.
    """
    # noinspection PyCompatibility
    result = cast(IntegerSpec,  # For Mypy.
                  super().from_json_dict(json_dict))

    format_ = json_dict['format']
    # Missing bounds stay None, meaning "unbounded" on that side.
    result.minimum = format_.get('minimum')
    result.maximum = format_.get('maximum')

    return result
Make a IntegerSpec object from a dictionary containing its properties. :param dict json_dict: This dictionary may contain `'minimum'` and `'maximum'` keys. In addition, it must contain a `'hashing'` key, whose contents are passed to :class:`FieldHashingProperties`. :param dict json_dict: The properties dictionary.
entailment
def validate(self, str_in):
    # type: (Text) -> None
    """
    Validates an entry in the field.

    Raises `InvalidEntryError` iff the entry is invalid.

    An entry is invalid iff (1) the string does not represent a
    base-10 integer; or (2) the integer is not between `self.minimum`
    and `self.maximum`, if those exist.

    :param str str_in: String to validate.
    :raises InvalidEntryError: When entry is invalid.
    """
    # Missing-value sentinels are always considered valid.
    if self.is_missing_value(str_in):
        return

    # noinspection PyCompatibility
    super().validate(str_in)

    try:
        value = int(str_in, base=10)
    except ValueError as e:
        # Not an integer at all: wrap the cause in a field error.
        msg = "Invalid integer. Read '{}'.".format(str_in)
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise_from(e_new, e)
        return  # to stop PyCharm thinking that value might be undefined later

    if self.minimum is not None and value < self.minimum:
        msg = ("Expected integer value of at least {}. Read '{}'."
               .format(self.minimum, value))
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise e_new

    if self.maximum is not None and value > self.maximum:
        msg = ("Expected integer value of at most {}. Read '{}'."
               .format(self.maximum, value))
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise e_new
Validates an entry in the field. Raises `InvalidEntryError` iff the entry is invalid. An entry is invalid iff (1) the string does not represent a base-10 integer; (2) the integer is not between `self.minimum` and `self.maximum`, if those exist; or (3) the integer is negative. :param str str_in: String to validate. :raises InvalidEntryError: When entry is invalid.
entailment
def _format_regular_value(self, str_in): # type: (Text) -> Text """ we need to reformat integer strings, as there can be different strings for the same integer. The strategy of unification here is to first parse the integer string to an Integer type. Thus all of '+13', ' 13', '13' will be parsed to 13. We then convert the integer value to an unambiguous string (no whitespaces, leading '-' for negative numbers, no leading '+'). :param str_in: integer string :return: integer string without whitespaces, leading '-' for negative numbers, no leading '+' """ try: value = int(str_in, base=10) return str(value) except ValueError as e: msg = "Invalid integer. Read '{}'.".format(str_in) e_new = InvalidEntryError(msg) e_new.field_spec = self raise_from(e_new, e)
we need to reformat integer strings, as there can be different strings for the same integer. The strategy of unification here is to first parse the integer string to an Integer type. Thus all of '+13', ' 13', '13' will be parsed to 13. We then convert the integer value to an unambiguous string (no whitespaces, leading '-' for negative numbers, no leading '+'). :param str_in: integer string :return: integer string without whitespaces, leading '-' for negative numbers, no leading '+'
entailment
def from_json_dict(cls,
                   json_dict  # type: Dict[str, Any]
                   ):
    # type: (...) -> DateSpec
    """
    Make a DateSpec object from a dictionary containing its properties.

    :param dict json_dict: This dictionary must contain a `'format'`
        key. In addition, it must contain a `'hashing'` key, whose
        contents are passed to :class:`FieldHashingProperties`.
    :return: An initialised :class:`DateSpec` instance.
    """
    # noinspection PyCompatibility
    result = cast(DateSpec,  # For Mypy.
                  super().from_json_dict(json_dict))

    format_ = json_dict['format']
    # The strptime-style pattern dates in this field must conform to.
    result.format = format_['format']

    return result
Make a DateSpec object from a dictionary containing its properties. :param dict json_dict: This dictionary must contain a `'format'` key. In addition, it must contain a `'hashing'` key, whose contents are passed to :class:`FieldHashingProperties`. :param json_dict: The properties dictionary.
entailment
def validate(self, str_in):
    # type: (Text) -> None
    """
    Validates an entry in the field.

    Raises `InvalidEntryError` iff the entry is invalid.

    An entry is invalid iff (1) the string does not represent a date
    in the correct format; or (2) the date it represents is invalid
    (such as 30 February).

    :param str str_in: String to validate.
    :raises InvalidEntryError: Iff entry is invalid.
    :raises ValueError: When self.format is unrecognised.
    """
    # Missing-value sentinels are always considered valid.
    if self.is_missing_value(str_in):
        return

    # noinspection PyCompatibility
    super().validate(str_in)

    try:
        # strptime both parses the format and rejects impossible dates.
        datetime.strptime(str_in, self.format)
    except ValueError as e:
        msg = "Validation error for date type: {}".format(e)
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise_from(e_new, e)
Validates an entry in the field. Raises `InvalidEntryError` iff the entry is invalid. An entry is invalid iff (1) the string does not represent a date in the correct format; or (2) the date it represents is invalid (such as 30 February). :param str str_in: String to validate. :raises InvalidEntryError: Iff entry is invalid. :raises ValueError: When self.format is unrecognised.
entailment
def _format_regular_value(self, str_in):
    # type: (Text) -> Text
    """
    We overwrite default behaviour as we want to hash the numbers
    only, no fillers like '-', or '/'.

    :param str str_in: date string
    :return: str date string with format DateSpec.OUTPUT_FORMAT
    :raises InvalidEntryError: if `str_in` cannot be parsed with
        `self.format`.
    """
    try:
        dt = datetime.strptime(str_in, self.format)
        # NOTE(review): `strftime` here appears to be a module-level
        # helper taking (dt, fmt), not datetime.strftime -- confirm
        # against the file's imports.
        return strftime(dt, DateSpec.OUTPUT_FORMAT)
    except ValueError as e:
        msg = "Unable to format date value '{}'. Reason: {}".format(str_in, e)
        e_new = InvalidEntryError(msg)
        e_new.field_spec = self
        raise_from(e_new, e)
we overwrite default behaviour as we want to hash the numbers only, no fillers like '-', or '/' :param str str_in: date string :return: str date string with format DateSpec.OUTPUT_FORMAT
entailment
def from_json_dict(cls,
                   json_dict  # type: Dict[str, Any]
                   ):
    # type: (...) -> EnumSpec
    """
    Make a EnumSpec object from a dictionary containing its properties.

    :param dict json_dict: The `'format'` entry of this dictionary
        must contain a `'values'` key specifying the permitted values.
        In addition, it must contain a `'hashing'` key, whose contents
        are passed to :class:`FieldHashingProperties`.
    :return: An initialised :class:`EnumSpec` instance.
    """
    # noinspection PyCompatibility
    result = cast(EnumSpec,  # Appease the gods of Mypy.
                  super().from_json_dict(json_dict))

    format_ = json_dict['format']
    # A set makes membership checks in validate() O(1).
    result.values = set(format_['values'])

    return result
Make a EnumSpec object from a dictionary containing its properties. :param dict json_dict: This dictionary must contain an `'enum'` key specifying the permitted values. In addition, it must contain a `'hashing'` key, whose contents are passed to :class:`FieldHashingProperties`.
entailment
def validate(self, str_in):
    # type: (Text) -> None
    """
    Validates an entry in the field.

    Raises `InvalidEntryError` iff the entry is invalid.

    An entry is invalid iff it is not one of the permitted values.

    :param str str_in: String to validate.
    :raises InvalidEntryError: When entry is invalid.
    """
    # Missing-value sentinels are always considered valid.
    if self.is_missing_value(str_in):
        return

    # noinspection PyCompatibility
    super().validate(str_in)

    if str_in not in self.values:
        msg = ("Expected enum value to be one of {}. Read '{}'."
               .format(list(self.values), str_in))
        e = InvalidEntryError(msg)
        e.field_spec = self
        raise e
Validates an entry in the field. Raises `InvalidEntryError` iff the entry is invalid. An entry is invalid iff it is not one of the permitted values. :param str str_in: String to validate. :raises InvalidEntryError: When entry is invalid.
entailment
def batched(iterable, size):
    """
    Yield constant-sized chunks of a sequence; the final chunk may be
    shorter. The input must support len() and slicing.

    Recipe from http://stackoverflow.com/a/8290514
    """
    total = len(iterable)
    start = 0
    while start < total:
        yield iterable[start:start + size]
        start += size
Split an iterable into constant sized chunks Recipe from http://stackoverflow.com/a/8290514
entailment
def standardize_input_data(data):
    """
    Ensure utf-8 decoded strings are passed to the indico API.

    Accepts either a single value or a list; any `bytes` values are
    decoded as UTF-8, everything else is passed through untouched.

    :param data: a string/bytes payload, or a list of them.
    :returns: the same structure with all bytes decoded to str.
    """
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, list):
        # BUG FIX: the original comprehension tested `type(data) == bytes`
        # (the list itself) instead of each element, so bytes elements
        # were never decoded.
        return [el.decode('utf-8') if isinstance(el, bytes) else el
                for el in data]
    return data
Ensure utf-8 encoded strings are passed to the indico API
entailment
def api_handler(input_data, cloud, api, url_params=None, batch_size=None, **kwargs):
    """
    Sends finalized request data to ML server and receives response.

    If a batch_size is specified, breaks down a request into smaller
    component requests and aggregates the results.

    :param input_data: payload to send (bytes are decoded first).
    :param cloud: private-cloud subdomain, or None for the default host.
    :param api: api endpoint name.
    :param url_params: routing parameters (batch, api_key, version, ...).
    :param batch_size: when set, the payload is sent in chunks.
    :returns: the aggregated api results.
    """
    url_params = url_params or {}
    payload = standardize_input_data(input_data)

    cloud = cloud or config.cloud
    host = "%s.indico.domains" % cloud if cloud else config.host

    # LOCAL DEPLOYMENTS: hosts outside the indico domains use plain http.
    if host.endswith('indico.domains') or host.endswith('indico.io'):
        protocol = config.url_protocol
    else:
        protocol = "http"

    headers = dict(JSON_HEADERS)
    headers["X-ApiKey"] = url_params.get("api_key") or config.api_key

    url = create_url(protocol, host, api, dict(kwargs, **url_params))
    return collect_api_results(payload, url, headers, api, batch_size, kwargs)
Sends finalized request data to ML server and receives response. If a batch_size is specified, breaks down a request into smaller component requests and aggregates the results.
entailment
def collect_api_results(input_data, url, headers, api, batch_size, kwargs):
    """
    Optionally split up a single request into a series of requests to
    ensure timely HTTP responses. Could eventually speed up the time
    required to receive a response by sending batches to the indico API
    concurrently.

    :param input_data: data payload (already standardized).
    :param url: fully-formed request URL.
    :param headers: HTTP headers, including the api key.
    :param api: api name, used in the partial-results dump filename.
    :param batch_size: when set, send the data in chunks of this size.
    :param kwargs: extra parameters forwarded to send_request.
    :returns: aggregated results (a list when batched).
    :raises BatchProcessingError: when a batch fails part-way; partial
        results are written to a timestamped JSON file first.
    """
    if not batch_size:
        return send_request(input_data, api, url, headers, kwargs)

    results = []
    for batch in batched(input_data, size=batch_size):
        try:
            result = send_request(batch, api, url, headers, kwargs)
            if isinstance(result, list):
                results.extend(result)
            else:
                results.append(result)
        except IndicoError as e:
            # Log results so far to file before surfacing the error.
            timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
            filename = "indico-{api}-{timestamp}.json".format(
                api=api, timestamp=timestamp
            )
            # BUG FIX: use a context manager so the dump file is closed.
            if sys.version_info > (3, 0):
                with open(filename, mode='w', encoding='utf-8') as dump:
                    json.dump(results, dump, cls=NumpyEncoder)
            else:
                with open(filename, mode='w') as dump:
                    json.dump(results, dump, cls=NumpyEncoder)
            # BUG FIX: the message previously had no {filename}
            # placeholder, so the passed `filename=` kwarg was ignored
            # and users saw no path.
            raise BatchProcessingError(
                "The following error occurred while processing your data: `{err}` "
                "Partial results have been saved to {filename}".format(
                    err=e, filename=os.path.abspath(filename)
                )
            )
    return results
Optionally split up a single request into a series of requests to ensure timely HTTP responses. Could eventually speed up the time required to receive a response by sending batches to the indico API concurrently
entailment
def send_request(input_data, api, url, headers, kwargs):
    """
    Use the requests library to send off an HTTP call to the indico
    servers.

    :param input_data: payload placed under the 'data' key (omitted
        when None).
    :param api: api name, used for error reporting.
    :param url: fully-formed request URL.
    :param headers: HTTP headers including the api key.
    :param kwargs: extra request parameters; 'serializer' is consumed
        here, the rest are forwarded in the request body.
    :returns: the 'results' entry of the server response.
    :raises APIDoesNotExist: when a private cloud lacks the api.
    """
    data = {}
    # BUG FIX (idiom): compare to None with `is not`, not `!=`, so
    # payloads with custom __eq__ cannot be silently dropped.
    if input_data is not None:
        data['data'] = input_data

    # request that the API respond with a msgpack encoded result
    serializer = kwargs.pop("serializer", config.serializer)
    data['serializer'] = serializer
    data.update(**kwargs)

    json_data = json.dumps(data)
    response = requests.post(url, data=json_data, headers=headers)

    # Surface server-side warnings (e.g. deprecations) to the caller.
    warning = response.headers.get('x-warning')
    if warning:
        warnings.warn(warning)

    cloud = urlparse(url).hostname
    if response.status_code == 503 and not cloud.endswith('.indico.io'):
        raise APIDoesNotExist("Private cloud '%s' does not include api '%s'" % (cloud, api))

    try:
        if serializer == 'msgpack':
            json_results = msgpack.unpackb(response.content)
        else:
            json_results = response.json()
    except (msgpack.exceptions.UnpackException, msgpack.exceptions.ExtraData):
        # msgpack decode failed; fall back to JSON, then to raw text.
        try:
            json_results = response.json()
        except Exception:  # narrowed from a bare `except:`; keeps best-effort fallback
            json_results = {"error": response.text}

    if config.PY3:
        json_results = convert(json_results)

    results = json_results.get('results', False)
    if results is False:
        error = json_results.get('error')
        raise convert_to_py_error(error)
    return results
Use the requests library to send of an HTTP call to the indico servers
entailment
def create_url(url_protocol, host, api, url_params):
    """
    Generate the proper url for sending off data for analysis.

    Routing entries ('batch', 'apis', 'version'/'v', 'method') are
    popped out of `url_params` and turned into path segments or query
    parameters.

    :param url_protocol: 'http' or 'https'.
    :param host: the server hostname.
    :param api: api endpoint name.
    :param url_params: dict of routing parameters (mutated via pop).
    :returns: the assembled URL string.
    """
    is_batch = url_params.pop("batch", None)
    apis = url_params.pop("apis", None)
    version = url_params.pop("version", None) or url_params.pop("v", None)
    method = url_params.pop('method', None)

    # Build the path piece by piece.
    segments = [url_protocol + "://%s" % host, "/%s" % api]
    if is_batch:
        segments.append("/batch")
    if method:
        segments.append("/%s" % method)

    query = {}
    if apis:
        query["apis"] = ",".join(apis)
    if version:
        query["version"] = version

    url = "".join(segments)
    if query:
        url += "?" + urlencode(query)
    return url
Generate the proper url for sending off data for analysis
entailment
def keywords(text, cloud=None, batch=False, api_key=None, version=2, batch_size=None, **kwargs):
    """
    Given input text, returns a series of keywords and associated
    scores.

    Example usage:

    .. code-block:: python

       >>> import indicoio
       >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.'
       >>> keywords = indicoio.keywords(text, top_n=3)

    :param text: The text to be analyzed.
    :type text: str or unicode
    :rtype: Dictionary of feature score pairs
    """
    # The version-2 keywords model only handles English; any other
    # language falls back to version 1.
    if kwargs.get("language", "english") != "english":
        version = 1

    params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(text, cloud=cloud, api="keywords", url_params=params,
                       batch_size=batch_size, **kwargs)
Given input text, returns series of keywords and associated scores Example usage: .. code-block:: python >>> import indicoio >>> import numpy as np >>> text = 'Monday: Delightful with mostly sunny skies. Highs in the low 70s.' >>> keywords = indicoio.keywords(text, top_n=3) >>> print "The keywords are: "+str(keywords.keys()) u'The keywords are ['delightful', 'highs', 'skies'] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of feature score pairs
entailment
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """
    Given input text, returns the author's likelihood of being 16
    different personality types in a dict.

    Example usage:

    .. code-block:: python

       >>> text = "I love going out with my friends"
       >>> entities = indicoio.personas(text)
       {'architect': 0.2191890478134155, 'logician': 0.0158474326133728,
        'commander': 0.07654544115066528 ...}

    :param text: The text to be analyzed.
    :type text: str or unicode
    :rtype: dict mapping persona name to a float between 0 and 1.
    """
    # Personas is served by the personality api with the persona flag set.
    kwargs['persona'] = True
    params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(text, cloud=cloud, api="personality", url_params=params, **kwargs)
Given input text, returns the authors likelihood of being 16 different personality types in a dict. Example usage: .. code-block:: python >>> text = "I love going out with my friends" >>> entities = indicoio.personas(text) {'architect': 0.2191890478134155, 'logician': 0.0158474326133728, 'commander': 0.07654544115066528 ...} :param text: The text to be analyzed. :type text: str or unicode :rtype: The authors 'Extraversion', 'Conscientiousness', 'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
entailment
def pdf_extraction(pdf, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ Given a pdf, returns the text and metadata associated with the given pdf. PDFs may be provided as base64 encoded data or as a filepath. Base64 image data and formatted table is optionally returned by setting `images=True` or `tables=True`. Example usage: .. code-block:: python >>> from indicoio import pdf_extraction >>> results = pdf_extraction(pdf_file) >>> results.keys() ['text', 'metadata'] :param pdf: The pdf to be analyzed. :type pdf: str or list of strs :rtype: dict or list of dicts """ pdf = pdf_preprocess(pdf, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version} results = api_handler(pdf, cloud=cloud, api="pdfextraction", url_params=url_params, **kwargs) if batch: for result in results: result["images"] = postprocess_images(result.get("images", [])) else: results['images'] = postprocess_images(results.get("images", [])) return results
Given a pdf, returns the text and metadata associated with the given pdf. PDFs may be provided as base64 encoded data or as a filepath. Base64 image data and formatted table is optionally returned by setting `images=True` or `tables=True`. Example usage: .. code-block:: python >>> from indicoio import pdf_extraction >>> results = pdf_extraction(pdf_file) >>> results.keys() ['text', 'metadata'] :param pdf: The pdf to be analyzed. :type pdf: str or list of strs :rtype: dict or list of dicts
entailment
def get_tokenizer(fhp # type: Optional[field_formats.FieldHashingProperties] ): # type: (...) -> Callable[[Text, Optional[Text]], Iterable[Text]] """ Get tokeniser function from the hash settings. This function takes a FieldHashingProperties object. It returns a function that takes a string and tokenises based on those properties. """ def dummy(word, ignore=None): # type: (Text, Optional[Text]) -> Iterable[Text] """ Null tokenizer returns empty Iterable. FieldSpec Ignore has hashing_properties = None and get_tokenizer has to return something for this case, even though it's never called. An alternative would be to use an Optional[Callable]]. :param word: not used :param ignore: not used :return: empty Iterable """ return ('' for i in range(0)) if not fhp: return dummy n = fhp.ngram if n < 0: raise ValueError('`n` in `n`-gram must be non-negative.') positional = fhp.positional def tok(word, ignore=None): # type: (Text, Optional[Text]) -> Iterable[Text] """ Produce `n`-grams of `word`. :param word: The string to tokenize. :param ignore: The substring whose occurrences we remove from `word` before tokenization. :return: Tuple of n-gram strings. """ if ignore is not None: word = word.replace(ignore, '') if n > 1: word = ' {} '.format(word) if positional: # These are 1-indexed. return ('{} {}'.format(i + 1, word[i:i + n]) for i in range(len(word) - n + 1)) else: return (word[i:i + n] for i in range(len(word) - n + 1)) return tok
Get tokeniser function from the hash settings. This function takes a FieldHashingProperties object. It returns a function that takes a string and tokenises based on those properties.
entailment
def pdf_preprocess(pdf, batch=False): """ Load pdfs from local filepath if not already b64 encoded """ if batch: return [pdf_preprocess(doc, batch=False) for doc in pdf] if os.path.isfile(pdf): # a filepath is provided, read and encode return b64encode(open(pdf, 'rb').read()) else: # assume pdf is already b64 encoded return pdf
Load pdfs from local filepath if not already b64 encoded
entailment
def people(text, cloud=None, batch=None, api_key=None, version=2, **kwargs): """ Given input text, returns references to specific persons found in the text Example usage: .. code-block:: python >>> text = "London Underground's boss Mike Brown warned that the strike ..." >>> entities = indicoio.people(text) [ { u'text': "Mike Brown", u'confidence': 0.09470917284488678, u'position': [26, 36] }, ... ] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of language probability pairs """ url_params = {"batch": batch, "api_key": api_key, "version": version} return api_handler(text, cloud=cloud, api="people", url_params=url_params, **kwargs)
Given input text, returns references to specific persons found in the text Example usage: .. code-block:: python >>> text = "London Underground's boss Mike Brown warned that the strike ..." >>> entities = indicoio.people(text) [ { u'text': "Mike Brown", u'confidence': 0.09470917284488678, u'position': [26, 36] }, ... ] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of language probability pairs
entailment
def update(self, x # type: Sequence[Union[int, float]] ): # type: (...) -> None """ updates the statistics with the given list of numbers It uses an online algorithm which uses compensated summation to reduce numerical errors. See https://angelacorasaniti.wordpress.com/2015/05/06/hw2-mean-and-variance-of-data-stream/ for details. :param x: list of numbers :return: nothing """ if any(math.isnan(float(i)) or math.isinf(float(i)) for i in x): raise ValueError('input contains non-finite numbers like "nan" or "+/- inf"') t = sum(x) m = float(len(x)) norm_t = t / m S = sum((xi - norm_t) ** 2 for xi in x) if self.n == 0: self.S = self.S + S else: self.S = self.S + S + self.n / (m * (m + self.n)) * (m / self.n * self.t - t) ** 2 self.t = self.t + t self.n = self.n + len(x)
updates the statistics with the given list of numbers It uses an online algorithm which uses compensated summation to reduce numerical errors. See https://angelacorasaniti.wordpress.com/2015/05/06/hw2-mean-and-variance-of-data-stream/ for details. :param x: list of numbers :return: nothing
entailment
def hash_and_serialize_chunk(chunk_pii_data, # type: Sequence[Sequence[str]] keys, # type: Sequence[Sequence[bytes]] schema # type: Schema ): # type: (...) -> Tuple[List[str], Sequence[int]] """ Generate Bloom filters (ie hash) from chunks of PII then serialize the generated Bloom filters. It also computes and outputs the Hamming weight (or popcount) -- the number of bits set to one -- of the generated Bloom filters. :param chunk_pii_data: An iterable of indexable records. :param keys: A tuple of two lists of secret keys used in the HMAC. :param Schema schema: Schema specifying the entry formats and hashing settings. :return: A list of serialized Bloom filters and a list of corresponding popcounts """ clk_data = [] clk_popcounts = [] for clk in stream_bloom_filters(chunk_pii_data, keys, schema): clk_data.append(serialize_bitarray(clk[0]).strip()) clk_popcounts.append(clk[2]) return clk_data, clk_popcounts
Generate Bloom filters (ie hash) from chunks of PII then serialize the generated Bloom filters. It also computes and outputs the Hamming weight (or popcount) -- the number of bits set to one -- of the generated Bloom filters. :param chunk_pii_data: An iterable of indexable records. :param keys: A tuple of two lists of secret keys used in the HMAC. :param Schema schema: Schema specifying the entry formats and hashing settings. :return: A list of serialized Bloom filters and a list of corresponding popcounts
entailment
def generate_clk_from_csv(input_f, # type: TextIO keys, # type: Tuple[AnyStr, AnyStr] schema, # type: Schema validate=True, # type: bool header=True, # type: Union[bool, AnyStr] progress_bar=True # type: bool ): # type: (...) -> List[str] """ Generate Bloom filters from CSV file, then serialise them. This function also computes and outputs the Hamming weight (a.k.a popcount -- the number of bits set to high) of the generated Bloom filters. :param input_f: A file-like object of csv data to hash. :param keys: A tuple of two lists of secret keys. :param schema: Schema specifying the record formats and hashing settings. :param validate: Set to `False` to disable validation of data against the schema. Note that this will silence warnings whose aim is to keep the hashes consistent between data sources; this may affect linkage accuracy. :param header: Set to `False` if the CSV file does not have a header. Set to `'ignore'` if the CSV file does have a header but it should not be checked against the schema. :param bool progress_bar: Set to `False` to disable the progress bar. :return: A list of serialized Bloom filters and a list of corresponding popcounts. """ if header not in {False, True, 'ignore'}: raise ValueError("header must be False, True or 'ignore' but is {}." 
.format(header)) log.info("Hashing data") # Read from CSV file reader = unicode_reader(input_f) if header: column_names = next(reader) if header != 'ignore': validate_header(schema.fields, column_names) start_time = time.time() # Read the lines in CSV file and add it to PII pii_data = [] for line in reader: pii_data.append(tuple(element.strip() for element in line)) validate_row_lengths(schema.fields, pii_data) if progress_bar: stats = OnlineMeanVariance() with tqdm(desc="generating CLKs", total=len(pii_data), unit='clk', unit_scale=True, postfix={'mean': stats.mean(), 'std': stats.std()}) as pbar: def callback(tics, clk_stats): stats.update(clk_stats) pbar.set_postfix(mean=stats.mean(), std=stats.std(), refresh=False) pbar.update(tics) results = generate_clks(pii_data, schema, keys, validate=validate, callback=callback) else: results = generate_clks(pii_data, schema, keys, validate=validate) log.info("Hashing took {:.2f} seconds".format(time.time() - start_time)) return results
Generate Bloom filters from CSV file, then serialise them. This function also computes and outputs the Hamming weight (a.k.a popcount -- the number of bits set to high) of the generated Bloom filters. :param input_f: A file-like object of csv data to hash. :param keys: A tuple of two lists of secret keys. :param schema: Schema specifying the record formats and hashing settings. :param validate: Set to `False` to disable validation of data against the schema. Note that this will silence warnings whose aim is to keep the hashes consistent between data sources; this may affect linkage accuracy. :param header: Set to `False` if the CSV file does not have a header. Set to `'ignore'` if the CSV file does have a header but it should not be checked against the schema. :param bool progress_bar: Set to `False` to disable the progress bar. :return: A list of serialized Bloom filters and a list of corresponding popcounts.
entailment
def chunks(seq, chunk_size): # type: (Sequence[T], int) -> Iterable[Sequence[T]] """ Split seq into chunk_size-sized chunks. :param seq: A sequence to chunk. :param chunk_size: The size of chunk. """ return (seq[i:i + chunk_size] for i in range(0, len(seq), chunk_size))
Split seq into chunk_size-sized chunks. :param seq: A sequence to chunk. :param chunk_size: The size of chunk.
entailment
def load_csv_data(resource_name): # type: (str) -> List[str] """ Loads first column of specified CSV file from package data. """ data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name)) if data_bytes is None: raise ValueError("No data resource found with name {}".format(resource_name)) else: data = data_bytes.decode('utf8') reader = csv.reader(data.splitlines()) next(reader, None) # skip the headers return [row[0] for row in reader]
Loads first column of specified CSV file from package data.
entailment
def save_csv(data, # type: Iterable[Tuple[Union[str, int], ...]] headers, # type: Iterable[str] file # type: TextIO ): # type: (...) -> None """ Output generated data to file as CSV with header. :param data: An iterable of tuples containing raw data. :param headers: Iterable of feature names :param file: A writeable stream in which to write the CSV """ print(','.join(headers), file=file) writer = csv.writer(file) writer.writerows(data)
Output generated data to file as CSV with header. :param data: An iterable of tuples containing raw data. :param headers: Iterable of feature names :param file: A writeable stream in which to write the CSV
entailment
def random_date(start, end): # type: (datetime, datetime) -> datetime """ Generate a random datetime between two datetime objects. :param start: datetime of start :param end: datetime of end :return: random datetime between start and end """ delta = end - start int_delta = (delta.days * 24 * 60 * 60) + delta.seconds random_second = random.randrange(int_delta) return start + timedelta(seconds=random_second)
Generate a random datetime between two datetime objects. :param start: datetime of start :param end: datetime of end :return: random datetime between start and end
entailment
def generate_random_person(self, n): # type: (int) -> Iterable[Tuple[str, str, str, str]] """ Generator that yields details on a person with plausible name, sex and age. :yields: Generated data for one person tuple - (id: int, name: str('First Last'), birthdate: str('DD/MM/YYYY'), sex: str('M' | 'F') ) """ assert self.all_male_first_names is not None assert self.all_female_first_names is not None assert self.all_last_names is not None for i in range(n): sex = 'M' if random.random() > 0.5 else 'F' dob = random_date(self.earliest_birthday, self.latest_birthday).strftime("%Y/%m/%d") first_name = random.choice(self.all_male_first_names) if sex == 'M' else random.choice( self.all_female_first_names) last_name = random.choice(self.all_last_names) yield ( str(i), first_name + ' ' + last_name, dob, sex )
Generator that yields details on a person with plausible name, sex and age. :yields: Generated data for one person tuple - (id: int, name: str('First Last'), birthdate: str('DD/MM/YYYY'), sex: str('M' | 'F') )
entailment
def load_names(self): # type: () -> None """ Loads a name database from package data Uses data files sourced from http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/ """ self.all_male_first_names = load_csv_data('male-first-names.csv') self.all_female_first_names = load_csv_data('female-first-names.csv') self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv')
Loads a name database from package data Uses data files sourced from http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/
entailment
def generate_subsets(self, sz, overlap=0.8, subsets=2): # type: (int, float, int) -> Tuple[List, ...] """ Return random subsets with nonempty intersection. The random subsets are of specified size. If an element is common to two subsets, then it is common to all subsets. This overlap is controlled by a parameter. :param sz: size of subsets to generate :param overlap: size of the intersection, as fraction of the subset length :param subsets: number of subsets to generate :raises ValueError: if there aren't sufficiently many names in the list to satisfy the request; more precisely, raises if (1 - subsets) * floor(overlap * sz) + subsets * sz > len(self.names). :return: tuple of subsets """ overlap_sz = int(math.floor(overlap * sz)) unique_sz = sz - overlap_sz # Unique names per subset total_unique_sz = unique_sz * subsets # Uniques in all subsets total_sz = overlap_sz + total_unique_sz if total_sz > len(self.names): msg = 'insufficient names for requested size and overlap' raise ValueError(msg) sset = random.sample(self.names, total_sz) # Overlapping subset, pool of unique names sset_overlap, sset_unique = sset[:overlap_sz], sset[overlap_sz:] assert len(sset_unique) == subsets * unique_sz # Split pool of unique names into `subsets` chunks uniques = (sset_unique[p * unique_sz: (p + 1) * unique_sz] for p in range(subsets)) return tuple(sset_overlap + u for u in uniques)
Return random subsets with nonempty intersection. The random subsets are of specified size. If an element is common to two subsets, then it is common to all subsets. This overlap is controlled by a parameter. :param sz: size of subsets to generate :param overlap: size of the intersection, as fraction of the subset length :param subsets: number of subsets to generate :raises ValueError: if there aren't sufficiently many names in the list to satisfy the request; more precisely, raises if (1 - subsets) * floor(overlap * sz) + subsets * sz > len(self.names). :return: tuple of subsets
entailment
def _unpack_list(example): """ Input data format standardization """ try: x = example[0] y = example[1] meta = None return x, y, meta except IndexError: raise IndicoError( "Invalid input data. Please ensure input data is " "formatted as a list of `[data, target]` pairs." )
Input data format standardization
entailment
def _unpack_dict(example): """ Input data format standardization """ try: x = example['data'] y = example['target'] meta = example.get('metadata', {}) return x, y, meta except KeyError: raise IndicoError( "Invalid input data. Please ensure input data is " "formatted as a list of dicts with `data` and `target` keys. " "A `metadata` key may optionally be included." )
Input data format standardization
entailment
def _unpack_data(data): """ Break Xs, Ys, and metadata out into separate lists for data preprocessing. Run basic data validation. """ xs = [None] * len(data) ys = [None] * len(data) metadata = [None] * len(data) for idx, example in enumerate(data): if isinstance(example, (list, tuple)): xs[idx], ys[idx], metadata[idx] = _unpack_list(example) if isinstance(example, dict): xs[idx], ys[idx], metadata[idx] = _unpack_dict(example) return xs, ys, metadata
Break Xs, Ys, and metadata out into separate lists for data preprocessing. Run basic data validation.
entailment
def _pack_data(X, Y, metadata): """ After modifying / preprocessing inputs, reformat the data in preparation for JSON serialization """ if not any(metadata): # legacy list of list format is acceptable return list(zip(X, Y)) else: # newer dictionary-based format is required in order to save metadata return [ { 'data': x, 'target': y, 'metadata': meta } for x, y, meta in zip(X, Y, metadata) ]
After modifying / preprocessing inputs, reformat the data in preparation for JSON serialization
entailment
def visualize_explanation(explanation, label=None): """ Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence """ if not sys.version_info[:2] >= (3, 5): raise IndicoError("Python >= 3.5+ is required for explanation visualization") try: from colr import Colr as C except ImportError: raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.") cursor = 0 text = explanation['text'] for token in explanation.get('token_predictions'): try: class_confidence = token.get('prediction')[label] except KeyError: raise IndicoError("Invalid label: {}".format(label)) if class_confidence > 0.5: fg_color = (255, 255, 255) else: fg_color = (0, 0, 0) rg_value = 255 - int(class_confidence * 255) token_end = token.get('token').get('end') token_text = text[cursor:token_end] cursor = token_end sys.stdout.write( str(C().b_rgb( rg_value, rg_value, 255 ).rgb( fg_color[0], fg_color[1], fg_color[2], token_text )) ) sys.stdout.write("\n") sys.stdout.flush()
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
entailment
def collections(cloud=None, api_key=None, version=None, **kwargs): """ This is a status report endpoint. It is used to get the status on all of the collections currently trained, as well as some basic statistics on their accuracies. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. Example usage: .. code-block:: python >>> collections = indicoio.collections() { "tag_predictor": { "input_type": "text", "model_type": "classification", "number_of_samples": 224 'status': 'ready' }, "popularity_predictor": { "input_type": "text", "model_type": "regression", "number_of_samples": 231 'status': 'training' } } } """ url_params = {"batch": False, "api_key": api_key, "version": version, "method": "collections"} return api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is a status report endpoint. It is used to get the status on all of the collections currently trained, as well as some basic statistics on their accuracies. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. Example usage: .. code-block:: python >>> collections = indicoio.collections() { "tag_predictor": { "input_type": "text", "model_type": "classification", "number_of_samples": 224 'status': 'ready' }, "popularity_predictor": { "input_type": "text", "model_type": "regression", "number_of_samples": 231 'status': 'training' } } }
entailment
def vectorize(data, cloud=None, api_key=None, version=None, **kwargs): """ Support for raw features from the custom collections API """ batch = detect_batch(data) data = data_preprocess(data, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version, "method": "vectorize"} return api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
Support for raw features from the custom collections API
entailment
def _api_handler(self, *args, **kwargs): """ Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword argument to the JSON body """ keyword_arguments = {} keyword_arguments.update(self.keywords) keyword_arguments.update(kwargs) return api_handler(*args, **keyword_arguments)
Thin wrapper around api_handler from `indicoio.utils.api` to add in stored keyword argument to the JSON body
entailment
def add_data(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ This is the basic training endpoint. Given a piece of text and a score, either categorical or numeric, this endpoint will train a new model given the additional piece of information. Inputs data - List: The text and collection/score associated with it. The length of the text (string) should ideally be longer than 100 characters and contain at least 10 words. While the API will support shorter text, you will find that the accuracy of results improves significantly with longer examples. For an additional fee, this end point will support image input as well. The collection/score can be a string or float. This is the variable associated with the text. This can either be categorical (the tag associated with the post) or numeric (the number of Facebook shares the post received). However it can only be one or another within a given label. domain (optional) - String: This is an identifier that helps determine the appropriate techniques for indico to use behind the scenes to train your model. One of {"standard", "topics"}. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. 
""" if not len(data): raise IndicoError("No input data provided.") batch = isinstance(data[0], (list, tuple, dict)) # standarize format for preprocessing batch of examples if not batch: data = [data] X, Y, metadata = _unpack_data(data) X = data_preprocess(X, batch=True) data = _pack_data(X, Y, metadata) # if a single example was passed in, unpack if not batch: data = data[0] url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "add_data"} return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is the basic training endpoint. Given a piece of text and a score, either categorical or numeric, this endpoint will train a new model given the additional piece of information. Inputs data - List: The text and collection/score associated with it. The length of the text (string) should ideally be longer than 100 characters and contain at least 10 words. While the API will support shorter text, you will find that the accuracy of results improves significantly with longer examples. For an additional fee, this end point will support image input as well. The collection/score can be a string or float. This is the variable associated with the text. This can either be categorical (the tag associated with the post) or numeric (the number of Facebook shares the post received). However it can only be one or another within a given label. domain (optional) - String: This is an identifier that helps determine the appropriate techniques for indico to use behind the scenes to train your model. One of {"standard", "topics"}. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
entailment
def train(self, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ This is the basic training endpoint. Given an existing dataset this endpoint will train a model. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "train"} return self._api_handler(self.keywords['collection'], cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is the basic training endpoint. Given an existing dataset this endpoint will train a model. Inputs api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
entailment
def info(self, cloud=None, api_key=None, version=None, **kwargs): """ Return the current state of the model associated with a given collection """ url_params = {"batch": False, "api_key": api_key, "version": version, "method": "info"} return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
Return the current state of the model associated with a given collection
entailment
def remove_example(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ This is an API made to remove a single instance of training data. This is useful in cases where a single instance of content has been modified, but the remaining examples remain valid. For example, if a piece of content has been retagged. Inputs data - String: The exact text you wish to remove from the given collection. If the string provided does not match a known piece of text then this will fail. Again, this is required if an id is not provided, and vice-versa. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ batch = detect_batch(data) data = data_preprocess(data, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': 'remove_example'} return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is an API made to remove a single instance of training data. This is useful in cases where a single instance of content has been modified, but the remaining examples remain valid. For example, if a piece of content has been retagged. Inputs data - String: The exact text you wish to remove from the given collection. If the string provided does not match a known piece of text then this will fail. Again, this is required if an id is not provided, and vice-versa. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
entailment
def wait(self, interval=1, **kwargs): """ Block until the collection's model is completed training """ while True: status = self.info(**kwargs).get('status') if status == "ready": break if status != "training": raise IndicoError("Collection status failed with: {0}".format(status)) time.sleep(interval)
Block until the collection's model is completed training
entailment
def register(self, make_public=False, cloud=None, api_key=None, version=None, **kwargs): """ This API endpoint allows you to register you collection in order to share read or write access to the collection with another user. Inputs: api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. make_public (optional) - Boolean: When True, this option gives all indico users read access to your model. """ kwargs['make_public'] = make_public url_params = {"batch": False, "api_key": api_key, "version": version, "method": "register"} return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This API endpoint allows you to register you collection in order to share read or write access to the collection with another user. Inputs: api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. make_public (optional) - Boolean: When True, this option gives all indico users read access to your model.
entailment
def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs): """ This API endpoint allows you to authorize another user to access your model in a read or write capacity. Before calling authorize, you must first make sure your model has been registered. Inputs: email - String: The email of the user you would like to share access with. permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`. Users with `write` permissions can add new input examples and train models. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ kwargs['permission_type'] = permission_type kwargs['email'] = email url_params = {"batch": False, "api_key": api_key, "version": version, "method": "authorize"} return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This API endpoint allows you to authorize another user to access your model in a read or write capacity. Before calling authorize, you must first make sure your model has been registered. Inputs: email - String: The email of the user you would like to share access with. permission_type (optional) - String: One of ['read', 'write']. Users with read permissions can only call `predict`. Users with `write` permissions can add new input examples and train models. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
entailment
def deauthorize(self, email, cloud=None, api_key=None, version=None, **kwargs): """ This API endpoint allows you to remove another user's access to your collection. Inputs: email - String: The email of the user you would like to share access with. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ kwargs['email'] = email url_params = {"batch": False, "api_key": api_key, "version": version, "method": "deauthorize"} return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This API endpoint allows you to remove another user's access to your collection. Inputs: email - String: The email of the user you would like to share access with. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
entailment
def rename(self, name, cloud=None, api_key=None, version=None, **kwargs): """ If you'd like to change the name you use to access a given collection, you can call the rename endpoint. This is especially useful if the name you use for your model is not available for registration. Inputs: name - String: The new name used to access your model. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. """ kwargs['name'] = name url_params = {"batch": False, "api_key": api_key, "version": version, "method": "rename"} result = self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs) self.keywords['collection'] = name return result
If you'd like to change the name you use to access a given collection, you can call the rename endpoint. This is especially useful if the name you use for your model is not available for registration. Inputs: name - String: The new name used to access your model. api_key (optional) - String: Your API key, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination. cloud (optional) - String: Your private cloud domain, required only if the key has not been declared elsewhere. This allows the API to recognize a request as yours and automatically route it to the appropriate destination.
entailment
def convert_to_py_error(error_message): """ Raise specific exceptions for ease of error handling """ message = error_message.lower() for err_msg, err_type in ERR_MSGS: if err_msg in message: return err_type(error_message) else: return IndicoError(error_message)
Raise specific exceptions for ease of error handling
entailment
def facial_localization(image, cloud=None, batch=False, api_key=None, version=None, **kwargs): """ Given an image, returns a list of faces found within the image. For each face, we return a dictionary containing the upper left corner and lower right corner. If crop is True, the cropped face is included in the dictionary. Input should be in a numpy ndarray or a filename. Example usage: .. code-block:: python >>> from indicoio import facial_localization >>> import numpy as np >>> img = np.zeros([image of a face]) >>> faces = facial_localization(img) >>> len(faces) 1 :param image: The image to be analyzed. :type image: filepath or ndarray :rtype: List of faces (dict) found. """ image = data_preprocess(image, batch=batch) url_params = {"batch": batch, "api_key": api_key, "version": version} return api_handler(image, cloud=cloud, api="faciallocalization", url_params=url_params, **kwargs)
Given an image, returns a list of faces found within the image. For each face, we return a dictionary containing the upper left corner and lower right corner. If crop is True, the cropped face is included in the dictionary. Input should be in a numpy ndarray or a filename. Example usage: .. code-block:: python >>> from indicoio import facial_localization >>> import numpy as np >>> img = np.zeros([image of a face]) >>> faces = facial_localization(img) >>> len(faces) 1 :param image: The image to be analyzed. :type image: filepath or ndarray :rtype: List of faces (dict) found.
entailment
def summarization(text, cloud=None, batch=False, api_key=None, version=1, **kwargs): """ Given input text, returns a `top_n` length sentence summary. Example usage: .. code-block:: python >>> from indicoio import summarization >>> summary = summarization("https://en.wikipedia.org/wiki/Yahoo!_data_breach") >>> summary ["This information was disclosed two years later on September 22, 2016.", "[1] The data breach is one of the largest in the history of the Internet.", "Specific details of material taken include names, email addresses, telephone numbers, dates of birth, and encrypted passwords.", "[2]\\n\\nEvents [ edit ]\\n\\nYahoo alleged in its statement that the breach was carried out by \\"state-sponsored\\" hackers,[3] but the organization did not name any country.", "We had our own use for it and other buyers did as well."] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of party probability pairs """ url_params = {"batch": batch, "api_key": api_key, "version": version} return api_handler(text, cloud=cloud, api="summarization", url_params=url_params, **kwargs)
Given input text, returns a `top_n` length sentence summary. Example usage: .. code-block:: python >>> from indicoio import summarization >>> summary = summarization("https://en.wikipedia.org/wiki/Yahoo!_data_breach") >>> summary ["This information was disclosed two years later on September 22, 2016.", "[1] The data breach is one of the largest in the history of the Internet.", "Specific details of material taken include names, email addresses, telephone numbers, dates of birth, and encrypted passwords.", "[2]\\n\\nEvents [ edit ]\\n\\nYahoo alleged in its statement that the breach was carried out by \\"state-sponsored\\" hackers,[3] but the organization did not name any country.", "We had our own use for it and other buyers did as well."] :param text: The text to be analyzed. :type text: str or unicode :rtype: Dictionary of party probability pairs
entailment
def hkdf(master_secret, # type: bytes num_keys, # type: int hash_algo='SHA256', # type: str salt=None, # type: Optional[bytes] info=None, # type: Optional[bytes] key_size=DEFAULT_KEY_SIZE # type: int ): # type: (...) -> Tuple[bytes, ...] """ Executes the HKDF key derivation function as described in rfc5869 to derive `num_keys` keys of size `key_size` from the master_secret. :param master_secret: input keying material :param num_keys: the number of keys the kdf should produce :param hash_algo: The hash function used by HKDF for the internal HMAC calls. The choice of hash function defines the maximum length of the output key material. Output bytes <= 255 * hash digest size (in bytes). :param salt: HKDF is defined to operate with and without random salt. This is done to accommodate applications where a salt value is not available. We stress, however, that the use of salt adds significantly to themstrength of HKDF, ensuring independence between different uses of the hash function, supporting "source-independent" extraction, and strengthening the analytical results that back the HKDF design. Random salt differs fundamentally from the initial keying material in two ways: it is non-secret and can be re-used. Ideally, the salt value is a random (or pseudorandom) string of the length HashLen. Yet, even a salt value of less quality (shorter in size or with limited entropy) may still make a significant contribution to the security of the output keying material. :param info: While the 'info' value is optional in the definition of HKDF, it is often of great importance in applications. Its main objective is to bind the derived key material to application- and context-specific information. For example, 'info' may contain a protocol number, algorithm identifiers, user identities, etc. In particular, it may prevent the derivation of the same keying material for different contexts (when the same input key material (IKM) is used in such different contexts). 
It may also accommodate additional inputs to the key expansion part, if so desired (e.g., an application may want to bind the key material to its length L, thus making L part of the 'info' field). There is one technical requirement from 'info': it should be independent of the input key material value IKM. :param key_size: the size of the produced keys :return: Derived keys """ try: hash_function = _HASH_FUNCTIONS[hash_algo] except KeyError: msg = "unsupported hash function '{}'".format(hash_algo) raise_from(ValueError(msg), None) hkdf = HKDF(algorithm=hash_function(), length=num_keys * key_size, salt=salt, info=info, backend=default_backend()) # hkdf.derive returns a block of num_keys * key_size bytes which we # divide up into num_keys chunks, each of size key_size keybytes = hkdf.derive(master_secret) keys = tuple(keybytes[i * key_size:(i + 1) * key_size] for i in range(num_keys)) return keys
Executes the HKDF key derivation function as described in rfc5869 to derive `num_keys` keys of size `key_size` from the master_secret. :param master_secret: input keying material :param num_keys: the number of keys the kdf should produce :param hash_algo: The hash function used by HKDF for the internal HMAC calls. The choice of hash function defines the maximum length of the output key material. Output bytes <= 255 * hash digest size (in bytes). :param salt: HKDF is defined to operate with and without random salt. This is done to accommodate applications where a salt value is not available. We stress, however, that the use of salt adds significantly to themstrength of HKDF, ensuring independence between different uses of the hash function, supporting "source-independent" extraction, and strengthening the analytical results that back the HKDF design. Random salt differs fundamentally from the initial keying material in two ways: it is non-secret and can be re-used. Ideally, the salt value is a random (or pseudorandom) string of the length HashLen. Yet, even a salt value of less quality (shorter in size or with limited entropy) may still make a significant contribution to the security of the output keying material. :param info: While the 'info' value is optional in the definition of HKDF, it is often of great importance in applications. Its main objective is to bind the derived key material to application- and context-specific information. For example, 'info' may contain a protocol number, algorithm identifiers, user identities, etc. In particular, it may prevent the derivation of the same keying material for different contexts (when the same input key material (IKM) is used in such different contexts). It may also accommodate additional inputs to the key expansion part, if so desired (e.g., an application may want to bind the key material to its length L, thus making L part of the 'info' field). 
There is one technical requirement from 'info': it should be independent of the input key material value IKM. :param key_size: the size of the produced keys :return: Derived keys
entailment
def generate_key_lists(master_secrets, # type: Sequence[Union[bytes, str]] num_identifier, # type: int key_size=DEFAULT_KEY_SIZE, # type: int salt=None, # type: Optional[bytes] info=None, # type: Optional[bytes] kdf='HKDF', # type: str hash_algo='SHA256' # type: str ): # type: (...) -> Tuple[Tuple[bytes, ...], ...] """ Generates a derived key for each identifier for each master secret using a key derivation function (KDF). The only supported key derivation function for now is 'HKDF'. The previous key usage can be reproduced by setting kdf to 'legacy'. This is highly discouraged, as this strategy will map the same n-grams in different identifier to the same bits in the Bloom filter and thus does not lead to good results. :param master_secrets: a list of master secrets (either as bytes or strings) :param num_identifier: the number of identifiers :param key_size: the size of the derived keys :param salt: salt for the KDF as bytes :param info: optional context and application specific information as bytes :param kdf: the key derivation function algorithm to use :param hash_algo: the hashing algorithm to use (ignored if `kdf` is not 'HKDF') :return: The derived keys. First dimension is of size num_identifier, second dimension is the same as master_secrets. A key is represented as bytes. """ keys = [] try: for key in master_secrets: if isinstance(key, bytes): keys.append(key) else: keys.append(key.encode('UTF-8')) except AttributeError: raise TypeError("provided 'master_secrets' have to be either of type bytes or strings.") if kdf == 'HKDF': key_lists = [hkdf(key, num_identifier, hash_algo=hash_algo, salt=salt, info=info, key_size=key_size) for key in keys] # regroup such that we get a tuple of keys for each identifier return tuple(zip(*key_lists)) if kdf == 'legacy': return tuple(tuple(keys) for _ in range(num_identifier)) raise ValueError('kdf: "{}" is not supported.'.format(kdf))
Generates a derived key for each identifier for each master secret using a key derivation function (KDF). The only supported key derivation function for now is 'HKDF'. The previous key usage can be reproduced by setting kdf to 'legacy'. This is highly discouraged, as this strategy will map the same n-grams in different identifier to the same bits in the Bloom filter and thus does not lead to good results. :param master_secrets: a list of master secrets (either as bytes or strings) :param num_identifier: the number of identifiers :param key_size: the size of the derived keys :param salt: salt for the KDF as bytes :param info: optional context and application specific information as bytes :param kdf: the key derivation function algorithm to use :param hash_algo: the hashing algorithm to use (ignored if `kdf` is not 'HKDF') :return: The derived keys. First dimension is of size num_identifier, second dimension is the same as master_secrets. A key is represented as bytes.
entailment
def validate_row_lengths(fields, # type: Sequence[FieldSpec] data # type: Sequence[Sequence[str]] ): # type: (...) -> None """ Validate the `data` row lengths according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param data: The rows to check. :raises FormatError: When the number of entries in a row does not match expectation. """ for i, row in enumerate(data): if len(fields) != len(row): msg = 'Row {} has {} entries when {} are expected.'.format( i, len(row), len(fields)) raise FormatError(msg)
Validate the `data` row lengths according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param data: The rows to check. :raises FormatError: When the number of entries in a row does not match expectation.
entailment
def validate_entries(fields, # type: Sequence[FieldSpec] data # type: Sequence[Sequence[str]] ): # type: (...) -> None """ Validate the `data` entries according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param data: The data to validate. :raises EntryError: When an entry is not valid according to its :class:`FieldSpec`. """ validators = [f.validate for f in fields] for i, row in enumerate(data): for entry, v in zip(row, validators): try: v(entry) except InvalidEntryError as e: msg = ( 'Invalid entry in row {row_index}, column ' "'{column_name}'. {original_message}" ).format( row_index=i, column_name=cast(FieldSpec, e.field_spec).identifier, original_message=e.args[0]) e_invalid_entry = EntryError(msg) e_invalid_entry.field_spec = e.field_spec e_invalid_entry.row_index = i raise_from(e_invalid_entry, e)
Validate the `data` entries according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param data: The data to validate. :raises EntryError: When an entry is not valid according to its :class:`FieldSpec`.
entailment
def validate_header(fields, # type: Sequence[FieldSpec] column_names # type: Sequence[str] ): # type: (...) -> None """ Validate the `column_names` according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param column_names: A sequence of column identifier. :raises FormatError: When the number of columns or the column identifiers don't match the specification. """ if len(fields) != len(column_names): msg = 'Header has {} columns when {} are expected.'.format( len(column_names), len(fields)) raise FormatError(msg) for f, column in zip(fields, column_names): if f.identifier != column: msg = "Column has identifier '{}' when '{}' is expected.".format( column, f.identifier) raise FormatError(msg)
Validate the `column_names` according to the specification in `fields`. :param fields: The `FieldSpec` objects forming the specification. :param column_names: A sequence of column identifier. :raises FormatError: When the number of columns or the column identifiers don't match the specification.
entailment
def config(env=DEFAULT_ENV, default=None): """Returns a dictionary with EMAIL_* settings from EMAIL_URL.""" conf = {} s = os.environ.get(env, default) if s: conf = parse(s) return conf
Returns a dictionary with EMAIL_* settings from EMAIL_URL.
entailment
def parse(url): """Parses an email URL.""" conf = {} url = urlparse.urlparse(url) qs = urlparse.parse_qs(url.query) # Remove query strings path = url.path[1:] path = path.split('?', 2)[0] # Update with environment configuration conf.update({ 'EMAIL_FILE_PATH': path, 'EMAIL_HOST_USER': unquote(url.username), 'EMAIL_HOST_PASSWORD': unquote(url.password), 'EMAIL_HOST': url.hostname, 'EMAIL_PORT': url.port, 'EMAIL_USE_SSL': False, 'EMAIL_USE_TLS': False, }) if url.scheme in SCHEMES: conf['EMAIL_BACKEND'] = SCHEMES[url.scheme] # Set defaults for `smtp` if url.scheme == 'smtp': if not conf['EMAIL_HOST']: conf['EMAIL_HOST'] = 'localhost' if not conf['EMAIL_PORT']: conf['EMAIL_PORT'] = 25 # Set defaults for `smtps` if url.scheme == 'smtps': warnings.warn( "`smpts` scheme will be deprecated in a future version," " use `submission` instead", UserWarning, ) conf['EMAIL_USE_TLS'] = True # Set defaults for `submission`/`submit` if url.scheme in ('submission', 'submit'): conf['EMAIL_USE_TLS'] = True if not conf['EMAIL_PORT']: conf['EMAIL_PORT'] = 587 # Query args overwrite defaults if 'ssl' in qs and qs['ssl']: if qs['ssl'][0] in TRUTHY: conf['EMAIL_USE_SSL'] = True conf['EMAIL_USE_TLS'] = False elif 'tls' in qs and qs['tls']: if qs['tls'][0] in TRUTHY: conf['EMAIL_USE_SSL'] = False conf['EMAIL_USE_TLS'] = True # From addresses if '_server_email' in qs: conf['SERVER_EMAIL'] = qs['_server_email'][0] if '_default_from_email' in qs: conf['DEFAULT_FROM_EMAIL'] = qs['_default_from_email'][0] return conf
Parses an email URL.
entailment
def to_tf_matrix(expression_matrix, gene_names, tf_names): """ :param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: a list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: a list of transcription factor names. Should be a subset of gene_names. :return: tuple of: 0: A numpy matrix representing the predictor matrix for the regressions. 1: The gene names corresponding to the columns in the predictor matrix. """ tuples = [(index, gene) for index, gene in enumerate(gene_names) if gene in tf_names] tf_indices = [t[0] for t in tuples] tf_matrix_names = [t[1] for t in tuples] return expression_matrix[:, tf_indices], tf_matrix_names
:param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: a list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: a list of transcription factor names. Should be a subset of gene_names. :return: tuple of: 0: A numpy matrix representing the predictor matrix for the regressions. 1: The gene names corresponding to the columns in the predictor matrix.
entailment
def fit_model(regressor_type, regressor_kwargs, tf_matrix, target_gene_expression, early_stop_window_length=EARLY_STOP_WINDOW_LENGTH, seed=DEMON_SEED): """ :param regressor_type: string. Case insensitive. :param regressor_kwargs: a dictionary of key-value pairs that configures the regressor. :param tf_matrix: the predictor matrix (transcription factor matrix) as a numpy array. :param target_gene_expression: the target (y) gene expression to predict in function of the tf_matrix (X). :param early_stop_window_length: window length of the early stopping monitor. :param seed: (optional) random seed for the regressors. :return: a trained regression model. """ regressor_type = regressor_type.upper() assert tf_matrix.shape[0] == len(target_gene_expression) def do_sklearn_regression(): regressor = SKLEARN_REGRESSOR_FACTORY[regressor_type](random_state=seed, **regressor_kwargs) with_early_stopping = is_oob_heuristic_supported(regressor_type, regressor_kwargs) if with_early_stopping: regressor.fit(tf_matrix, target_gene_expression, monitor=EarlyStopMonitor(early_stop_window_length)) else: regressor.fit(tf_matrix, target_gene_expression) return regressor if is_sklearn_regressor(regressor_type): return do_sklearn_regression() # elif is_xgboost_regressor(regressor_type): # raise ValueError('XGB regressor not yet supported') else: raise ValueError('Unsupported regressor type: {0}'.format(regressor_type))
:param regressor_type: string. Case insensitive. :param regressor_kwargs: a dictionary of key-value pairs that configures the regressor. :param tf_matrix: the predictor matrix (transcription factor matrix) as a numpy array. :param target_gene_expression: the target (y) gene expression to predict in function of the tf_matrix (X). :param early_stop_window_length: window length of the early stopping monitor. :param seed: (optional) random seed for the regressors. :return: a trained regression model.
entailment
def to_feature_importances(regressor_type, regressor_kwargs, trained_regressor): """ Motivation: when the out-of-bag improvement heuristic is used, we cancel the effect of normalization by dividing by the number of trees in the regression ensemble by multiplying again by the number of trees used. This enables prioritizing links that were inferred in a regression where lots of :param regressor_type: string. Case insensitive. :param regressor_kwargs: a dictionary of key-value pairs that configures the regressor. :param trained_regressor: the trained model from which to extract the feature importances. :return: the feature importances inferred from the trained model. """ if is_oob_heuristic_supported(regressor_type, regressor_kwargs): n_estimators = len(trained_regressor.estimators_) denormalized_importances = trained_regressor.feature_importances_ * n_estimators return denormalized_importances else: return trained_regressor.feature_importances_
Motivation: when the out-of-bag improvement heuristic is used, we cancel the effect of normalization by dividing by the number of trees in the regression ensemble by multiplying again by the number of trees used. This enables prioritizing links that were inferred in a regression where lots of :param regressor_type: string. Case insensitive. :param regressor_kwargs: a dictionary of key-value pairs that configures the regressor. :param trained_regressor: the trained model from which to extract the feature importances. :return: the feature importances inferred from the trained model.
entailment
def to_meta_df(trained_regressor, target_gene_name): """ :param trained_regressor: the trained model from which to extract the meta information. :param target_gene_name: the name of the target gene. :return: a Pandas DataFrame containing side information about the regression. """ n_estimators = len(trained_regressor.estimators_) return pd.DataFrame({'target': [target_gene_name], 'n_estimators': [n_estimators]})
:param trained_regressor: the trained model from which to extract the meta information. :param target_gene_name: the name of the target gene. :return: a Pandas DataFrame containing side information about the regression.
entailment
def to_links_df(regressor_type, regressor_kwargs, trained_regressor, tf_matrix_gene_names, target_gene_name): """ :param regressor_type: string. Case insensitive. :param regressor_kwargs: dict of key-value pairs that configures the regressor. :param trained_regressor: the trained model from which to extract the feature importances. :param tf_matrix_gene_names: the list of names corresponding to the columns of the tf_matrix used to train the model. :param target_gene_name: the name of the target gene. :return: a Pandas DataFrame['TF', 'target', 'importance'] representing inferred regulatory links and their connection strength. """ def pythonic(): # feature_importances = trained_regressor.feature_importances_ feature_importances = to_feature_importances(regressor_type, regressor_kwargs, trained_regressor) links_df = pd.DataFrame({'TF': tf_matrix_gene_names, 'importance': feature_importances}) links_df['target'] = target_gene_name clean_links_df = links_df[links_df.importance > 0].sort_values(by='importance', ascending=False) return clean_links_df[['TF', 'target', 'importance']] if is_sklearn_regressor(regressor_type): return pythonic() elif is_xgboost_regressor(regressor_type): raise ValueError('XGB regressor not yet supported') else: raise ValueError('Unsupported regressor type: ' + regressor_type)
:param regressor_type: string. Case insensitive. :param regressor_kwargs: dict of key-value pairs that configures the regressor. :param trained_regressor: the trained model from which to extract the feature importances. :param tf_matrix_gene_names: the list of names corresponding to the columns of the tf_matrix used to train the model. :param target_gene_name: the name of the target gene. :return: a Pandas DataFrame['TF', 'target', 'importance'] representing inferred regulatory links and their connection strength.
entailment
def clean(tf_matrix, tf_matrix_gene_names, target_gene_name): """ :param tf_matrix: numpy array. The full transcription factor matrix. :param tf_matrix_gene_names: the full list of transcription factor names, corresponding to the tf_matrix columns. :param target_gene_name: the target gene to remove from the tf_matrix and tf_names. :return: a tuple of (matrix, names) equal to the specified ones minus the target_gene_name if the target happens to be one of the transcription factors. If not, the specified (tf_matrix, tf_names) is returned verbatim. """ if target_gene_name not in tf_matrix_gene_names: clean_tf_matrix = tf_matrix else: clean_tf_matrix = np.delete(tf_matrix, tf_matrix_gene_names.index(target_gene_name), 1) clean_tf_names = [tf for tf in tf_matrix_gene_names if tf != target_gene_name] assert clean_tf_matrix.shape[1] == len(clean_tf_names) # sanity check return clean_tf_matrix, clean_tf_names
:param tf_matrix: numpy array. The full transcription factor matrix. :param tf_matrix_gene_names: the full list of transcription factor names, corresponding to the tf_matrix columns. :param target_gene_name: the target gene to remove from the tf_matrix and tf_names. :return: a tuple of (matrix, names) equal to the specified ones minus the target_gene_name if the target happens to be one of the transcription factors. If not, the specified (tf_matrix, tf_names) is returned verbatim.
entailment
def retry(fn, max_retries=10, warning_msg=None, fallback_result=None): """ Minimalistic retry strategy to compensate for failures probably caused by a thread-safety bug in scikit-learn: * https://github.com/scikit-learn/scikit-learn/issues/2755 * https://github.com/scikit-learn/scikit-learn/issues/7346 :param fn: the function to retry. :param max_retries: the maximum number of retries to attempt. :param warning_msg: a warning message to display when an attempt fails. :param fallback_result: result to return when all attempts fail. :return: Returns the result of fn if one attempt succeeds, else return fallback_result. """ nr_retries = 0 result = fallback_result for attempt in range(max_retries): try: result = fn() except Exception as cause: nr_retries += 1 msg_head = '' if warning_msg is None else repr(warning_msg) + ' ' msg_tail = "Retry ({1}/{2}). Failure caused by {0}.".format(repr(cause), nr_retries, max_retries) logger.warning(msg_head + msg_tail) else: break return result
Minimalistic retry strategy to compensate for failures probably caused by a thread-safety bug in scikit-learn: * https://github.com/scikit-learn/scikit-learn/issues/2755 * https://github.com/scikit-learn/scikit-learn/issues/7346 :param fn: the function to retry. :param max_retries: the maximum number of retries to attempt. :param warning_msg: a warning message to display when an attempt fails. :param fallback_result: result to return when all attempts fail. :return: Returns the result of fn if one attempt succeeds, else return fallback_result.
entailment
def infer_partial_network(regressor_type, regressor_kwargs, tf_matrix, tf_matrix_gene_names, target_gene_name, target_gene_expression, include_meta=False, early_stop_window_length=EARLY_STOP_WINDOW_LENGTH, seed=DEMON_SEED): """ Ties together regressor model training with regulatory links and meta data extraction. :param regressor_type: string. Case insensitive. :param regressor_kwargs: dict of key-value pairs that configures the regressor. :param tf_matrix: numpy matrix. The feature matrix X to use for the regression. :param tf_matrix_gene_names: list of transcription factor names corresponding to the columns of the tf_matrix used to train the regression model. :param target_gene_name: the name of the target gene to infer the regulatory links for. :param target_gene_expression: the expression profile of the target gene. Numpy array. :param include_meta: whether to also return the meta information DataFrame. :param early_stop_window_length: window length of the early stopping monitor. :param seed: (optional) random seed for the regressors. :return: if include_meta == True, return links_df, meta_df link_df: a Pandas DataFrame['TF', 'target', 'importance'] containing inferred regulatory links and their connection strength. meta_df: a Pandas DataFrame['target', 'meta', 'value'] containing meta information regarding the trained regression model. """ def fn(): (clean_tf_matrix, clean_tf_matrix_gene_names) = clean(tf_matrix, tf_matrix_gene_names, target_gene_name) try: trained_regressor = fit_model(regressor_type, regressor_kwargs, clean_tf_matrix, target_gene_expression, early_stop_window_length, seed) except ValueError as e: raise ValueError("Regression for target gene {0} failed. 
Cause {1}.".format(target_gene_name, repr(e))) links_df = to_links_df(regressor_type, regressor_kwargs, trained_regressor, clean_tf_matrix_gene_names, target_gene_name) if include_meta: meta_df = to_meta_df(trained_regressor, target_gene_name) return links_df, meta_df else: return links_df fallback_result = (None, None) if include_meta else None return retry(fn, fallback_result=fallback_result, warning_msg='infer_data failed for target {0}'.format(target_gene_name))
Ties together regressor model training with regulatory links and meta data extraction. :param regressor_type: string. Case insensitive. :param regressor_kwargs: dict of key-value pairs that configures the regressor. :param tf_matrix: numpy matrix. The feature matrix X to use for the regression. :param tf_matrix_gene_names: list of transcription factor names corresponding to the columns of the tf_matrix used to train the regression model. :param target_gene_name: the name of the target gene to infer the regulatory links for. :param target_gene_expression: the expression profile of the target gene. Numpy array. :param include_meta: whether to also return the meta information DataFrame. :param early_stop_window_length: window length of the early stopping monitor. :param seed: (optional) random seed for the regressors. :return: if include_meta == True, return links_df, meta_df link_df: a Pandas DataFrame['TF', 'target', 'importance'] containing inferred regulatory links and their connection strength. meta_df: a Pandas DataFrame['target', 'meta', 'value'] containing meta information regarding the trained regression model.
entailment
def target_gene_indices(gene_names, target_genes): """ :param gene_names: list of gene names. :param target_genes: either int (the top n), 'all', or a collection (subset of gene_names). :return: the (column) indices of the target genes in the expression_matrix. """ if isinstance(target_genes, list) and len(target_genes) == 0: return [] if isinstance(target_genes, str) and target_genes.upper() == 'ALL': return list(range(len(gene_names))) elif isinstance(target_genes, int): top_n = target_genes assert top_n > 0 return list(range(min(top_n, len(gene_names)))) elif isinstance(target_genes, list): if not target_genes: # target_genes is empty return target_genes elif all(isinstance(target_gene, str) for target_gene in target_genes): return [index for index, gene in enumerate(gene_names) if gene in target_genes] elif all(isinstance(target_gene, int) for target_gene in target_genes): return target_genes else: raise ValueError("Mixed types in target genes.") else: raise ValueError("Unable to interpret target_genes.")
:param gene_names: list of gene names. :param target_genes: either int (the top n), 'all', or a collection (subset of gene_names). :return: the (column) indices of the target genes in the expression_matrix.
entailment
def create_graph(expression_matrix, gene_names, tf_names, regressor_type, regressor_kwargs, client, target_genes='all', limit=None, include_meta=False, early_stop_window_length=EARLY_STOP_WINDOW_LENGTH, repartition_multiplier=1, seed=DEMON_SEED): """ Main API function. Create a Dask computation graph. Note: fixing the GC problems was fixed by 2 changes: [1] and [2] !!! :param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: list of transcription factor names. Should have a non-empty intersection with gene_names. :param regressor_type: regressor type. Case insensitive. :param regressor_kwargs: dict of key-value pairs that configures the regressor. :param client: a dask.distributed client instance. * Used to scatter-broadcast the tf matrix to the workers instead of simply wrapping in a delayed(). :param target_genes: either int, 'all' or a collection that is a subset of gene_names. :param limit: optional number of top regulatory links to return. Default None. :param include_meta: Also return the meta DataFrame. Default False. :param early_stop_window_length: window length of the early stopping monitor. :param repartition_multiplier: multiplier :param seed: (optional) random seed for the regressors. Default 666. :return: if include_meta is False, returns a Dask graph that computes the links DataFrame. If include_meta is True, returns a tuple: the links DataFrame and the meta DataFrame. """ assert expression_matrix.shape[1] == len(gene_names) assert client, "client is required" tf_matrix, tf_matrix_gene_names = to_tf_matrix(expression_matrix, gene_names, tf_names) future_tf_matrix = client.scatter(tf_matrix, broadcast=True) # [1] wrap in a list of 1 -> unsure why but Matt. Rocklin does this often... 
[future_tf_matrix_gene_names] = client.scatter([tf_matrix_gene_names], broadcast=True) delayed_link_dfs = [] # collection of delayed link DataFrames delayed_meta_dfs = [] # collection of delayed meta DataFrame for target_gene_index in target_gene_indices(gene_names, target_genes): target_gene_name = delayed(gene_names[target_gene_index], pure=True) target_gene_expression = delayed(expression_matrix[:, target_gene_index], pure=True) if include_meta: delayed_link_df, delayed_meta_df = delayed(infer_partial_network, pure=True, nout=2)( regressor_type, regressor_kwargs, future_tf_matrix, future_tf_matrix_gene_names, target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed) if delayed_link_df is not None: delayed_link_dfs.append(delayed_link_df) delayed_meta_dfs.append(delayed_meta_df) else: delayed_link_df = delayed(infer_partial_network, pure=True)( regressor_type, regressor_kwargs, future_tf_matrix, future_tf_matrix_gene_names, target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed) if delayed_link_df is not None: delayed_link_dfs.append(delayed_link_df) # gather the DataFrames into one distributed DataFrame all_links_df = from_delayed(delayed_link_dfs, meta=_GRN_SCHEMA) all_meta_df = from_delayed(delayed_meta_dfs, meta=_META_SCHEMA) # optionally limit the number of resulting regulatory links, descending by top importance if limit: maybe_limited_links_df = all_links_df.nlargest(limit, columns=['importance']) else: maybe_limited_links_df = all_links_df # [2] repartition to nr of workers -> important to avoid GC problems! # see: http://dask.pydata.org/en/latest/dataframe-performance.html#repartition-to-reduce-overhead n_parts = len(client.ncores()) * repartition_multiplier if include_meta: return maybe_limited_links_df.repartition(npartitions=n_parts), \ all_meta_df.repartition(npartitions=n_parts) else: return maybe_limited_links_df.repartition(npartitions=n_parts)
Main API function. Create a Dask computation graph. Note: fixing the GC problems was fixed by 2 changes: [1] and [2] !!! :param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: list of transcription factor names. Should have a non-empty intersection with gene_names. :param regressor_type: regressor type. Case insensitive. :param regressor_kwargs: dict of key-value pairs that configures the regressor. :param client: a dask.distributed client instance. * Used to scatter-broadcast the tf matrix to the workers instead of simply wrapping in a delayed(). :param target_genes: either int, 'all' or a collection that is a subset of gene_names. :param limit: optional number of top regulatory links to return. Default None. :param include_meta: Also return the meta DataFrame. Default False. :param early_stop_window_length: window length of the early stopping monitor. :param repartition_multiplier: multiplier :param seed: (optional) random seed for the regressors. Default 666. :return: if include_meta is False, returns a Dask graph that computes the links DataFrame. If include_meta is True, returns a tuple: the links DataFrame and the meta DataFrame.
entailment
def window_boundaries(self, current_round): """ :param current_round: :return: the low and high boundaries of the estimators window to consider. """ lo = max(0, current_round - self.window_length + 1) hi = current_round + 1 return lo, hi
:param current_round: :return: the low and high boundaries of the estimators window to consider.
entailment
def generate(self, **options): '''Generates an encrypted URL with the specified options''' if options.get('unsafe', False): return unsafe_url(**options) else: return self.generate_new(options)
Generates an encrypted URL with the specified options
entailment
def getToken(self, userId, name, portraitUri): """ 获取 Token 方法 方法 @param userId:用户 Id,最大长度 64 字节.是用户在 App 中的唯一标识码,必须保证在同一个 App 内不重复,重复的用户 Id 将被当作是同一用户。(必传) @param name:用户名称,最大长度 128 字节.用来在 Push 推送时显示用户的名称.用户名称,最大长度 128 字节.用来在 Push 推送时显示用户的名称。(必传) @param portraitUri:用户头像 URI,最大长度 1024 字节.用来在 Push 推送时显示用户的头像。(必传) @return code:返回码,200 为正常.如果您正在使用开发环境的 AppKey,您的应用只能注册 100 名用户,达到上限后,将返回错误码 2007.如果您需要更多的测试账户数量,您需要在应用配置中申请“增加测试人数”。 @return token:用户 Token,可以保存应用内,长度在 256 字节以内.用户 Token,可以保存应用内,长度在 256 字节以内。 @return userId:用户 Id,与输入的用户 Id 相同.用户 Id,与输入的用户 Id 相同。 @return errorMessage:错误信息。 """ desc = { "name": "TokenReslut", "desc": "getToken 返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常.如果您正在使用开发环境的 AppKey,您的应用只能注册 100 名用户,达到上限后,将返回错误码 2007.如果您需要更多的测试账户数量,您需要在应用配置中申请“增加测试人数”。" }, { "name": "token", "type": "String", "desc": "用户 Token,可以保存应用内,长度在 256 字节以内.用户 Token,可以保存应用内,长度在 256 字节以内。" }, { "name": "userId", "type": "String", "desc": "用户 Id,与输入的用户 Id 相同.用户 Id,与输入的用户 Id 相同。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/user/getToken.json', params={ "userId": userId, "name": name, "portraitUri": portraitUri }) return Response(r, desc)
获取 Token 方法 方法 @param userId:用户 Id,最大长度 64 字节.是用户在 App 中的唯一标识码,必须保证在同一个 App 内不重复,重复的用户 Id 将被当作是同一用户。(必传) @param name:用户名称,最大长度 128 字节.用来在 Push 推送时显示用户的名称.用户名称,最大长度 128 字节.用来在 Push 推送时显示用户的名称。(必传) @param portraitUri:用户头像 URI,最大长度 1024 字节.用来在 Push 推送时显示用户的头像。(必传) @return code:返回码,200 为正常.如果您正在使用开发环境的 AppKey,您的应用只能注册 100 名用户,达到上限后,将返回错误码 2007.如果您需要更多的测试账户数量,您需要在应用配置中申请“增加测试人数”。 @return token:用户 Token,可以保存应用内,长度在 256 字节以内.用户 Token,可以保存应用内,长度在 256 字节以内。 @return userId:用户 Id,与输入的用户 Id 相同.用户 Id,与输入的用户 Id 相同。 @return errorMessage:错误信息。
entailment
def checkOnline(self, userId): """ 检查用户在线状态 方法 方法 @param userId:用户 Id,最大长度 64 字节。是用户在 App 中的唯一标识码,必须保证在同一个 App 内不重复,重复的用户 Id 将被当作是同一用户。(必传) @return code:返回码,200 为正常。 @return status:在线状态,1为在线,0为不在线。 @return errorMessage:错误信息。 """ desc = { "name": "CheckOnlineReslut", "desc": "checkOnlineUser返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "status", "type": "String", "desc": "在线状态,1为在线,0为不在线。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/user/checkOnline.json', params={"userId": userId}) return Response(r, desc)
检查用户在线状态 方法 方法 @param userId:用户 Id,最大长度 64 字节。是用户在 App 中的唯一标识码,必须保证在同一个 App 内不重复,重复的用户 Id 将被当作是同一用户。(必传) @return code:返回码,200 为正常。 @return status:在线状态,1为在线,0为不在线。 @return errorMessage:错误信息。
entailment
def block(self, userId, minute): """ 封禁用户方法(每秒钟限 100 次) 方法 @param userId:用户 Id。(必传) @param minute:封禁时长,单位为分钟,最大值为43200分钟。(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/user/block.json', params={"userId": userId, "minute": minute}) return Response(r, desc)
封禁用户方法(每秒钟限 100 次) 方法 @param userId:用户 Id。(必传) @param minute:封禁时长,单位为分钟,最大值为43200分钟。(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def addBlacklist(self, userId, blackUserId): """ 添加用户到黑名单方法(每秒钟限 100 次) 方法 @param userId:用户 Id。(必传) @param blackUserId:被加到黑名单的用户Id。(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/user/blacklist/add.json', params={"userId": userId, "blackUserId": blackUserId}) return Response(r, desc)
添加用户到黑名单方法(每秒钟限 100 次) 方法 @param userId:用户 Id。(必传) @param blackUserId:被加到黑名单的用户Id。(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def publishPrivate(self, fromUserId, toUserId, objectName, content, pushContent=None, pushData=None, count=None, verifyBlacklist=None, isPersisted=None, isCounted=None, isIncludeSender=None): """ 发送单聊消息方法(一个用户向另外一个用户发送消息,单条消息最大 128k。每分钟最多发送 6000 条信息,每次发送用户上限为 1000 人,如:一次发送 1000 人时,示为 1000 条消息。) 方法 @param fromUserId:发送人用户 Id。(必传) @param toUserId:接收用户 Id,可以实现向多人发送消息,每次上限为 1000 人。(必传) @param voiceMessage:消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息。如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知。(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param count:针对 iOS 平台,Push 时用来控制未读消息显示数,只有在 toUserId 为一个用户 Id 的时候有效。(可选) @param verifyBlacklist:是否过滤发送人黑名单列表,0 表示为不过滤、 1 表示为过滤,默认为 0 不过滤。(可选) @param isPersisted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行存储,0 表示为不存储、 1 表示为存储,默认为 1 存储消息。(可选) @param isCounted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行未读消息计数,0 表示为不计数、 1 表示为计数,默认为 1 计数,未读消息数增加 1。(可选) @param isIncludeSender:发送用户自已是否接收消息,0 表示为不接收,1 表示为接收,默认为 0 不接收。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/private/publish.json', params={ "fromUserId": fromUserId, "toUserId": toUserId, "objectName": objectName, "content": content, "pushContent": pushContent, "pushData": pushData, "count": count, "verifyBlacklist": verifyBlacklist, "isPersisted": isPersisted, "isCounted": isCounted, "isIncludeSender": isIncludeSender }) return Response(r, desc)
发送单聊消息方法(一个用户向另外一个用户发送消息,单条消息最大 128k。每分钟最多发送 6000 条信息,每次发送用户上限为 1000 人,如:一次发送 1000 人时,示为 1000 条消息。) 方法 @param fromUserId:发送人用户 Id。(必传) @param toUserId:接收用户 Id,可以实现向多人发送消息,每次上限为 1000 人。(必传) @param voiceMessage:消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息。如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知。(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param count:针对 iOS 平台,Push 时用来控制未读消息显示数,只有在 toUserId 为一个用户 Id 的时候有效。(可选) @param verifyBlacklist:是否过滤发送人黑名单列表,0 表示为不过滤、 1 表示为过滤,默认为 0 不过滤。(可选) @param isPersisted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行存储,0 表示为不存储、 1 表示为存储,默认为 1 存储消息。(可选) @param isCounted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行未读消息计数,0 表示为不计数、 1 表示为计数,默认为 1 计数,未读消息数增加 1。(可选) @param isIncludeSender:发送用户自已是否接收消息,0 表示为不接收,1 表示为接收,默认为 0 不接收。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def publishTemplate(self, templateMessage): """ 发送单聊模板消息方法(一个用户向多个用户发送不同消息内容,单条消息最大 128k。每分钟最多发送 6000 条信息,每次发送用户上限为 1000 人。) 方法 @param templateMessage:单聊模版消息。 @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/json'), action='/message/private/publish_template.json', params=templateMessage) return Response(r, desc)
发送单聊模板消息方法(一个用户向多个用户发送不同消息内容,单条消息最大 128k。每分钟最多发送 6000 条信息,每次发送用户上限为 1000 人。) 方法 @param templateMessage:单聊模版消息。 @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def PublishSystem(self, fromUserId, toUserId, objectName, content, pushContent=None, pushData=None, isPersisted=None, isCounted=None): """ 发送系统消息方法(一个用户向一个或多个用户发送系统消息,单条消息最大 128k,会话类型为 SYSTEM。每秒钟最多发送 100 条消息,每次最多同时向 100 人发送,如:一次发送 100 人时,示为 100 条消息。) 方法 @param fromUserId:发送人用户 Id。(必传) @param toUserId:接收用户 Id,提供多个本参数可以实现向多人发送消息,上限为 1000 人。(必传) @param txtMessage:发送消息内容(必传) @param pushContent:如果为自定义消息,定义显示的 Push 内容,内容中定义标识通过 values 中设置的标识位内容进行替换.如消息类型为自定义不需要 Push 通知,则对应数组传空值即可。(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。如不需要 Push 功能对应数组传空值即可。(可选) @param isPersisted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行存储,0 表示为不存储、 1 表示为存储,默认为 1 存储消息。(可选) @param isCounted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行未读消息计数,0 表示为不计数、 1 表示为计数,默认为 1 计数,未读消息数增加 1。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/system/publish.json', params={ "fromUserId": fromUserId, "toUserId": toUserId, "objectName": objectName, "content": content, "pushContent": pushContent, "pushData": pushData, "isPersisted": isPersisted, "isCounted": isCounted }) return Response(r, desc)
发送系统消息方法(一个用户向一个或多个用户发送系统消息,单条消息最大 128k,会话类型为 SYSTEM。每秒钟最多发送 100 条消息,每次最多同时向 100 人发送,如:一次发送 100 人时,示为 100 条消息。) 方法 @param fromUserId:发送人用户 Id。(必传) @param toUserId:接收用户 Id,提供多个本参数可以实现向多人发送消息,上限为 1000 人。(必传) @param txtMessage:发送消息内容(必传) @param pushContent:如果为自定义消息,定义显示的 Push 内容,内容中定义标识通过 values 中设置的标识位内容进行替换.如消息类型为自定义不需要 Push 通知,则对应数组传空值即可。(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。如不需要 Push 功能对应数组传空值即可。(可选) @param isPersisted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行存储,0 表示为不存储、 1 表示为存储,默认为 1 存储消息。(可选) @param isCounted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行未读消息计数,0 表示为不计数、 1 表示为计数,默认为 1 计数,未读消息数增加 1。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def publishGroup(self, fromUserId, toGroupId, objectName, content, pushContent=None, pushData=None, isPersisted=None, isCounted=None, isIncludeSender=None): """ 发送群组消息方法(以一个用户身份向群组发送消息,单条消息最大 128k.每秒钟最多发送 20 条消息,每次最多向 3 个群组发送,如:一次向 3 个群组发送消息,示为 3 条消息。) 方法 @param fromUserId:发送人用户 Id 。(必传) @param toGroupId:接收群Id,提供多个本参数可以实现向多群发送消息,最多不超过 3 个群组。(必传) @param txtMessage:发送消息内容(必传) @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知。(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param isPersisted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行存储,0 表示为不存储、 1 表示为存储,默认为 1 存储消息。(可选) @param isCounted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行未读消息计数,0 表示为不计数、 1 表示为计数,默认为 1 计数,未读消息数增加 1。(可选) @param isIncludeSender:发送用户自已是否接收消息,0 表示为不接收,1 表示为接收,默认为 0 不接收。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/group/publish.json', params={ "fromUserId": fromUserId, "toGroupId": toGroupId, "objectName": objectName, "content": content, "pushContent": pushContent, "pushData": pushData, "isPersisted": isPersisted, "isCounted": isCounted, "isIncludeSender": isIncludeSender }) return Response(r, desc)
发送群组消息方法(以一个用户身份向群组发送消息,单条消息最大 128k.每秒钟最多发送 20 条消息,每次最多向 3 个群组发送,如:一次向 3 个群组发送消息,示为 3 条消息。) 方法 @param fromUserId:发送人用户 Id 。(必传) @param toGroupId:接收群Id,提供多个本参数可以实现向多群发送消息,最多不超过 3 个群组。(必传) @param txtMessage:发送消息内容(必传) @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知。(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param isPersisted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行存储,0 表示为不存储、 1 表示为存储,默认为 1 存储消息。(可选) @param isCounted:当前版本有新的自定义消息,而老版本没有该自定义消息时,老版本客户端收到消息后是否进行未读消息计数,0 表示为不计数、 1 表示为计数,默认为 1 计数,未读消息数增加 1。(可选) @param isIncludeSender:发送用户自已是否接收消息,0 表示为不接收,1 表示为接收,默认为 0 不接收。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def publishChatroom(self, fromUserId, toChatroomId, objectName, content): """ 发送聊天室消息方法(一个用户向聊天室发送消息,单条消息最大 128k。每秒钟限 100 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param toChatroomId:接收聊天室Id,提供多个本参数可以实现向多个聊天室发送消息。(必传) @param txtMessage:发送消息内容(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/chatroom/publish.json', params={ "fromUserId": fromUserId, "toChatroomId": toChatroomId, "objectName": objectName, "content": content }) return Response(r, desc)
发送聊天室消息方法(一个用户向聊天室发送消息,单条消息最大 128k。每秒钟限 100 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param toChatroomId:接收聊天室Id,提供多个本参数可以实现向多个聊天室发送消息。(必传) @param txtMessage:发送消息内容(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def broadcast(self, fromUserId, objectName, content, pushContent=None, pushData=None, os=None): """ 发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送 Push 信息,单条消息最大 128k,会话类型为 SYSTEM。每小时只能发送 1 次,每天最多发送 3 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param txtMessage:文本消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知.(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param os:针对操作系统发送 Push,值为 iOS 表示对 iOS 手机用户发送 Push ,为 Android 时表示对 Android 手机用户发送 Push ,如对所有用户发送 Push 信息,则不需要传 os 参数。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/broadcast.json', params={ "fromUserId": fromUserId, "objectName": objectName, "content": content, "pushContent": pushContent, "pushData": pushData, "os": os }) return Response(r, desc)
发送广播消息方法(发送消息给一个应用下的所有注册用户,如用户未在线会对满足条件(绑定手机终端)的用户发送 Push 信息,单条消息最大 128k,会话类型为 SYSTEM。每小时只能发送 1 次,每天最多发送 3 次。) 方法 @param fromUserId:发送人用户 Id。(必传) @param txtMessage:文本消息。 @param pushContent:定义显示的 Push 内容,如果 objectName 为融云内置消息类型时,则发送后用户一定会收到 Push 信息. 如果为自定义消息,则 pushContent 为自定义消息显示的 Push 内容,如果不传则用户不会收到 Push 通知.(可选) @param pushData:针对 iOS 平台为 Push 通知时附加到 payload 中,Android 客户端收到推送消息时对应字段名为 pushData。(可选) @param os:针对操作系统发送 Push,值为 iOS 表示对 iOS 手机用户发送 Push ,为 Android 时表示对 Android 手机用户发送 Push ,如对所有用户发送 Push 信息,则不需要传 os 参数。(可选) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def deleteMessage(self, date): """ 消息历史记录删除方法(删除 APP 内指定某天某小时内的所有会话消息记录。调用该接口返回成功后,date参数指定的某小时的消息记录文件将在随后的5-10分钟内被永久删除。) 方法 @param date:指定北京时间某天某小时,格式为2014010101,表示:2014年1月1日凌晨1点。(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。 """ desc = { "name": "CodeSuccessReslut", "desc": " http 成功返回结果", "fields": [{ "name": "code", "type": "Integer", "desc": "返回码,200 为正常。" }, { "name": "errorMessage", "type": "String", "desc": "错误信息。" }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/history/delete.json', params={"date": date}) return Response(r, desc)
消息历史记录删除方法(删除 APP 内指定某天某小时内的所有会话消息记录。调用该接口返回成功后,date参数指定的某小时的消息记录文件将在随后的5-10分钟内被永久删除。) 方法 @param date:指定北京时间某天某小时,格式为2014010101,表示:2014年1月1日凌晨1点。(必传) @return code:返回码,200 为正常。 @return errorMessage:错误信息。
entailment
def to_utf8(x): """ Tries to utf-8 encode x when possible If x is a string returns it encoded, otherwise tries to iter x and encode utf-8 all strings it contains, returning a list. """ if isinstance(x, basestring): return x.encode('utf-8') if isinstance(x, unicode) else x try: l = iter(x) except TypeError: return x return [to_utf8(i) for i in l]
Tries to utf-8 encode x when possible If x is a string returns it encoded, otherwise tries to iter x and encode utf-8 all strings it contains, returning a list.
entailment
def signing_base(self, request, consumer, token): """ This method generates the OAuth signature. It's defined here to avoid circular imports. """ sig = ( escape(request.method), escape(OAuthHook.get_normalized_url(request.url)), escape(OAuthHook.get_normalized_parameters(request)), ) key = '%s&' % escape(consumer.secret) if token is not None: key += escape(token.secret) raw = '&'.join(sig) return key, raw
This method generates the OAuth signature. It's defined here to avoid circular imports.
entailment
def _split_url_string(query_string): """ Turns a `query_string` into a Python dictionary with unquoted values """ parameters = parse_qs(to_utf8(query_string), keep_blank_values=True) for k, v in parameters.iteritems(): parameters[k] = urllib.unquote(v[0]) return parameters
Turns a `query_string` into a Python dictionary with unquoted values
entailment
def get_normalized_parameters(request): """ Returns a string that contains the parameters that must be signed. This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1 """ # See issues #10 and #12 if ('Content-Type' not in request.headers or \ request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) \ and not isinstance(request.data, basestring): data_and_params = dict(request.data.items() + request.params.items()) for key,value in data_and_params.items(): request.data_and_params[to_utf8(key)] = to_utf8(value) if request.data_and_params.has_key('oauth_signature'): del request.data_and_params['oauth_signature'] items = [] for key, value in request.data_and_params.iteritems(): # 1.0a/9.1.1 states that kvp must be sorted by key, then by value, # so we unpack sequence values into multiple items for sorting. if isinstance(value, basestring): items.append((key, value)) else: try: value = list(value) except TypeError, e: assert 'is not iterable' in str(e) items.append((key, value)) else: items.extend((key, item) for item in value) # Include any query string parameters included in the url query_string = urlparse(request.url)[4] items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()]) items.sort() return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~')
Returns a string that contains the parameters that must be signed. This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
entailment
def get_normalized_url(url): """ Returns a normalized url, without params """ scheme, netloc, path, params, query, fragment = urlparse(url) # Exclude default port numbers. if scheme == 'http' and netloc[-3:] == ':80': netloc = netloc[:-3] elif scheme == 'https' and netloc[-4:] == ':443': netloc = netloc[:-4] if scheme not in ('http', 'https'): raise ValueError("Unsupported URL %s (%s)." % (url, scheme)) # Normalized URL excludes params, query, and fragment. return urlunparse((scheme, netloc, path, None, None, None))
Returns a normalized url, without params
entailment
def to_url(request): """Serialize as a URL for a GET request.""" scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url)) query = parse_qs(query) for key, value in request.data_and_params.iteritems(): query.setdefault(key, []).append(value) query = urllib.urlencode(query, True) return urlunsplit((scheme, netloc, path, query, fragment))
Serialize as a URL for a GET request.
entailment