id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
226,200
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py
set_attributes
def set_attributes(trace): """Automatically set attributes for Google Cloud environment.""" spans = trace.get('spans') for span in spans: if span.get('attributes') is None: span['attributes'] = {} if is_gae_environment(): set_gae_attributes(span) set_common_attributes(span) set_monitored_resource_attributes(span)
python
def set_attributes(trace): """Automatically set attributes for Google Cloud environment.""" spans = trace.get('spans') for span in spans: if span.get('attributes') is None: span['attributes'] = {} if is_gae_environment(): set_gae_attributes(span) set_common_attributes(span) set_monitored_resource_attributes(span)
[ "def", "set_attributes", "(", "trace", ")", ":", "spans", "=", "trace", ".", "get", "(", "'spans'", ")", "for", "span", "in", "spans", ":", "if", "span", ".", "get", "(", "'attributes'", ")", "is", "None", ":", "span", "[", "'attributes'", "]", "=", ...
Automatically set attributes for Google Cloud environment.
[ "Automatically", "set", "attributes", "for", "Google", "Cloud", "environment", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py#L62-L74
226,201
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py
set_common_attributes
def set_common_attributes(span): """Set the common attributes.""" common = { attributes_helper.COMMON_ATTRIBUTES.get('AGENT'): AGENT, } common_attrs = Attributes(common)\ .format_attributes_json()\ .get('attributeMap') _update_attr_map(span, common_attrs)
python
def set_common_attributes(span): """Set the common attributes.""" common = { attributes_helper.COMMON_ATTRIBUTES.get('AGENT'): AGENT, } common_attrs = Attributes(common)\ .format_attributes_json()\ .get('attributeMap') _update_attr_map(span, common_attrs)
[ "def", "set_common_attributes", "(", "span", ")", ":", "common", "=", "{", "attributes_helper", ".", "COMMON_ATTRIBUTES", ".", "get", "(", "'AGENT'", ")", ":", "AGENT", ",", "}", "common_attrs", "=", "Attributes", "(", "common", ")", ".", "format_attributes_js...
Set the common attributes.
[ "Set", "the", "common", "attributes", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py#L142-L151
226,202
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py
set_gae_attributes
def set_gae_attributes(span): """Set the GAE environment common attributes.""" for env_var, attribute_key in GAE_ATTRIBUTES.items(): attribute_value = os.environ.get(env_var) if attribute_value is not None: pair = {attribute_key: attribute_value} pair_attrs = Attributes(pair)\ .format_attributes_json()\ .get('attributeMap') _update_attr_map(span, pair_attrs)
python
def set_gae_attributes(span): """Set the GAE environment common attributes.""" for env_var, attribute_key in GAE_ATTRIBUTES.items(): attribute_value = os.environ.get(env_var) if attribute_value is not None: pair = {attribute_key: attribute_value} pair_attrs = Attributes(pair)\ .format_attributes_json()\ .get('attributeMap') _update_attr_map(span, pair_attrs)
[ "def", "set_gae_attributes", "(", "span", ")", ":", "for", "env_var", ",", "attribute_key", "in", "GAE_ATTRIBUTES", ".", "items", "(", ")", ":", "attribute_value", "=", "os", ".", "environ", ".", "get", "(", "env_var", ")", "if", "attribute_value", "is", "...
Set the GAE environment common attributes.
[ "Set", "the", "GAE", "environment", "common", "attributes", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py#L154-L165
226,203
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py
StackdriverExporter.translate_to_stackdriver
def translate_to_stackdriver(self, trace): """Translate the spans json to Stackdriver format. See: https://cloud.google.com/trace/docs/reference/v2/rest/v2/ projects.traces/batchWrite :type trace: dict :param trace: Trace dictionary :rtype: dict :returns: Spans in Google Cloud StackDriver Trace format. """ set_attributes(trace) spans_json = trace.get('spans') trace_id = trace.get('traceId') for span in spans_json: span_name = 'projects/{}/traces/{}/spans/{}'.format( self.project_id, trace_id, span.get('spanId')) span_json = { 'name': span_name, 'displayName': span.get('displayName'), 'startTime': span.get('startTime'), 'endTime': span.get('endTime'), 'spanId': str(span.get('spanId')), 'attributes': self.map_attributes(span.get('attributes')), 'links': span.get('links'), 'status': span.get('status'), 'stackTrace': span.get('stackTrace'), 'timeEvents': span.get('timeEvents'), 'sameProcessAsParentSpan': span.get('sameProcessAsParentSpan'), 'childSpanCount': span.get('childSpanCount') } if span.get('parentSpanId') is not None: parent_span_id = str(span.get('parentSpanId')) span_json['parentSpanId'] = parent_span_id yield span_json
python
def translate_to_stackdriver(self, trace): """Translate the spans json to Stackdriver format. See: https://cloud.google.com/trace/docs/reference/v2/rest/v2/ projects.traces/batchWrite :type trace: dict :param trace: Trace dictionary :rtype: dict :returns: Spans in Google Cloud StackDriver Trace format. """ set_attributes(trace) spans_json = trace.get('spans') trace_id = trace.get('traceId') for span in spans_json: span_name = 'projects/{}/traces/{}/spans/{}'.format( self.project_id, trace_id, span.get('spanId')) span_json = { 'name': span_name, 'displayName': span.get('displayName'), 'startTime': span.get('startTime'), 'endTime': span.get('endTime'), 'spanId': str(span.get('spanId')), 'attributes': self.map_attributes(span.get('attributes')), 'links': span.get('links'), 'status': span.get('status'), 'stackTrace': span.get('stackTrace'), 'timeEvents': span.get('timeEvents'), 'sameProcessAsParentSpan': span.get('sameProcessAsParentSpan'), 'childSpanCount': span.get('childSpanCount') } if span.get('parentSpanId') is not None: parent_span_id = str(span.get('parentSpanId')) span_json['parentSpanId'] = parent_span_id yield span_json
[ "def", "translate_to_stackdriver", "(", "self", ",", "trace", ")", ":", "set_attributes", "(", "trace", ")", "spans_json", "=", "trace", ".", "get", "(", "'spans'", ")", "trace_id", "=", "trace", ".", "get", "(", "'traceId'", ")", "for", "span", "in", "s...
Translate the spans json to Stackdriver format. See: https://cloud.google.com/trace/docs/reference/v2/rest/v2/ projects.traces/batchWrite :type trace: dict :param trace: Trace dictionary :rtype: dict :returns: Spans in Google Cloud StackDriver Trace format.
[ "Translate", "the", "spans", "json", "to", "Stackdriver", "format", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py#L236-L275
226,204
census-instrumentation/opencensus-python
opencensus/common/resource/__init__.py
merge_resources
def merge_resources(resource_list): """Merge multiple resources to get a new resource. Resources earlier in the list take precedence: if multiple resources share a label key, use the value from the first resource in the list with that key. The combined resource's type will be the first non-null type in the list. :type resource_list: list(:class:`Resource`) :param resource_list: The list of resources to combine. :rtype: :class:`Resource` :return: The new combined resource. """ if not resource_list: raise ValueError rtype = None for rr in resource_list: if rr.type: rtype = rr.type break labels = {} for rr in reversed(resource_list): labels.update(rr.labels) return Resource(rtype, labels)
python
def merge_resources(resource_list): """Merge multiple resources to get a new resource. Resources earlier in the list take precedence: if multiple resources share a label key, use the value from the first resource in the list with that key. The combined resource's type will be the first non-null type in the list. :type resource_list: list(:class:`Resource`) :param resource_list: The list of resources to combine. :rtype: :class:`Resource` :return: The new combined resource. """ if not resource_list: raise ValueError rtype = None for rr in resource_list: if rr.type: rtype = rr.type break labels = {} for rr in reversed(resource_list): labels.update(rr.labels) return Resource(rtype, labels)
[ "def", "merge_resources", "(", "resource_list", ")", ":", "if", "not", "resource_list", ":", "raise", "ValueError", "rtype", "=", "None", "for", "rr", "in", "resource_list", ":", "if", "rr", ".", "type", ":", "rtype", "=", "rr", ".", "type", "break", "la...
Merge multiple resources to get a new resource. Resources earlier in the list take precedence: if multiple resources share a label key, use the value from the first resource in the list with that key. The combined resource's type will be the first non-null type in the list. :type resource_list: list(:class:`Resource`) :param resource_list: The list of resources to combine. :rtype: :class:`Resource` :return: The new combined resource.
[ "Merge", "multiple", "resources", "to", "get", "a", "new", "resource", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/resource/__init__.py#L52-L76
226,205
census-instrumentation/opencensus-python
opencensus/common/resource/__init__.py
check_ascii_256
def check_ascii_256(string): """Check that `string` is printable ASCII and at most 256 chars. Raise a `ValueError` if this check fails. Note that `string` itself doesn't have to be ASCII-encoded. :type string: str :param string: The string to check. """ if string is None: return if len(string) > 256: raise ValueError("Value is longer than 256 characters") bad_char = _NON_PRINTABLE_ASCII.search(string) if bad_char: raise ValueError(u'Character "{}" at position {} is not printable ' 'ASCII' .format( string[bad_char.start():bad_char.end()], bad_char.start()))
python
def check_ascii_256(string): """Check that `string` is printable ASCII and at most 256 chars. Raise a `ValueError` if this check fails. Note that `string` itself doesn't have to be ASCII-encoded. :type string: str :param string: The string to check. """ if string is None: return if len(string) > 256: raise ValueError("Value is longer than 256 characters") bad_char = _NON_PRINTABLE_ASCII.search(string) if bad_char: raise ValueError(u'Character "{}" at position {} is not printable ' 'ASCII' .format( string[bad_char.start():bad_char.end()], bad_char.start()))
[ "def", "check_ascii_256", "(", "string", ")", ":", "if", "string", "is", "None", ":", "return", "if", "len", "(", "string", ")", ">", "256", ":", "raise", "ValueError", "(", "\"Value is longer than 256 characters\"", ")", "bad_char", "=", "_NON_PRINTABLE_ASCII",...
Check that `string` is printable ASCII and at most 256 chars. Raise a `ValueError` if this check fails. Note that `string` itself doesn't have to be ASCII-encoded. :type string: str :param string: The string to check.
[ "Check", "that", "string", "is", "printable", "ASCII", "and", "at", "most", "256", "chars", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/resource/__init__.py#L79-L98
226,206
census-instrumentation/opencensus-python
opencensus/common/resource/__init__.py
parse_labels
def parse_labels(labels_str): """Parse label keys and values following the Resource spec. >>> parse_labels("k=v") {'k': 'v'} >>> parse_labels("k1=v1, k2=v2") {'k1': 'v1', 'k2': 'v2'} >>> parse_labels("k1='v1,=z1'") {'k1': 'v1,=z1'} """ if not _LABELS_RE.match(labels_str): return None labels = {} for kv in _KV_RE.finditer(labels_str): gd = kv.groupdict() key = unquote(gd['key']) if key in labels: logger.warning('Duplicate label key "%s"', key) labels[key] = unquote(gd['val']) return labels
python
def parse_labels(labels_str): """Parse label keys and values following the Resource spec. >>> parse_labels("k=v") {'k': 'v'} >>> parse_labels("k1=v1, k2=v2") {'k1': 'v1', 'k2': 'v2'} >>> parse_labels("k1='v1,=z1'") {'k1': 'v1,=z1'} """ if not _LABELS_RE.match(labels_str): return None labels = {} for kv in _KV_RE.finditer(labels_str): gd = kv.groupdict() key = unquote(gd['key']) if key in labels: logger.warning('Duplicate label key "%s"', key) labels[key] = unquote(gd['val']) return labels
[ "def", "parse_labels", "(", "labels_str", ")", ":", "if", "not", "_LABELS_RE", ".", "match", "(", "labels_str", ")", ":", "return", "None", "labels", "=", "{", "}", "for", "kv", "in", "_KV_RE", ".", "finditer", "(", "labels_str", ")", ":", "gd", "=", ...
Parse label keys and values following the Resource spec. >>> parse_labels("k=v") {'k': 'v'} >>> parse_labels("k1=v1, k2=v2") {'k1': 'v1', 'k2': 'v2'} >>> parse_labels("k1='v1,=z1'") {'k1': 'v1,=z1'}
[ "Parse", "label", "keys", "and", "values", "following", "the", "Resource", "spec", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/resource/__init__.py#L180-L199
226,207
census-instrumentation/opencensus-python
opencensus/common/resource/__init__.py
get_from_env
def get_from_env(): """Get a Resource from environment variables. :rtype: :class:`Resource` :return: A resource with type and labels from the environment. """ type_env = os.getenv(OC_RESOURCE_TYPE) if type_env is None: return None type_env = type_env.strip() labels_env = os.getenv(OC_RESOURCE_LABELS) if labels_env is None: return Resource(type_env) labels = parse_labels(labels_env) return Resource(type_env, labels)
python
def get_from_env(): """Get a Resource from environment variables. :rtype: :class:`Resource` :return: A resource with type and labels from the environment. """ type_env = os.getenv(OC_RESOURCE_TYPE) if type_env is None: return None type_env = type_env.strip() labels_env = os.getenv(OC_RESOURCE_LABELS) if labels_env is None: return Resource(type_env) labels = parse_labels(labels_env) return Resource(type_env, labels)
[ "def", "get_from_env", "(", ")", ":", "type_env", "=", "os", ".", "getenv", "(", "OC_RESOURCE_TYPE", ")", "if", "type_env", "is", "None", ":", "return", "None", "type_env", "=", "type_env", ".", "strip", "(", ")", "labels_env", "=", "os", ".", "getenv", ...
Get a Resource from environment variables. :rtype: :class:`Resource` :return: A resource with type and labels from the environment.
[ "Get", "a", "Resource", "from", "environment", "variables", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/resource/__init__.py#L202-L219
226,208
census-instrumentation/opencensus-python
context/opencensus-context/opencensus/common/runtime_context/__init__.py
_RuntimeContext.apply
def apply(self, snapshot): """Set the current context from a given snapshot dictionary""" for name in snapshot: setattr(self, name, snapshot[name])
python
def apply(self, snapshot): """Set the current context from a given snapshot dictionary""" for name in snapshot: setattr(self, name, snapshot[name])
[ "def", "apply", "(", "self", ",", "snapshot", ")", ":", "for", "name", "in", "snapshot", ":", "setattr", "(", "self", ",", "name", ",", "snapshot", "[", "name", "]", ")" ]
Set the current context from a given snapshot dictionary
[ "Set", "the", "current", "context", "from", "a", "given", "snapshot", "dictionary" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/context/opencensus-context/opencensus/common/runtime_context/__init__.py#L48-L52
226,209
census-instrumentation/opencensus-python
context/opencensus-context/opencensus/common/runtime_context/__init__.py
_RuntimeContext.snapshot
def snapshot(self): """Return a dictionary of current slots by reference.""" return dict((n, self._slots[n].get()) for n in self._slots.keys())
python
def snapshot(self): """Return a dictionary of current slots by reference.""" return dict((n, self._slots[n].get()) for n in self._slots.keys())
[ "def", "snapshot", "(", "self", ")", ":", "return", "dict", "(", "(", "n", ",", "self", ".", "_slots", "[", "n", "]", ".", "get", "(", ")", ")", "for", "n", "in", "self", ".", "_slots", ".", "keys", "(", ")", ")" ]
Return a dictionary of current slots by reference.
[ "Return", "a", "dictionary", "of", "current", "slots", "by", "reference", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/context/opencensus-context/opencensus/common/runtime_context/__init__.py#L54-L57
226,210
census-instrumentation/opencensus-python
context/opencensus-context/opencensus/common/runtime_context/__init__.py
_RuntimeContext.with_current_context
def with_current_context(self, func): """Capture the current context and apply it to the provided func""" caller_context = self.snapshot() def call_with_current_context(*args, **kwargs): try: backup_context = self.snapshot() self.apply(caller_context) return func(*args, **kwargs) finally: self.apply(backup_context) return call_with_current_context
python
def with_current_context(self, func): """Capture the current context and apply it to the provided func""" caller_context = self.snapshot() def call_with_current_context(*args, **kwargs): try: backup_context = self.snapshot() self.apply(caller_context) return func(*args, **kwargs) finally: self.apply(backup_context) return call_with_current_context
[ "def", "with_current_context", "(", "self", ",", "func", ")", ":", "caller_context", "=", "self", ".", "snapshot", "(", ")", "def", "call_with_current_context", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "backup_context", "=", "self",...
Capture the current context and apply it to the provided func
[ "Capture", "the", "current", "context", "and", "apply", "it", "to", "the", "provided", "func" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/context/opencensus-context/opencensus/common/runtime_context/__init__.py#L76-L89
226,211
census-instrumentation/opencensus-python
opencensus/common/monitored_resource/aws_identity_doc_utils.py
AwsIdentityDocumentUtils._initialize_aws_identity_document
def _initialize_aws_identity_document(cls): """This method, tries to establish an HTTP connection to AWS instance identity document url. If the application is running on an EC2 instance, we should be able to get back a valid JSON document. Make a http get request call and store data in local map. This method should only be called once. """ if cls.inited: return content = get_request(_AWS_INSTANCE_IDENTITY_DOCUMENT_URI) if content is not None: content = json.loads(content) for env_var, attribute_key in _AWS_ATTRIBUTES.items(): attribute_value = content.get(env_var) if attribute_value is not None: aws_metadata_map[attribute_key] = attribute_value cls.is_running = True cls.inited = True
python
def _initialize_aws_identity_document(cls): """This method, tries to establish an HTTP connection to AWS instance identity document url. If the application is running on an EC2 instance, we should be able to get back a valid JSON document. Make a http get request call and store data in local map. This method should only be called once. """ if cls.inited: return content = get_request(_AWS_INSTANCE_IDENTITY_DOCUMENT_URI) if content is not None: content = json.loads(content) for env_var, attribute_key in _AWS_ATTRIBUTES.items(): attribute_value = content.get(env_var) if attribute_value is not None: aws_metadata_map[attribute_key] = attribute_value cls.is_running = True cls.inited = True
[ "def", "_initialize_aws_identity_document", "(", "cls", ")", ":", "if", "cls", ".", "inited", ":", "return", "content", "=", "get_request", "(", "_AWS_INSTANCE_IDENTITY_DOCUMENT_URI", ")", "if", "content", "is", "not", "None", ":", "content", "=", "json", ".", ...
This method, tries to establish an HTTP connection to AWS instance identity document url. If the application is running on an EC2 instance, we should be able to get back a valid JSON document. Make a http get request call and store data in local map. This method should only be called once.
[ "This", "method", "tries", "to", "establish", "an", "HTTP", "connection", "to", "AWS", "instance", "identity", "document", "url", ".", "If", "the", "application", "is", "running", "on", "an", "EC2", "instance", "we", "should", "be", "able", "to", "get", "b...
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/monitored_resource/aws_identity_doc_utils.py#L53-L74
226,212
census-instrumentation/opencensus-python
opencensus/stats/view.py
View.get_metric_descriptor
def get_metric_descriptor(self): """Get a MetricDescriptor for this view. Lazily creates a MetricDescriptor for metrics conversion. :rtype: :class: `opencensus.metrics.export.metric_descriptor.MetricDescriptor` :return: A converted Metric. """ # noqa with self._md_cache_lock: if self._metric_descriptor is None: self._metric_descriptor = metric_descriptor.MetricDescriptor( self.name, self.description, self.measure.unit, metric_utils.get_metric_type(self.measure, self.aggregation), # TODO: add label key description [label_key.LabelKey(tk, "") for tk in self.columns]) return self._metric_descriptor
python
def get_metric_descriptor(self): """Get a MetricDescriptor for this view. Lazily creates a MetricDescriptor for metrics conversion. :rtype: :class: `opencensus.metrics.export.metric_descriptor.MetricDescriptor` :return: A converted Metric. """ # noqa with self._md_cache_lock: if self._metric_descriptor is None: self._metric_descriptor = metric_descriptor.MetricDescriptor( self.name, self.description, self.measure.unit, metric_utils.get_metric_type(self.measure, self.aggregation), # TODO: add label key description [label_key.LabelKey(tk, "") for tk in self.columns]) return self._metric_descriptor
[ "def", "get_metric_descriptor", "(", "self", ")", ":", "# noqa", "with", "self", ".", "_md_cache_lock", ":", "if", "self", ".", "_metric_descriptor", "is", "None", ":", "self", ".", "_metric_descriptor", "=", "metric_descriptor", ".", "MetricDescriptor", "(", "s...
Get a MetricDescriptor for this view. Lazily creates a MetricDescriptor for metrics conversion. :rtype: :class: `opencensus.metrics.export.metric_descriptor.MetricDescriptor` :return: A converted Metric.
[ "Get", "a", "MetricDescriptor", "for", "this", "view", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/view.py#L81-L100
226,213
census-instrumentation/opencensus-python
contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py
new_stats_exporter
def new_stats_exporter(service_name, hostname=None, endpoint=None, interval=None): """Create a new worker thread and attach the exporter to it. :type endpoint: str :param endpoint: address of the opencensus service. :type service_name: str :param service_name: name of the service :type host_name: str :param host_name: name of the host (machine or host name) :type interval: int or float :param interval: Seconds between export calls. """ endpoint = utils.DEFAULT_ENDPOINT if endpoint is None else endpoint exporter = StatsExporter( ExportRpcHandler(_create_stub(endpoint), service_name, hostname)) transport.get_exporter_thread(stats.stats, exporter, interval) return exporter
python
def new_stats_exporter(service_name, hostname=None, endpoint=None, interval=None): """Create a new worker thread and attach the exporter to it. :type endpoint: str :param endpoint: address of the opencensus service. :type service_name: str :param service_name: name of the service :type host_name: str :param host_name: name of the host (machine or host name) :type interval: int or float :param interval: Seconds between export calls. """ endpoint = utils.DEFAULT_ENDPOINT if endpoint is None else endpoint exporter = StatsExporter( ExportRpcHandler(_create_stub(endpoint), service_name, hostname)) transport.get_exporter_thread(stats.stats, exporter, interval) return exporter
[ "def", "new_stats_exporter", "(", "service_name", ",", "hostname", "=", "None", ",", "endpoint", "=", "None", ",", "interval", "=", "None", ")", ":", "endpoint", "=", "utils", ".", "DEFAULT_ENDPOINT", "if", "endpoint", "is", "None", "else", "endpoint", "expo...
Create a new worker thread and attach the exporter to it. :type endpoint: str :param endpoint: address of the opencensus service. :type service_name: str :param service_name: name of the service :type host_name: str :param host_name: name of the host (machine or host name) :type interval: int or float :param interval: Seconds between export calls.
[ "Create", "a", "new", "worker", "thread", "and", "attach", "the", "exporter", "to", "it", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py#L238-L261
226,214
census-instrumentation/opencensus-python
contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py
StatsExporter.export_metrics
def export_metrics(self, metrics): """ Exports given metrics to target metric service. """ metric_protos = [] for metric in metrics: metric_protos.append(_get_metric_proto(metric)) self._rpc_handler.send( metrics_service_pb2.ExportMetricsServiceRequest( metrics=metric_protos))
python
def export_metrics(self, metrics): """ Exports given metrics to target metric service. """ metric_protos = [] for metric in metrics: metric_protos.append(_get_metric_proto(metric)) self._rpc_handler.send( metrics_service_pb2.ExportMetricsServiceRequest( metrics=metric_protos))
[ "def", "export_metrics", "(", "self", ",", "metrics", ")", ":", "metric_protos", "=", "[", "]", "for", "metric", "in", "metrics", ":", "metric_protos", ".", "append", "(", "_get_metric_proto", "(", "metric", ")", ")", "self", ".", "_rpc_handler", ".", "sen...
Exports given metrics to target metric service.
[ "Exports", "given", "metrics", "to", "target", "metric", "service", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py#L41-L50
226,215
census-instrumentation/opencensus-python
contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py
ExportRpcHandler.send
def send(self, request): """Dispatches incoming request on rpc. Initializes rpc if necessary and dispatches incoming request. If a rpc error is thrown, this function will attempt to recreate the stream and retry sending given request once. :type request: class: `~.metrics_service_pb2.ExportMetricsServiceRequest` :param request: incoming export request """ if not self._initialized: self._initialize(request) return try: self._rpc.send(request) except grpc.RpcError as e: logging.info('Found rpc error %s', e, exc_info=True) # If stream has closed due to error, attempt to reopen with the # incoming request. self._initialize(request)
python
def send(self, request): """Dispatches incoming request on rpc. Initializes rpc if necessary and dispatches incoming request. If a rpc error is thrown, this function will attempt to recreate the stream and retry sending given request once. :type request: class: `~.metrics_service_pb2.ExportMetricsServiceRequest` :param request: incoming export request """ if not self._initialized: self._initialize(request) return try: self._rpc.send(request) except grpc.RpcError as e: logging.info('Found rpc error %s', e, exc_info=True) # If stream has closed due to error, attempt to reopen with the # incoming request. self._initialize(request)
[ "def", "send", "(", "self", ",", "request", ")", ":", "if", "not", "self", ".", "_initialized", ":", "self", ".", "_initialize", "(", "request", ")", "return", "try", ":", "self", ".", "_rpc", ".", "send", "(", "request", ")", "except", "grpc", ".", ...
Dispatches incoming request on rpc. Initializes rpc if necessary and dispatches incoming request. If a rpc error is thrown, this function will attempt to recreate the stream and retry sending given request once. :type request: class: `~.metrics_service_pb2.ExportMetricsServiceRequest` :param request: incoming export request
[ "Dispatches", "incoming", "request", "on", "rpc", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py#L189-L210
226,216
census-instrumentation/opencensus-python
contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py
ExportRpcHandler._initialize
def _initialize(self, request): """Initializes the exporter rpc stream.""" # Add node information on the first request dispatched on a stream. request.node.MergeFrom(self._node) request.resource.MergeFrom(self._resource) self._initial_request = request self._rpc.open() self._initialized = True
python
def _initialize(self, request): """Initializes the exporter rpc stream.""" # Add node information on the first request dispatched on a stream. request.node.MergeFrom(self._node) request.resource.MergeFrom(self._resource) self._initial_request = request self._rpc.open() self._initialized = True
[ "def", "_initialize", "(", "self", ",", "request", ")", ":", "# Add node information on the first request dispatched on a stream.", "request", ".", "node", ".", "MergeFrom", "(", "self", ".", "_node", ")", "request", ".", "resource", ".", "MergeFrom", "(", "self", ...
Initializes the exporter rpc stream.
[ "Initializes", "the", "exporter", "rpc", "stream", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/stats_exporter/__init__.py#L212-L221
226,217
census-instrumentation/opencensus-python
contrib/opencensus-ext-postgresql/opencensus/ext/postgresql/trace.py
connect
def connect(*args, **kwargs): """Create database connection, use TraceCursor as the cursor_factory.""" kwargs['cursor_factory'] = TraceCursor conn = pg_connect(*args, **kwargs) return conn
python
def connect(*args, **kwargs): """Create database connection, use TraceCursor as the cursor_factory.""" kwargs['cursor_factory'] = TraceCursor conn = pg_connect(*args, **kwargs) return conn
[ "def", "connect", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'cursor_factory'", "]", "=", "TraceCursor", "conn", "=", "pg_connect", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "conn" ]
Create database connection, use TraceCursor as the cursor_factory.
[ "Create", "database", "connection", "use", "TraceCursor", "as", "the", "cursor_factory", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-postgresql/opencensus/ext/postgresql/trace.py#L42-L46
226,218
census-instrumentation/opencensus-python
opencensus/stats/measure_to_view_map.py
MeasureToViewMap.get_view
def get_view(self, view_name, timestamp): """get the View Data from the given View name""" view = self._registered_views.get(view_name) if view is None: return None view_data_list = self._measure_to_view_data_list_map.get( view.measure.name) if not view_data_list: return None for view_data in view_data_list: if view_data.view.name == view_name: break else: return None return self.copy_and_finalize_view_data(view_data)
python
def get_view(self, view_name, timestamp): """get the View Data from the given View name""" view = self._registered_views.get(view_name) if view is None: return None view_data_list = self._measure_to_view_data_list_map.get( view.measure.name) if not view_data_list: return None for view_data in view_data_list: if view_data.view.name == view_name: break else: return None return self.copy_and_finalize_view_data(view_data)
[ "def", "get_view", "(", "self", ",", "view_name", ",", "timestamp", ")", ":", "view", "=", "self", ".", "_registered_views", ".", "get", "(", "view_name", ")", "if", "view", "is", "None", ":", "return", "None", "view_data_list", "=", "self", ".", "_measu...
get the View Data from the given View name
[ "get", "the", "View", "Data", "from", "the", "given", "View", "name" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/measure_to_view_map.py#L51-L69
226,219
census-instrumentation/opencensus-python
opencensus/stats/measure_to_view_map.py
MeasureToViewMap.register_view
def register_view(self, view, timestamp): """registers the view's measure name to View Datas given a view""" if len(self.exporters) > 0: try: for e in self.exporters: e.on_register_view(view) except AttributeError: pass self._exported_views = None existing_view = self._registered_views.get(view.name) if existing_view is not None: if existing_view == view: # ignore the views that are already registered return else: logging.warning( "A different view with the same name is already registered" ) # pragma: NO COVER measure = view.measure registered_measure = self._registered_measures.get(measure.name) if registered_measure is not None and registered_measure != measure: logging.warning( "A different measure with the same name is already registered") self._registered_views[view.name] = view if registered_measure is None: self._registered_measures[measure.name] = measure self._measure_to_view_data_list_map[view.measure.name].append( view_data_module.ViewData(view=view, start_time=timestamp, end_time=timestamp))
python
def register_view(self, view, timestamp): """registers the view's measure name to View Datas given a view""" if len(self.exporters) > 0: try: for e in self.exporters: e.on_register_view(view) except AttributeError: pass self._exported_views = None existing_view = self._registered_views.get(view.name) if existing_view is not None: if existing_view == view: # ignore the views that are already registered return else: logging.warning( "A different view with the same name is already registered" ) # pragma: NO COVER measure = view.measure registered_measure = self._registered_measures.get(measure.name) if registered_measure is not None and registered_measure != measure: logging.warning( "A different measure with the same name is already registered") self._registered_views[view.name] = view if registered_measure is None: self._registered_measures[measure.name] = measure self._measure_to_view_data_list_map[view.measure.name].append( view_data_module.ViewData(view=view, start_time=timestamp, end_time=timestamp))
[ "def", "register_view", "(", "self", ",", "view", ",", "timestamp", ")", ":", "if", "len", "(", "self", ".", "exporters", ")", ">", "0", ":", "try", ":", "for", "e", "in", "self", ".", "exporters", ":", "e", ".", "on_register_view", "(", "view", ")...
registers the view's measure name to View Datas given a view
[ "registers", "the", "view", "s", "measure", "name", "to", "View", "Datas", "given", "a", "view" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/measure_to_view_map.py#L77-L106
226,220
census-instrumentation/opencensus-python
opencensus/stats/measure_to_view_map.py
MeasureToViewMap.record
def record(self, tags, measurement_map, timestamp, attachments=None): """records stats with a set of tags""" assert all(vv >= 0 for vv in measurement_map.values()) for measure, value in measurement_map.items(): if measure != self._registered_measures.get(measure.name): return view_datas = [] for measure_name, view_data_list \ in self._measure_to_view_data_list_map.items(): if measure_name == measure.name: view_datas.extend(view_data_list) for view_data in view_datas: view_data.record( context=tags, value=value, timestamp=timestamp, attachments=attachments) self.export(view_datas)
python
def record(self, tags, measurement_map, timestamp, attachments=None): """records stats with a set of tags""" assert all(vv >= 0 for vv in measurement_map.values()) for measure, value in measurement_map.items(): if measure != self._registered_measures.get(measure.name): return view_datas = [] for measure_name, view_data_list \ in self._measure_to_view_data_list_map.items(): if measure_name == measure.name: view_datas.extend(view_data_list) for view_data in view_datas: view_data.record( context=tags, value=value, timestamp=timestamp, attachments=attachments) self.export(view_datas)
[ "def", "record", "(", "self", ",", "tags", ",", "measurement_map", ",", "timestamp", ",", "attachments", "=", "None", ")", ":", "assert", "all", "(", "vv", ">=", "0", "for", "vv", "in", "measurement_map", ".", "values", "(", ")", ")", "for", "measure",...
records stats with a set of tags
[ "records", "stats", "with", "a", "set", "of", "tags" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/measure_to_view_map.py#L108-L123
226,221
census-instrumentation/opencensus-python
opencensus/stats/measure_to_view_map.py
MeasureToViewMap.export
def export(self, view_datas): """export view datas to registered exporters""" view_datas_copy = \ [self.copy_and_finalize_view_data(vd) for vd in view_datas] if len(self.exporters) > 0: for e in self.exporters: try: e.export(view_datas_copy) except AttributeError: pass
python
def export(self, view_datas): """export view datas to registered exporters""" view_datas_copy = \ [self.copy_and_finalize_view_data(vd) for vd in view_datas] if len(self.exporters) > 0: for e in self.exporters: try: e.export(view_datas_copy) except AttributeError: pass
[ "def", "export", "(", "self", ",", "view_datas", ")", ":", "view_datas_copy", "=", "[", "self", ".", "copy_and_finalize_view_data", "(", "vd", ")", "for", "vd", "in", "view_datas", "]", "if", "len", "(", "self", ".", "exporters", ")", ">", "0", ":", "f...
export view datas to registered exporters
[ "export", "view", "datas", "to", "registered", "exporters" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/measure_to_view_map.py#L126-L135
226,222
census-instrumentation/opencensus-python
opencensus/stats/measure_to_view_map.py
MeasureToViewMap.get_metrics
def get_metrics(self, timestamp): """Get a Metric for each registered view. Convert each registered view's associated `ViewData` into a `Metric` to be exported. :type timestamp: :class: `datetime.datetime` :param timestamp: The timestamp to use for metric conversions, usually the current time. :rtype: Iterator[:class: `opencensus.metrics.export.metric.Metric`] """ for vdl in self._measure_to_view_data_list_map.values(): for vd in vdl: metric = metric_utils.view_data_to_metric(vd, timestamp) if metric is not None: yield metric
python
def get_metrics(self, timestamp): """Get a Metric for each registered view. Convert each registered view's associated `ViewData` into a `Metric` to be exported. :type timestamp: :class: `datetime.datetime` :param timestamp: The timestamp to use for metric conversions, usually the current time. :rtype: Iterator[:class: `opencensus.metrics.export.metric.Metric`] """ for vdl in self._measure_to_view_data_list_map.values(): for vd in vdl: metric = metric_utils.view_data_to_metric(vd, timestamp) if metric is not None: yield metric
[ "def", "get_metrics", "(", "self", ",", "timestamp", ")", ":", "for", "vdl", "in", "self", ".", "_measure_to_view_data_list_map", ".", "values", "(", ")", ":", "for", "vd", "in", "vdl", ":", "metric", "=", "metric_utils", ".", "view_data_to_metric", "(", "...
Get a Metric for each registered view. Convert each registered view's associated `ViewData` into a `Metric` to be exported. :type timestamp: :class: `datetime.datetime` :param timestamp: The timestamp to use for metric conversions, usually the current time. :rtype: Iterator[:class: `opencensus.metrics.export.metric.Metric`]
[ "Get", "a", "Metric", "for", "each", "registered", "view", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/measure_to_view_map.py#L137-L153
226,223
census-instrumentation/opencensus-python
contrib/opencensus-ext-azure/opencensus/ext/azure/trace_exporter/__init__.py
AzureExporter._transmit
def _transmit(self, envelopes): """ Transmit the data envelopes to the ingestion service. Return a negative value for partial success or non-retryable failure. Return 0 if all envelopes have been successfully ingested. Return the next retry time in seconds for retryable failure. This function should never throw exception. """ if not envelopes: return 0 # TODO: prevent requests being tracked blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames', ) execution_context.set_opencensus_attr( 'blacklist_hostnames', ['dc.services.visualstudio.com'], ) try: response = requests.post( url=self.options.endpoint, data=json.dumps(envelopes), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json; charset=utf-8', }, timeout=self.options.timeout, ) except Exception as ex: # TODO: consider RequestException logger.warning('Transient client side error %s.', ex) # client side error (retryable) return self.options.minimum_retry_interval finally: execution_context.set_opencensus_attr( 'blacklist_hostnames', blacklist_hostnames, ) text = 'N/A' data = None try: text = response.text except Exception as ex: logger.warning('Error while reading response body %s.', ex) else: try: data = json.loads(text) except Exception: pass if response.status_code == 200: logger.info('Transmission succeeded: %s.', text) return 0 if response.status_code == 206: # Partial Content # TODO: store the unsent data if data: try: resend_envelopes = [] for error in data['errors']: if error['statusCode'] in ( 429, # Too Many Requests 500, # Internal Server Error 503, # Service Unavailable ): resend_envelopes.append(envelopes[error['index']]) else: logger.error( 'Data drop %s: %s %s.', error['statusCode'], error['message'], envelopes[error['index']], ) if resend_envelopes: self.storage.put(resend_envelopes) except Exception as ex: logger.error( 'Error while processing %s: %s %s.', response.status_code, text, ex, ) return -response.status_code # cannot parse response body, 
fallback to retry if response.status_code in ( 206, # Partial Content 429, # Too Many Requests 500, # Internal Server Error 503, # Service Unavailable ): logger.warning( 'Transient server side error %s: %s.', response.status_code, text, ) # server side error (retryable) return self.options.minimum_retry_interval logger.error( 'Non-retryable server side error %s: %s.', response.status_code, text, ) # server side error (non-retryable) return -response.status_code
python
def _transmit(self, envelopes): """ Transmit the data envelopes to the ingestion service. Return a negative value for partial success or non-retryable failure. Return 0 if all envelopes have been successfully ingested. Return the next retry time in seconds for retryable failure. This function should never throw exception. """ if not envelopes: return 0 # TODO: prevent requests being tracked blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames', ) execution_context.set_opencensus_attr( 'blacklist_hostnames', ['dc.services.visualstudio.com'], ) try: response = requests.post( url=self.options.endpoint, data=json.dumps(envelopes), headers={ 'Accept': 'application/json', 'Content-Type': 'application/json; charset=utf-8', }, timeout=self.options.timeout, ) except Exception as ex: # TODO: consider RequestException logger.warning('Transient client side error %s.', ex) # client side error (retryable) return self.options.minimum_retry_interval finally: execution_context.set_opencensus_attr( 'blacklist_hostnames', blacklist_hostnames, ) text = 'N/A' data = None try: text = response.text except Exception as ex: logger.warning('Error while reading response body %s.', ex) else: try: data = json.loads(text) except Exception: pass if response.status_code == 200: logger.info('Transmission succeeded: %s.', text) return 0 if response.status_code == 206: # Partial Content # TODO: store the unsent data if data: try: resend_envelopes = [] for error in data['errors']: if error['statusCode'] in ( 429, # Too Many Requests 500, # Internal Server Error 503, # Service Unavailable ): resend_envelopes.append(envelopes[error['index']]) else: logger.error( 'Data drop %s: %s %s.', error['statusCode'], error['message'], envelopes[error['index']], ) if resend_envelopes: self.storage.put(resend_envelopes) except Exception as ex: logger.error( 'Error while processing %s: %s %s.', response.status_code, text, ex, ) return -response.status_code # cannot parse response body, 
fallback to retry if response.status_code in ( 206, # Partial Content 429, # Too Many Requests 500, # Internal Server Error 503, # Service Unavailable ): logger.warning( 'Transient server side error %s: %s.', response.status_code, text, ) # server side error (retryable) return self.options.minimum_retry_interval logger.error( 'Non-retryable server side error %s: %s.', response.status_code, text, ) # server side error (non-retryable) return -response.status_code
[ "def", "_transmit", "(", "self", ",", "envelopes", ")", ":", "if", "not", "envelopes", ":", "return", "0", "# TODO: prevent requests being tracked", "blacklist_hostnames", "=", "execution_context", ".", "get_opencensus_attr", "(", "'blacklist_hostnames'", ",", ")", "e...
Transmit the data envelopes to the ingestion service. Return a negative value for partial success or non-retryable failure. Return 0 if all envelopes have been successfully ingested. Return the next retry time in seconds for retryable failure. This function should never throw exception.
[ "Transmit", "the", "data", "envelopes", "to", "the", "ingestion", "service", ".", "Return", "a", "negative", "value", "for", "partial", "success", "or", "non", "-", "retryable", "failure", ".", "Return", "0", "if", "all", "envelopes", "have", "been", "succes...
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-azure/opencensus/ext/azure/trace_exporter/__init__.py#L137-L237
226,224
census-instrumentation/opencensus-python
opencensus/common/monitored_resource/monitored_resource.py
get_instance
def get_instance(): """Get a resource based on the application environment. Returns a `Resource` configured for the current environment, or None if the environment is unknown or unsupported. :rtype: :class:`opencensus.common.resource.Resource` or None :return: A `Resource` configured for the current environment. """ resources = [] env_resource = resource.get_from_env() if env_resource is not None: resources.append(env_resource) if k8s_utils.is_k8s_environment(): resources.append(resource.Resource( _K8S_CONTAINER, k8s_utils.get_k8s_metadata())) if is_gce_environment(): resources.append(resource.Resource( _GCE_INSTANCE, gcp_metadata_config.GcpMetadataConfig().get_gce_metadata())) elif is_aws_environment(): resources.append(resource.Resource( _AWS_EC2_INSTANCE, (aws_identity_doc_utils.AwsIdentityDocumentUtils() .get_aws_metadata()))) if not resources: return None return resource.merge_resources(resources)
python
def get_instance(): """Get a resource based on the application environment. Returns a `Resource` configured for the current environment, or None if the environment is unknown or unsupported. :rtype: :class:`opencensus.common.resource.Resource` or None :return: A `Resource` configured for the current environment. """ resources = [] env_resource = resource.get_from_env() if env_resource is not None: resources.append(env_resource) if k8s_utils.is_k8s_environment(): resources.append(resource.Resource( _K8S_CONTAINER, k8s_utils.get_k8s_metadata())) if is_gce_environment(): resources.append(resource.Resource( _GCE_INSTANCE, gcp_metadata_config.GcpMetadataConfig().get_gce_metadata())) elif is_aws_environment(): resources.append(resource.Resource( _AWS_EC2_INSTANCE, (aws_identity_doc_utils.AwsIdentityDocumentUtils() .get_aws_metadata()))) if not resources: return None return resource.merge_resources(resources)
[ "def", "get_instance", "(", ")", ":", "resources", "=", "[", "]", "env_resource", "=", "resource", ".", "get_from_env", "(", ")", "if", "env_resource", "is", "not", "None", ":", "resources", ".", "append", "(", "env_resource", ")", "if", "k8s_utils", ".", ...
Get a resource based on the application environment. Returns a `Resource` configured for the current environment, or None if the environment is unknown or unsupported. :rtype: :class:`opencensus.common.resource.Resource` or None :return: A `Resource` configured for the current environment.
[ "Get", "a", "resource", "based", "on", "the", "application", "environment", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/monitored_resource/monitored_resource.py#L37-L67
226,225
census-instrumentation/opencensus-python
nox.py
lint
def lint(session): """Run flake8. Returns a failure if flake8 finds linting errors or sufficiently serious code quality issues. """ session.interpreter = 'python3.6' session.install('flake8') # Install dev packages. _install_dev_packages(session) session.run( 'flake8', '--exclude=contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/trace_exporter/gen/', 'context/', 'contrib/', 'opencensus/', 'tests/', 'examples/')
python
def lint(session): """Run flake8. Returns a failure if flake8 finds linting errors or sufficiently serious code quality issues. """ session.interpreter = 'python3.6' session.install('flake8') # Install dev packages. _install_dev_packages(session) session.run( 'flake8', '--exclude=contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/trace_exporter/gen/', 'context/', 'contrib/', 'opencensus/', 'tests/', 'examples/')
[ "def", "lint", "(", "session", ")", ":", "session", ".", "interpreter", "=", "'python3.6'", "session", ".", "install", "(", "'flake8'", ")", "# Install dev packages.", "_install_dev_packages", "(", "session", ")", "session", ".", "run", "(", "'flake8'", ",", "...
Run flake8. Returns a failure if flake8 finds linting errors or sufficiently serious code quality issues.
[ "Run", "flake8", ".", "Returns", "a", "failure", "if", "flake8", "finds", "linting", "errors", "or", "sufficiently", "serious", "code", "quality", "issues", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/nox.py#L111-L125
226,226
census-instrumentation/opencensus-python
contrib/opencensus-ext-pymongo/opencensus/ext/pymongo/trace.py
trace_integration
def trace_integration(tracer=None): """Integrate with pymongo to trace it using event listener.""" log.info('Integrated module: {}'.format(MODULE_NAME)) monitoring.register(MongoCommandListener(tracer=tracer))
python
def trace_integration(tracer=None): """Integrate with pymongo to trace it using event listener.""" log.info('Integrated module: {}'.format(MODULE_NAME)) monitoring.register(MongoCommandListener(tracer=tracer))
[ "def", "trace_integration", "(", "tracer", "=", "None", ")", ":", "log", ".", "info", "(", "'Integrated module: {}'", ".", "format", "(", "MODULE_NAME", ")", ")", "monitoring", ".", "register", "(", "MongoCommandListener", "(", "tracer", "=", "tracer", ")", ...
Integrate with pymongo to trace it using event listener.
[ "Integrate", "with", "pymongo", "to", "trace", "it", "using", "event", "listener", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-pymongo/opencensus/ext/pymongo/trace.py#L30-L33
226,227
census-instrumentation/opencensus-python
opencensus/common/http_handler/__init__.py
get_request
def get_request(request_url, request_headers=dict()): """Execute http get request on given request_url with optional headers """ request = Request(request_url) for key, val in request_headers.items(): request.add_header(key, val) try: response = urlopen(request, timeout=_REQUEST_TIMEOUT) response_content = response.read() except (HTTPError, URLError, socket.timeout): response_content = None return response_content
python
def get_request(request_url, request_headers=dict()): """Execute http get request on given request_url with optional headers """ request = Request(request_url) for key, val in request_headers.items(): request.add_header(key, val) try: response = urlopen(request, timeout=_REQUEST_TIMEOUT) response_content = response.read() except (HTTPError, URLError, socket.timeout): response_content = None return response_content
[ "def", "get_request", "(", "request_url", ",", "request_headers", "=", "dict", "(", ")", ")", ":", "request", "=", "Request", "(", "request_url", ")", "for", "key", ",", "val", "in", "request_headers", ".", "items", "(", ")", ":", "request", ".", "add_he...
Execute http get request on given request_url with optional headers
[ "Execute", "http", "get", "request", "on", "given", "request_url", "with", "optional", "headers" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/http_handler/__init__.py#L30-L43
226,228
census-instrumentation/opencensus-python
contrib/opencensus-ext-requests/opencensus/ext/requests/trace.py
trace_integration
def trace_integration(tracer=None): """Wrap the requests library to trace it.""" log.info('Integrated module: {}'.format(MODULE_NAME)) if tracer is not None: # The execution_context tracer should never be None - if it has not # been set it returns a no-op tracer. Most code in this library does # not handle None being used in the execution context. execution_context.set_opencensus_tracer(tracer) # Wrap the requests functions for func in REQUESTS_WRAP_METHODS: requests_func = getattr(requests, func) wrapped = wrap_requests(requests_func) setattr(requests, requests_func.__name__, wrapped) # Wrap Session class wrapt.wrap_function_wrapper( MODULE_NAME, 'Session.request', wrap_session_request)
python
def trace_integration(tracer=None): """Wrap the requests library to trace it.""" log.info('Integrated module: {}'.format(MODULE_NAME)) if tracer is not None: # The execution_context tracer should never be None - if it has not # been set it returns a no-op tracer. Most code in this library does # not handle None being used in the execution context. execution_context.set_opencensus_tracer(tracer) # Wrap the requests functions for func in REQUESTS_WRAP_METHODS: requests_func = getattr(requests, func) wrapped = wrap_requests(requests_func) setattr(requests, requests_func.__name__, wrapped) # Wrap Session class wrapt.wrap_function_wrapper( MODULE_NAME, 'Session.request', wrap_session_request)
[ "def", "trace_integration", "(", "tracer", "=", "None", ")", ":", "log", ".", "info", "(", "'Integrated module: {}'", ".", "format", "(", "MODULE_NAME", ")", ")", "if", "tracer", "is", "not", "None", ":", "# The execution_context tracer should never be None - if it ...
Wrap the requests library to trace it.
[ "Wrap", "the", "requests", "library", "to", "trace", "it", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-requests/opencensus/ext/requests/trace.py#L40-L58
226,229
census-instrumentation/opencensus-python
contrib/opencensus-ext-requests/opencensus/ext/requests/trace.py
wrap_requests
def wrap_requests(requests_func): """Wrap the requests function to trace it.""" def call(url, *args, **kwargs): blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames') parsed_url = urlparse(url) if parsed_url.port is None: dest_url = parsed_url.hostname else: dest_url = '{}:{}'.format(parsed_url.hostname, parsed_url.port) if utils.disable_tracing_hostname(dest_url, blacklist_hostnames): return requests_func(url, *args, **kwargs) _tracer = execution_context.get_opencensus_tracer() _span = _tracer.start_span() _span.name = '[requests]{}'.format(requests_func.__name__) _span.span_kind = span_module.SpanKind.CLIENT # Add the requests url to attributes _tracer.add_attribute_to_current_span(HTTP_URL, url) result = requests_func(url, *args, **kwargs) # Add the status code to attributes _tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(result.status_code)) _tracer.end_span() return result return call
python
def wrap_requests(requests_func): """Wrap the requests function to trace it.""" def call(url, *args, **kwargs): blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames') parsed_url = urlparse(url) if parsed_url.port is None: dest_url = parsed_url.hostname else: dest_url = '{}:{}'.format(parsed_url.hostname, parsed_url.port) if utils.disable_tracing_hostname(dest_url, blacklist_hostnames): return requests_func(url, *args, **kwargs) _tracer = execution_context.get_opencensus_tracer() _span = _tracer.start_span() _span.name = '[requests]{}'.format(requests_func.__name__) _span.span_kind = span_module.SpanKind.CLIENT # Add the requests url to attributes _tracer.add_attribute_to_current_span(HTTP_URL, url) result = requests_func(url, *args, **kwargs) # Add the status code to attributes _tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(result.status_code)) _tracer.end_span() return result return call
[ "def", "wrap_requests", "(", "requests_func", ")", ":", "def", "call", "(", "url", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "blacklist_hostnames", "=", "execution_context", ".", "get_opencensus_attr", "(", "'blacklist_hostnames'", ")", "parsed_url", ...
Wrap the requests function to trace it.
[ "Wrap", "the", "requests", "function", "to", "trace", "it", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-requests/opencensus/ext/requests/trace.py#L61-L91
226,230
census-instrumentation/opencensus-python
contrib/opencensus-ext-requests/opencensus/ext/requests/trace.py
wrap_session_request
def wrap_session_request(wrapped, instance, args, kwargs): """Wrap the session function to trace it.""" method = kwargs.get('method') or args[0] url = kwargs.get('url') or args[1] blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames') parsed_url = urlparse(url) if parsed_url.port is None: dest_url = parsed_url.hostname else: dest_url = '{}:{}'.format(parsed_url.hostname, parsed_url.port) if utils.disable_tracing_hostname(dest_url, blacklist_hostnames): return wrapped(*args, **kwargs) _tracer = execution_context.get_opencensus_tracer() _span = _tracer.start_span() _span.name = '[requests]{}'.format(method) _span.span_kind = span_module.SpanKind.CLIENT try: tracer_headers = _tracer.propagator.to_headers( _tracer.span_context) kwargs.setdefault('headers', {}).update( tracer_headers) except Exception: # pragma: NO COVER pass # Add the requests url to attributes _tracer.add_attribute_to_current_span(HTTP_URL, url) result = wrapped(*args, **kwargs) # Add the status code to attributes _tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(result.status_code)) _tracer.end_span() return result
python
def wrap_session_request(wrapped, instance, args, kwargs): """Wrap the session function to trace it.""" method = kwargs.get('method') or args[0] url = kwargs.get('url') or args[1] blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames') parsed_url = urlparse(url) if parsed_url.port is None: dest_url = parsed_url.hostname else: dest_url = '{}:{}'.format(parsed_url.hostname, parsed_url.port) if utils.disable_tracing_hostname(dest_url, blacklist_hostnames): return wrapped(*args, **kwargs) _tracer = execution_context.get_opencensus_tracer() _span = _tracer.start_span() _span.name = '[requests]{}'.format(method) _span.span_kind = span_module.SpanKind.CLIENT try: tracer_headers = _tracer.propagator.to_headers( _tracer.span_context) kwargs.setdefault('headers', {}).update( tracer_headers) except Exception: # pragma: NO COVER pass # Add the requests url to attributes _tracer.add_attribute_to_current_span(HTTP_URL, url) result = wrapped(*args, **kwargs) # Add the status code to attributes _tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(result.status_code)) _tracer.end_span() return result
[ "def", "wrap_session_request", "(", "wrapped", ",", "instance", ",", "args", ",", "kwargs", ")", ":", "method", "=", "kwargs", ".", "get", "(", "'method'", ")", "or", "args", "[", "0", "]", "url", "=", "kwargs", ".", "get", "(", "'url'", ")", "or", ...
Wrap the session function to trace it.
[ "Wrap", "the", "session", "function", "to", "trace", "it", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-requests/opencensus/ext/requests/trace.py#L94-L133
226,231
census-instrumentation/opencensus-python
opencensus/stats/metric_utils.py
get_metric_type
def get_metric_type(measure, aggregation): """Get the corresponding metric type for the given stats type. :type measure: (:class: '~opencensus.stats.measure.BaseMeasure') :param measure: the measure for which to find a metric type :type aggregation: (:class: '~opencensus.stats.aggregation.BaseAggregation') :param aggregation: the aggregation for which to find a metric type """ if aggregation.aggregation_type == aggregation_module.Type.NONE: raise ValueError("aggregation type must not be NONE") assert isinstance(aggregation, AGGREGATION_TYPE_MAP[aggregation.aggregation_type]) if aggregation.aggregation_type == aggregation_module.Type.SUM: if isinstance(measure, measure_module.MeasureInt): return metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64 elif isinstance(measure, measure_module.MeasureFloat): return metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE else: raise ValueError elif aggregation.aggregation_type == aggregation_module.Type.COUNT: return metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64 elif aggregation.aggregation_type == aggregation_module.Type.DISTRIBUTION: return metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION elif aggregation.aggregation_type == aggregation_module.Type.LASTVALUE: if isinstance(measure, measure_module.MeasureInt): return metric_descriptor.MetricDescriptorType.GAUGE_INT64 elif isinstance(measure, measure_module.MeasureFloat): return metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE else: raise ValueError else: raise AssertionError
python
def get_metric_type(measure, aggregation):
    """Get the corresponding metric type for the given stats type.

    :type measure: (:class: '~opencensus.stats.measure.BaseMeasure')
    :param measure: the measure for which to find a metric type

    :type aggregation: (:class:
        '~opencensus.stats.aggregation.BaseAggregation')
    :param aggregation: the aggregation for which to find a metric type

    :rtype: int
    :return: A ``MetricDescriptorType`` enum value.

    :raises ValueError: If the aggregation type is NONE, or the measure
        type is not supported for the given aggregation.
    """
    if aggregation.aggregation_type == aggregation_module.Type.NONE:
        raise ValueError("aggregation type must not be NONE")
    assert isinstance(aggregation,
                      AGGREGATION_TYPE_MAP[aggregation.aggregation_type])

    if aggregation.aggregation_type == aggregation_module.Type.SUM:
        if isinstance(measure, measure_module.MeasureInt):
            return metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64
        elif isinstance(measure, measure_module.MeasureFloat):
            return metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE
        else:
            # BUGFIX: the bare `raise ValueError` gave no hint about what
            # went wrong; include the offending measure type.
            raise ValueError(
                "unsupported measure type for SUM aggregation: {}"
                .format(type(measure).__name__))
    elif aggregation.aggregation_type == aggregation_module.Type.COUNT:
        return metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64
    elif aggregation.aggregation_type == aggregation_module.Type.DISTRIBUTION:
        return metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION
    elif aggregation.aggregation_type == aggregation_module.Type.LASTVALUE:
        if isinstance(measure, measure_module.MeasureInt):
            return metric_descriptor.MetricDescriptorType.GAUGE_INT64
        elif isinstance(measure, measure_module.MeasureFloat):
            return metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE
        else:
            raise ValueError(
                "unsupported measure type for LASTVALUE aggregation: {}"
                .format(type(measure).__name__))
    else:
        # Every known aggregation type is handled above; anything else
        # indicates an internal inconsistency, not bad user input.
        raise AssertionError(
            "unhandled aggregation type: {}".format(
                aggregation.aggregation_type))
[ "def", "get_metric_type", "(", "measure", ",", "aggregation", ")", ":", "if", "aggregation", ".", "aggregation_type", "==", "aggregation_module", ".", "Type", ".", "NONE", ":", "raise", "ValueError", "(", "\"aggregation type must not be NONE\"", ")", "assert", "isin...
Get the corresponding metric type for the given stats type. :type measure: (:class: '~opencensus.stats.measure.BaseMeasure') :param measure: the measure for which to find a metric type :type aggregation: (:class: '~opencensus.stats.aggregation.BaseAggregation') :param aggregation: the aggregation for which to find a metric type
[ "Get", "the", "corresponding", "metric", "type", "for", "the", "given", "stats", "type", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/metric_utils.py#L38-L72
226,232
census-instrumentation/opencensus-python
opencensus/stats/metric_utils.py
is_gauge
def is_gauge(md_type): """Whether a given MetricDescriptorType value is a gauge. :type md_type: int :param md_type: A MetricDescriptorType enum value. """ if md_type not in metric_descriptor.MetricDescriptorType: raise ValueError # pragma: NO COVER return md_type in { metric_descriptor.MetricDescriptorType.GAUGE_INT64, metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, metric_descriptor.MetricDescriptorType.GAUGE_DISTRIBUTION }
python
def is_gauge(md_type):
    """Tell whether a MetricDescriptorType value denotes a gauge metric.

    :type md_type: int
    :param md_type: A MetricDescriptorType enum value.
    """
    if md_type not in metric_descriptor.MetricDescriptorType:
        raise ValueError  # pragma: NO COVER
    gauge_types = (
        metric_descriptor.MetricDescriptorType.GAUGE_INT64,
        metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE,
        metric_descriptor.MetricDescriptorType.GAUGE_DISTRIBUTION,
    )
    return md_type in gauge_types
[ "def", "is_gauge", "(", "md_type", ")", ":", "if", "md_type", "not", "in", "metric_descriptor", ".", "MetricDescriptorType", ":", "raise", "ValueError", "# pragma: NO COVER", "return", "md_type", "in", "{", "metric_descriptor", ".", "MetricDescriptorType", ".", "GAU...
Whether a given MetricDescriptorType value is a gauge. :type md_type: int :param md_type: A MetricDescriptorType enum value.
[ "Whether", "a", "given", "MetricDescriptorType", "value", "is", "a", "gauge", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/metric_utils.py#L75-L88
226,233
census-instrumentation/opencensus-python
opencensus/stats/metric_utils.py
view_data_to_metric
def view_data_to_metric(view_data, timestamp): """Convert a ViewData to a Metric at time `timestamp`. :type view_data: :class: `opencensus.stats.view_data.ViewData` :param view_data: The ViewData to convert. :type timestamp: :class: `datetime.datetime` :param timestamp: The time to set on the metric's point's aggregation, usually the current time. :rtype: :class: `opencensus.metrics.export.metric.Metric` :return: A converted Metric. """ if not view_data.tag_value_aggregation_data_map: return None md = view_data.view.get_metric_descriptor() # TODO: implement gauges if is_gauge(md.type): ts_start = None # pragma: NO COVER else: ts_start = view_data.start_time ts_list = [] for tag_vals, agg_data in view_data.tag_value_aggregation_data_map.items(): label_values = get_label_values(tag_vals) point = agg_data.to_point(timestamp) ts_list.append(time_series.TimeSeries(label_values, [point], ts_start)) return metric.Metric(md, ts_list)
python
def view_data_to_metric(view_data, timestamp):
    """Convert a ViewData to a Metric at time `timestamp`.

    :type view_data: :class: `opencensus.stats.view_data.ViewData`
    :param view_data: The ViewData to convert.

    :type timestamp: :class: `datetime.datetime`
    :param timestamp: The time to set on the metric's point's aggregation,
    usually the current time.

    :rtype: :class: `opencensus.metrics.export.metric.Metric`
    :return: A converted Metric, or None when there is no data to convert.
    """
    if not view_data.tag_value_aggregation_data_map:
        return None

    descriptor = view_data.view.get_metric_descriptor()

    # TODO: implement gauges
    if is_gauge(descriptor.type):
        series_start = None  # pragma: NO COVER
    else:
        series_start = view_data.start_time

    series = [
        time_series.TimeSeries(
            get_label_values(tag_values),
            [agg_data.to_point(timestamp)],
            series_start)
        for tag_values, agg_data
        in view_data.tag_value_aggregation_data_map.items()
    ]
    return metric.Metric(descriptor, series)
[ "def", "view_data_to_metric", "(", "view_data", ",", "timestamp", ")", ":", "if", "not", "view_data", ".", "tag_value_aggregation_data_map", ":", "return", "None", "md", "=", "view_data", ".", "view", ".", "get_metric_descriptor", "(", ")", "# TODO: implement gauges...
Convert a ViewData to a Metric at time `timestamp`. :type view_data: :class: `opencensus.stats.view_data.ViewData` :param view_data: The ViewData to convert. :type timestamp: :class: `datetime.datetime` :param timestamp: The time to set on the metric's point's aggregation, usually the current time. :rtype: :class: `opencensus.metrics.export.metric.Metric` :return: A converted Metric.
[ "Convert", "a", "ViewData", "to", "a", "Metric", "at", "time", "timestamp", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/metric_utils.py#L103-L132
226,234
census-instrumentation/opencensus-python
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
_set_django_attributes
def _set_django_attributes(span, request): """Set the django related attributes.""" django_user = getattr(request, 'user', None) if django_user is None: return user_id = django_user.pk try: user_name = django_user.get_username() except AttributeError: # AnonymousUser in some older versions of Django doesn't implement # get_username return # User id is the django autofield for User model as the primary key if user_id is not None: span.add_attribute('django.user.id', str(user_id)) if user_name is not None: span.add_attribute('django.user.name', str(user_name))
python
def _set_django_attributes(span, request): """Set the django related attributes.""" django_user = getattr(request, 'user', None) if django_user is None: return user_id = django_user.pk try: user_name = django_user.get_username() except AttributeError: # AnonymousUser in some older versions of Django doesn't implement # get_username return # User id is the django autofield for User model as the primary key if user_id is not None: span.add_attribute('django.user.id', str(user_id)) if user_name is not None: span.add_attribute('django.user.name', str(user_name))
[ "def", "_set_django_attributes", "(", "span", ",", "request", ")", ":", "django_user", "=", "getattr", "(", "request", ",", "'user'", ",", "None", ")", "if", "django_user", "is", "None", ":", "return", "user_id", "=", "django_user", ".", "pk", "try", ":", ...
Set the django related attributes.
[ "Set", "the", "django", "related", "attributes", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-django/opencensus/ext/django/middleware.py#L85-L105
226,235
census-instrumentation/opencensus-python
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
OpencensusMiddleware.process_request
def process_request(self, request): """Called on each request, before Django decides which view to execute. :type request: :class:`~django.http.request.HttpRequest` :param request: Django http request. """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(request.path, self.blacklist_paths): return # Add the request to thread local execution_context.set_opencensus_attr( REQUEST_THREAD_LOCAL_KEY, request) execution_context.set_opencensus_attr( 'blacklist_hostnames', self.blacklist_hostnames) try: # Start tracing this request span_context = self.propagator.from_headers( _DjangoMetaWrapper(_get_django_request().META)) # Reload the tracer with the new span context tracer = tracer_module.Tracer( span_context=span_context, sampler=self.sampler, exporter=self.exporter, propagator=self.propagator) # Span name is being set at process_view span = tracer.start_span() span.span_kind = span_module.SpanKind.SERVER tracer.add_attribute_to_current_span( attribute_key=HTTP_METHOD, attribute_value=request.method) tracer.add_attribute_to_current_span( attribute_key=HTTP_URL, attribute_value=str(request.path)) # Add the span to thread local # in some cases (exceptions, timeouts) currentspan in # response event will be one of a child spans. # let's keep reference to 'django' span and # use it in response event execution_context.set_opencensus_attr( SPAN_THREAD_LOCAL_KEY, span) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True)
python
def process_request(self, request): """Called on each request, before Django decides which view to execute. :type request: :class:`~django.http.request.HttpRequest` :param request: Django http request. """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(request.path, self.blacklist_paths): return # Add the request to thread local execution_context.set_opencensus_attr( REQUEST_THREAD_LOCAL_KEY, request) execution_context.set_opencensus_attr( 'blacklist_hostnames', self.blacklist_hostnames) try: # Start tracing this request span_context = self.propagator.from_headers( _DjangoMetaWrapper(_get_django_request().META)) # Reload the tracer with the new span context tracer = tracer_module.Tracer( span_context=span_context, sampler=self.sampler, exporter=self.exporter, propagator=self.propagator) # Span name is being set at process_view span = tracer.start_span() span.span_kind = span_module.SpanKind.SERVER tracer.add_attribute_to_current_span( attribute_key=HTTP_METHOD, attribute_value=request.method) tracer.add_attribute_to_current_span( attribute_key=HTTP_URL, attribute_value=str(request.path)) # Add the span to thread local # in some cases (exceptions, timeouts) currentspan in # response event will be one of a child spans. # let's keep reference to 'django' span and # use it in response event execution_context.set_opencensus_attr( SPAN_THREAD_LOCAL_KEY, span) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True)
[ "def", "process_request", "(", "self", ",", "request", ")", ":", "# Do not trace if the url is blacklisted", "if", "utils", ".", "disable_tracing_url", "(", "request", ".", "path", ",", "self", ".", "blacklist_paths", ")", ":", "return", "# Add the request to thread l...
Called on each request, before Django decides which view to execute. :type request: :class:`~django.http.request.HttpRequest` :param request: Django http request.
[ "Called", "on", "each", "request", "before", "Django", "decides", "which", "view", "to", "execute", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-django/opencensus/ext/django/middleware.py#L135-L186
226,236
census-instrumentation/opencensus-python
contrib/opencensus-ext-django/opencensus/ext/django/middleware.py
OpencensusMiddleware.process_view
def process_view(self, request, view_func, *args, **kwargs): """Process view is executed before the view function, here we get the function name add set it as the span name. """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(request.path, self.blacklist_paths): return try: # Get the current span and set the span name to the current # function name of the request. tracer = _get_current_tracer() span = tracer.current_span() span.name = utils.get_func_name(view_func) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True)
python
def process_view(self, request, view_func, *args, **kwargs):
    """Runs before the view function executes; here we get the resolved
    view's function name and set it as the span name.
    """
    # Skip blacklisted urls entirely.
    if utils.disable_tracing_url(request.path, self.blacklist_paths):
        return

    try:
        # Rename the request span after the view Django resolved.
        current_tracer = _get_current_tracer()
        current_span = current_tracer.current_span()
        current_span.name = utils.get_func_name(view_func)
    except Exception:  # pragma: NO COVER
        log.error('Failed to trace request', exc_info=True)
[ "def", "process_view", "(", "self", ",", "request", ",", "view_func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Do not trace if the url is blacklisted", "if", "utils", ".", "disable_tracing_url", "(", "request", ".", "path", ",", "self", ".", "...
Process view is executed before the view function, here we get the function name add set it as the span name.
[ "Process", "view", "is", "executed", "before", "the", "view", "function", "here", "we", "get", "the", "function", "name", "add", "set", "it", "as", "the", "span", "name", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-django/opencensus/ext/django/middleware.py#L188-L204
226,237
census-instrumentation/opencensus-python
opencensus/trace/span.py
format_span_json
def format_span_json(span): """Helper to format a Span in JSON format. :type span: :class:`~opencensus.trace.span.Span` :param span: A Span to be transferred to JSON format. :rtype: dict :returns: Formatted Span. """ span_json = { 'displayName': utils.get_truncatable_str(span.name), 'spanId': span.span_id, 'startTime': span.start_time, 'endTime': span.end_time, 'childSpanCount': len(span._child_spans) } parent_span_id = None if span.parent_span is not None: parent_span_id = span.parent_span.span_id if parent_span_id is not None: span_json['parentSpanId'] = parent_span_id if span.attributes: span_json['attributes'] = attributes.Attributes( span.attributes).format_attributes_json() if span.stack_trace is not None: span_json['stackTrace'] = span.stack_trace.format_stack_trace_json() if span.time_events: span_json['timeEvents'] = { 'timeEvent': [time_event.format_time_event_json() for time_event in span.time_events] } if span.links: span_json['links'] = { 'link': [ link.format_link_json() for link in span.links] } if span.status is not None: span_json['status'] = span.status.format_status_json() if span.same_process_as_parent_span is not None: span_json['sameProcessAsParentSpan'] = \ span.same_process_as_parent_span return span_json
python
def format_span_json(span):
    """Serialize a Span into its JSON (dict) representation.

    :type span: :class:`~opencensus.trace.span.Span`
    :param span: The span to serialize.

    :rtype: dict
    :returns: The span as a JSON-compatible dict; optional fields are
        omitted when unset.
    """
    span_json = {
        'displayName': utils.get_truncatable_str(span.name),
        'spanId': span.span_id,
        'startTime': span.start_time,
        'endTime': span.end_time,
        'childSpanCount': len(span._child_spans)
    }

    # Only emit parentSpanId when the span has a parent with an id.
    if span.parent_span is not None and span.parent_span.span_id is not None:
        span_json['parentSpanId'] = span.parent_span.span_id

    if span.attributes:
        span_json['attributes'] = attributes.Attributes(
            span.attributes).format_attributes_json()

    if span.stack_trace is not None:
        span_json['stackTrace'] = span.stack_trace.format_stack_trace_json()

    if span.time_events:
        span_json['timeEvents'] = {
            'timeEvent': [ev.format_time_event_json()
                          for ev in span.time_events]
        }

    if span.links:
        span_json['links'] = {
            'link': [ln.format_link_json() for ln in span.links]
        }

    if span.status is not None:
        span_json['status'] = span.status.format_status_json()

    if span.same_process_as_parent_span is not None:
        span_json['sameProcessAsParentSpan'] = \
            span.same_process_as_parent_span

    return span_json
[ "def", "format_span_json", "(", "span", ")", ":", "span_json", "=", "{", "'displayName'", ":", "utils", ".", "get_truncatable_str", "(", "span", ".", "name", ")", ",", "'spanId'", ":", "span", ".", "span_id", ",", "'startTime'", ":", "span", ".", "start_ti...
Helper to format a Span in JSON format. :type span: :class:`~opencensus.trace.span.Span` :param span: A Span to be transferred to JSON format. :rtype: dict :returns: Formatted Span.
[ "Helper", "to", "format", "a", "Span", "in", "JSON", "format", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/span.py#L258-L309
226,238
census-instrumentation/opencensus-python
opencensus/trace/span.py
Span.add_annotation
def add_annotation(self, description, **attrs): """Add an annotation to span. :type description: str :param description: A user-supplied message describing the event. The maximum length for the description is 256 bytes. :type attrs: kwargs :param attrs: keyworded arguments e.g. failed=True, name='Caching' """ at = attributes.Attributes(attrs) self.add_time_event(time_event_module.TimeEvent(datetime.utcnow(), time_event_module.Annotation(description, at)))
python
def add_annotation(self, description, **attrs):
    """Record a timestamped annotation on this span.

    :type description: str
    :param description: A user-supplied message describing the event.
        The maximum length for the description is 256 bytes.

    :type attrs: kwargs
    :param attrs: keyworded arguments e.g. failed=True, name='Caching'
    """
    annotation = time_event_module.Annotation(
        description, attributes.Attributes(attrs))
    event = time_event_module.TimeEvent(datetime.utcnow(), annotation)
    self.add_time_event(event)
[ "def", "add_annotation", "(", "self", ",", "description", ",", "*", "*", "attrs", ")", ":", "at", "=", "attributes", ".", "Attributes", "(", "attrs", ")", "self", ".", "add_time_event", "(", "time_event_module", ".", "TimeEvent", "(", "datetime", ".", "utc...
Add an annotation to span. :type description: str :param description: A user-supplied message describing the event. The maximum length for the description is 256 bytes. :type attrs: kwargs :param attrs: keyworded arguments e.g. failed=True, name='Caching'
[ "Add", "an", "annotation", "to", "span", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/span.py#L188-L200
226,239
census-instrumentation/opencensus-python
opencensus/trace/span.py
Span.add_time_event
def add_time_event(self, time_event): """Add a TimeEvent. :type time_event: :class: `~opencensus.trace.time_event.TimeEvent` :param time_event: A TimeEvent object. """ if isinstance(time_event, time_event_module.TimeEvent): self.time_events.append(time_event) else: raise TypeError("Type Error: received {}, but requires TimeEvent.". format(type(time_event).__name__))
python
def add_time_event(self, time_event):
    """Append a TimeEvent to this span.

    :type time_event: :class: `~opencensus.trace.time_event.TimeEvent`
    :param time_event: A TimeEvent object.

    :raises TypeError: If ``time_event`` is not a ``TimeEvent`` instance.
    """
    if not isinstance(time_event, time_event_module.TimeEvent):
        raise TypeError("Type Error: received {}, but requires TimeEvent.".
                        format(type(time_event).__name__))
    self.time_events.append(time_event)
[ "def", "add_time_event", "(", "self", ",", "time_event", ")", ":", "if", "isinstance", "(", "time_event", ",", "time_event_module", ".", "TimeEvent", ")", ":", "self", ".", "time_events", ".", "append", "(", "time_event", ")", "else", ":", "raise", "TypeErro...
Add a TimeEvent. :type time_event: :class: `~opencensus.trace.time_event.TimeEvent` :param time_event: A TimeEvent object.
[ "Add", "a", "TimeEvent", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/span.py#L202-L212
226,240
census-instrumentation/opencensus-python
opencensus/trace/span.py
Span.add_link
def add_link(self, link): """Add a Link. :type link: :class: `~opencensus.trace.link.Link` :param link: A Link object. """ if isinstance(link, link_module.Link): self.links.append(link) else: raise TypeError("Type Error: received {}, but requires Link.". format(type(link).__name__))
python
def add_link(self, link):
    """Append a Link to this span.

    :type link: :class: `~opencensus.trace.link.Link`
    :param link: A Link object.

    :raises TypeError: If ``link`` is not a ``Link`` instance.
    """
    if not isinstance(link, link_module.Link):
        raise TypeError("Type Error: received {}, but requires Link.".
                        format(type(link).__name__))
    self.links.append(link)
[ "def", "add_link", "(", "self", ",", "link", ")", ":", "if", "isinstance", "(", "link", ",", "link_module", ".", "Link", ")", ":", "self", ".", "links", ".", "append", "(", "link", ")", "else", ":", "raise", "TypeError", "(", "\"Type Error: received {}, ...
Add a Link. :type link: :class: `~opencensus.trace.link.Link` :param link: A Link object.
[ "Add", "a", "Link", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/span.py#L214-L224
226,241
census-instrumentation/opencensus-python
opencensus/metrics/export/metric.py
Metric._check_type
def _check_type(self): """Check that point value types match the descriptor type.""" check_type = metric_descriptor.MetricDescriptorType.to_type_class( self.descriptor.type) for ts in self.time_series: if not ts.check_points_type(check_type): raise ValueError("Invalid point value type")
python
def _check_type(self):
    """Verify every point's value type matches the descriptor type.

    :raises ValueError: On the first time series whose points do not
        match the descriptor's declared type.
    """
    expected_class = metric_descriptor.MetricDescriptorType.to_type_class(
        self.descriptor.type)
    # all() short-circuits, so we stop at the first bad series exactly
    # like an explicit loop would.
    if not all(ts.check_points_type(expected_class)
               for ts in self.time_series):
        raise ValueError("Invalid point value type")
[ "def", "_check_type", "(", "self", ")", ":", "check_type", "=", "metric_descriptor", ".", "MetricDescriptorType", ".", "to_type_class", "(", "self", ".", "descriptor", ".", "type", ")", "for", "ts", "in", "self", ".", "time_series", ":", "if", "not", "ts", ...
Check that point value types match the descriptor type.
[ "Check", "that", "point", "value", "types", "match", "the", "descriptor", "type", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/metric.py#L61-L67
226,242
census-instrumentation/opencensus-python
opencensus/metrics/export/metric.py
Metric._check_start_timestamp
def _check_start_timestamp(self): """Check that starting timestamp exists for cumulative metrics.""" if self.descriptor.type in ( metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64, metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE, metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION, ): for ts in self.time_series: if ts.start_timestamp is None: raise ValueError("time_series.start_timestamp must exist " "for cumulative metrics")
python
def _check_start_timestamp(self):
    """Verify cumulative metrics carry a start timestamp on every series.

    :raises ValueError: If any time series of a cumulative metric is
        missing its ``start_timestamp``.
    """
    cumulative_types = (
        metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
        metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
        metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
    )
    if self.descriptor.type not in cumulative_types:
        # Gauges have no notion of a cumulative start time.
        return
    for ts in self.time_series:
        if ts.start_timestamp is None:
            raise ValueError("time_series.start_timestamp must exist "
                             "for cumulative metrics")
[ "def", "_check_start_timestamp", "(", "self", ")", ":", "if", "self", ".", "descriptor", ".", "type", "in", "(", "metric_descriptor", ".", "MetricDescriptorType", ".", "CUMULATIVE_INT64", ",", "metric_descriptor", ".", "MetricDescriptorType", ".", "CUMULATIVE_DOUBLE",...
Check that starting timestamp exists for cumulative metrics.
[ "Check", "that", "starting", "timestamp", "exists", "for", "cumulative", "metrics", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/export/metric.py#L69-L79
226,243
census-instrumentation/opencensus-python
opencensus/stats/bucket_boundaries.py
BucketBoundaries.is_valid_boundaries
def is_valid_boundaries(self, boundaries): """checks if the boundaries are in ascending order""" if boundaries is not None: min_ = boundaries[0] for value in boundaries: if value < min_: return False else: min_ = value return True return False
python
def is_valid_boundaries(self, boundaries):
    """checks if the boundaries are in ascending (non-decreasing) order"""
    if boundaries is None:
        return False
    previous = boundaries[0]
    for current in boundaries:
        if current < previous:
            return False
        previous = current
    return True
[ "def", "is_valid_boundaries", "(", "self", ",", "boundaries", ")", ":", "if", "boundaries", "is", "not", "None", ":", "min_", "=", "boundaries", "[", "0", "]", "for", "value", "in", "boundaries", ":", "if", "value", "<", "min_", ":", "return", "False", ...
checks if the boundaries are in ascending order
[ "checks", "if", "the", "boundaries", "are", "in", "ascending", "order" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/bucket_boundaries.py#L31-L41
226,244
census-instrumentation/opencensus-python
opencensus/metrics/transport.py
get_exporter_thread
def get_exporter_thread(metric_producer, exporter, interval=None): """Get a running task that periodically exports metrics. Get a `PeriodicTask` that periodically calls: exporter.export_metrics(metric_producer.get_metrics()) :type metric_producer: :class:`opencensus.metrics.export.metric_producer.MetricProducer` :param exporter: The producer to use to get metrics to export. :type exporter: :class:`opencensus.stats.base_exporter.MetricsExporter` :param exporter: The exporter to use to export metrics. :type interval: int or float :param interval: Seconds between export calls. :rtype: :class:`PeriodicTask` :return: A running thread responsible calling the exporter. """ weak_get = utils.get_weakref(metric_producer.get_metrics) weak_export = utils.get_weakref(exporter.export_metrics) def export_all(): get = weak_get() if get is None: raise TransportError("Metric producer is not available") export = weak_export() if export is None: raise TransportError("Metric exporter is not available") export(get()) tt = MetricExporterTask(interval, export_all) tt.start() return tt
python
def get_exporter_thread(metric_producer, exporter, interval=None):
    """Get a running task that periodically exports metrics.

    Get a `PeriodicTask` that periodically calls:

        exporter.export_metrics(metric_producer.get_metrics())

    :type metric_producer:
        :class:`opencensus.metrics.export.metric_producer.MetricProducer`
    :param metric_producer: The producer to use to get metrics to export.

    :type exporter: :class:`opencensus.stats.base_exporter.MetricsExporter`
    :param exporter: The exporter to use to export metrics.

    :type interval: int or float
    :param interval: Seconds between export calls.

    :rtype: :class:`PeriodicTask`
    :return: A running thread responsible for calling the exporter.
    """
    # Hold weak references so this background task does not keep the
    # producer or exporter alive after their owners drop them.
    weak_get = utils.get_weakref(metric_producer.get_metrics)
    weak_export = utils.get_weakref(exporter.export_metrics)

    def export_all():
        # Re-resolve the weakrefs on every tick; a dead referent means
        # the owner was garbage collected and exporting must stop.
        get = weak_get()
        if get is None:
            raise TransportError("Metric producer is not available")
        export = weak_export()
        if export is None:
            raise TransportError("Metric exporter is not available")
        export(get())

    tt = MetricExporterTask(interval, export_all)
    tt.start()
    return tt
[ "def", "get_exporter_thread", "(", "metric_producer", ",", "exporter", ",", "interval", "=", "None", ")", ":", "weak_get", "=", "utils", ".", "get_weakref", "(", "metric_producer", ".", "get_metrics", ")", "weak_export", "=", "utils", ".", "get_weakref", "(", ...
Get a running task that periodically exports metrics. Get a `PeriodicTask` that periodically calls: exporter.export_metrics(metric_producer.get_metrics()) :type metric_producer: :class:`opencensus.metrics.export.metric_producer.MetricProducer` :param exporter: The producer to use to get metrics to export. :type exporter: :class:`opencensus.stats.base_exporter.MetricsExporter` :param exporter: The exporter to use to export metrics. :type interval: int or float :param interval: Seconds between export calls. :rtype: :class:`PeriodicTask` :return: A running thread responsible calling the exporter.
[ "Get", "a", "running", "task", "that", "periodically", "exports", "metrics", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/metrics/transport.py#L67-L102
226,245
census-instrumentation/opencensus-python
contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py
FlaskMiddleware._before_request
def _before_request(self): """A function to be run before each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(flask.request.url, self.blacklist_paths): return try: span_context = self.propagator.from_headers(flask.request.headers) tracer = tracer_module.Tracer( span_context=span_context, sampler=self.sampler, exporter=self.exporter, propagator=self.propagator) span = tracer.start_span() span.span_kind = span_module.SpanKind.SERVER # Set the span name as the name of the current module name span.name = '[{}]{}'.format( flask.request.method, flask.request.url) tracer.add_attribute_to_current_span( HTTP_METHOD, flask.request.method) tracer.add_attribute_to_current_span( HTTP_URL, str(flask.request.url)) execution_context.set_opencensus_attr( 'blacklist_hostnames', self.blacklist_hostnames) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True)
python
def _before_request(self): """A function to be run before each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(flask.request.url, self.blacklist_paths): return try: span_context = self.propagator.from_headers(flask.request.headers) tracer = tracer_module.Tracer( span_context=span_context, sampler=self.sampler, exporter=self.exporter, propagator=self.propagator) span = tracer.start_span() span.span_kind = span_module.SpanKind.SERVER # Set the span name as the name of the current module name span.name = '[{}]{}'.format( flask.request.method, flask.request.url) tracer.add_attribute_to_current_span( HTTP_METHOD, flask.request.method) tracer.add_attribute_to_current_span( HTTP_URL, str(flask.request.url)) execution_context.set_opencensus_attr( 'blacklist_hostnames', self.blacklist_hostnames) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True)
[ "def", "_before_request", "(", "self", ")", ":", "# Do not trace if the url is blacklisted", "if", "utils", ".", "disable_tracing_url", "(", "flask", ".", "request", ".", "url", ",", "self", ".", "blacklist_paths", ")", ":", "return", "try", ":", "span_context", ...
A function to be run before each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request
[ "A", "function", "to", "be", "run", "before", "each", "request", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py#L122-L154
226,246
census-instrumentation/opencensus-python
contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py
FlaskMiddleware._after_request
def _after_request(self, response): """A function to be run after each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(flask.request.url, self.blacklist_paths): return response try: tracer = execution_context.get_opencensus_tracer() tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(response.status_code)) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True) finally: return response
python
def _after_request(self, response): """A function to be run after each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request """ # Do not trace if the url is blacklisted if utils.disable_tracing_url(flask.request.url, self.blacklist_paths): return response try: tracer = execution_context.get_opencensus_tracer() tracer.add_attribute_to_current_span( HTTP_STATUS_CODE, str(response.status_code)) except Exception: # pragma: NO COVER log.error('Failed to trace request', exc_info=True) finally: return response
[ "def", "_after_request", "(", "self", ",", "response", ")", ":", "# Do not trace if the url is blacklisted", "if", "utils", ".", "disable_tracing_url", "(", "flask", ".", "request", ".", "url", ",", "self", ".", "blacklist_paths", ")", ":", "return", "response", ...
A function to be run after each request. See: http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request
[ "A", "function", "to", "be", "run", "after", "each", "request", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-flask/opencensus/ext/flask/flask_middleware.py#L156-L173
226,247
census-instrumentation/opencensus-python
opencensus/trace/utils.py
get_func_name
def get_func_name(func): """Return a name which includes the module name and function name.""" func_name = getattr(func, '__name__', func.__class__.__name__) module_name = func.__module__ if module_name is not None: module_name = func.__module__ return '{}.{}'.format(module_name, func_name) return func_name
python
def get_func_name(func): """Return a name which includes the module name and function name.""" func_name = getattr(func, '__name__', func.__class__.__name__) module_name = func.__module__ if module_name is not None: module_name = func.__module__ return '{}.{}'.format(module_name, func_name) return func_name
[ "def", "get_func_name", "(", "func", ")", ":", "func_name", "=", "getattr", "(", "func", ",", "'__name__'", ",", "func", ".", "__class__", ".", "__name__", ")", "module_name", "=", "func", ".", "__module__", "if", "module_name", "is", "not", "None", ":", ...
Return a name which includes the module name and function name.
[ "Return", "a", "name", "which", "includes", "the", "module", "name", "and", "function", "name", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/utils.py#L30-L39
226,248
census-instrumentation/opencensus-python
opencensus/trace/utils.py
disable_tracing_url
def disable_tracing_url(url, blacklist_paths=None): """Disable tracing on the provided blacklist paths, by default not tracing the health check request. If the url path starts with the blacklisted path, return True. :type blacklist_paths: list :param blacklist_paths: Paths that not tracing. :rtype: bool :returns: True if not tracing, False if tracing. """ if blacklist_paths is None: blacklist_paths = DEFAULT_BLACKLIST_PATHS # Remove the 'https?|ftp://' if exists url = re.sub(URL_PATTERN, '', url) # Split the url by the first '/' and get the path part url_path = url.split('/', 1)[1] for path in blacklist_paths: if url_path.startswith(path): return True return False
python
def disable_tracing_url(url, blacklist_paths=None): """Disable tracing on the provided blacklist paths, by default not tracing the health check request. If the url path starts with the blacklisted path, return True. :type blacklist_paths: list :param blacklist_paths: Paths that not tracing. :rtype: bool :returns: True if not tracing, False if tracing. """ if blacklist_paths is None: blacklist_paths = DEFAULT_BLACKLIST_PATHS # Remove the 'https?|ftp://' if exists url = re.sub(URL_PATTERN, '', url) # Split the url by the first '/' and get the path part url_path = url.split('/', 1)[1] for path in blacklist_paths: if url_path.startswith(path): return True return False
[ "def", "disable_tracing_url", "(", "url", ",", "blacklist_paths", "=", "None", ")", ":", "if", "blacklist_paths", "is", "None", ":", "blacklist_paths", "=", "DEFAULT_BLACKLIST_PATHS", "# Remove the 'https?|ftp://' if exists", "url", "=", "re", ".", "sub", "(", "URL_...
Disable tracing on the provided blacklist paths, by default not tracing the health check request. If the url path starts with the blacklisted path, return True. :type blacklist_paths: list :param blacklist_paths: Paths that not tracing. :rtype: bool :returns: True if not tracing, False if tracing.
[ "Disable", "tracing", "on", "the", "provided", "blacklist", "paths", "by", "default", "not", "tracing", "the", "health", "check", "request", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/utils.py#L42-L67
226,249
census-instrumentation/opencensus-python
opencensus/trace/utils.py
disable_tracing_hostname
def disable_tracing_hostname(url, blacklist_hostnames=None): """Disable tracing for the provided blacklist URLs, by default not tracing the exporter url. If the url path starts with the blacklisted path, return True. :type blacklist_hostnames: list :param blacklist_hostnames: URL that not tracing. :rtype: bool :returns: True if not tracing, False if tracing. """ if blacklist_hostnames is None: # Exporter host_name are not traced by default _tracer = execution_context.get_opencensus_tracer() try: blacklist_hostnames = [ '{}:{}'.format( _tracer.exporter.host_name, _tracer.exporter.port ) ] except(AttributeError): blacklist_hostnames = [] return url in blacklist_hostnames
python
def disable_tracing_hostname(url, blacklist_hostnames=None): """Disable tracing for the provided blacklist URLs, by default not tracing the exporter url. If the url path starts with the blacklisted path, return True. :type blacklist_hostnames: list :param blacklist_hostnames: URL that not tracing. :rtype: bool :returns: True if not tracing, False if tracing. """ if blacklist_hostnames is None: # Exporter host_name are not traced by default _tracer = execution_context.get_opencensus_tracer() try: blacklist_hostnames = [ '{}:{}'.format( _tracer.exporter.host_name, _tracer.exporter.port ) ] except(AttributeError): blacklist_hostnames = [] return url in blacklist_hostnames
[ "def", "disable_tracing_hostname", "(", "url", ",", "blacklist_hostnames", "=", "None", ")", ":", "if", "blacklist_hostnames", "is", "None", ":", "# Exporter host_name are not traced by default", "_tracer", "=", "execution_context", ".", "get_opencensus_tracer", "(", ")",...
Disable tracing for the provided blacklist URLs, by default not tracing the exporter url. If the url path starts with the blacklisted path, return True. :type blacklist_hostnames: list :param blacklist_hostnames: URL that not tracing. :rtype: bool :returns: True if not tracing, False if tracing.
[ "Disable", "tracing", "for", "the", "provided", "blacklist", "URLs", "by", "default", "not", "tracing", "the", "exporter", "url", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/utils.py#L70-L95
226,250
census-instrumentation/opencensus-python
opencensus/trace/trace_options.py
TraceOptions.set_enabled
def set_enabled(self, enabled): """Update the last bit of the trace options byte str. :type enabled: bool :param enabled: Whether enable tracing in this span context or not. """ enabled_bit = '1' if enabled else '0' self.trace_options_byte = str( self.trace_options_byte)[:-1] + enabled_bit self.enabled = self.get_enabled
python
def set_enabled(self, enabled): """Update the last bit of the trace options byte str. :type enabled: bool :param enabled: Whether enable tracing in this span context or not. """ enabled_bit = '1' if enabled else '0' self.trace_options_byte = str( self.trace_options_byte)[:-1] + enabled_bit self.enabled = self.get_enabled
[ "def", "set_enabled", "(", "self", ",", "enabled", ")", ":", "enabled_bit", "=", "'1'", "if", "enabled", "else", "'0'", "self", ".", "trace_options_byte", "=", "str", "(", "self", ".", "trace_options_byte", ")", "[", ":", "-", "1", "]", "+", "enabled_bit...
Update the last bit of the trace options byte str. :type enabled: bool :param enabled: Whether enable tracing in this span context or not.
[ "Update", "the", "last", "bit", "of", "the", "trace", "options", "byte", "str", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/trace_options.py#L70-L79
226,251
census-instrumentation/opencensus-python
opencensus/common/monitored_resource/k8s_utils.py
get_k8s_metadata
def get_k8s_metadata(): """Get kubernetes container metadata, as on GCP GKE.""" k8s_metadata = {} gcp_cluster = (gcp_metadata_config.GcpMetadataConfig .get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY)) if gcp_cluster is not None: k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster for attribute_key, attribute_env in _K8S_ENV_ATTRIBUTES.items(): attribute_value = os.environ.get(attribute_env) if attribute_value is not None: k8s_metadata[attribute_key] = attribute_value return k8s_metadata
python
def get_k8s_metadata(): """Get kubernetes container metadata, as on GCP GKE.""" k8s_metadata = {} gcp_cluster = (gcp_metadata_config.GcpMetadataConfig .get_attribute(gcp_metadata_config.CLUSTER_NAME_KEY)) if gcp_cluster is not None: k8s_metadata[CLUSTER_NAME_KEY] = gcp_cluster for attribute_key, attribute_env in _K8S_ENV_ATTRIBUTES.items(): attribute_value = os.environ.get(attribute_env) if attribute_value is not None: k8s_metadata[attribute_key] = attribute_value return k8s_metadata
[ "def", "get_k8s_metadata", "(", ")", ":", "k8s_metadata", "=", "{", "}", "gcp_cluster", "=", "(", "gcp_metadata_config", ".", "GcpMetadataConfig", ".", "get_attribute", "(", "gcp_metadata_config", ".", "CLUSTER_NAME_KEY", ")", ")", "if", "gcp_cluster", "is", "not"...
Get kubernetes container metadata, as on GCP GKE.
[ "Get", "kubernetes", "container", "metadata", "as", "on", "GCP", "GKE", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/common/monitored_resource/k8s_utils.py#L50-L64
226,252
census-instrumentation/opencensus-python
opencensus/tags/tag_map.py
TagMap.insert
def insert(self, key, value): """Inserts a key and value in the map if the map does not already contain the key. :type key: :class: '~opencensus.tags.tag_key.TagKey' :param key: a tag key to insert into the map :type value: :class: '~opencensus.tags.tag_value.TagValue' :param value: a tag value that is associated with the tag key and the value to insert into the tag map """ if key in self.map: return try: tag_key = TagKey(key) tag_val = TagValue(value) self.map[tag_key] = tag_val except ValueError: raise
python
def insert(self, key, value): """Inserts a key and value in the map if the map does not already contain the key. :type key: :class: '~opencensus.tags.tag_key.TagKey' :param key: a tag key to insert into the map :type value: :class: '~opencensus.tags.tag_value.TagValue' :param value: a tag value that is associated with the tag key and the value to insert into the tag map """ if key in self.map: return try: tag_key = TagKey(key) tag_val = TagValue(value) self.map[tag_key] = tag_val except ValueError: raise
[ "def", "insert", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "self", ".", "map", ":", "return", "try", ":", "tag_key", "=", "TagKey", "(", "key", ")", "tag_val", "=", "TagValue", "(", "value", ")", "self", ".", "map", "[",...
Inserts a key and value in the map if the map does not already contain the key. :type key: :class: '~opencensus.tags.tag_key.TagKey' :param key: a tag key to insert into the map :type value: :class: '~opencensus.tags.tag_value.TagValue' :param value: a tag value that is associated with the tag key and the value to insert into the tag map
[ "Inserts", "a", "key", "and", "value", "in", "the", "map", "if", "the", "map", "does", "not", "already", "contain", "the", "key", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/tags/tag_map.py#L35-L55
226,253
census-instrumentation/opencensus-python
opencensus/tags/tag_map.py
TagMap.update
def update(self, key, value): """Updates the map by updating the value of a key :type key: :class: '~opencensus.tags.tag_key.TagKey' :param key: A tag key to be updated :type value: :class: '~opencensus.tags.tag_value.TagValue' :param value: The value to update the key to in the map """ if key in self.map: self.map[key] = value
python
def update(self, key, value): """Updates the map by updating the value of a key :type key: :class: '~opencensus.tags.tag_key.TagKey' :param key: A tag key to be updated :type value: :class: '~opencensus.tags.tag_value.TagValue' :param value: The value to update the key to in the map """ if key in self.map: self.map[key] = value
[ "def", "update", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "self", ".", "map", ":", "self", ".", "map", "[", "key", "]", "=", "value" ]
Updates the map by updating the value of a key :type key: :class: '~opencensus.tags.tag_key.TagKey' :param key: A tag key to be updated :type value: :class: '~opencensus.tags.tag_value.TagValue' :param value: The value to update the key to in the map
[ "Updates", "the", "map", "by", "updating", "the", "value", "of", "a", "key" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/tags/tag_map.py#L68-L79
226,254
census-instrumentation/opencensus-python
contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py
trace_integration
def trace_integration(tracer=None): """Wrap threading functions to trace.""" log.info("Integrated module: {}".format(MODULE_NAME)) # Wrap the threading start function start_func = getattr(threading.Thread, "start") setattr( threading.Thread, start_func.__name__, wrap_threading_start(start_func) ) # Wrap the threading run function run_func = getattr(threading.Thread, "run") setattr(threading.Thread, run_func.__name__, wrap_threading_run(run_func)) # Wrap the threading run function apply_async_func = getattr(pool.Pool, "apply_async") setattr( pool.Pool, apply_async_func.__name__, wrap_apply_async(apply_async_func), ) # Wrap the threading run function submit_func = getattr(futures.ThreadPoolExecutor, "submit") setattr( futures.ThreadPoolExecutor, submit_func.__name__, wrap_submit(submit_func), )
python
def trace_integration(tracer=None): """Wrap threading functions to trace.""" log.info("Integrated module: {}".format(MODULE_NAME)) # Wrap the threading start function start_func = getattr(threading.Thread, "start") setattr( threading.Thread, start_func.__name__, wrap_threading_start(start_func) ) # Wrap the threading run function run_func = getattr(threading.Thread, "run") setattr(threading.Thread, run_func.__name__, wrap_threading_run(run_func)) # Wrap the threading run function apply_async_func = getattr(pool.Pool, "apply_async") setattr( pool.Pool, apply_async_func.__name__, wrap_apply_async(apply_async_func), ) # Wrap the threading run function submit_func = getattr(futures.ThreadPoolExecutor, "submit") setattr( futures.ThreadPoolExecutor, submit_func.__name__, wrap_submit(submit_func), )
[ "def", "trace_integration", "(", "tracer", "=", "None", ")", ":", "log", ".", "info", "(", "\"Integrated module: {}\"", ".", "format", "(", "MODULE_NAME", ")", ")", "# Wrap the threading start function", "start_func", "=", "getattr", "(", "threading", ".", "Thread...
Wrap threading functions to trace.
[ "Wrap", "threading", "functions", "to", "trace", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py#L29-L56
226,255
census-instrumentation/opencensus-python
contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py
wrap_threading_start
def wrap_threading_start(start_func): """Wrap the start function from thread. Put the tracer informations in the threading object. """ def call(self): self._opencensus_context = ( execution_context.get_opencensus_full_context() ) return start_func(self) return call
python
def wrap_threading_start(start_func): """Wrap the start function from thread. Put the tracer informations in the threading object. """ def call(self): self._opencensus_context = ( execution_context.get_opencensus_full_context() ) return start_func(self) return call
[ "def", "wrap_threading_start", "(", "start_func", ")", ":", "def", "call", "(", "self", ")", ":", "self", ".", "_opencensus_context", "=", "(", "execution_context", ".", "get_opencensus_full_context", "(", ")", ")", "return", "start_func", "(", "self", ")", "r...
Wrap the start function from thread. Put the tracer informations in the threading object.
[ "Wrap", "the", "start", "function", "from", "thread", ".", "Put", "the", "tracer", "informations", "in", "the", "threading", "object", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py#L59-L70
226,256
census-instrumentation/opencensus-python
contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py
wrap_threading_run
def wrap_threading_run(run_func): """Wrap the run function from thread. Get the tracer informations from the threading object and set it as current tracer. """ def call(self): execution_context.set_opencensus_full_context( *self._opencensus_context ) return run_func(self) return call
python
def wrap_threading_run(run_func): """Wrap the run function from thread. Get the tracer informations from the threading object and set it as current tracer. """ def call(self): execution_context.set_opencensus_full_context( *self._opencensus_context ) return run_func(self) return call
[ "def", "wrap_threading_run", "(", "run_func", ")", ":", "def", "call", "(", "self", ")", ":", "execution_context", ".", "set_opencensus_full_context", "(", "*", "self", ".", "_opencensus_context", ")", "return", "run_func", "(", "self", ")", "return", "call" ]
Wrap the run function from thread. Get the tracer informations from the threading object and set it as current tracer.
[ "Wrap", "the", "run", "function", "from", "thread", ".", "Get", "the", "tracer", "informations", "from", "the", "threading", "object", "and", "set", "it", "as", "current", "tracer", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-threading/opencensus/ext/threading/trace.py#L73-L84
226,257
census-instrumentation/opencensus-python
opencensus/trace/attributes.py
Attributes.format_attributes_json
def format_attributes_json(self): """Convert the Attributes object to json format.""" attributes_json = {} for key, value in self.attributes.items(): key = utils.check_str_length(key)[0] value = _format_attribute_value(value) if value is not None: attributes_json[key] = value result = { 'attributeMap': attributes_json } return result
python
def format_attributes_json(self): """Convert the Attributes object to json format.""" attributes_json = {} for key, value in self.attributes.items(): key = utils.check_str_length(key)[0] value = _format_attribute_value(value) if value is not None: attributes_json[key] = value result = { 'attributeMap': attributes_json } return result
[ "def", "format_attributes_json", "(", "self", ")", ":", "attributes_json", "=", "{", "}", "for", "key", ",", "value", "in", "self", ".", "attributes", ".", "items", "(", ")", ":", "key", "=", "utils", ".", "check_str_length", "(", "key", ")", "[", "0",...
Convert the Attributes object to json format.
[ "Convert", "the", "Attributes", "object", "to", "json", "format", "." ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/attributes.py#L59-L74
226,258
census-instrumentation/opencensus-python
opencensus/stats/aggregation_data.py
DistributionAggregationData.add_sample
def add_sample(self, value, timestamp, attachments): """Adding a sample to Distribution Aggregation Data""" self._count_data += 1 bucket = self.increment_bucket_count(value) if attachments is not None and self.exemplars is not None: self.exemplars[bucket] = Exemplar(value, timestamp, attachments) if self.count_data == 1: self._mean_data = value return old_mean = self._mean_data self._mean_data = self._mean_data + ( (value - self._mean_data) / self._count_data) self._sum_of_sqd_deviations = self._sum_of_sqd_deviations + ( (value - old_mean) * (value - self._mean_data))
python
def add_sample(self, value, timestamp, attachments): """Adding a sample to Distribution Aggregation Data""" self._count_data += 1 bucket = self.increment_bucket_count(value) if attachments is not None and self.exemplars is not None: self.exemplars[bucket] = Exemplar(value, timestamp, attachments) if self.count_data == 1: self._mean_data = value return old_mean = self._mean_data self._mean_data = self._mean_data + ( (value - self._mean_data) / self._count_data) self._sum_of_sqd_deviations = self._sum_of_sqd_deviations + ( (value - old_mean) * (value - self._mean_data))
[ "def", "add_sample", "(", "self", ",", "value", ",", "timestamp", ",", "attachments", ")", ":", "self", ".", "_count_data", "+=", "1", "bucket", "=", "self", ".", "increment_bucket_count", "(", "value", ")", "if", "attachments", "is", "not", "None", "and",...
Adding a sample to Distribution Aggregation Data
[ "Adding", "a", "sample", "to", "Distribution", "Aggregation", "Data" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/aggregation_data.py#L250-L265
226,259
census-instrumentation/opencensus-python
opencensus/stats/aggregation_data.py
DistributionAggregationData.increment_bucket_count
def increment_bucket_count(self, value): """Increment the bucket count based on a given value from the user""" if len(self._bounds) == 0: self._counts_per_bucket[0] += 1 return 0 for ii, bb in enumerate(self._bounds): if value < bb: self._counts_per_bucket[ii] += 1 return ii else: last_bucket_index = len(self._bounds) self._counts_per_bucket[last_bucket_index] += 1 return last_bucket_index
python
def increment_bucket_count(self, value): """Increment the bucket count based on a given value from the user""" if len(self._bounds) == 0: self._counts_per_bucket[0] += 1 return 0 for ii, bb in enumerate(self._bounds): if value < bb: self._counts_per_bucket[ii] += 1 return ii else: last_bucket_index = len(self._bounds) self._counts_per_bucket[last_bucket_index] += 1 return last_bucket_index
[ "def", "increment_bucket_count", "(", "self", ",", "value", ")", ":", "if", "len", "(", "self", ".", "_bounds", ")", "==", "0", ":", "self", ".", "_counts_per_bucket", "[", "0", "]", "+=", "1", "return", "0", "for", "ii", ",", "bb", "in", "enumerate"...
Increment the bucket count based on a given value from the user
[ "Increment", "the", "bucket", "count", "based", "on", "a", "given", "value", "from", "the", "user" ]
992b223f7e34c5dcb65922b7d5c827e7a1351e7d
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/stats/aggregation_data.py#L267-L280
226,260
fhamborg/news-please
newsplease/single_crawler.py
SingleCrawler.load_crawler
def load_crawler(self, crawler, url, ignore_regex): """ Loads the given crawler with the given url. :param class crawler: class of the crawler to load :param str url: url to start the crawler with :param regex ignore_regex: to be able to ignore urls that match this regex code """ self.process = CrawlerProcess(self.cfg.get_scrapy_options()) self.process.crawl( crawler, self.helper, url=url, config=self.cfg, ignore_regex=ignore_regex)
python
def load_crawler(self, crawler, url, ignore_regex): """ Loads the given crawler with the given url. :param class crawler: class of the crawler to load :param str url: url to start the crawler with :param regex ignore_regex: to be able to ignore urls that match this regex code """ self.process = CrawlerProcess(self.cfg.get_scrapy_options()) self.process.crawl( crawler, self.helper, url=url, config=self.cfg, ignore_regex=ignore_regex)
[ "def", "load_crawler", "(", "self", ",", "crawler", ",", "url", ",", "ignore_regex", ")", ":", "self", ".", "process", "=", "CrawlerProcess", "(", "self", ".", "cfg", ".", "get_scrapy_options", "(", ")", ")", "self", ".", "process", ".", "crawl", "(", ...
Loads the given crawler with the given url. :param class crawler: class of the crawler to load :param str url: url to start the crawler with :param regex ignore_regex: to be able to ignore urls that match this regex code
[ "Loads", "the", "given", "crawler", "with", "the", "given", "url", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/single_crawler.py#L224-L239
226,261
fhamborg/news-please
newsplease/pipeline/extractor/extractors/lang_detect_extractor.py
LangExtractor._language
def _language(self, item): """Returns the language of the extracted article by analyzing metatags and inspecting the visible text with langdetect""" response = item['spider_response'].body root = html.fromstring(response) # Check for lang-attributes lang = root.get('lang') if lang is None: lang = root.get('xml:lang') # Check for general meta tags if lang is None: meta = root.cssselect('meta[name="language"]') if len(meta) > 0: lang = meta[0].get('content') # Check for open graph tags if lang is None: meta = root.cssselect('meta[property="og:locale"]') if len(meta) > 0: lang = meta[0].get('content') # Look for <article> elements and inspect the one with the largest payload with langdetect if lang is None: article_list = [] for article in root.xpath('//article'): article_list.append(re.sub(r'\s+', ' ', article.text_content().strip())) if len(article_list) > 0: lang = detect(max(article_list)) # Analyze the whole body with langdetect if lang is None: try: lang = detect(root.text_content().strip()) except LangDetectException: pass # Try to normalize output if lang is not None: # First search for suitable locale in the original output matches = self.langcode_pattern.search(lang) if matches is not None: lang = matches.group(0) else: # If no match was found, normalize the original output and search again normalized = locale.normalize(re.split(r'\s|;|,', lang.strip())[0]) matches = self.langcode_pattern.search(normalized) if matches is not None: lang = matches.group(0) return lang
python
def _language(self, item): """Returns the language of the extracted article by analyzing metatags and inspecting the visible text with langdetect""" response = item['spider_response'].body root = html.fromstring(response) # Check for lang-attributes lang = root.get('lang') if lang is None: lang = root.get('xml:lang') # Check for general meta tags if lang is None: meta = root.cssselect('meta[name="language"]') if len(meta) > 0: lang = meta[0].get('content') # Check for open graph tags if lang is None: meta = root.cssselect('meta[property="og:locale"]') if len(meta) > 0: lang = meta[0].get('content') # Look for <article> elements and inspect the one with the largest payload with langdetect if lang is None: article_list = [] for article in root.xpath('//article'): article_list.append(re.sub(r'\s+', ' ', article.text_content().strip())) if len(article_list) > 0: lang = detect(max(article_list)) # Analyze the whole body with langdetect if lang is None: try: lang = detect(root.text_content().strip()) except LangDetectException: pass # Try to normalize output if lang is not None: # First search for suitable locale in the original output matches = self.langcode_pattern.search(lang) if matches is not None: lang = matches.group(0) else: # If no match was found, normalize the original output and search again normalized = locale.normalize(re.split(r'\s|;|,', lang.strip())[0]) matches = self.langcode_pattern.search(normalized) if matches is not None: lang = matches.group(0) return lang
[ "def", "_language", "(", "self", ",", "item", ")", ":", "response", "=", "item", "[", "'spider_response'", "]", ".", "body", "root", "=", "html", ".", "fromstring", "(", "response", ")", "# Check for lang-attributes", "lang", "=", "root", ".", "get", "(", ...
Returns the language of the extracted article by analyzing metatags and inspecting the visible text with langdetect
[ "Returns", "the", "language", "of", "the", "extracted", "article", "by", "analyzing", "metatags", "and", "inspecting", "the", "visible", "text", "with", "langdetect" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/lang_detect_extractor.py#L21-L74
226,262
fhamborg/news-please
newsplease/crawler/spiders/download_crawler.py
Download.parse
def parse(self, response): """ Passes the response to the pipeline. :param obj response: The scrapy response """ if not self.helper.parse_crawler.content_type(response): return yield self.helper.parse_crawler.pass_to_pipeline( response, self.helper.url_extractor.get_allowed_domain(response.url) )
python
def parse(self, response): """ Passes the response to the pipeline. :param obj response: The scrapy response """ if not self.helper.parse_crawler.content_type(response): return yield self.helper.parse_crawler.pass_to_pipeline( response, self.helper.url_extractor.get_allowed_domain(response.url) )
[ "def", "parse", "(", "self", ",", "response", ")", ":", "if", "not", "self", ".", "helper", ".", "parse_crawler", ".", "content_type", "(", "response", ")", ":", "return", "yield", "self", ".", "helper", ".", "parse_crawler", ".", "pass_to_pipeline", "(", ...
Passes the response to the pipeline. :param obj response: The scrapy response
[ "Passes", "the", "response", "to", "the", "pipeline", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/download_crawler.py#L28-L40
226,263
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_Language.py
ComparerLanguage.extract
def extract(self, item, list_article_candidate): """Compares how often any language was detected. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the language which was most frequently detected """ # Save extracted languages in list languages_extracted = [] # Save the extracted language of newspaper in extra variable, because newspaper extract meta-language # which is very accurate. language_newspaper = None for article_candidate in list_article_candidate: if article_candidate.language is not None: languages_extracted.append(article_candidate.language) if article_candidate.extractor == "newspaper": language_newspaper = article_candidate.language if not languages_extracted: return None # Create a set of the extracted languages, so every lang appears once languages_extracted_set = set(languages_extracted) # Count how often every language has been extracted languages_extracted_number = [] for language in languages_extracted_set: languages_extracted_number.append((languages_extracted.count(language), language)) if not (languages_extracted_number): return None # If there is no favorite language, return the language extracted by newspaper if max(languages_extracted_number)[0] == min(languages_extracted_number)[0] and language_newspaper is not None: return language_newspaper if languages_extracted_number: return (max(languages_extracted_number))[1] else: return None
python
def extract(self, item, list_article_candidate): """Compares how often any language was detected. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the language which was most frequently detected """ # Save extracted languages in list languages_extracted = [] # Save the extracted language of newspaper in extra variable, because newspaper extract meta-language # which is very accurate. language_newspaper = None for article_candidate in list_article_candidate: if article_candidate.language is not None: languages_extracted.append(article_candidate.language) if article_candidate.extractor == "newspaper": language_newspaper = article_candidate.language if not languages_extracted: return None # Create a set of the extracted languages, so every lang appears once languages_extracted_set = set(languages_extracted) # Count how often every language has been extracted languages_extracted_number = [] for language in languages_extracted_set: languages_extracted_number.append((languages_extracted.count(language), language)) if not (languages_extracted_number): return None # If there is no favorite language, return the language extracted by newspaper if max(languages_extracted_number)[0] == min(languages_extracted_number)[0] and language_newspaper is not None: return language_newspaper if languages_extracted_number: return (max(languages_extracted_number))[1] else: return None
[ "def", "extract", "(", "self", ",", "item", ",", "list_article_candidate", ")", ":", "# Save extracted languages in list", "languages_extracted", "=", "[", "]", "# Save the extracted language of newspaper in extra variable, because newspaper extract meta-language", "# which is very a...
Compares how often any language was detected. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the language which was most frequently detected
[ "Compares", "how", "often", "any", "language", "was", "detected", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_Language.py#L4-L49
226,264
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_date.py
ComparerDate.extract
def extract(self, item, list_article_candidate): """Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date """ list_publish_date = [] for article_candidate in list_article_candidate: if article_candidate.publish_date != None: list_publish_date.append((article_candidate.publish_date, article_candidate.extractor)) # If there is no value in the list, return None. if len(list_publish_date) == 0: return None # If there are more options than one, return the result from date_extractor. list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"] if len(list_date_extractor) == 0: # If there is no date extracted by date_extractor, return the first result of list_publish_date. return list_publish_date[0][0] else: return list_date_extractor[0][0]
python
def extract(self, item, list_article_candidate): """Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date """ list_publish_date = [] for article_candidate in list_article_candidate: if article_candidate.publish_date != None: list_publish_date.append((article_candidate.publish_date, article_candidate.extractor)) # If there is no value in the list, return None. if len(list_publish_date) == 0: return None # If there are more options than one, return the result from date_extractor. list_date_extractor = [x for x in list_publish_date if x[1] == "date_extractor"] if len(list_date_extractor) == 0: # If there is no date extracted by date_extractor, return the first result of list_publish_date. return list_publish_date[0][0] else: return list_date_extractor[0][0]
[ "def", "extract", "(", "self", ",", "item", ",", "list_article_candidate", ")", ":", "list_publish_date", "=", "[", "]", "for", "article_candidate", "in", "list_article_candidate", ":", "if", "article_candidate", ".", "publish_date", "!=", "None", ":", "list_publi...
Compares the extracted publish dates. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely publish date
[ "Compares", "the", "extracted", "publish", "dates", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_date.py#L4-L28
226,265
fhamborg/news-please
newsplease/helper_classes/heuristics.py
Heuristics.meta_contains_article_keyword
def meta_contains_article_keyword(self, response, site_dict): """ Determines wether the response's meta data contains the keyword 'article' :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines wether the reponse's meta data contains the keyword 'article' """ contains_meta = response.xpath('//meta') \ .re('(= ?["\'][^"\']*article[^"\']*["\'])') if not contains_meta: return False return True
python
def meta_contains_article_keyword(self, response, site_dict): """ Determines wether the response's meta data contains the keyword 'article' :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines wether the reponse's meta data contains the keyword 'article' """ contains_meta = response.xpath('//meta') \ .re('(= ?["\'][^"\']*article[^"\']*["\'])') if not contains_meta: return False return True
[ "def", "meta_contains_article_keyword", "(", "self", ",", "response", ",", "site_dict", ")", ":", "contains_meta", "=", "response", ".", "xpath", "(", "'//meta'", ")", ".", "re", "(", "'(= ?[\"\\'][^\"\\']*article[^\"\\']*[\"\\'])'", ")", "if", "not", "contains_meta...
Determines wether the response's meta data contains the keyword 'article' :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines wether the reponse's meta data contains the keyword 'article'
[ "Determines", "wether", "the", "response", "s", "meta", "data", "contains", "the", "keyword", "article" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L36-L52
226,266
fhamborg/news-please
newsplease/helper_classes/heuristics.py
Heuristics.linked_headlines
def linked_headlines(self, response, site_dict, check_self=False): """ Checks how many of the headlines on the site contain links. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :param bool check_self: Check headlines/ headlines_containing_link_to_same_domain instead of headline/headline_containing_link :return float: ratio headlines/headlines_containing_link """ h_all = 0 h_linked = 0 domain = UrlExtractor.get_allowed_domain(site_dict["url"], False) # This regex checks, if a link containing site_domain as domain # is contained in a string. site_regex = r"href=[\"'][^\/]*\/\/(?:[^\"']*\.|)%s[\"'\/]" % domain for i in range(1, 7): for headline in response.xpath('//h%s' % i).extract(): h_all += 1 if "href" in headline and ( not check_self or re.search(site_regex, headline) is not None): h_linked += 1 self.log.debug("Linked headlines test: headlines = %s, linked = %s", h_all, h_linked) min_headlines = self.cfg_heuristics["min_headlines_for_linked_test"] if min_headlines > h_all: self.log.debug("Linked headlines test: Not enough headlines " "(%s < %s): Passing!", h_all, min_headlines) return True return float(h_linked) / float(h_all)
python
def linked_headlines(self, response, site_dict, check_self=False): """ Checks how many of the headlines on the site contain links. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :param bool check_self: Check headlines/ headlines_containing_link_to_same_domain instead of headline/headline_containing_link :return float: ratio headlines/headlines_containing_link """ h_all = 0 h_linked = 0 domain = UrlExtractor.get_allowed_domain(site_dict["url"], False) # This regex checks, if a link containing site_domain as domain # is contained in a string. site_regex = r"href=[\"'][^\/]*\/\/(?:[^\"']*\.|)%s[\"'\/]" % domain for i in range(1, 7): for headline in response.xpath('//h%s' % i).extract(): h_all += 1 if "href" in headline and ( not check_self or re.search(site_regex, headline) is not None): h_linked += 1 self.log.debug("Linked headlines test: headlines = %s, linked = %s", h_all, h_linked) min_headlines = self.cfg_heuristics["min_headlines_for_linked_test"] if min_headlines > h_all: self.log.debug("Linked headlines test: Not enough headlines " "(%s < %s): Passing!", h_all, min_headlines) return True return float(h_linked) / float(h_all)
[ "def", "linked_headlines", "(", "self", ",", "response", ",", "site_dict", ",", "check_self", "=", "False", ")", ":", "h_all", "=", "0", "h_linked", "=", "0", "domain", "=", "UrlExtractor", ".", "get_allowed_domain", "(", "site_dict", "[", "\"url\"", "]", ...
Checks how many of the headlines on the site contain links. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :param bool check_self: Check headlines/ headlines_containing_link_to_same_domain instead of headline/headline_containing_link :return float: ratio headlines/headlines_containing_link
[ "Checks", "how", "many", "of", "the", "headlines", "on", "the", "site", "contain", "links", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L73-L109
226,267
fhamborg/news-please
newsplease/helper_classes/heuristics.py
Heuristics.is_not_from_subdomain
def is_not_from_subdomain(self, response, site_dict): """ Ensures the response's url isn't from a subdomain. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines if the response's url is from a subdomain """ root_url = re.sub(re_url_root, '', site_dict["url"]) return UrlExtractor.get_allowed_domain(response.url) == root_url
python
def is_not_from_subdomain(self, response, site_dict): """ Ensures the response's url isn't from a subdomain. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines if the response's url is from a subdomain """ root_url = re.sub(re_url_root, '', site_dict["url"]) return UrlExtractor.get_allowed_domain(response.url) == root_url
[ "def", "is_not_from_subdomain", "(", "self", ",", "response", ",", "site_dict", ")", ":", "root_url", "=", "re", ".", "sub", "(", "re_url_root", ",", "''", ",", "site_dict", "[", "\"url\"", "]", ")", "return", "UrlExtractor", ".", "get_allowed_domain", "(", ...
Ensures the response's url isn't from a subdomain. :param obj response: The scrapy response :param dict site_dict: The site object from the JSON-File :return bool: Determines if the response's url is from a subdomain
[ "Ensures", "the", "response", "s", "url", "isn", "t", "from", "a", "subdomain", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/heuristics.py#L122-L133
226,268
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_description.py
ComparerDescription.extract
def extract(self, item, list_article_candidate): """Compares the extracted descriptions. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely description """ list_description = [] """ The descriptions of the article candidates and the respective extractors are saved in a tuple in list_description. """ for article_candidate in list_article_candidate: if article_candidate.description != None: list_description.append((article_candidate.description, article_candidate.extractor)) # If there is no value in the list, return None. if len(list_description) == 0: return None # If there are more options than one, return the result from newspaper. list_newspaper = [x for x in list_description if x[1] == "newspaper"] if len(list_newspaper) == 0: # If there is no description extracted by newspaper, return the first result of list_description. return list_description[0][0] else: return list_newspaper[0][0]
python
def extract(self, item, list_article_candidate): """Compares the extracted descriptions. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely description """ list_description = [] """ The descriptions of the article candidates and the respective extractors are saved in a tuple in list_description. """ for article_candidate in list_article_candidate: if article_candidate.description != None: list_description.append((article_candidate.description, article_candidate.extractor)) # If there is no value in the list, return None. if len(list_description) == 0: return None # If there are more options than one, return the result from newspaper. list_newspaper = [x for x in list_description if x[1] == "newspaper"] if len(list_newspaper) == 0: # If there is no description extracted by newspaper, return the first result of list_description. return list_description[0][0] else: return list_newspaper[0][0]
[ "def", "extract", "(", "self", ",", "item", ",", "list_article_candidate", ")", ":", "list_description", "=", "[", "]", "\"\"\" The descriptions of the article candidates and the respective extractors are saved\n in a tuple in list_description.\n \"\"\"", "for", "articl...
Compares the extracted descriptions. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely description
[ "Compares", "the", "extracted", "descriptions", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_description.py#L6-L33
226,269
fhamborg/news-please
newsplease/pipeline/pipelines.py
PandasStorage.close_spider
def close_spider(self, _spider): """ Write out to file """ self.df['date_download'] = pd.to_datetime( self.df['date_download'], errors='coerce', infer_datetime_format=True ) self.df['date_modify'] = pd.to_datetime( self.df['date_modify'], errors='coerce', infer_datetime_format=True ) self.df['date_publish'] = pd.to_datetime( self.df['date_publish'], errors='coerce', infer_datetime_format=True ) self.df.to_pickle(self.full_path) self.log.info("Wrote to Pandas to %s", self.full_path)
python
def close_spider(self, _spider): """ Write out to file """ self.df['date_download'] = pd.to_datetime( self.df['date_download'], errors='coerce', infer_datetime_format=True ) self.df['date_modify'] = pd.to_datetime( self.df['date_modify'], errors='coerce', infer_datetime_format=True ) self.df['date_publish'] = pd.to_datetime( self.df['date_publish'], errors='coerce', infer_datetime_format=True ) self.df.to_pickle(self.full_path) self.log.info("Wrote to Pandas to %s", self.full_path)
[ "def", "close_spider", "(", "self", ",", "_spider", ")", ":", "self", ".", "df", "[", "'date_download'", "]", "=", "pd", ".", "to_datetime", "(", "self", ".", "df", "[", "'date_download'", "]", ",", "errors", "=", "'coerce'", ",", "infer_datetime_format", ...
Write out to file
[ "Write", "out", "to", "file" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/pipelines.py#L643-L657
226,270
fhamborg/news-please
newsplease/pipeline/extractor/extractors/date_extractor.py
DateExtractor._publish_date
def _publish_date(self, item): """Returns the publish_date of the extracted article.""" url = item['url'] html = deepcopy(item['spider_response'].body) publish_date = None try: if html is None: request = urllib2.Request(url) # Using a browser user agent, decreases the change of sites blocking this request - just a suggestion # request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) # Chrome/41.0.2228.0 Safari/537.36') html = urllib2.build_opener().open(request).read() html = BeautifulSoup(html, "lxml") publish_date = self._extract_from_json(html) if publish_date is None: publish_date = self._extract_from_meta(html) if publish_date is None: publish_date = self._extract_from_html_tag(html) if publish_date is None: publish_date = self._extract_from_url(url) except Exception as e: # print(e.message, e.args) pass return publish_date
python
def _publish_date(self, item): """Returns the publish_date of the extracted article.""" url = item['url'] html = deepcopy(item['spider_response'].body) publish_date = None try: if html is None: request = urllib2.Request(url) # Using a browser user agent, decreases the change of sites blocking this request - just a suggestion # request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) # Chrome/41.0.2228.0 Safari/537.36') html = urllib2.build_opener().open(request).read() html = BeautifulSoup(html, "lxml") publish_date = self._extract_from_json(html) if publish_date is None: publish_date = self._extract_from_meta(html) if publish_date is None: publish_date = self._extract_from_html_tag(html) if publish_date is None: publish_date = self._extract_from_url(url) except Exception as e: # print(e.message, e.args) pass return publish_date
[ "def", "_publish_date", "(", "self", ",", "item", ")", ":", "url", "=", "item", "[", "'url'", "]", "html", "=", "deepcopy", "(", "item", "[", "'spider_response'", "]", ".", "body", ")", "publish_date", "=", "None", "try", ":", "if", "html", "is", "No...
Returns the publish_date of the extracted article.
[ "Returns", "the", "publish_date", "of", "the", "extracted", "article", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/date_extractor.py#L30-L58
226,271
fhamborg/news-please
newsplease/pipeline/extractor/extractors/date_extractor.py
DateExtractor._extract_from_url
def _extract_from_url(self, url): """Try to extract from the article URL - simple but might work as a fallback""" # Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py m = re.search(re_pub_date, url) if m: return self.parse_date_str(m.group(0)) return None
python
def _extract_from_url(self, url): """Try to extract from the article URL - simple but might work as a fallback""" # Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py m = re.search(re_pub_date, url) if m: return self.parse_date_str(m.group(0)) return None
[ "def", "_extract_from_url", "(", "self", ",", "url", ")", ":", "# Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py", "m", "=", "re", ".", "search", "(", "re_pub_date", ",", "url", ")", "if", "m", ":", "return", "self", ".", ...
Try to extract from the article URL - simple but might work as a fallback
[ "Try", "to", "extract", "from", "the", "article", "URL", "-", "simple", "but", "might", "work", "as", "a", "fallback" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/extractors/date_extractor.py#L67-L74
226,272
fhamborg/news-please
newsplease/pipeline/extractor/cleaner.py
Cleaner.delete_tags
def delete_tags(self, arg): """Removes html-tags from extracted data. :param arg: A string, the string which shall be cleaned :return: A string, the cleaned string """ if len(arg) > 0: raw = html.fromstring(arg) return raw.text_content().strip() return arg
python
def delete_tags(self, arg): """Removes html-tags from extracted data. :param arg: A string, the string which shall be cleaned :return: A string, the cleaned string """ if len(arg) > 0: raw = html.fromstring(arg) return raw.text_content().strip() return arg
[ "def", "delete_tags", "(", "self", ",", "arg", ")", ":", "if", "len", "(", "arg", ")", ">", "0", ":", "raw", "=", "html", ".", "fromstring", "(", "arg", ")", "return", "raw", ".", "text_content", "(", ")", ".", "strip", "(", ")", "return", "arg" ...
Removes html-tags from extracted data. :param arg: A string, the string which shall be cleaned :return: A string, the cleaned string
[ "Removes", "html", "-", "tags", "from", "extracted", "data", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L22-L33
226,273
fhamborg/news-please
newsplease/pipeline/extractor/cleaner.py
Cleaner.delete_whitespaces
def delete_whitespaces(self, arg): """Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one. :param arg: A string, the string which shell be cleaned :return: A string, the cleaned string """ # Deletes whitespaces after a newline arg = re.sub(re_newline_spc, '', arg) # Deletes every whitespace, tabulator, newline at the beginning of the string arg = re.sub(re_starting_whitespc, '', arg) # Deletes whitespace or tabulator if followed by whitespace or tabulator arg = re.sub(re_multi_spc_tab, '', arg) # Deletes newline if it is followed by an other one arg = re.sub(re_double_newline, '', arg) # Deletes newlines and whitespaces at the end of the string arg = re.sub(re_ending_spc_newline, '', arg) return arg
python
def delete_whitespaces(self, arg): """Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one. :param arg: A string, the string which shell be cleaned :return: A string, the cleaned string """ # Deletes whitespaces after a newline arg = re.sub(re_newline_spc, '', arg) # Deletes every whitespace, tabulator, newline at the beginning of the string arg = re.sub(re_starting_whitespc, '', arg) # Deletes whitespace or tabulator if followed by whitespace or tabulator arg = re.sub(re_multi_spc_tab, '', arg) # Deletes newline if it is followed by an other one arg = re.sub(re_double_newline, '', arg) # Deletes newlines and whitespaces at the end of the string arg = re.sub(re_ending_spc_newline, '', arg) return arg
[ "def", "delete_whitespaces", "(", "self", ",", "arg", ")", ":", "# Deletes whitespaces after a newline", "arg", "=", "re", ".", "sub", "(", "re_newline_spc", ",", "''", ",", "arg", ")", "# Deletes every whitespace, tabulator, newline at the beginning of the string", "arg"...
Removes newlines, tabs and whitespaces at the beginning, the end and if there is more than one. :param arg: A string, the string which shell be cleaned :return: A string, the cleaned string
[ "Removes", "newlines", "tabs", "and", "whitespaces", "at", "the", "beginning", "the", "end", "and", "if", "there", "is", "more", "than", "one", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L35-L51
226,274
fhamborg/news-please
newsplease/pipeline/extractor/cleaner.py
Cleaner.do_cleaning
def do_cleaning(self, arg): """Does the actual cleaning by using the delete methods above. :param arg: A string, the string which shell be cleaned. Or a list, in which case each of the strings within the list is cleaned. :return: A string, the cleaned string. Or a list with cleaned string entries. """ if arg is not None: if isinstance(arg, list): newlist = [] for entry in arg: newlist.append(self.do_cleaning(entry)) return newlist else: if sys.version_info[0] < 3: arg = unicode(arg) else: arg = str(arg) arg = self.delete_tags(arg) arg = self.delete_whitespaces(arg) return arg else: return None
python
def do_cleaning(self, arg): """Does the actual cleaning by using the delete methods above. :param arg: A string, the string which shell be cleaned. Or a list, in which case each of the strings within the list is cleaned. :return: A string, the cleaned string. Or a list with cleaned string entries. """ if arg is not None: if isinstance(arg, list): newlist = [] for entry in arg: newlist.append(self.do_cleaning(entry)) return newlist else: if sys.version_info[0] < 3: arg = unicode(arg) else: arg = str(arg) arg = self.delete_tags(arg) arg = self.delete_whitespaces(arg) return arg else: return None
[ "def", "do_cleaning", "(", "self", ",", "arg", ")", ":", "if", "arg", "is", "not", "None", ":", "if", "isinstance", "(", "arg", ",", "list", ")", ":", "newlist", "=", "[", "]", "for", "entry", "in", "arg", ":", "newlist", ".", "append", "(", "sel...
Does the actual cleaning by using the delete methods above. :param arg: A string, the string which shell be cleaned. Or a list, in which case each of the strings within the list is cleaned. :return: A string, the cleaned string. Or a list with cleaned string entries.
[ "Does", "the", "actual", "cleaning", "by", "using", "the", "delete", "methods", "above", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L53-L75
226,275
fhamborg/news-please
newsplease/pipeline/extractor/cleaner.py
Cleaner.clean
def clean(self, list_article_candidates): """Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects """ # Save cleaned article_candidates in results. results = [] for article_candidate in list_article_candidates: article_candidate.title = self.do_cleaning(article_candidate.title) article_candidate.description = self.do_cleaning(article_candidate.description) article_candidate.text = self.do_cleaning(article_candidate.text) article_candidate.topimage = self.do_cleaning(article_candidate.topimage) article_candidate.author = self.do_cleaning(article_candidate.author) article_candidate.publish_date = self.do_cleaning(article_candidate.publish_date) results.append(article_candidate) return results
python
def clean(self, list_article_candidates): """Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects """ # Save cleaned article_candidates in results. results = [] for article_candidate in list_article_candidates: article_candidate.title = self.do_cleaning(article_candidate.title) article_candidate.description = self.do_cleaning(article_candidate.description) article_candidate.text = self.do_cleaning(article_candidate.text) article_candidate.topimage = self.do_cleaning(article_candidate.topimage) article_candidate.author = self.do_cleaning(article_candidate.author) article_candidate.publish_date = self.do_cleaning(article_candidate.publish_date) results.append(article_candidate) return results
[ "def", "clean", "(", "self", ",", "list_article_candidates", ")", ":", "# Save cleaned article_candidates in results.", "results", "=", "[", "]", "for", "article_candidate", "in", "list_article_candidates", ":", "article_candidate", ".", "title", "=", "self", ".", "do...
Iterates over each article_candidate and cleans every extracted data. :param list_article_candidates: A list, the list of ArticleCandidate-Objects which have been extracted :return: A list, the list with the cleaned ArticleCandidate-Objects
[ "Iterates", "over", "each", "article_candidate", "and", "cleans", "every", "extracted", "data", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/cleaner.py#L77-L96
226,276
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer.py
Comparer.compare
def compare(self, item, article_candidates): """Compares the article candidates using the different submodules and saves the best results in new ArticleCandidate object :param item: The NewscrawlerItem related to the ArticleCandidates :param article_candidates: The list of ArticleCandidate-Objects which have been extracted :return: An ArticleCandidate-object containing the best results """ result = ArticleCandidate() result.title = self.comparer_title.extract(item, article_candidates) result.description = self.comparer_desciption.extract(item, article_candidates) result.text = self.comparer_text.extract(item, article_candidates) result.topimage = self.comparer_topimage.extract(item, article_candidates) result.author = self.comparer_author.extract(item, article_candidates) result.publish_date = self.comparer_date.extract(item, article_candidates) result.language = self.comparer_language.extract(item, article_candidates) return result
python
def compare(self, item, article_candidates): """Compares the article candidates using the different submodules and saves the best results in new ArticleCandidate object :param item: The NewscrawlerItem related to the ArticleCandidates :param article_candidates: The list of ArticleCandidate-Objects which have been extracted :return: An ArticleCandidate-object containing the best results """ result = ArticleCandidate() result.title = self.comparer_title.extract(item, article_candidates) result.description = self.comparer_desciption.extract(item, article_candidates) result.text = self.comparer_text.extract(item, article_candidates) result.topimage = self.comparer_topimage.extract(item, article_candidates) result.author = self.comparer_author.extract(item, article_candidates) result.publish_date = self.comparer_date.extract(item, article_candidates) result.language = self.comparer_language.extract(item, article_candidates) return result
[ "def", "compare", "(", "self", ",", "item", ",", "article_candidates", ")", ":", "result", "=", "ArticleCandidate", "(", ")", "result", ".", "title", "=", "self", ".", "comparer_title", ".", "extract", "(", "item", ",", "article_candidates", ")", "result", ...
Compares the article candidates using the different submodules and saves the best results in new ArticleCandidate object :param item: The NewscrawlerItem related to the ArticleCandidates :param article_candidates: The list of ArticleCandidate-Objects which have been extracted :return: An ArticleCandidate-object containing the best results
[ "Compares", "the", "article", "candidates", "using", "the", "different", "submodules", "and", "saves", "the", "best", "results", "in", "new", "ArticleCandidate", "object" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer.py#L24-L42
226,277
fhamborg/news-please
newsplease/crawler/spiders/rss_crawler.py
RssCrawler.parse
def parse(self, response): """ Extracts the Rss Feed and initiates crawling it. :param obj response: The scrapy response """ yield scrapy.Request(self.helper.url_extractor.get_rss_url(response), callback=self.rss_parse)
python
def parse(self, response): """ Extracts the Rss Feed and initiates crawling it. :param obj response: The scrapy response """ yield scrapy.Request(self.helper.url_extractor.get_rss_url(response), callback=self.rss_parse)
[ "def", "parse", "(", "self", ",", "response", ")", ":", "yield", "scrapy", ".", "Request", "(", "self", ".", "helper", ".", "url_extractor", ".", "get_rss_url", "(", "response", ")", ",", "callback", "=", "self", ".", "rss_parse", ")" ]
Extracts the Rss Feed and initiates crawling it. :param obj response: The scrapy response
[ "Extracts", "the", "Rss", "Feed", "and", "initiates", "crawling", "it", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/rss_crawler.py#L42-L49
226,278
fhamborg/news-please
newsplease/crawler/spiders/rss_crawler.py
RssCrawler.supports_site
def supports_site(url): """ Rss Crawler are supported if by every site containing an rss feed. Determines if this crawler works on the given url. :param str url: The url to test :return bool: Determines wether this crawler work on the given url """ # Follow redirects opener = urllib2.build_opener(urllib2.HTTPRedirectHandler) redirect = opener.open(url).url response = urllib2.urlopen(redirect).read() # Check if a standard rss feed exists return re.search(re_rss, response.decode('utf-8')) is not None
python
def supports_site(url): """ Rss Crawler are supported if by every site containing an rss feed. Determines if this crawler works on the given url. :param str url: The url to test :return bool: Determines wether this crawler work on the given url """ # Follow redirects opener = urllib2.build_opener(urllib2.HTTPRedirectHandler) redirect = opener.open(url).url response = urllib2.urlopen(redirect).read() # Check if a standard rss feed exists return re.search(re_rss, response.decode('utf-8')) is not None
[ "def", "supports_site", "(", "url", ")", ":", "# Follow redirects", "opener", "=", "urllib2", ".", "build_opener", "(", "urllib2", ".", "HTTPRedirectHandler", ")", "redirect", "=", "opener", ".", "open", "(", "url", ")", ".", "url", "response", "=", "urllib2...
Rss Crawler are supported if by every site containing an rss feed. Determines if this crawler works on the given url. :param str url: The url to test :return bool: Determines wether this crawler work on the given url
[ "Rss", "Crawler", "are", "supported", "if", "by", "every", "site", "containing", "an", "rss", "feed", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/spiders/rss_crawler.py#L86-L102
226,279
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.get_allowed_domain
def get_allowed_domain(url, allow_subdomains=True): """ Determines the url's domain. :param str url: the url to extract the allowed domain from :param bool allow_subdomains: determines wether to include subdomains :return str: subdomains.domain.topleveldomain or domain.topleveldomain """ if allow_subdomains: return re.sub(re_www, '', re.search(r'[^/]+\.[^/]+', url).group(0)) else: return re.search(re_domain, UrlExtractor.get_allowed_domain(url)).group(0)
python
def get_allowed_domain(url, allow_subdomains=True): """ Determines the url's domain. :param str url: the url to extract the allowed domain from :param bool allow_subdomains: determines wether to include subdomains :return str: subdomains.domain.topleveldomain or domain.topleveldomain """ if allow_subdomains: return re.sub(re_www, '', re.search(r'[^/]+\.[^/]+', url).group(0)) else: return re.search(re_domain, UrlExtractor.get_allowed_domain(url)).group(0)
[ "def", "get_allowed_domain", "(", "url", ",", "allow_subdomains", "=", "True", ")", ":", "if", "allow_subdomains", ":", "return", "re", ".", "sub", "(", "re_www", ",", "''", ",", "re", ".", "search", "(", "r'[^/]+\\.[^/]+'", ",", "url", ")", ".", "group"...
Determines the url's domain. :param str url: the url to extract the allowed domain from :param bool allow_subdomains: determines wether to include subdomains :return str: subdomains.domain.topleveldomain or domain.topleveldomain
[ "Determines", "the", "url", "s", "domain", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L31-L42
226,280
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.get_subdomain
def get_subdomain(url): """ Determines the domain's subdomains. :param str url: the url to extract any subdomains from :return str: subdomains of url """ allowed_domain = UrlExtractor.get_allowed_domain(url) return allowed_domain[:len(allowed_domain) - len( UrlExtractor.get_allowed_domain(url, False))]
python
def get_subdomain(url): """ Determines the domain's subdomains. :param str url: the url to extract any subdomains from :return str: subdomains of url """ allowed_domain = UrlExtractor.get_allowed_domain(url) return allowed_domain[:len(allowed_domain) - len( UrlExtractor.get_allowed_domain(url, False))]
[ "def", "get_subdomain", "(", "url", ")", ":", "allowed_domain", "=", "UrlExtractor", ".", "get_allowed_domain", "(", "url", ")", "return", "allowed_domain", "[", ":", "len", "(", "allowed_domain", ")", "-", "len", "(", "UrlExtractor", ".", "get_allowed_domain", ...
Determines the domain's subdomains. :param str url: the url to extract any subdomains from :return str: subdomains of url
[ "Determines", "the", "domain", "s", "subdomains", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L45-L54
226,281
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.follow_redirects
def follow_redirects(url): """ Get's the url actual address by following forwards :param str url: the url to work on :return str: actual address of url """ opener = urllib2.build_opener(urllib2.HTTPRedirectHandler) return opener.open(url).url
python
def follow_redirects(url): """ Get's the url actual address by following forwards :param str url: the url to work on :return str: actual address of url """ opener = urllib2.build_opener(urllib2.HTTPRedirectHandler) return opener.open(url).url
[ "def", "follow_redirects", "(", "url", ")", ":", "opener", "=", "urllib2", ".", "build_opener", "(", "urllib2", ".", "HTTPRedirectHandler", ")", "return", "opener", ".", "open", "(", "url", ")", ".", "url" ]
Get's the url actual address by following forwards :param str url: the url to work on :return str: actual address of url
[ "Get", "s", "the", "url", "actual", "address", "by", "following", "forwards" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L57-L65
226,282
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.get_sitemap_url
def get_sitemap_url(url, allow_subdomains): """ Determines the domain's robot.txt :param str url: the url to work on :param bool allow_subdomains: Determines if the robot.txt may be the subdomain's :return: the robot.txt's address :raises Exception: if there's no robot.txt on the site's domain """ if allow_subdomains: redirect = UrlExtractor.follow_redirects( "http://" + UrlExtractor.get_allowed_domain(url) ) else: redirect = UrlExtractor.follow_redirects( "http://" + UrlExtractor.get_allowed_domain(url, False) ) redirect = UrlExtractor.follow_redirects(url) # Get robots.txt parsed = urlparse(redirect) if allow_subdomains: url_netloc = parsed.netloc else: url_netloc = UrlExtractor.get_allowed_domain( parsed.netloc, False) robots = '{url.scheme}://{url_netloc}/robots.txt'.format( url=parsed, url_netloc=url_netloc) try: urllib2.urlopen(robots) return robots except: if allow_subdomains: return UrlExtractor.get_sitemap_url(url, False) else: raise Exception('Fatal: no robots.txt found.')
python
def get_sitemap_url(url, allow_subdomains): """ Determines the domain's robot.txt :param str url: the url to work on :param bool allow_subdomains: Determines if the robot.txt may be the subdomain's :return: the robot.txt's address :raises Exception: if there's no robot.txt on the site's domain """ if allow_subdomains: redirect = UrlExtractor.follow_redirects( "http://" + UrlExtractor.get_allowed_domain(url) ) else: redirect = UrlExtractor.follow_redirects( "http://" + UrlExtractor.get_allowed_domain(url, False) ) redirect = UrlExtractor.follow_redirects(url) # Get robots.txt parsed = urlparse(redirect) if allow_subdomains: url_netloc = parsed.netloc else: url_netloc = UrlExtractor.get_allowed_domain( parsed.netloc, False) robots = '{url.scheme}://{url_netloc}/robots.txt'.format( url=parsed, url_netloc=url_netloc) try: urllib2.urlopen(robots) return robots except: if allow_subdomains: return UrlExtractor.get_sitemap_url(url, False) else: raise Exception('Fatal: no robots.txt found.')
[ "def", "get_sitemap_url", "(", "url", ",", "allow_subdomains", ")", ":", "if", "allow_subdomains", ":", "redirect", "=", "UrlExtractor", ".", "follow_redirects", "(", "\"http://\"", "+", "UrlExtractor", ".", "get_allowed_domain", "(", "url", ")", ")", "else", ":...
Determines the domain's robot.txt :param str url: the url to work on :param bool allow_subdomains: Determines if the robot.txt may be the subdomain's :return: the robot.txt's address :raises Exception: if there's no robot.txt on the site's domain
[ "Determines", "the", "domain", "s", "robot", ".", "txt" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L68-L107
226,283
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.sitemap_check
def sitemap_check(url): """ Sitemap-Crawler are supported by every site which have a Sitemap set in the robots.txt. :param str url: the url to work on :return bool: Determines if Sitemap is set in the site's robots.txt """ response = urllib2.urlopen(UrlExtractor.get_sitemap_url(url, True)) # Check if "Sitemap" is set return "Sitemap:" in response.read().decode('utf-8')
python
def sitemap_check(url): """ Sitemap-Crawler are supported by every site which have a Sitemap set in the robots.txt. :param str url: the url to work on :return bool: Determines if Sitemap is set in the site's robots.txt """ response = urllib2.urlopen(UrlExtractor.get_sitemap_url(url, True)) # Check if "Sitemap" is set return "Sitemap:" in response.read().decode('utf-8')
[ "def", "sitemap_check", "(", "url", ")", ":", "response", "=", "urllib2", ".", "urlopen", "(", "UrlExtractor", ".", "get_sitemap_url", "(", "url", ",", "True", ")", ")", "# Check if \"Sitemap\" is set", "return", "\"Sitemap:\"", "in", "response", ".", "read", ...
Sitemap-Crawler are supported by every site which have a Sitemap set in the robots.txt. :param str url: the url to work on :return bool: Determines if Sitemap is set in the site's robots.txt
[ "Sitemap", "-", "Crawler", "are", "supported", "by", "every", "site", "which", "have", "a", "Sitemap", "set", "in", "the", "robots", ".", "txt", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L110-L121
226,284
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.get_url_directory_string
def get_url_directory_string(url): """ Determines the url's directory string. :param str url: the url to extract the directory string from :return str: the directory string on the server """ domain = UrlExtractor.get_allowed_domain(url) splitted_url = url.split('/') # the following commented list comprehension could replace # the following for, if not and break statement # index = [index for index in range(len(splitted_url)) # if not re.search(domain, splitted_url[index]) is None][0] for index in range(len(splitted_url)): if not re.search(domain, splitted_url[index]) is None: if splitted_url[-1] is "": splitted_url = splitted_url[index + 1:-2] else: splitted_url = splitted_url[index + 1:-1] break return '_'.join(splitted_url)
python
def get_url_directory_string(url): """ Determines the url's directory string. :param str url: the url to extract the directory string from :return str: the directory string on the server """ domain = UrlExtractor.get_allowed_domain(url) splitted_url = url.split('/') # the following commented list comprehension could replace # the following for, if not and break statement # index = [index for index in range(len(splitted_url)) # if not re.search(domain, splitted_url[index]) is None][0] for index in range(len(splitted_url)): if not re.search(domain, splitted_url[index]) is None: if splitted_url[-1] is "": splitted_url = splitted_url[index + 1:-2] else: splitted_url = splitted_url[index + 1:-1] break return '_'.join(splitted_url)
[ "def", "get_url_directory_string", "(", "url", ")", ":", "domain", "=", "UrlExtractor", ".", "get_allowed_domain", "(", "url", ")", "splitted_url", "=", "url", ".", "split", "(", "'/'", ")", "# the following commented list comprehension could replace", "# the following ...
Determines the url's directory string. :param str url: the url to extract the directory string from :return str: the directory string on the server
[ "Determines", "the", "url", "s", "directory", "string", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L149-L172
226,285
fhamborg/news-please
newsplease/helper_classes/url_extractor.py
UrlExtractor.get_url_file_name
def get_url_file_name(url): """ Determines the url's file name. :param str url: the url to extract the file name from :return str: the filename (without the file extension) on the server """ url_root_ext = os.path.splitext(url) if len(url_root_ext[1]) <= MAX_FILE_EXTENSION_LENGTH: return os.path.split(url_root_ext[0])[1] else: return os.path.split(url)[1]
python
def get_url_file_name(url): """ Determines the url's file name. :param str url: the url to extract the file name from :return str: the filename (without the file extension) on the server """ url_root_ext = os.path.splitext(url) if len(url_root_ext[1]) <= MAX_FILE_EXTENSION_LENGTH: return os.path.split(url_root_ext[0])[1] else: return os.path.split(url)[1]
[ "def", "get_url_file_name", "(", "url", ")", ":", "url_root_ext", "=", "os", ".", "path", ".", "splitext", "(", "url", ")", "if", "len", "(", "url_root_ext", "[", "1", "]", ")", "<=", "MAX_FILE_EXTENSION_LENGTH", ":", "return", "os", ".", "path", ".", ...
Determines the url's file name. :param str url: the url to extract the file name from :return str: the filename (without the file extension) on the server
[ "Determines", "the", "url", "s", "file", "name", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/url_extractor.py#L175-L187
226,286
fhamborg/news-please
newsplease/config.py
CrawlerConfig.load_config
def load_config(self): """ Loads the config-file """ self.__config = {} # Parse sections, its options and put it in self.config. for section in self.sections: self.__config[section] = {} options = self.parser.options(section) # Parse options of each section for option in options: try: opt = self.parser \ .get(section, option) try: self.__config[section][option] = literal_eval(opt) except (SyntaxError, ValueError): self.__config[section][option] = opt self.log_output.append( {"level": "debug", "msg": "Option not literal_eval-parsable" " (maybe string): [{0}] {1}" .format(section, option)}) if self.__config[section][option] == -1: self.log_output.append( {"level": "debug", "msg": "Skipping: [%s] %s" % (section, option)} ) except ConfigParser.NoOptionError as exc: self.log_output.append( {"level": "error", "msg": "Exception on [%s] %s: %s" % (section, option, exc)} ) self.__config[section][option] = None
python
def load_config(self): """ Loads the config-file """ self.__config = {} # Parse sections, its options and put it in self.config. for section in self.sections: self.__config[section] = {} options = self.parser.options(section) # Parse options of each section for option in options: try: opt = self.parser \ .get(section, option) try: self.__config[section][option] = literal_eval(opt) except (SyntaxError, ValueError): self.__config[section][option] = opt self.log_output.append( {"level": "debug", "msg": "Option not literal_eval-parsable" " (maybe string): [{0}] {1}" .format(section, option)}) if self.__config[section][option] == -1: self.log_output.append( {"level": "debug", "msg": "Skipping: [%s] %s" % (section, option)} ) except ConfigParser.NoOptionError as exc: self.log_output.append( {"level": "error", "msg": "Exception on [%s] %s: %s" % (section, option, exc)} ) self.__config[section][option] = None
[ "def", "load_config", "(", "self", ")", ":", "self", ".", "__config", "=", "{", "}", "# Parse sections, its options and put it in self.config.", "for", "section", "in", "self", ".", "sections", ":", "self", ".", "__config", "[", "section", "]", "=", "{", "}", ...
Loads the config-file
[ "Loads", "the", "config", "-", "file" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L95-L134
226,287
fhamborg/news-please
newsplease/config.py
CrawlerConfig.handle_logging
def handle_logging(self): """ To allow devs to log as early as possible, logging will already be handled here """ configure_logging(self.get_scrapy_options()) # Disable duplicates self.__scrapy_options["LOG_ENABLED"] = False # Now, after log-level is correctly set, lets log them. for msg in self.log_output: if msg["level"] is "error": self.log.error(msg["msg"]) elif msg["level"] is "info": self.log.info(msg["msg"]) elif msg["level"] is "debug": self.log.debug(msg["msg"])
python
def handle_logging(self): """ To allow devs to log as early as possible, logging will already be handled here """ configure_logging(self.get_scrapy_options()) # Disable duplicates self.__scrapy_options["LOG_ENABLED"] = False # Now, after log-level is correctly set, lets log them. for msg in self.log_output: if msg["level"] is "error": self.log.error(msg["msg"]) elif msg["level"] is "info": self.log.info(msg["msg"]) elif msg["level"] is "debug": self.log.debug(msg["msg"])
[ "def", "handle_logging", "(", "self", ")", ":", "configure_logging", "(", "self", ".", "get_scrapy_options", "(", ")", ")", "# Disable duplicates", "self", ".", "__scrapy_options", "[", "\"LOG_ENABLED\"", "]", "=", "False", "# Now, after log-level is correctly set, lets...
To allow devs to log as early as possible, logging will already be handled here
[ "To", "allow", "devs", "to", "log", "as", "early", "as", "possible", "logging", "will", "already", "be", "handled", "here" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L148-L166
226,288
fhamborg/news-please
newsplease/config.py
CrawlerConfig.option
def option(self, option): """ Gets the option, set_section needs to be set before. :param option (string): The option to get. :return mixed: The option from from the config. """ if self.__current_section is None: raise RuntimeError('No section set in option-getting') return self.__config[self.__current_section][option]
python
def option(self, option): """ Gets the option, set_section needs to be set before. :param option (string): The option to get. :return mixed: The option from from the config. """ if self.__current_section is None: raise RuntimeError('No section set in option-getting') return self.__config[self.__current_section][option]
[ "def", "option", "(", "self", ",", "option", ")", ":", "if", "self", ".", "__current_section", "is", "None", ":", "raise", "RuntimeError", "(", "'No section set in option-getting'", ")", "return", "self", ".", "__config", "[", "self", ".", "__current_section", ...
Gets the option, set_section needs to be set before. :param option (string): The option to get. :return mixed: The option from from the config.
[ "Gets", "the", "option", "set_section", "needs", "to", "be", "set", "before", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L194-L203
226,289
fhamborg/news-please
newsplease/config.py
JsonConfig.get_url_array
def get_url_array(self): """ Get all url-objects in an array :return sites (array): The sites from the JSON-file """ urlarray = [] for urlobjects in self.__json_object["base_urls"]: urlarray.append(urlobjects["url"]) return urlarray
python
def get_url_array(self): """ Get all url-objects in an array :return sites (array): The sites from the JSON-file """ urlarray = [] for urlobjects in self.__json_object["base_urls"]: urlarray.append(urlobjects["url"]) return urlarray
[ "def", "get_url_array", "(", "self", ")", ":", "urlarray", "=", "[", "]", "for", "urlobjects", "in", "self", ".", "__json_object", "[", "\"base_urls\"", "]", ":", "urlarray", ".", "append", "(", "urlobjects", "[", "\"url\"", "]", ")", "return", "urlarray" ...
Get all url-objects in an array :return sites (array): The sites from the JSON-file
[ "Get", "all", "url", "-", "objects", "in", "an", "array" ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/config.py#L293-L302
226,290
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_title.py
ComparerTitle.find_matches
def find_matches(self, list_title): """Checks if there are any matches between extracted titles. :param list_title: A list, the extracted titles saved in a list :return: A list, the matched titles """ list_title_matches = [] # Generate every possible tuple of titles and safe the matched string in a list. for a, b, in itertools.combinations(list_title, 2): if a == b: list_title_matches.append(a) return list_title_matches
python
def find_matches(self, list_title): """Checks if there are any matches between extracted titles. :param list_title: A list, the extracted titles saved in a list :return: A list, the matched titles """ list_title_matches = [] # Generate every possible tuple of titles and safe the matched string in a list. for a, b, in itertools.combinations(list_title, 2): if a == b: list_title_matches.append(a) return list_title_matches
[ "def", "find_matches", "(", "self", ",", "list_title", ")", ":", "list_title_matches", "=", "[", "]", "# Generate every possible tuple of titles and safe the matched string in a list.", "for", "a", ",", "b", ",", "in", "itertools", ".", "combinations", "(", "list_title"...
Checks if there are any matches between extracted titles. :param list_title: A list, the extracted titles saved in a list :return: A list, the matched titles
[ "Checks", "if", "there", "are", "any", "matches", "between", "extracted", "titles", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_title.py#L7-L19
226,291
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_title.py
ComparerTitle.extract_match
def extract_match(self, list_title_matches): """Extract the title with the most matches from the list. :param list_title_matches: A list, the extracted titles which match with others :return: A string, the most frequently extracted title. """ # Create a set of the extracted titles list_title_matches_set = set(list_title_matches) list_title_count = [] # Count how often a title was matched and safe as tuple in list. for match in list_title_matches_set: list_title_count.append((list_title_matches.count(match), match)) if list_title_count and max(list_title_count)[0] != min(list_title_count)[0]: return max(list_title_count)[1] return None
python
def extract_match(self, list_title_matches): """Extract the title with the most matches from the list. :param list_title_matches: A list, the extracted titles which match with others :return: A string, the most frequently extracted title. """ # Create a set of the extracted titles list_title_matches_set = set(list_title_matches) list_title_count = [] # Count how often a title was matched and safe as tuple in list. for match in list_title_matches_set: list_title_count.append((list_title_matches.count(match), match)) if list_title_count and max(list_title_count)[0] != min(list_title_count)[0]: return max(list_title_count)[1] return None
[ "def", "extract_match", "(", "self", ",", "list_title_matches", ")", ":", "# Create a set of the extracted titles", "list_title_matches_set", "=", "set", "(", "list_title_matches", ")", "list_title_count", "=", "[", "]", "# Count how often a title was matched and safe as tuple ...
Extract the title with the most matches from the list. :param list_title_matches: A list, the extracted titles which match with others :return: A string, the most frequently extracted title.
[ "Extract", "the", "title", "with", "the", "most", "matches", "from", "the", "list", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_title.py#L21-L38
226,292
fhamborg/news-please
newsplease/pipeline/extractor/comparer/comparer_title.py
ComparerTitle.extract
def extract(self, item, list_article_candidate): """Compares the extracted titles. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely title """ list_title = [] # Save every title from the candidates in list_title. for article_candidate in list_article_candidate: if article_candidate.title is not None: list_title.append(article_candidate.title) if not list_title: return None # Creates a list with matched titles list_title_matches = self.find_matches(list_title) # Extract title with the most matches matched_title = self.extract_match(list_title_matches) # Returns the matched title if there is one, else returns the shortest title if matched_title: return matched_title else: if list_title_matches: return self.choose_shortest_title(set(list_title_matches)) else: return self.choose_shortest_title(list_title)
python
def extract(self, item, list_article_candidate): """Compares the extracted titles. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely title """ list_title = [] # Save every title from the candidates in list_title. for article_candidate in list_article_candidate: if article_candidate.title is not None: list_title.append(article_candidate.title) if not list_title: return None # Creates a list with matched titles list_title_matches = self.find_matches(list_title) # Extract title with the most matches matched_title = self.extract_match(list_title_matches) # Returns the matched title if there is one, else returns the shortest title if matched_title: return matched_title else: if list_title_matches: return self.choose_shortest_title(set(list_title_matches)) else: return self.choose_shortest_title(list_title)
[ "def", "extract", "(", "self", ",", "item", ",", "list_article_candidate", ")", ":", "list_title", "=", "[", "]", "# Save every title from the candidates in list_title.", "for", "article_candidate", "in", "list_article_candidate", ":", "if", "article_candidate", ".", "t...
Compares the extracted titles. :param item: The corresponding NewscrawlerItem :param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted :return: A string, the most likely title
[ "Compares", "the", "extracted", "titles", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/pipeline/extractor/comparer/comparer_title.py#L53-L82
226,293
fhamborg/news-please
newsplease/__main__.py
cli
def cli(cfg_file_path, resume, reset_elasticsearch, reset_mysql, reset_json, reset_all, no_confirm): "A generic news crawler and extractor." if reset_all: reset_elasticsearch = True reset_json = True reset_mysql = True if cfg_file_path and not cfg_file_path.endswith(os.path.sep): cfg_file_path += os.path.sep NewsPleaseLauncher(cfg_file_path, resume, reset_elasticsearch, reset_json, reset_mysql, no_confirm)
python
def cli(cfg_file_path, resume, reset_elasticsearch, reset_mysql, reset_json, reset_all, no_confirm): "A generic news crawler and extractor." if reset_all: reset_elasticsearch = True reset_json = True reset_mysql = True if cfg_file_path and not cfg_file_path.endswith(os.path.sep): cfg_file_path += os.path.sep NewsPleaseLauncher(cfg_file_path, resume, reset_elasticsearch, reset_json, reset_mysql, no_confirm)
[ "def", "cli", "(", "cfg_file_path", ",", "resume", ",", "reset_elasticsearch", ",", "reset_mysql", ",", "reset_json", ",", "reset_all", ",", "no_confirm", ")", ":", "if", "reset_all", ":", "reset_elasticsearch", "=", "True", "reset_json", "=", "True", "reset_mys...
A generic news crawler and extractor.
[ "A", "generic", "news", "crawler", "and", "extractor", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L634-L645
226,294
fhamborg/news-please
newsplease/__main__.py
NewsPleaseLauncher.manage_crawlers
def manage_crawlers(self): """ Manages all crawlers, threads and limites the number of parallel running threads. """ sites = self.json.get_site_objects() for index, site in enumerate(sites): if "daemonize" in site: self.daemon_list.add_daemon(index, site["daemonize"]) elif "additional_rss_daemon" in site: self.daemon_list.add_daemon(index, site["additional_rss_daemon"]) self.crawler_list.append_item(index) else: self.crawler_list.append_item(index) num_threads = self.cfg.section('Crawler')[ 'number_of_parallel_crawlers'] if self.crawler_list.len() < num_threads: num_threads = self.crawler_list.len() for _ in range(num_threads): thread = threading.Thread(target=self.manage_crawler, args=(), kwargs={}) self.threads.append(thread) thread.start() num_daemons = self.cfg.section('Crawler')['number_of_parallel_daemons'] if self.daemon_list.len() < num_daemons: num_daemons = self.daemon_list.len() for _ in range(num_daemons): thread_daemonized = threading.Thread(target=self.manage_daemon, args=(), kwargs={}) self.threads_daemonized.append(thread_daemonized) thread_daemonized.start() while not self.shutdown: try: time.sleep(10) # if we are not in daemon mode and no crawler is running any longer, # all articles have been crawled and the tool can shut down if self.daemon_list.len() == 0 and self.number_of_active_crawlers == 0: self.graceful_stop() break except IOError: # This exception will only occur on kill-process on windows. # The process should be killed, thus this exception is # irrelevant. pass
python
def manage_crawlers(self): """ Manages all crawlers, threads and limites the number of parallel running threads. """ sites = self.json.get_site_objects() for index, site in enumerate(sites): if "daemonize" in site: self.daemon_list.add_daemon(index, site["daemonize"]) elif "additional_rss_daemon" in site: self.daemon_list.add_daemon(index, site["additional_rss_daemon"]) self.crawler_list.append_item(index) else: self.crawler_list.append_item(index) num_threads = self.cfg.section('Crawler')[ 'number_of_parallel_crawlers'] if self.crawler_list.len() < num_threads: num_threads = self.crawler_list.len() for _ in range(num_threads): thread = threading.Thread(target=self.manage_crawler, args=(), kwargs={}) self.threads.append(thread) thread.start() num_daemons = self.cfg.section('Crawler')['number_of_parallel_daemons'] if self.daemon_list.len() < num_daemons: num_daemons = self.daemon_list.len() for _ in range(num_daemons): thread_daemonized = threading.Thread(target=self.manage_daemon, args=(), kwargs={}) self.threads_daemonized.append(thread_daemonized) thread_daemonized.start() while not self.shutdown: try: time.sleep(10) # if we are not in daemon mode and no crawler is running any longer, # all articles have been crawled and the tool can shut down if self.daemon_list.len() == 0 and self.number_of_active_crawlers == 0: self.graceful_stop() break except IOError: # This exception will only occur on kill-process on windows. # The process should be killed, thus this exception is # irrelevant. pass
[ "def", "manage_crawlers", "(", "self", ")", ":", "sites", "=", "self", ".", "json", ".", "get_site_objects", "(", ")", "for", "index", ",", "site", "in", "enumerate", "(", "sites", ")", ":", "if", "\"daemonize\"", "in", "site", ":", "self", ".", "daemo...
Manages all crawlers, threads and limites the number of parallel running threads.
[ "Manages", "all", "crawlers", "threads", "and", "limites", "the", "number", "of", "parallel", "running", "threads", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L152-L204
226,295
fhamborg/news-please
newsplease/__main__.py
NewsPleaseLauncher.manage_crawler
def manage_crawler(self): """ Manages a normal crawler thread. When a crawler finished, it loads another one if there are still sites to crawl. """ index = True self.number_of_active_crawlers += 1 while not self.shutdown and index is not None: index = self.crawler_list.get_next_item() if index is None: self.number_of_active_crawlers -= 1 break self.start_crawler(index)
python
def manage_crawler(self): """ Manages a normal crawler thread. When a crawler finished, it loads another one if there are still sites to crawl. """ index = True self.number_of_active_crawlers += 1 while not self.shutdown and index is not None: index = self.crawler_list.get_next_item() if index is None: self.number_of_active_crawlers -= 1 break self.start_crawler(index)
[ "def", "manage_crawler", "(", "self", ")", ":", "index", "=", "True", "self", ".", "number_of_active_crawlers", "+=", "1", "while", "not", "self", ".", "shutdown", "and", "index", "is", "not", "None", ":", "index", "=", "self", ".", "crawler_list", ".", ...
Manages a normal crawler thread. When a crawler finished, it loads another one if there are still sites to crawl.
[ "Manages", "a", "normal", "crawler", "thread", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L206-L221
226,296
fhamborg/news-please
newsplease/__main__.py
NewsPleaseLauncher.manage_daemon
def manage_daemon(self): """ Manages a daemonized crawler thread. Once a crawler it finished, it loads the next one. """ while not self.shutdown: # next scheduled daemon, tuple (time, index) item = self.daemon_list.get_next_item() cur = time.time() pajama_time = item[0] - cur if pajama_time > 0: self.thread_event.wait(pajama_time) if not self.shutdown: self.start_crawler(item[1], daemonize=True)
python
def manage_daemon(self): """ Manages a daemonized crawler thread. Once a crawler it finished, it loads the next one. """ while not self.shutdown: # next scheduled daemon, tuple (time, index) item = self.daemon_list.get_next_item() cur = time.time() pajama_time = item[0] - cur if pajama_time > 0: self.thread_event.wait(pajama_time) if not self.shutdown: self.start_crawler(item[1], daemonize=True)
[ "def", "manage_daemon", "(", "self", ")", ":", "while", "not", "self", ".", "shutdown", ":", "# next scheduled daemon, tuple (time, index)", "item", "=", "self", ".", "daemon_list", ".", "get_next_item", "(", ")", "cur", "=", "time", ".", "time", "(", ")", "...
Manages a daemonized crawler thread. Once a crawler it finished, it loads the next one.
[ "Manages", "a", "daemonized", "crawler", "thread", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L223-L237
226,297
fhamborg/news-please
newsplease/__main__.py
NewsPleaseLauncher.start_crawler
def start_crawler(self, index, daemonize=False): """ Starts a crawler from the input-array. :param int index: The array-index of the site :param int daemonize: Bool if the crawler is supposed to be daemonized (to delete the JOBDIR) """ call_process = [sys.executable, self.__single_crawler, self.cfg_file_path, self.json_file_path, "%s" % index, "%s" % self.shall_resume, "%s" % daemonize] self.log.debug("Calling Process: %s", call_process) crawler = Popen(call_process, stderr=None, stdout=None) crawler.communicate() self.crawlers.append(crawler)
python
def start_crawler(self, index, daemonize=False): """ Starts a crawler from the input-array. :param int index: The array-index of the site :param int daemonize: Bool if the crawler is supposed to be daemonized (to delete the JOBDIR) """ call_process = [sys.executable, self.__single_crawler, self.cfg_file_path, self.json_file_path, "%s" % index, "%s" % self.shall_resume, "%s" % daemonize] self.log.debug("Calling Process: %s", call_process) crawler = Popen(call_process, stderr=None, stdout=None) crawler.communicate() self.crawlers.append(crawler)
[ "def", "start_crawler", "(", "self", ",", "index", ",", "daemonize", "=", "False", ")", ":", "call_process", "=", "[", "sys", ".", "executable", ",", "self", ".", "__single_crawler", ",", "self", ".", "cfg_file_path", ",", "self", ".", "json_file_path", ",...
Starts a crawler from the input-array. :param int index: The array-index of the site :param int daemonize: Bool if the crawler is supposed to be daemonized (to delete the JOBDIR)
[ "Starts", "a", "crawler", "from", "the", "input", "-", "array", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L239-L261
226,298
fhamborg/news-please
newsplease/__main__.py
NewsPleaseLauncher.graceful_stop
def graceful_stop(self, signal_number=None, stack_frame=None): """ This function will be called when a graceful-stop is initiated. """ stop_msg = "Hard" if self.shutdown else "Graceful" if signal_number is None: self.log.info("%s stop called manually. " "Shutting down.", stop_msg) else: self.log.info("%s stop called by signal #%s. Shutting down." "Stack Frame: %s", stop_msg, signal_number, stack_frame) self.shutdown = True self.crawler_list.stop() self.daemon_list.stop() self.thread_event.set() return True
python
def graceful_stop(self, signal_number=None, stack_frame=None): """ This function will be called when a graceful-stop is initiated. """ stop_msg = "Hard" if self.shutdown else "Graceful" if signal_number is None: self.log.info("%s stop called manually. " "Shutting down.", stop_msg) else: self.log.info("%s stop called by signal #%s. Shutting down." "Stack Frame: %s", stop_msg, signal_number, stack_frame) self.shutdown = True self.crawler_list.stop() self.daemon_list.stop() self.thread_event.set() return True
[ "def", "graceful_stop", "(", "self", ",", "signal_number", "=", "None", ",", "stack_frame", "=", "None", ")", ":", "stop_msg", "=", "\"Hard\"", "if", "self", ".", "shutdown", "else", "\"Graceful\"", "if", "signal_number", "is", "None", ":", "self", ".", "l...
This function will be called when a graceful-stop is initiated.
[ "This", "function", "will", "be", "called", "when", "a", "graceful", "-", "stop", "is", "initiated", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/__main__.py#L263-L279
226,299
fhamborg/news-please
newsplease/helper_classes/savepath_parser.py
SavepathParser.time_replacer
def time_replacer(match, timestamp): """ Transforms the timestamp to the format the regex match determines. :param str match: the regex match :param time timestamp: the timestamp to format with match.group(1) :return str: the timestamp formated with strftime the way the regex-match within the first set of braces defines """ # match.group(0) = entire match # match.group(1) = match in braces #1 return time.strftime(match.group(1), time.gmtime(timestamp))
python
def time_replacer(match, timestamp): """ Transforms the timestamp to the format the regex match determines. :param str match: the regex match :param time timestamp: the timestamp to format with match.group(1) :return str: the timestamp formated with strftime the way the regex-match within the first set of braces defines """ # match.group(0) = entire match # match.group(1) = match in braces #1 return time.strftime(match.group(1), time.gmtime(timestamp))
[ "def", "time_replacer", "(", "match", ",", "timestamp", ")", ":", "# match.group(0) = entire match", "# match.group(1) = match in braces #1", "return", "time", ".", "strftime", "(", "match", ".", "group", "(", "1", ")", ",", "time", ".", "gmtime", "(", "timestamp"...
Transforms the timestamp to the format the regex match determines. :param str match: the regex match :param time timestamp: the timestamp to format with match.group(1) :return str: the timestamp formated with strftime the way the regex-match within the first set of braces defines
[ "Transforms", "the", "timestamp", "to", "the", "format", "the", "regex", "match", "determines", "." ]
731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/helper_classes/savepath_parser.py#L76-L87