Column types: id (int32, 0–252k), repo (string, 7–55 chars), path (string, 4–127 chars), func_name (string, 1–88 chars), original_string (string, 75–19.8k chars), language (string, 1 class: python), code (string, 75–19.8k chars), code_tokens (list), docstring (string, 3–17.3k chars), docstring_tokens (list), sha (string, 40 chars), url (string, 87–242 chars).

| id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
|---|---|---|---|---|---|---|---|---|---|---|---|
10,000
|
inonit/drf-haystack
|
drf_haystack/utils.py
|
merge_dict
|
def merge_dict(a, b):
    """
    Recursively merges and returns dict a with dict b.
    Any list values will be combined and returned sorted.
    :param a: dictionary object
    :param b: dictionary object
    :return: merged dictionary object
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    for key, val in six.iteritems(b):
        if key in result and isinstance(result[key], dict):
            result[key] = merge_dict(result[key], val)
        elif key in result and isinstance(result[key], list):
            result[key] = sorted(list(set(val) | set(result[key])))
        else:
            result[key] = deepcopy(val)
    return result
|
python
|
def merge_dict(a, b):
    """
    Recursively merges and returns dict a with dict b.
    Any list values will be combined and returned sorted.
    :param a: dictionary object
    :param b: dictionary object
    :return: merged dictionary object
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    for key, val in six.iteritems(b):
        if key in result and isinstance(result[key], dict):
            result[key] = merge_dict(result[key], val)
        elif key in result and isinstance(result[key], list):
            result[key] = sorted(list(set(val) | set(result[key])))
        else:
            result[key] = deepcopy(val)
    return result
|
[
"def",
"merge_dict",
"(",
"a",
",",
"b",
")",
":",
"if",
"not",
"isinstance",
"(",
"b",
",",
"dict",
")",
":",
"return",
"b",
"result",
"=",
"deepcopy",
"(",
"a",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"b",
")",
":",
"if",
"key",
"in",
"result",
"and",
"isinstance",
"(",
"result",
"[",
"key",
"]",
",",
"dict",
")",
":",
"result",
"[",
"key",
"]",
"=",
"merge_dict",
"(",
"result",
"[",
"key",
"]",
",",
"val",
")",
"elif",
"key",
"in",
"result",
"and",
"isinstance",
"(",
"result",
"[",
"key",
"]",
",",
"list",
")",
":",
"result",
"[",
"key",
"]",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"val",
")",
"|",
"set",
"(",
"result",
"[",
"key",
"]",
")",
")",
")",
"else",
":",
"result",
"[",
"key",
"]",
"=",
"deepcopy",
"(",
"val",
")",
"return",
"result"
] |
Recursively merges and returns dict a with dict b.
Any list values will be combined and returned sorted.
:param a: dictionary object
:param b: dictionary object
:return: merged dictionary object
|
[
"Recursively",
"merges",
"and",
"returns",
"dict",
"a",
"with",
"dict",
"b",
".",
"Any",
"list",
"values",
"will",
"be",
"combined",
"and",
"returned",
"sorted",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/utils.py#L9-L31
|
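A brief usage sketch for the `merge_dict` row above; the import path follows the row's `path` column, and the sample dictionaries are made up for illustration:

```python
from drf_haystack.utils import merge_dict  # module taken from the path column above

a = {"filters": {"lang": ["en"]}, "page": 1}
b = {"filters": {"lang": ["no", "en"], "year": 2015}}

merged = merge_dict(a, b)
# Nested dicts are merged recursively and list values are unioned and sorted:
# {"filters": {"lang": ["en", "no"], "year": 2015}, "page": 1}
print(merged)
```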
10,001
|
inonit/drf-haystack
|
drf_haystack/generics.py
|
HaystackGenericAPIView.get_queryset
|
def get_queryset(self, index_models=[]):
    """
    Get the list of items for this view.
    Returns ``self.queryset`` if defined and is a ``self.object_class``
    instance.
    @:param index_models: override `self.index_models`
    """
    if self.queryset is not None and isinstance(self.queryset, self.object_class):
        queryset = self.queryset.all()
    else:
        queryset = self.object_class()._clone()
        if len(index_models):
            queryset = queryset.models(*index_models)
        elif len(self.index_models):
            queryset = queryset.models(*self.index_models)
    return queryset
|
python
|
def get_queryset(self, index_models=[]):
    """
    Get the list of items for this view.
    Returns ``self.queryset`` if defined and is a ``self.object_class``
    instance.
    @:param index_models: override `self.index_models`
    """
    if self.queryset is not None and isinstance(self.queryset, self.object_class):
        queryset = self.queryset.all()
    else:
        queryset = self.object_class()._clone()
        if len(index_models):
            queryset = queryset.models(*index_models)
        elif len(self.index_models):
            queryset = queryset.models(*self.index_models)
    return queryset
|
[
"def",
"get_queryset",
"(",
"self",
",",
"index_models",
"=",
"[",
"]",
")",
":",
"if",
"self",
".",
"queryset",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"queryset",
",",
"self",
".",
"object_class",
")",
":",
"queryset",
"=",
"self",
".",
"queryset",
".",
"all",
"(",
")",
"else",
":",
"queryset",
"=",
"self",
".",
"object_class",
"(",
")",
".",
"_clone",
"(",
")",
"if",
"len",
"(",
"index_models",
")",
":",
"queryset",
"=",
"queryset",
".",
"models",
"(",
"*",
"index_models",
")",
"elif",
"len",
"(",
"self",
".",
"index_models",
")",
":",
"queryset",
"=",
"queryset",
".",
"models",
"(",
"*",
"self",
".",
"index_models",
")",
"return",
"queryset"
] |
Get the list of items for this view.
Returns ``self.queryset`` if defined and is a ``self.object_class``
instance.
@:param index_models: override `self.index_models`
|
[
"Get",
"the",
"list",
"of",
"items",
"for",
"this",
"view",
".",
"Returns",
"self",
".",
"queryset",
"if",
"defined",
"and",
"is",
"a",
"self",
".",
"object_class",
"instance",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/generics.py#L40-L56
|
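A minimal view sketch showing how `get_queryset()` above is typically driven through the `index_models` attribute; `Person` and `PersonSearchSerializer` are assumed placeholder names:

```python
from drf_haystack.generics import HaystackGenericAPIView  # module shown in the path column

from myapp.models import Person                         # assumed model
from myapp.serializers import PersonSearchSerializer    # assumed serializer

class PersonSearchView(HaystackGenericAPIView):
    # get_queryset() narrows the SearchQuerySet to these models when the view
    # does not provide an explicit queryset.
    index_models = [Person]
    serializer_class = PersonSearchSerializer
```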
10,002
|
inonit/drf-haystack
|
drf_haystack/generics.py
|
HaystackGenericAPIView.get_object
|
def get_object(self):
    """
    Fetch a single document from the data store according to whatever
    unique identifier is available for that document in the
    SearchIndex.
    In cases where the view has multiple ``index_models``, add a ``model`` query
    parameter containing a single `app_label.model` name to the request in order
    to override which model to include in the SearchQuerySet.
    Example:
        /api/v1/search/42/?model=myapp.person
    """
    queryset = self.get_queryset()
    if "model" in self.request.query_params:
        try:
            app_label, model = map(six.text_type.lower, self.request.query_params["model"].split(".", 1))
            ctype = ContentType.objects.get(app_label=app_label, model=model)
            queryset = self.get_queryset(index_models=[ctype.model_class()])
        except (ValueError, ContentType.DoesNotExist):
            raise Http404("Could not find any models matching '%s'. Make sure to use a valid "
                          "'app_label.model' name for the 'model' query parameter." % self.request.query_params["model"])
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    if lookup_url_kwarg not in self.kwargs:
        raise AttributeError(
            "Expected view %s to be called with a URL keyword argument "
            "named '%s'. Fix your URL conf, or set the `.lookup_field` "
            "attribute on the view correctly." % (self.__class__.__name__, lookup_url_kwarg)
        )
    queryset = queryset.filter(self.query_object((self.document_uid_field, self.kwargs[lookup_url_kwarg])))
    count = queryset.count()
    if count == 1:
        return queryset[0]
    elif count > 1:
        raise Http404("Multiple results matches the given query. Expected a single result.")
    raise Http404("No result matches the given query.")
|
python
|
def get_object(self):
    """
    Fetch a single document from the data store according to whatever
    unique identifier is available for that document in the
    SearchIndex.
    In cases where the view has multiple ``index_models``, add a ``model`` query
    parameter containing a single `app_label.model` name to the request in order
    to override which model to include in the SearchQuerySet.
    Example:
        /api/v1/search/42/?model=myapp.person
    """
    queryset = self.get_queryset()
    if "model" in self.request.query_params:
        try:
            app_label, model = map(six.text_type.lower, self.request.query_params["model"].split(".", 1))
            ctype = ContentType.objects.get(app_label=app_label, model=model)
            queryset = self.get_queryset(index_models=[ctype.model_class()])
        except (ValueError, ContentType.DoesNotExist):
            raise Http404("Could not find any models matching '%s'. Make sure to use a valid "
                          "'app_label.model' name for the 'model' query parameter." % self.request.query_params["model"])
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    if lookup_url_kwarg not in self.kwargs:
        raise AttributeError(
            "Expected view %s to be called with a URL keyword argument "
            "named '%s'. Fix your URL conf, or set the `.lookup_field` "
            "attribute on the view correctly." % (self.__class__.__name__, lookup_url_kwarg)
        )
    queryset = queryset.filter(self.query_object((self.document_uid_field, self.kwargs[lookup_url_kwarg])))
    count = queryset.count()
    if count == 1:
        return queryset[0]
    elif count > 1:
        raise Http404("Multiple results matches the given query. Expected a single result.")
    raise Http404("No result matches the given query.")
|
[
"def",
"get_object",
"(",
"self",
")",
":",
"queryset",
"=",
"self",
".",
"get_queryset",
"(",
")",
"if",
"\"model\"",
"in",
"self",
".",
"request",
".",
"query_params",
":",
"try",
":",
"app_label",
",",
"model",
"=",
"map",
"(",
"six",
".",
"text_type",
".",
"lower",
",",
"self",
".",
"request",
".",
"query_params",
"[",
"\"model\"",
"]",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
")",
"ctype",
"=",
"ContentType",
".",
"objects",
".",
"get",
"(",
"app_label",
"=",
"app_label",
",",
"model",
"=",
"model",
")",
"queryset",
"=",
"self",
".",
"get_queryset",
"(",
"index_models",
"=",
"[",
"ctype",
".",
"model_class",
"(",
")",
"]",
")",
"except",
"(",
"ValueError",
",",
"ContentType",
".",
"DoesNotExist",
")",
":",
"raise",
"Http404",
"(",
"\"Could not find any models matching '%s'. Make sure to use a valid \"",
"\"'app_label.model' name for the 'model' query parameter.\"",
"%",
"self",
".",
"request",
".",
"query_params",
"[",
"\"model\"",
"]",
")",
"lookup_url_kwarg",
"=",
"self",
".",
"lookup_url_kwarg",
"or",
"self",
".",
"lookup_field",
"if",
"lookup_url_kwarg",
"not",
"in",
"self",
".",
"kwargs",
":",
"raise",
"AttributeError",
"(",
"\"Expected view %s to be called with a URL keyword argument \"",
"\"named '%s'. Fix your URL conf, or set the `.lookup_field` \"",
"\"attribute on the view correctly.\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"lookup_url_kwarg",
")",
")",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"self",
".",
"query_object",
"(",
"(",
"self",
".",
"document_uid_field",
",",
"self",
".",
"kwargs",
"[",
"lookup_url_kwarg",
"]",
")",
")",
")",
"count",
"=",
"queryset",
".",
"count",
"(",
")",
"if",
"count",
"==",
"1",
":",
"return",
"queryset",
"[",
"0",
"]",
"elif",
"count",
">",
"1",
":",
"raise",
"Http404",
"(",
"\"Multiple results matches the given query. Expected a single result.\"",
")",
"raise",
"Http404",
"(",
"\"No result matches the given query.\"",
")"
] |
Fetch a single document from the data store according to whatever
unique identifier is available for that document in the
SearchIndex.
In cases where the view has multiple ``index_models``, add a ``model`` query
parameter containing a single `app_label.model` name to the request in order
to override which model to include in the SearchQuerySet.
Example:
/api/v1/search/42/?model=myapp.person
|
[
"Fetch",
"a",
"single",
"document",
"from",
"the",
"data",
"store",
"according",
"to",
"whatever",
"unique",
"identifier",
"is",
"available",
"for",
"that",
"document",
"in",
"the",
"SearchIndex",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/generics.py#L58-L95
|
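A hypothetical client-side illustration of the `model` query parameter handled by `get_object()` above; the endpoint URL and the `myapp.person` label are assumptions lifted from the docstring's own example:

```python
import requests  # any HTTP client works; requests is used here for brevity

# Without ?model=..., the detail lookup searches across all configured index_models.
# Adding ?model=app_label.model narrows the lookup to one model; an invalid label
# produces a 404 response, as does a lookup matching zero or multiple documents.
resp = requests.get("http://localhost:8000/api/v1/search/42/", params={"model": "myapp.person"})
print(resp.status_code)
```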
10,003
|
inonit/drf-haystack
|
drf_haystack/mixins.py
|
MoreLikeThisMixin.more_like_this
|
def more_like_this(self, request, pk=None):
    """
    Sets up a detail route for ``more-like-this`` results.
    Note that you'll need backend support in order to take advantage of this.
    This will add ie. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
    """
    obj = self.get_object().object
    queryset = self.filter_queryset(self.get_queryset()).more_like_this(obj)
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
    serializer = self.get_serializer(queryset, many=True)
    return Response(serializer.data)
|
python
|
def more_like_this(self, request, pk=None):
    """
    Sets up a detail route for ``more-like-this`` results.
    Note that you'll need backend support in order to take advantage of this.
    This will add ie. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
    """
    obj = self.get_object().object
    queryset = self.filter_queryset(self.get_queryset()).more_like_this(obj)
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
    serializer = self.get_serializer(queryset, many=True)
    return Response(serializer.data)
|
[
"def",
"more_like_this",
"(",
"self",
",",
"request",
",",
"pk",
"=",
"None",
")",
":",
"obj",
"=",
"self",
".",
"get_object",
"(",
")",
".",
"object",
"queryset",
"=",
"self",
".",
"filter_queryset",
"(",
"self",
".",
"get_queryset",
"(",
")",
")",
".",
"more_like_this",
"(",
"obj",
")",
"page",
"=",
"self",
".",
"paginate_queryset",
"(",
"queryset",
")",
"if",
"page",
"is",
"not",
"None",
":",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"page",
",",
"many",
"=",
"True",
")",
"return",
"self",
".",
"get_paginated_response",
"(",
"serializer",
".",
"data",
")",
"serializer",
"=",
"self",
".",
"get_serializer",
"(",
"queryset",
",",
"many",
"=",
"True",
")",
"return",
"Response",
"(",
"serializer",
".",
"data",
")"
] |
Sets up a detail route for ``more-like-this`` results.
Note that you'll need backend support in order to take advantage of this.
This will add ie. ^search/{pk}/more-like-this/$ to your existing ^search pattern.
|
[
"Sets",
"up",
"a",
"detail",
"route",
"for",
"more",
"-",
"like",
"-",
"this",
"results",
".",
"Note",
"that",
"you",
"ll",
"need",
"backend",
"support",
"in",
"order",
"to",
"take",
"advantage",
"of",
"this",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L17-L33
|
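A sketch of wiring the `more_like_this` action above into a viewset; `HaystackViewSet` is the bundled viewset from `drf_haystack.viewsets`, while `Person` and `PersonSearchSerializer` are assumed names:

```python
from drf_haystack.mixins import MoreLikeThisMixin   # module shown in the path column
from drf_haystack.viewsets import HaystackViewSet   # bundled viewset (assumed import path)

class PersonSearchViewSet(MoreLikeThisMixin, HaystackViewSet):
    index_models = [Person]                      # assumed model
    serializer_class = PersonSearchSerializer    # assumed serializer

# Registered on a router under ^search, this exposes ^search/{pk}/more-like-this/$
# as stated in the docstring; the search backend must support more-like-this queries.
```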
10,004
|
inonit/drf-haystack
|
drf_haystack/mixins.py
|
FacetMixin.filter_facet_queryset
|
def filter_facet_queryset(self, queryset):
    """
    Given a search queryset, filter it with whichever facet filter backends
    in use.
    """
    for backend in list(self.facet_filter_backends):
        queryset = backend().filter_queryset(self.request, queryset, self)
    if self.load_all:
        queryset = queryset.load_all()
    return queryset
|
python
|
def filter_facet_queryset(self, queryset):
    """
    Given a search queryset, filter it with whichever facet filter backends
    in use.
    """
    for backend in list(self.facet_filter_backends):
        queryset = backend().filter_queryset(self.request, queryset, self)
    if self.load_all:
        queryset = queryset.load_all()
    return queryset
|
[
"def",
"filter_facet_queryset",
"(",
"self",
",",
"queryset",
")",
":",
"for",
"backend",
"in",
"list",
"(",
"self",
".",
"facet_filter_backends",
")",
":",
"queryset",
"=",
"backend",
"(",
")",
".",
"filter_queryset",
"(",
"self",
".",
"request",
",",
"queryset",
",",
"self",
")",
"if",
"self",
".",
"load_all",
":",
"queryset",
"=",
"queryset",
".",
"load_all",
"(",
")",
"return",
"queryset"
] |
Given a search queryset, filter it with whichever facet filter backends
in use.
|
[
"Given",
"a",
"search",
"queryset",
"filter",
"it",
"with",
"whichever",
"facet",
"filter",
"backends",
"in",
"use",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L66-L77
|
10,005
|
inonit/drf-haystack
|
drf_haystack/mixins.py
|
FacetMixin.get_facet_serializer
|
def get_facet_serializer(self, *args, **kwargs):
    """
    Return the facet serializer instance that should be used for
    serializing faceted output.
    """
    assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
    facet_serializer_class = self.get_facet_serializer_class()
    kwargs["context"] = self.get_serializer_context()
    kwargs["context"].update({
        "objects": kwargs.pop("objects"),
        "facet_query_params_text": self.facet_query_params_text,
    })
    return facet_serializer_class(*args, **kwargs)
|
python
|
def get_facet_serializer(self, *args, **kwargs):
    """
    Return the facet serializer instance that should be used for
    serializing faceted output.
    """
    assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
    facet_serializer_class = self.get_facet_serializer_class()
    kwargs["context"] = self.get_serializer_context()
    kwargs["context"].update({
        "objects": kwargs.pop("objects"),
        "facet_query_params_text": self.facet_query_params_text,
    })
    return facet_serializer_class(*args, **kwargs)
|
[
"def",
"get_facet_serializer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"\"objects\"",
"in",
"kwargs",
",",
"\"`objects` is a required argument to `get_facet_serializer()`\"",
"facet_serializer_class",
"=",
"self",
".",
"get_facet_serializer_class",
"(",
")",
"kwargs",
"[",
"\"context\"",
"]",
"=",
"self",
".",
"get_serializer_context",
"(",
")",
"kwargs",
"[",
"\"context\"",
"]",
".",
"update",
"(",
"{",
"\"objects\"",
":",
"kwargs",
".",
"pop",
"(",
"\"objects\"",
")",
",",
"\"facet_query_params_text\"",
":",
"self",
".",
"facet_query_params_text",
",",
"}",
")",
"return",
"facet_serializer_class",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Return the facet serializer instance that should be used for
serializing faceted output.
|
[
"Return",
"the",
"facet",
"serializer",
"instance",
"that",
"should",
"be",
"used",
"for",
"serializing",
"faceted",
"output",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L79-L92
|
10,006
|
inonit/drf-haystack
|
drf_haystack/mixins.py
|
FacetMixin.get_facet_serializer_class
|
def get_facet_serializer_class(self):
    """
    Return the class to use for serializing facets.
    Defaults to using ``self.facet_serializer_class``.
    """
    if self.facet_serializer_class is None:
        raise AttributeError(
            "%(cls)s should either include a `facet_serializer_class` attribute, "
            "or override %(cls)s.get_facet_serializer_class() method." %
            {"cls": self.__class__.__name__}
        )
    return self.facet_serializer_class
|
python
|
def get_facet_serializer_class(self):
    """
    Return the class to use for serializing facets.
    Defaults to using ``self.facet_serializer_class``.
    """
    if self.facet_serializer_class is None:
        raise AttributeError(
            "%(cls)s should either include a `facet_serializer_class` attribute, "
            "or override %(cls)s.get_facet_serializer_class() method." %
            {"cls": self.__class__.__name__}
        )
    return self.facet_serializer_class
|
[
"def",
"get_facet_serializer_class",
"(",
"self",
")",
":",
"if",
"self",
".",
"facet_serializer_class",
"is",
"None",
":",
"raise",
"AttributeError",
"(",
"\"%(cls)s should either include a `facet_serializer_class` attribute, \"",
"\"or override %(cls)s.get_facet_serializer_class() method.\"",
"%",
"{",
"\"cls\"",
":",
"self",
".",
"__class__",
".",
"__name__",
"}",
")",
"return",
"self",
".",
"facet_serializer_class"
] |
Return the class to use for serializing facets.
Defaults to using ``self.facet_serializer_class``.
|
[
"Return",
"the",
"class",
"to",
"use",
"for",
"serializing",
"facets",
".",
"Defaults",
"to",
"using",
"self",
".",
"facet_serializer_class",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L94-L105
|
10,007
|
inonit/drf-haystack
|
drf_haystack/mixins.py
|
FacetMixin.get_facet_objects_serializer
|
def get_facet_objects_serializer(self, *args, **kwargs):
    """
    Return the serializer instance which should be used for
    serializing faceted objects.
    """
    facet_objects_serializer_class = self.get_facet_objects_serializer_class()
    kwargs["context"] = self.get_serializer_context()
    return facet_objects_serializer_class(*args, **kwargs)
|
python
|
def get_facet_objects_serializer(self, *args, **kwargs):
    """
    Return the serializer instance which should be used for
    serializing faceted objects.
    """
    facet_objects_serializer_class = self.get_facet_objects_serializer_class()
    kwargs["context"] = self.get_serializer_context()
    return facet_objects_serializer_class(*args, **kwargs)
|
[
"def",
"get_facet_objects_serializer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"facet_objects_serializer_class",
"=",
"self",
".",
"get_facet_objects_serializer_class",
"(",
")",
"kwargs",
"[",
"\"context\"",
"]",
"=",
"self",
".",
"get_serializer_context",
"(",
")",
"return",
"facet_objects_serializer_class",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Return the serializer instance which should be used for
serializing faceted objects.
|
[
"Return",
"the",
"serializer",
"instance",
"which",
"should",
"be",
"used",
"for",
"serializing",
"faceted",
"objects",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/mixins.py#L107-L114
|
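The four `FacetMixin` rows above (filter_facet_queryset, get_facet_serializer, get_facet_serializer_class, get_facet_objects_serializer) come together in a faceted search view. A rough sketch, with `PersonIndex`, `Person` and `PersonSearchSerializer` as assumed placeholder names:

```python
from drf_haystack.mixins import FacetMixin
from drf_haystack.viewsets import HaystackViewSet            # assumed import path
from drf_haystack.serializers import HaystackFacetSerializer

class PersonFacetSerializer(HaystackFacetSerializer):
    serialize_objects = True            # include paginated result objects alongside the facets
    class Meta:
        index_classes = [PersonIndex]   # assumed search index
        fields = ["firstname", "lastname"]

class PersonSearchViewSet(FacetMixin, HaystackViewSet):
    index_models = [Person]
    serializer_class = PersonSearchSerializer
    facet_serializer_class = PersonFacetSerializer  # returned by get_facet_serializer_class()
```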
10,008
|
inonit/drf-haystack
|
drf_haystack/fields.py
|
DRFHaystackFieldMixin.bind
|
def bind(self, field_name, parent):
    """
    Initializes the field name and parent for the field instance.
    Called when a field is added to the parent serializer instance.
    Taken from DRF and modified to support drf_haystack multiple index
    functionality.
    """
    # In order to enforce a consistent style, we error if a redundant
    # 'source' argument has been used. For example:
    # my_field = serializer.CharField(source='my_field')
    assert self.source != field_name, (
        "It is redundant to specify `source='%s'` on field '%s' in "
        "serializer '%s', because it is the same as the field name. "
        "Remove the `source` keyword argument." %
        (field_name, self.__class__.__name__, parent.__class__.__name__)
    )
    self.field_name = field_name
    self.parent = parent
    # `self.label` should default to being based on the field name.
    if self.label is None:
        self.label = field_name.replace('_', ' ').capitalize()
    # self.source should default to being the same as the field name.
    if self.source is None:
        self.source = self.convert_field_name(field_name)
    # self.source_attrs is a list of attributes that need to be looked up
    # when serializing the instance, or populating the validated data.
    if self.source == '*':
        self.source_attrs = []
    else:
        self.source_attrs = self.source.split('.')
|
python
|
def bind(self, field_name, parent):
    """
    Initializes the field name and parent for the field instance.
    Called when a field is added to the parent serializer instance.
    Taken from DRF and modified to support drf_haystack multiple index
    functionality.
    """
    # In order to enforce a consistent style, we error if a redundant
    # 'source' argument has been used. For example:
    # my_field = serializer.CharField(source='my_field')
    assert self.source != field_name, (
        "It is redundant to specify `source='%s'` on field '%s' in "
        "serializer '%s', because it is the same as the field name. "
        "Remove the `source` keyword argument." %
        (field_name, self.__class__.__name__, parent.__class__.__name__)
    )
    self.field_name = field_name
    self.parent = parent
    # `self.label` should default to being based on the field name.
    if self.label is None:
        self.label = field_name.replace('_', ' ').capitalize()
    # self.source should default to being the same as the field name.
    if self.source is None:
        self.source = self.convert_field_name(field_name)
    # self.source_attrs is a list of attributes that need to be looked up
    # when serializing the instance, or populating the validated data.
    if self.source == '*':
        self.source_attrs = []
    else:
        self.source_attrs = self.source.split('.')
|
[
"def",
"bind",
"(",
"self",
",",
"field_name",
",",
"parent",
")",
":",
"# In order to enforce a consistent style, we error if a redundant",
"# 'source' argument has been used. For example:",
"# my_field = serializer.CharField(source='my_field')",
"assert",
"self",
".",
"source",
"!=",
"field_name",
",",
"(",
"\"It is redundant to specify `source='%s'` on field '%s' in \"",
"\"serializer '%s', because it is the same as the field name. \"",
"\"Remove the `source` keyword argument.\"",
"%",
"(",
"field_name",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
"parent",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"field_name",
"=",
"field_name",
"self",
".",
"parent",
"=",
"parent",
"# `self.label` should default to being based on the field name.",
"if",
"self",
".",
"label",
"is",
"None",
":",
"self",
".",
"label",
"=",
"field_name",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"capitalize",
"(",
")",
"# self.source should default to being the same as the field name.",
"if",
"self",
".",
"source",
"is",
"None",
":",
"self",
".",
"source",
"=",
"self",
".",
"convert_field_name",
"(",
"field_name",
")",
"# self.source_attrs is a list of attributes that need to be looked up",
"# when serializing the instance, or populating the validated data.",
"if",
"self",
".",
"source",
"==",
"'*'",
":",
"self",
".",
"source_attrs",
"=",
"[",
"]",
"else",
":",
"self",
".",
"source_attrs",
"=",
"self",
".",
"source",
".",
"split",
"(",
"'.'",
")"
] |
Initializes the field name and parent for the field instance.
Called when a field is added to the parent serializer instance.
Taken from DRF and modified to support drf_haystack multiple index
functionality.
|
[
"Initializes",
"the",
"field",
"name",
"and",
"parent",
"for",
"the",
"field",
"instance",
".",
"Called",
"when",
"a",
"field",
"is",
"added",
"to",
"the",
"parent",
"serializer",
"instance",
".",
"Taken",
"from",
"DRF",
"and",
"modified",
"to",
"support",
"drf_haystack",
"multiple",
"index",
"functionality",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/fields.py#L16-L50
|
10,009
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HaystackSerializer._get_default_field_kwargs
|
def _get_default_field_kwargs(model, field):
    """
    Get the required attributes from the model field in order
    to instantiate a REST Framework serializer field.
    """
    kwargs = {}
    try:
        field_name = field.model_attr or field.index_fieldname
        model_field = model._meta.get_field(field_name)
        kwargs.update(get_field_kwargs(field_name, model_field))
        # Remove stuff we don't care about!
        delete_attrs = [
            "allow_blank",
            "choices",
            "model_field",
            "allow_unicode",
        ]
        for attr in delete_attrs:
            if attr in kwargs:
                del kwargs[attr]
    except FieldDoesNotExist:
        pass
    return kwargs
|
python
|
def _get_default_field_kwargs(model, field):
    """
    Get the required attributes from the model field in order
    to instantiate a REST Framework serializer field.
    """
    kwargs = {}
    try:
        field_name = field.model_attr or field.index_fieldname
        model_field = model._meta.get_field(field_name)
        kwargs.update(get_field_kwargs(field_name, model_field))
        # Remove stuff we don't care about!
        delete_attrs = [
            "allow_blank",
            "choices",
            "model_field",
            "allow_unicode",
        ]
        for attr in delete_attrs:
            if attr in kwargs:
                del kwargs[attr]
    except FieldDoesNotExist:
        pass
    return kwargs
|
[
"def",
"_get_default_field_kwargs",
"(",
"model",
",",
"field",
")",
":",
"kwargs",
"=",
"{",
"}",
"try",
":",
"field_name",
"=",
"field",
".",
"model_attr",
"or",
"field",
".",
"index_fieldname",
"model_field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"kwargs",
".",
"update",
"(",
"get_field_kwargs",
"(",
"field_name",
",",
"model_field",
")",
")",
"# Remove stuff we don't care about!",
"delete_attrs",
"=",
"[",
"\"allow_blank\"",
",",
"\"choices\"",
",",
"\"model_field\"",
",",
"\"allow_unicode\"",
",",
"]",
"for",
"attr",
"in",
"delete_attrs",
":",
"if",
"attr",
"in",
"kwargs",
":",
"del",
"kwargs",
"[",
"attr",
"]",
"except",
"FieldDoesNotExist",
":",
"pass",
"return",
"kwargs"
] |
Get the required attributes from the model field in order
to instantiate a REST Framework serializer field.
|
[
"Get",
"the",
"required",
"attributes",
"from",
"the",
"model",
"field",
"in",
"order",
"to",
"instantiate",
"a",
"REST",
"Framework",
"serializer",
"field",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L124-L148
|
10,010
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HaystackSerializer._get_index_class_name
|
def _get_index_class_name(self, index_cls):
    """
    Converts in index model class to a name suitable for use as a field name prefix. A user
    may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
    """
    cls_name = index_cls.__name__
    aliases = self.Meta.index_aliases
    return aliases.get(cls_name, cls_name.split('.')[-1])
|
python
|
def _get_index_class_name(self, index_cls):
    """
    Converts in index model class to a name suitable for use as a field name prefix. A user
    may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
    """
    cls_name = index_cls.__name__
    aliases = self.Meta.index_aliases
    return aliases.get(cls_name, cls_name.split('.')[-1])
|
[
"def",
"_get_index_class_name",
"(",
"self",
",",
"index_cls",
")",
":",
"cls_name",
"=",
"index_cls",
".",
"__name__",
"aliases",
"=",
"self",
".",
"Meta",
".",
"index_aliases",
"return",
"aliases",
".",
"get",
"(",
"cls_name",
",",
"cls_name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
")"
] |
Converts in index model class to a name suitable for use as a field name prefix. A user
may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
|
[
"Converts",
"in",
"index",
"model",
"class",
"to",
"a",
"name",
"suitable",
"for",
"use",
"as",
"a",
"field",
"name",
"prefix",
".",
"A",
"user",
"may",
"optionally",
"specify",
"custom",
"aliases",
"via",
"an",
"index_aliases",
"attribute",
"on",
"the",
"Meta",
"class"
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L156-L163
|
10,011
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HaystackSerializer.get_fields
|
def get_fields(self):
    """
    Get the required fields for serializing the result.
    """
    fields = self.Meta.fields
    exclude = self.Meta.exclude
    ignore_fields = self.Meta.ignore_fields
    indices = self.Meta.index_classes
    declared_fields = copy.deepcopy(self._declared_fields)
    prefix_field_names = len(indices) > 1
    field_mapping = OrderedDict()
    # overlapping fields on multiple indices is supported by internally prefixing the field
    # names with the index class to which they belong or, optionally, a user-provided alias
    # for the index.
    for index_cls in self.Meta.index_classes:
        prefix = ""
        if prefix_field_names:
            prefix = "_%s__" % self._get_index_class_name(index_cls)
        for field_name, field_type in six.iteritems(index_cls.fields):
            orig_name = field_name
            field_name = "%s%s" % (prefix, field_name)
            # Don't use this field if it is in `ignore_fields`
            if orig_name in ignore_fields or field_name in ignore_fields:
                continue
            # When fields to include are decided by `exclude`
            if exclude:
                if orig_name in exclude or field_name in exclude:
                    continue
            # When fields to include are decided by `fields`
            if fields:
                if orig_name not in fields and field_name not in fields:
                    continue
            # Look up the field attributes on the current index model,
            # in order to correctly instantiate the serializer field.
            model = index_cls().get_model()
            kwargs = self._get_default_field_kwargs(model, field_type)
            kwargs['prefix_field_names'] = prefix_field_names
            field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
    # Add any explicitly declared fields. They *will* override any index fields
    # in case of naming collision!.
    if declared_fields:
        for field_name in declared_fields:
            field_mapping[field_name] = declared_fields[field_name]
    return field_mapping
|
python
|
def get_fields(self):
    """
    Get the required fields for serializing the result.
    """
    fields = self.Meta.fields
    exclude = self.Meta.exclude
    ignore_fields = self.Meta.ignore_fields
    indices = self.Meta.index_classes
    declared_fields = copy.deepcopy(self._declared_fields)
    prefix_field_names = len(indices) > 1
    field_mapping = OrderedDict()
    # overlapping fields on multiple indices is supported by internally prefixing the field
    # names with the index class to which they belong or, optionally, a user-provided alias
    # for the index.
    for index_cls in self.Meta.index_classes:
        prefix = ""
        if prefix_field_names:
            prefix = "_%s__" % self._get_index_class_name(index_cls)
        for field_name, field_type in six.iteritems(index_cls.fields):
            orig_name = field_name
            field_name = "%s%s" % (prefix, field_name)
            # Don't use this field if it is in `ignore_fields`
            if orig_name in ignore_fields or field_name in ignore_fields:
                continue
            # When fields to include are decided by `exclude`
            if exclude:
                if orig_name in exclude or field_name in exclude:
                    continue
            # When fields to include are decided by `fields`
            if fields:
                if orig_name not in fields and field_name not in fields:
                    continue
            # Look up the field attributes on the current index model,
            # in order to correctly instantiate the serializer field.
            model = index_cls().get_model()
            kwargs = self._get_default_field_kwargs(model, field_type)
            kwargs['prefix_field_names'] = prefix_field_names
            field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
    # Add any explicitly declared fields. They *will* override any index fields
    # in case of naming collision!.
    if declared_fields:
        for field_name in declared_fields:
            field_mapping[field_name] = declared_fields[field_name]
    return field_mapping
|
[
"def",
"get_fields",
"(",
"self",
")",
":",
"fields",
"=",
"self",
".",
"Meta",
".",
"fields",
"exclude",
"=",
"self",
".",
"Meta",
".",
"exclude",
"ignore_fields",
"=",
"self",
".",
"Meta",
".",
"ignore_fields",
"indices",
"=",
"self",
".",
"Meta",
".",
"index_classes",
"declared_fields",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_declared_fields",
")",
"prefix_field_names",
"=",
"len",
"(",
"indices",
")",
">",
"1",
"field_mapping",
"=",
"OrderedDict",
"(",
")",
"# overlapping fields on multiple indices is supported by internally prefixing the field",
"# names with the index class to which they belong or, optionally, a user-provided alias",
"# for the index.",
"for",
"index_cls",
"in",
"self",
".",
"Meta",
".",
"index_classes",
":",
"prefix",
"=",
"\"\"",
"if",
"prefix_field_names",
":",
"prefix",
"=",
"\"_%s__\"",
"%",
"self",
".",
"_get_index_class_name",
"(",
"index_cls",
")",
"for",
"field_name",
",",
"field_type",
"in",
"six",
".",
"iteritems",
"(",
"index_cls",
".",
"fields",
")",
":",
"orig_name",
"=",
"field_name",
"field_name",
"=",
"\"%s%s\"",
"%",
"(",
"prefix",
",",
"field_name",
")",
"# Don't use this field if it is in `ignore_fields`",
"if",
"orig_name",
"in",
"ignore_fields",
"or",
"field_name",
"in",
"ignore_fields",
":",
"continue",
"# When fields to include are decided by `exclude`",
"if",
"exclude",
":",
"if",
"orig_name",
"in",
"exclude",
"or",
"field_name",
"in",
"exclude",
":",
"continue",
"# When fields to include are decided by `fields`",
"if",
"fields",
":",
"if",
"orig_name",
"not",
"in",
"fields",
"and",
"field_name",
"not",
"in",
"fields",
":",
"continue",
"# Look up the field attributes on the current index model,",
"# in order to correctly instantiate the serializer field.",
"model",
"=",
"index_cls",
"(",
")",
".",
"get_model",
"(",
")",
"kwargs",
"=",
"self",
".",
"_get_default_field_kwargs",
"(",
"model",
",",
"field_type",
")",
"kwargs",
"[",
"'prefix_field_names'",
"]",
"=",
"prefix_field_names",
"field_mapping",
"[",
"field_name",
"]",
"=",
"self",
".",
"_field_mapping",
"[",
"field_type",
"]",
"(",
"*",
"*",
"kwargs",
")",
"# Add any explicitly declared fields. They *will* override any index fields",
"# in case of naming collision!.",
"if",
"declared_fields",
":",
"for",
"field_name",
"in",
"declared_fields",
":",
"field_mapping",
"[",
"field_name",
"]",
"=",
"declared_fields",
"[",
"field_name",
"]",
"return",
"field_mapping"
] |
Get the required fields for serializing the result.
|
[
"Get",
"the",
"required",
"fields",
"for",
"serializing",
"the",
"result",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L165-L214
|
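`get_fields()` above reads `index_classes`, `fields`, `exclude` and `ignore_fields` from the serializer's `Meta`. A minimal sketch of such a serializer, with `PersonIndex` and the listed field names assumed:

```python
from drf_haystack.serializers import HaystackSerializer

class PersonSearchSerializer(HaystackSerializer):
    class Meta:
        # With more than one entry here, overlapping field names are prefixed
        # with "_<IndexName>__" as described in the comments above.
        index_classes = [PersonIndex]            # assumed search index
        fields = ["firstname", "lastname", "text"]
        ignore_fields = ["autocomplete"]         # hypothetical field to skip
```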
10,012
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HaystackSerializer.to_representation
|
def to_representation(self, instance):
    """
    If we have a serializer mapping, use that. Otherwise, use standard serializer behavior
    Since we might be dealing with multiple indexes, some fields might
    not be valid for all results. Do not render the fields which don't belong
    to the search result.
    """
    if self.Meta.serializers:
        ret = self.multi_serializer_representation(instance)
    else:
        ret = super(HaystackSerializer, self).to_representation(instance)
        prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
        current_index = self._get_index_class_name(type(instance.searchindex))
        for field in self.fields.keys():
            # handle declared field value methods on serializer
            value_method = getattr(self, "get_{}".format(field), None)
            if value_method and callable(value_method):
                ret[field] = value_method()
            # now convert namespaced field names
            orig_field = field
            if prefix_field_names:
                parts = field.split("__")
                if len(parts) > 1:
                    index = parts[0][1:]  # trim the preceding '_'
                    field = parts[1]
                    if index == current_index:
                        ret[field] = ret[orig_field]
                    del ret[orig_field]
            elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
                del ret[orig_field]
    # include the highlighted field in either case
    if getattr(instance, "highlighted", None):
        ret["highlighted"] = instance.highlighted[0]
    return ret
|
python
|
def to_representation(self, instance):
    """
    If we have a serializer mapping, use that. Otherwise, use standard serializer behavior
    Since we might be dealing with multiple indexes, some fields might
    not be valid for all results. Do not render the fields which don't belong
    to the search result.
    """
    if self.Meta.serializers:
        ret = self.multi_serializer_representation(instance)
    else:
        ret = super(HaystackSerializer, self).to_representation(instance)
        prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
        current_index = self._get_index_class_name(type(instance.searchindex))
        for field in self.fields.keys():
            # handle declared field value methods on serializer
            value_method = getattr(self, "get_{}".format(field), None)
            if value_method and callable(value_method):
                ret[field] = value_method()
            # now convert namespaced field names
            orig_field = field
            if prefix_field_names:
                parts = field.split("__")
                if len(parts) > 1:
                    index = parts[0][1:]  # trim the preceding '_'
                    field = parts[1]
                    if index == current_index:
                        ret[field] = ret[orig_field]
                    del ret[orig_field]
            elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
                del ret[orig_field]
    # include the highlighted field in either case
    if getattr(instance, "highlighted", None):
        ret["highlighted"] = instance.highlighted[0]
    return ret
|
[
"def",
"to_representation",
"(",
"self",
",",
"instance",
")",
":",
"if",
"self",
".",
"Meta",
".",
"serializers",
":",
"ret",
"=",
"self",
".",
"multi_serializer_representation",
"(",
"instance",
")",
"else",
":",
"ret",
"=",
"super",
"(",
"HaystackSerializer",
",",
"self",
")",
".",
"to_representation",
"(",
"instance",
")",
"prefix_field_names",
"=",
"len",
"(",
"getattr",
"(",
"self",
".",
"Meta",
",",
"\"index_classes\"",
")",
")",
">",
"1",
"current_index",
"=",
"self",
".",
"_get_index_class_name",
"(",
"type",
"(",
"instance",
".",
"searchindex",
")",
")",
"for",
"field",
"in",
"self",
".",
"fields",
".",
"keys",
"(",
")",
":",
"# handle declared field value methods on serializer",
"value_method",
"=",
"getattr",
"(",
"self",
",",
"\"get_{}\"",
".",
"format",
"(",
"field",
")",
",",
"None",
")",
"if",
"value_method",
"and",
"callable",
"(",
"value_method",
")",
":",
"ret",
"[",
"field",
"]",
"=",
"value_method",
"(",
")",
"# now convert namespaced field names",
"orig_field",
"=",
"field",
"if",
"prefix_field_names",
":",
"parts",
"=",
"field",
".",
"split",
"(",
"\"__\"",
")",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"index",
"=",
"parts",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"# trim the preceding '_'",
"field",
"=",
"parts",
"[",
"1",
"]",
"if",
"index",
"==",
"current_index",
":",
"ret",
"[",
"field",
"]",
"=",
"ret",
"[",
"orig_field",
"]",
"del",
"ret",
"[",
"orig_field",
"]",
"elif",
"field",
"not",
"in",
"chain",
"(",
"instance",
".",
"searchindex",
".",
"fields",
".",
"keys",
"(",
")",
",",
"self",
".",
"_declared_fields",
".",
"keys",
"(",
")",
")",
":",
"del",
"ret",
"[",
"orig_field",
"]",
"# include the highlighted field in either case",
"if",
"getattr",
"(",
"instance",
",",
"\"highlighted\"",
",",
"None",
")",
":",
"ret",
"[",
"\"highlighted\"",
"]",
"=",
"instance",
".",
"highlighted",
"[",
"0",
"]",
"return",
"ret"
] |
If we have a serializer mapping, use that. Otherwise, use standard serializer behavior
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result.
|
[
"If",
"we",
"have",
"a",
"serializer",
"mapping",
"use",
"that",
".",
"Otherwise",
"use",
"standard",
"serializer",
"behavior",
"Since",
"we",
"might",
"be",
"dealing",
"with",
"multiple",
"indexes",
"some",
"fields",
"might",
"not",
"be",
"valid",
"for",
"all",
"results",
".",
"Do",
"not",
"render",
"the",
"fields",
"which",
"don",
"t",
"belong",
"to",
"the",
"search",
"result",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L216-L251
|
10,013
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
FacetFieldSerializer.get_narrow_url
|
def get_narrow_url(self, instance):
    """
    Return a link suitable for narrowing on the current item.
    """
    text = instance[0]
    request = self.context["request"]
    query_params = request.GET.copy()
    # Never keep the page query parameter in narrowing urls.
    # It will raise a NotFound exception when trying to paginate a narrowed queryset.
    page_query_param = self.get_paginate_by_param()
    if page_query_param and page_query_param in query_params:
        del query_params[page_query_param]
    selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
    selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
    query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
    path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
    url = request.build_absolute_uri(path)
    return serializers.Hyperlink(url, "narrow-url")
|
python
|
def get_narrow_url(self, instance):
    """
    Return a link suitable for narrowing on the current item.
    """
    text = instance[0]
    request = self.context["request"]
    query_params = request.GET.copy()
    # Never keep the page query parameter in narrowing urls.
    # It will raise a NotFound exception when trying to paginate a narrowed queryset.
    page_query_param = self.get_paginate_by_param()
    if page_query_param and page_query_param in query_params:
        del query_params[page_query_param]
    selected_facets = set(query_params.pop(self.root.facet_query_params_text, []))
    selected_facets.add("%(field)s_exact:%(text)s" % {"field": self.parent_field, "text": text})
    query_params.setlist(self.root.facet_query_params_text, sorted(selected_facets))
    path = "%(path)s?%(query)s" % {"path": request.path_info, "query": query_params.urlencode()}
    url = request.build_absolute_uri(path)
    return serializers.Hyperlink(url, "narrow-url")
|
[
"def",
"get_narrow_url",
"(",
"self",
",",
"instance",
")",
":",
"text",
"=",
"instance",
"[",
"0",
"]",
"request",
"=",
"self",
".",
"context",
"[",
"\"request\"",
"]",
"query_params",
"=",
"request",
".",
"GET",
".",
"copy",
"(",
")",
"# Never keep the page query parameter in narrowing urls.",
"# It will raise a NotFound exception when trying to paginate a narrowed queryset.",
"page_query_param",
"=",
"self",
".",
"get_paginate_by_param",
"(",
")",
"if",
"page_query_param",
"and",
"page_query_param",
"in",
"query_params",
":",
"del",
"query_params",
"[",
"page_query_param",
"]",
"selected_facets",
"=",
"set",
"(",
"query_params",
".",
"pop",
"(",
"self",
".",
"root",
".",
"facet_query_params_text",
",",
"[",
"]",
")",
")",
"selected_facets",
".",
"add",
"(",
"\"%(field)s_exact:%(text)s\"",
"%",
"{",
"\"field\"",
":",
"self",
".",
"parent_field",
",",
"\"text\"",
":",
"text",
"}",
")",
"query_params",
".",
"setlist",
"(",
"self",
".",
"root",
".",
"facet_query_params_text",
",",
"sorted",
"(",
"selected_facets",
")",
")",
"path",
"=",
"\"%(path)s?%(query)s\"",
"%",
"{",
"\"path\"",
":",
"request",
".",
"path_info",
",",
"\"query\"",
":",
"query_params",
".",
"urlencode",
"(",
")",
"}",
"url",
"=",
"request",
".",
"build_absolute_uri",
"(",
"path",
")",
"return",
"serializers",
".",
"Hyperlink",
"(",
"url",
",",
"\"narrow-url\"",
")"
] |
Return a link suitable for narrowing on the current item.
|
[
"Return",
"a",
"link",
"suitable",
"for",
"narrowing",
"on",
"the",
"current",
"item",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L340-L360
|
10,014
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
FacetFieldSerializer.to_representation
|
def to_representation(self, field, instance):
    """
    Set the ``parent_field`` property equal to the current field on the serializer class,
    so that each field can query it to see what kind of attribute they are processing.
    """
    self.parent_field = field
    return super(FacetFieldSerializer, self).to_representation(instance)
|
python
|
def to_representation(self, field, instance):
    """
    Set the ``parent_field`` property equal to the current field on the serializer class,
    so that each field can query it to see what kind of attribute they are processing.
    """
    self.parent_field = field
    return super(FacetFieldSerializer, self).to_representation(instance)
|
[
"def",
"to_representation",
"(",
"self",
",",
"field",
",",
"instance",
")",
":",
"self",
".",
"parent_field",
"=",
"field",
"return",
"super",
"(",
"FacetFieldSerializer",
",",
"self",
")",
".",
"to_representation",
"(",
"instance",
")"
] |
Set the ``parent_field`` property equal to the current field on the serializer class,
so that each field can query it to see what kind of attribute they are processing.
|
[
"Set",
"the",
"parent_field",
"property",
"equal",
"to",
"the",
"current",
"field",
"on",
"the",
"serializer",
"class",
"so",
"that",
"each",
"field",
"can",
"query",
"it",
"to",
"see",
"what",
"kind",
"of",
"attribute",
"they",
"are",
"processing",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L362-L368
|
10,015
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HaystackFacetSerializer.get_fields
|
def get_fields(self):
    """
    This returns a dictionary containing the top most fields,
    ``dates``, ``fields`` and ``queries``.
    """
    field_mapping = OrderedDict()
    for field, data in self.instance.items():
        field_mapping.update(
            {field: self.facet_dict_field_class(
                child=self.facet_list_field_class(child=self.facet_field_serializer_class(data)), required=False)}
        )
    if self.serialize_objects is True:
        field_mapping["objects"] = serializers.SerializerMethodField()
    return field_mapping
|
python
|
def get_fields(self):
    """
    This returns a dictionary containing the top most fields,
    ``dates``, ``fields`` and ``queries``.
    """
    field_mapping = OrderedDict()
    for field, data in self.instance.items():
        field_mapping.update(
            {field: self.facet_dict_field_class(
                child=self.facet_list_field_class(child=self.facet_field_serializer_class(data)), required=False)}
        )
    if self.serialize_objects is True:
        field_mapping["objects"] = serializers.SerializerMethodField()
    return field_mapping
|
[
"def",
"get_fields",
"(",
"self",
")",
":",
"field_mapping",
"=",
"OrderedDict",
"(",
")",
"for",
"field",
",",
"data",
"in",
"self",
".",
"instance",
".",
"items",
"(",
")",
":",
"field_mapping",
".",
"update",
"(",
"{",
"field",
":",
"self",
".",
"facet_dict_field_class",
"(",
"child",
"=",
"self",
".",
"facet_list_field_class",
"(",
"child",
"=",
"self",
".",
"facet_field_serializer_class",
"(",
"data",
")",
")",
",",
"required",
"=",
"False",
")",
"}",
")",
"if",
"self",
".",
"serialize_objects",
"is",
"True",
":",
"field_mapping",
"[",
"\"objects\"",
"]",
"=",
"serializers",
".",
"SerializerMethodField",
"(",
")",
"return",
"field_mapping"
] |
This returns a dictionary containing the top most fields,
``dates``, ``fields`` and ``queries``.
|
[
"This",
"returns",
"a",
"dictionary",
"containing",
"the",
"top",
"most",
"fields",
"dates",
"fields",
"and",
"queries",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L384-L399
|
10,016
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HaystackFacetSerializer.get_objects
|
def get_objects(self, instance):
    """
    Return a list of objects matching the faceted result.
    """
    view = self.context["view"]
    queryset = self.context["objects"]
    page = view.paginate_queryset(queryset)
    if page is not None:
        serializer = view.get_facet_objects_serializer(page, many=True)
        return OrderedDict([
            ("count", self.get_count(queryset)),
            ("next", view.paginator.get_next_link()),
            ("previous", view.paginator.get_previous_link()),
            ("results", serializer.data)
        ])
    serializer = view.get_serializer(queryset, many=True)
    return serializer.data
|
python
|
def get_objects(self, instance):
    """
    Return a list of objects matching the faceted result.
    """
    view = self.context["view"]
    queryset = self.context["objects"]
    page = view.paginate_queryset(queryset)
    if page is not None:
        serializer = view.get_facet_objects_serializer(page, many=True)
        return OrderedDict([
            ("count", self.get_count(queryset)),
            ("next", view.paginator.get_next_link()),
            ("previous", view.paginator.get_previous_link()),
            ("results", serializer.data)
        ])
    serializer = view.get_serializer(queryset, many=True)
    return serializer.data
|
[
"def",
"get_objects",
"(",
"self",
",",
"instance",
")",
":",
"view",
"=",
"self",
".",
"context",
"[",
"\"view\"",
"]",
"queryset",
"=",
"self",
".",
"context",
"[",
"\"objects\"",
"]",
"page",
"=",
"view",
".",
"paginate_queryset",
"(",
"queryset",
")",
"if",
"page",
"is",
"not",
"None",
":",
"serializer",
"=",
"view",
".",
"get_facet_objects_serializer",
"(",
"page",
",",
"many",
"=",
"True",
")",
"return",
"OrderedDict",
"(",
"[",
"(",
"\"count\"",
",",
"self",
".",
"get_count",
"(",
"queryset",
")",
")",
",",
"(",
"\"next\"",
",",
"view",
".",
"paginator",
".",
"get_next_link",
"(",
")",
")",
",",
"(",
"\"previous\"",
",",
"view",
".",
"paginator",
".",
"get_previous_link",
"(",
")",
")",
",",
"(",
"\"results\"",
",",
"serializer",
".",
"data",
")",
"]",
")",
"serializer",
"=",
"view",
".",
"get_serializer",
"(",
"queryset",
",",
"many",
"=",
"True",
")",
"return",
"serializer",
".",
"data"
] |
Return a list of objects matching the faceted result.
|
[
"Return",
"a",
"list",
"of",
"objects",
"matching",
"the",
"faceted",
"result",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L401-L419
|
10,017
|
inonit/drf-haystack
|
drf_haystack/serializers.py
|
HighlighterMixin.get_document_field
|
def get_document_field(instance):
    """
    Returns which field the search index has marked as it's
    `document=True` field.
    """
    for name, field in instance.searchindex.fields.items():
        if field.document is True:
            return name
|
python
|
def get_document_field(instance):
    """
    Returns which field the search index has marked as it's
    `document=True` field.
    """
    for name, field in instance.searchindex.fields.items():
        if field.document is True:
            return name
|
[
"def",
"get_document_field",
"(",
"instance",
")",
":",
"for",
"name",
",",
"field",
"in",
"instance",
".",
"searchindex",
".",
"fields",
".",
"items",
"(",
")",
":",
"if",
"field",
".",
"document",
"is",
"True",
":",
"return",
"name"
] |
Returns which field the search index has marked as it's
`document=True` field.
|
[
"Returns",
"which",
"field",
"the",
"search",
"index",
"has",
"marked",
"as",
"it",
"s",
"document",
"=",
"True",
"field",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/serializers.py#L470-L477
|
10,018
|
inonit/drf-haystack
|
drf_haystack/filters.py
|
BaseHaystackFilterBackend.apply_filters
|
def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
    """
    Apply constructed filters and excludes and return the queryset
    :param queryset: queryset to filter
    :param applicable_filters: filters which are passed directly to queryset.filter()
    :param applicable_exclusions: filters which are passed directly to queryset.exclude()
    :returns filtered queryset
    """
    if applicable_filters:
        queryset = queryset.filter(applicable_filters)
    if applicable_exclusions:
        queryset = queryset.exclude(applicable_exclusions)
    return queryset
|
python
|
def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
    """
    Apply constructed filters and excludes and return the queryset
    :param queryset: queryset to filter
    :param applicable_filters: filters which are passed directly to queryset.filter()
    :param applicable_exclusions: filters which are passed directly to queryset.exclude()
    :returns filtered queryset
    """
    if applicable_filters:
        queryset = queryset.filter(applicable_filters)
    if applicable_exclusions:
        queryset = queryset.exclude(applicable_exclusions)
    return queryset
|
[
"def",
"apply_filters",
"(",
"self",
",",
"queryset",
",",
"applicable_filters",
"=",
"None",
",",
"applicable_exclusions",
"=",
"None",
")",
":",
"if",
"applicable_filters",
":",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"applicable_filters",
")",
"if",
"applicable_exclusions",
":",
"queryset",
"=",
"queryset",
".",
"exclude",
"(",
"applicable_exclusions",
")",
"return",
"queryset"
] |
Apply constructed filters and excludes and return the queryset
:param queryset: queryset to filter
:param applicable_filters: filters which are passed directly to queryset.filter()
:param applicable_exclusions: filters which are passed directly to queryset.exclude()
:returns filtered queryset
|
[
"Apply",
"constructed",
"filters",
"and",
"excludes",
"and",
"return",
"the",
"queryset"
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L27-L40
|
10,019
|
inonit/drf-haystack
|
drf_haystack/filters.py
|
BaseHaystackFilterBackend.build_filters
|
def build_filters(self, view, filters=None):
    """
    Get the query builder instance and return constructed query filters.
    """
    query_builder = self.get_query_builder(backend=self, view=view)
    return query_builder.build_query(**(filters if filters else {}))
|
python
|
def build_filters(self, view, filters=None):
    """
    Get the query builder instance and return constructed query filters.
    """
    query_builder = self.get_query_builder(backend=self, view=view)
    return query_builder.build_query(**(filters if filters else {}))
|
[
"def",
"build_filters",
"(",
"self",
",",
"view",
",",
"filters",
"=",
"None",
")",
":",
"query_builder",
"=",
"self",
".",
"get_query_builder",
"(",
"backend",
"=",
"self",
",",
"view",
"=",
"view",
")",
"return",
"query_builder",
".",
"build_query",
"(",
"*",
"*",
"(",
"filters",
"if",
"filters",
"else",
"{",
"}",
")",
")"
] |
Get the query builder instance and return constructed query filters.
|
[
"Get",
"the",
"query",
"builder",
"instance",
"and",
"return",
"constructed",
"query",
"filters",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L42-L47
|
10,020
|
inonit/drf-haystack
|
drf_haystack/filters.py
|
BaseHaystackFilterBackend.filter_queryset
|
def filter_queryset(self, request, queryset, view):
    """
    Return the filtered queryset.
    """
    applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))
    return self.apply_filters(
        queryset=queryset,
        applicable_filters=self.process_filters(applicable_filters, queryset, view),
        applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)
    )
|
python
|
def filter_queryset(self, request, queryset, view):
    """
    Return the filtered queryset.
    """
    applicable_filters, applicable_exclusions = self.build_filters(view, filters=self.get_request_filters(request))
    return self.apply_filters(
        queryset=queryset,
        applicable_filters=self.process_filters(applicable_filters, queryset, view),
        applicable_exclusions=self.process_filters(applicable_exclusions, queryset, view)
    )
|
[
"def",
"filter_queryset",
"(",
"self",
",",
"request",
",",
"queryset",
",",
"view",
")",
":",
"applicable_filters",
",",
"applicable_exclusions",
"=",
"self",
".",
"build_filters",
"(",
"view",
",",
"filters",
"=",
"self",
".",
"get_request_filters",
"(",
"request",
")",
")",
"return",
"self",
".",
"apply_filters",
"(",
"queryset",
"=",
"queryset",
",",
"applicable_filters",
"=",
"self",
".",
"process_filters",
"(",
"applicable_filters",
",",
"queryset",
",",
"view",
")",
",",
"applicable_exclusions",
"=",
"self",
".",
"process_filters",
"(",
"applicable_exclusions",
",",
"queryset",
",",
"view",
")",
")"
] |
Return the filtered queryset.
|
[
"Return",
"the",
"filtered",
"queryset",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L56-L65
|
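In practice filter_queryset() is not called by hand: DRF invokes it for every backend listed in filter_backends. A minimal sketch assuming drf_haystack's HaystackViewSet and HaystackFilter; the view name is invented and the serializer/index configuration is omitted because it is app-specific.

from drf_haystack.filters import HaystackFilter
from drf_haystack.viewsets import HaystackViewSet

class BookSearchViewSet(HaystackViewSet):
    # index_models and serializer_class would point at your own search
    # index and HaystackSerializer subclass.
    filter_backends = [HaystackFilter]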
10,021
|
inonit/drf-haystack
|
drf_haystack/filters.py
|
BaseHaystackFilterBackend.get_query_builder
|
def get_query_builder(self, *args, **kwargs):
"""
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
"""
query_builder = self.get_query_builder_class()
return query_builder(*args, **kwargs)
|
python
|
def get_query_builder(self, *args, **kwargs):
"""
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
"""
query_builder = self.get_query_builder_class()
return query_builder(*args, **kwargs)
|
[
"def",
"get_query_builder",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"query_builder",
"=",
"self",
".",
"get_query_builder_class",
"(",
")",
"return",
"query_builder",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
|
[
"Return",
"the",
"query",
"builder",
"class",
"instance",
"that",
"should",
"be",
"used",
"to",
"build",
"the",
"query",
"which",
"is",
"passed",
"to",
"the",
"search",
"engine",
"backend",
"."
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L67-L73
|
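Because get_query_builder() simply instantiates whatever get_query_builder_class() returns, the usual customization point is to swap the builder class on a backend subclass. The sketch below assumes the backend exposes a query_builder_class attribute (the conventional hook for this pattern) and that FilterQueryBuilder lives in drf_haystack.query.

from drf_haystack.filters import HaystackFilter
from drf_haystack.query import FilterQueryBuilder

class StrictQueryBuilder(FilterQueryBuilder):
    # Override build_query() here to change how request parameters are
    # translated into filters and exclusions.
    pass

class StrictFilterBackend(HaystackFilter):
    query_builder_class = StrictQueryBuilder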
10,022
|
inonit/drf-haystack
|
drf_haystack/filters.py
|
HaystackFacetFilter.apply_filters
|
def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
"""
Apply faceting to the queryset
"""
for field, options in applicable_filters["field_facets"].items():
queryset = queryset.facet(field, **options)
for field, options in applicable_filters["date_facets"].items():
queryset = queryset.date_facet(field, **options)
for field, options in applicable_filters["query_facets"].items():
queryset = queryset.query_facet(field, **options)
return queryset
|
python
|
def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):
"""
Apply faceting to the queryset
"""
for field, options in applicable_filters["field_facets"].items():
queryset = queryset.facet(field, **options)
for field, options in applicable_filters["date_facets"].items():
queryset = queryset.date_facet(field, **options)
for field, options in applicable_filters["query_facets"].items():
queryset = queryset.query_facet(field, **options)
return queryset
|
[
"def",
"apply_filters",
"(",
"self",
",",
"queryset",
",",
"applicable_filters",
"=",
"None",
",",
"applicable_exclusions",
"=",
"None",
")",
":",
"for",
"field",
",",
"options",
"in",
"applicable_filters",
"[",
"\"field_facets\"",
"]",
".",
"items",
"(",
")",
":",
"queryset",
"=",
"queryset",
".",
"facet",
"(",
"field",
",",
"*",
"*",
"options",
")",
"for",
"field",
",",
"options",
"in",
"applicable_filters",
"[",
"\"date_facets\"",
"]",
".",
"items",
"(",
")",
":",
"queryset",
"=",
"queryset",
".",
"date_facet",
"(",
"field",
",",
"*",
"*",
"options",
")",
"for",
"field",
",",
"options",
"in",
"applicable_filters",
"[",
"\"query_facets\"",
"]",
".",
"items",
"(",
")",
":",
"queryset",
"=",
"queryset",
".",
"query_facet",
"(",
"field",
",",
"*",
"*",
"options",
")",
"return",
"queryset"
] |
Apply faceting to the queryset
|
[
"Apply",
"faceting",
"to",
"the",
"queryset"
] |
ceabd0f6318f129758341ab08292a20205d6f4cd
|
https://github.com/inonit/drf-haystack/blob/ceabd0f6318f129758341ab08292a20205d6f4cd/drf_haystack/filters.py#L202-L215
|
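The faceting variant above expects applicable_filters to be a dict with exactly three keys; the sketch below shows that shape with invented field names and maps it onto the standard SearchQuerySet facet calls used by the record.

from datetime import date
from haystack.query import SearchQuerySet

queryset = SearchQuerySet()
applicable_filters = {
    "field_facets": {"author": {"limit": 10}},
    "date_facets": {"pub_date": {"start_date": date(2015, 1, 1),
                                 "end_date": date(2016, 1, 1),
                                 "gap_by": "month"}},
    "query_facets": {},
}

for field, options in applicable_filters["field_facets"].items():
    queryset = queryset.facet(field, **options)
for field, options in applicable_filters["date_facets"].items():
    queryset = queryset.date_facet(field, **options)
for field, options in applicable_filters["query_facets"].items():
    queryset = queryset.query_facet(field, **options)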
10,023
|
maximtrp/scikit-posthocs
|
scikit_posthocs/_posthocs.py
|
__convert_to_df
|
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
    Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col
|
python
|
def __convert_to_df(a, val_col=None, group_col=None, val_id=None, group_id=None):
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
    Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.asscalar(np.diff(ax)):
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col
|
[
"def",
"__convert_to_df",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"val_id",
"=",
"None",
",",
"group_id",
"=",
"None",
")",
":",
"if",
"not",
"group_col",
":",
"group_col",
"=",
"'groups'",
"if",
"not",
"val_col",
":",
"val_col",
"=",
"'vals'",
"if",
"isinstance",
"(",
"a",
",",
"DataFrame",
")",
":",
"x",
"=",
"a",
".",
"copy",
"(",
")",
"if",
"not",
"{",
"group_col",
",",
"val_col",
"}",
".",
"issubset",
"(",
"a",
".",
"columns",
")",
":",
"raise",
"ValueError",
"(",
"'Specify correct column names using `group_col` and `val_col` args'",
")",
"return",
"x",
",",
"val_col",
",",
"group_col",
"elif",
"isinstance",
"(",
"a",
",",
"list",
")",
"or",
"(",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
"and",
"not",
"a",
".",
"shape",
".",
"count",
"(",
"2",
")",
")",
":",
"grps_len",
"=",
"map",
"(",
"len",
",",
"a",
")",
"grps",
"=",
"list",
"(",
"it",
".",
"chain",
"(",
"*",
"[",
"[",
"i",
"+",
"1",
"]",
"*",
"l",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"grps_len",
")",
"]",
")",
")",
"vals",
"=",
"list",
"(",
"it",
".",
"chain",
"(",
"*",
"a",
")",
")",
"return",
"DataFrame",
"(",
"{",
"val_col",
":",
"vals",
",",
"group_col",
":",
"grps",
"}",
")",
",",
"val_col",
",",
"group_col",
"elif",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
":",
"# cols ids not defined",
"# trying to infer",
"if",
"not",
"(",
"all",
"(",
"[",
"val_id",
",",
"group_id",
"]",
")",
")",
":",
"if",
"np",
".",
"argmax",
"(",
"a",
".",
"shape",
")",
":",
"a",
"=",
"a",
".",
"T",
"ax",
"=",
"[",
"np",
".",
"unique",
"(",
"a",
"[",
":",
",",
"0",
"]",
")",
".",
"size",
",",
"np",
".",
"unique",
"(",
"a",
"[",
":",
",",
"1",
"]",
")",
".",
"size",
"]",
"if",
"np",
".",
"asscalar",
"(",
"np",
".",
"diff",
"(",
"ax",
")",
")",
":",
"__val_col",
"=",
"np",
".",
"argmax",
"(",
"ax",
")",
"__group_col",
"=",
"np",
".",
"argmin",
"(",
"ax",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Cannot infer input format.\\nPlease specify `val_id` and `group_id` args'",
")",
"cols",
"=",
"{",
"__val_col",
":",
"val_col",
",",
"__group_col",
":",
"group_col",
"}",
"else",
":",
"cols",
"=",
"{",
"val_id",
":",
"val_col",
",",
"group_id",
":",
"group_col",
"}",
"cols_vals",
"=",
"dict",
"(",
"sorted",
"(",
"cols",
".",
"items",
"(",
")",
")",
")",
".",
"values",
"(",
")",
"return",
"DataFrame",
"(",
"a",
",",
"columns",
"=",
"cols_vals",
")",
",",
"val_col",
",",
"group_col"
] |
Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
    Inference algorithm for determining `val_id` and `group_id` args is rather
simple, so it is better to specify them explicitly to prevent errors.
|
[
"Hidden",
"helper",
"method",
"to",
"create",
"a",
"DataFrame",
"with",
"input",
"data",
"for",
"further",
"processing",
"."
] |
5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d
|
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L11-L106
|
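The helper is module-private (double-underscore name mangling), so the quickest way to see its list-of-lists branch is to mirror it. The sketch below reproduces that branch with made-up data and the documented default column names; note also that the ndarray branch relies on np.asscalar, which recent NumPy releases have removed, so the column-inference path may need adjustment on current environments.

import itertools as it
import pandas as pd

a = [[1, 2, 3], [10, 20]]                 # two groups of unequal length
grps = list(it.chain(*[[i + 1] * len(g) for i, g in enumerate(a)]))
vals = list(it.chain(*a))
df = pd.DataFrame({"vals": vals, "groups": grps})
# df["groups"] -> [1, 1, 1, 2, 2]; df["vals"] -> [1, 2, 3, 10, 20]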
10,024
|
maximtrp/scikit-posthocs
|
scikit_posthocs/_posthocs.py
|
posthoc_tukey_hsd
|
def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=0.05)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups)
|
python
|
def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
result = pairwise_tukeyhsd(x, g, alpha=0.05)
groups = np.array(result.groupsunique, dtype=np.str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=np.int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[5]) == 'True' else 0
vs = np.triu(vs)
np.fill_diagonal(vs, -1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[tri_lower] = vs.T[tri_lower]
return DataFrame(vs, index=groups, columns=groups)
|
[
"def",
"posthoc_tukey_hsd",
"(",
"x",
",",
"g",
",",
"alpha",
"=",
"0.05",
")",
":",
"result",
"=",
"pairwise_tukeyhsd",
"(",
"x",
",",
"g",
",",
"alpha",
"=",
"0.05",
")",
"groups",
"=",
"np",
".",
"array",
"(",
"result",
".",
"groupsunique",
",",
"dtype",
"=",
"np",
".",
"str",
")",
"groups_len",
"=",
"len",
"(",
"groups",
")",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"groups_len",
",",
"groups_len",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"for",
"a",
"in",
"result",
".",
"summary",
"(",
")",
"[",
"1",
":",
"]",
":",
"a0",
"=",
"str",
"(",
"a",
"[",
"0",
"]",
")",
"a1",
"=",
"str",
"(",
"a",
"[",
"1",
"]",
")",
"a0i",
"=",
"np",
".",
"where",
"(",
"groups",
"==",
"a0",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"a1i",
"=",
"np",
".",
"where",
"(",
"groups",
"==",
"a1",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"vs",
"[",
"a0i",
",",
"a1i",
"]",
"=",
"1",
"if",
"str",
"(",
"a",
"[",
"5",
"]",
")",
"==",
"'True'",
"else",
"0",
"vs",
"=",
"np",
".",
"triu",
"(",
"vs",
")",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] |
Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
DataFrame with 0, 1, and -1 values, where 0 is False (not significant),
1 is True (significant), and -1 is for diagonal elements.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
|
[
"Pairwise",
"comparisons",
"with",
"TukeyHSD",
"confidence",
"intervals",
".",
"This",
"is",
"a",
"convenience",
"function",
"to",
"make",
"statsmodels",
"pairwise_tukeyhsd",
"method",
"more",
"applicable",
"for",
"further",
"use",
"."
] |
5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d
|
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1845-L1897
|
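A runnable version of the docstring example, assuming scikit_posthocs is importable as sp (as the docstring does). Two caveats visible in this revision: alpha is passed to pairwise_tukeyhsd as a hard-coded 0.05 rather than the alpha argument, and the np.str/np.int aliases it uses were removed in newer NumPy releases, so compatibility should be checked against the installed versions.

import numpy as np
import scikit_posthocs as sp

x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
g = [['a'] * 5, ['b'] * 5, ['c'] * 5]

result = sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
# `result` is a groups x groups DataFrame of 0/1 significance flags
# with -1 on the diagonal.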
10,025
|
maximtrp/scikit-posthocs
|
scikit_posthocs/_posthocs.py
|
posthoc_mannwhitney
|
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
|
python
|
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.mannwhitneyu(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
|
[
"def",
"posthoc_mannwhitney",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"use_continuity",
"=",
"True",
",",
"alternative",
"=",
"'two-sided'",
",",
"p_adjust",
"=",
"None",
",",
"sort",
"=",
"True",
")",
":",
"x",
",",
"_val_col",
",",
"_group_col",
"=",
"__convert_to_df",
"(",
"a",
",",
"val_col",
",",
"group_col",
")",
"if",
"not",
"sort",
":",
"x",
"[",
"_group_col",
"]",
"=",
"Categorical",
"(",
"x",
"[",
"_group_col",
"]",
",",
"categories",
"=",
"x",
"[",
"_group_col",
"]",
".",
"unique",
"(",
")",
",",
"ordered",
"=",
"True",
")",
"x",
".",
"sort_values",
"(",
"by",
"=",
"[",
"_group_col",
",",
"_val_col",
"]",
",",
"ascending",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"groups",
"=",
"np",
".",
"unique",
"(",
"x",
"[",
"_group_col",
"]",
")",
"x_len",
"=",
"groups",
".",
"size",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"x_len",
",",
"x_len",
")",
")",
"tri_upper",
"=",
"np",
".",
"triu_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
":",
",",
":",
"]",
"=",
"0",
"combs",
"=",
"it",
".",
"combinations",
"(",
"range",
"(",
"x_len",
")",
",",
"2",
")",
"for",
"i",
",",
"j",
"in",
"combs",
":",
"vs",
"[",
"i",
",",
"j",
"]",
"=",
"ss",
".",
"mannwhitneyu",
"(",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"i",
"]",
",",
"_val_col",
"]",
",",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"j",
"]",
",",
"_val_col",
"]",
",",
"use_continuity",
"=",
"use_continuity",
",",
"alternative",
"=",
"alternative",
")",
"[",
"1",
"]",
"if",
"p_adjust",
":",
"vs",
"[",
"tri_upper",
"]",
"=",
"multipletests",
"(",
"vs",
"[",
"tri_upper",
"]",
",",
"method",
"=",
"p_adjust",
")",
"[",
"1",
"]",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] |
Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
|
[
"Pairwise",
"comparisons",
"with",
"Mann",
"-",
"Whitney",
"rank",
"test",
"."
] |
5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d
|
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1900-L1991
|
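A runnable expansion of the docstring example with the same invented data; the returned DataFrame holds pairwise Mann-Whitney p-values (Holm-adjusted here) with -1 on the diagonal, indexed by the inferred group ids 1, 2, 3.

import scikit_posthocs as sp

x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
pvals = sp.posthoc_mannwhitney(x, p_adjust='holm')
print(pvals.loc[1, 2])    # adjusted p-value for group 1 vs group 2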
10,026
|
maximtrp/scikit-posthocs
|
scikit_posthocs/_posthocs.py
|
posthoc_wilcoxon
|
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
zero_method=zero_method, correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
|
python
|
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
if not sort:
x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True)
#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)
groups = np.unique(x[_group_col])
x_len = groups.size
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(x_len), 2)
for i,j in combs:
vs[i, j] = ss.wilcoxon(x.loc[x[_group_col] == groups[i], _val_col],
x.loc[x[_group_col] == groups[j], _val_col],
zero_method=zero_method, correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups)
|
[
"def",
"posthoc_wilcoxon",
"(",
"a",
",",
"val_col",
"=",
"None",
",",
"group_col",
"=",
"None",
",",
"zero_method",
"=",
"'wilcox'",
",",
"correction",
"=",
"False",
",",
"p_adjust",
"=",
"None",
",",
"sort",
"=",
"False",
")",
":",
"x",
",",
"_val_col",
",",
"_group_col",
"=",
"__convert_to_df",
"(",
"a",
",",
"val_col",
",",
"group_col",
")",
"if",
"not",
"sort",
":",
"x",
"[",
"_group_col",
"]",
"=",
"Categorical",
"(",
"x",
"[",
"_group_col",
"]",
",",
"categories",
"=",
"x",
"[",
"_group_col",
"]",
".",
"unique",
"(",
")",
",",
"ordered",
"=",
"True",
")",
"#x.sort_values(by=[_group_col, _val_col], ascending=True, inplace=True)",
"groups",
"=",
"np",
".",
"unique",
"(",
"x",
"[",
"_group_col",
"]",
")",
"x_len",
"=",
"groups",
".",
"size",
"vs",
"=",
"np",
".",
"zeros",
"(",
"(",
"x_len",
",",
"x_len",
")",
")",
"tri_upper",
"=",
"np",
".",
"triu_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"tri_lower",
"=",
"np",
".",
"tril_indices",
"(",
"vs",
".",
"shape",
"[",
"0",
"]",
",",
"-",
"1",
")",
"vs",
"[",
":",
",",
":",
"]",
"=",
"0",
"combs",
"=",
"it",
".",
"combinations",
"(",
"range",
"(",
"x_len",
")",
",",
"2",
")",
"for",
"i",
",",
"j",
"in",
"combs",
":",
"vs",
"[",
"i",
",",
"j",
"]",
"=",
"ss",
".",
"wilcoxon",
"(",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"i",
"]",
",",
"_val_col",
"]",
",",
"x",
".",
"loc",
"[",
"x",
"[",
"_group_col",
"]",
"==",
"groups",
"[",
"j",
"]",
",",
"_val_col",
"]",
",",
"zero_method",
"=",
"zero_method",
",",
"correction",
"=",
"correction",
")",
"[",
"1",
"]",
"if",
"p_adjust",
":",
"vs",
"[",
"tri_upper",
"]",
"=",
"multipletests",
"(",
"vs",
"[",
"tri_upper",
"]",
",",
"method",
"=",
"p_adjust",
")",
"[",
"1",
"]",
"vs",
"[",
"tri_lower",
"]",
"=",
"vs",
".",
"T",
"[",
"tri_lower",
"]",
"np",
".",
"fill_diagonal",
"(",
"vs",
",",
"-",
"1",
")",
"return",
"DataFrame",
"(",
"vs",
",",
"index",
"=",
"groups",
",",
"columns",
"=",
"groups",
")"
] |
Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
|
[
"Pairwise",
"comparisons",
"with",
"Wilcoxon",
"signed",
"-",
"rank",
"test",
".",
"It",
"is",
"a",
"non",
"-",
"parametric",
"version",
"of",
"the",
"paired",
"T",
"-",
"test",
"for",
"use",
"with",
"non",
"-",
"parametric",
"ANOVA",
"."
] |
5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d
|
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_posthocs.py#L1994-L2086
|
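The Wilcoxon variant follows the same calling pattern; since scipy.stats.wilcoxon is a paired test, the groups being compared must have equal lengths, as they do in the docstring's invented data.

import scikit_posthocs as sp

x = [[1, 2, 3, 4, 5], [35, 31, 75, 40, 21], [10, 6, 9, 6, 1]]
pvals = sp.posthoc_wilcoxon(x, p_adjust='holm')
# pvals is a symmetric DataFrame of adjusted p-values with -1 on the
# diagonal, indexed by the inferred group ids 1, 2, 3.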
10,027
|
cjrh/aiorun
|
aiorun.py
|
shutdown_waits_for
|
def shutdown_waits_for(coro, loop=None):
"""Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function.
"""
loop = loop or get_event_loop()
fut = loop.create_future() # This future will connect coro and the caller.
async def coro_proxy():
"""This function will await coro, but it will also send the result
        over to the future. Remember: the outside caller (of
shutdown_waits_for) will be awaiting fut, NOT coro(), due to
the decoupling. However, when coro completes, we need to send its
result over to the fut to make it look *as if* it was just coro
running the whole time. This whole thing is a teeny magic trick.
"""
try:
result = await coro
except (CancelledError, Exception) as e:
set_fut_done = partial(fut.set_exception, e)
else:
set_fut_done = partial(fut.set_result, result)
if not fut.cancelled():
set_fut_done()
new_coro = coro_proxy() # We'll taskify this one instead of coro.
_DO_NOT_CANCEL_COROS.add(new_coro) # The new task must not be cancelled.
loop.create_task(new_coro) # Make the task
# Ok, so we *could* simply return fut. Callers can await it as normal,
# e.g.
#
# async def blah():
# x = await shutdown_waits_for(bleh())
#
# That will work fine. However, callers may *also* want to detach the
# call from the current execution context, e.g.
#
# async def blah():
# loop.create_task(shutdown_waits_for(bleh()))
#
# This will only work if shutdown_waits_for() returns a coroutine.
# Therefore, we just make a new coroutine to wrap the `await fut` and
# return that. Then both things will work.
#
# (Side note: instead of callers using create_tasks, it would also work
# if they used `asyncio.ensure_future()` instead, since that can work
# with futures. But I don't like ensure_future.)
#
# (Another side note: You don't even need `create_task()` or
# `ensure_future()`...If you don't want a result, you can just call
# `shutdown_waits_for()` as a flat function call, no await or anything,
# and it should still work; unfortunately it causes a RuntimeWarning to
# tell you that ``inner()`` was never awaited :/
async def inner():
return await fut
return inner()
|
python
|
def shutdown_waits_for(coro, loop=None):
"""Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function.
"""
loop = loop or get_event_loop()
fut = loop.create_future() # This future will connect coro and the caller.
async def coro_proxy():
"""This function will await coro, but it will also send the result
        over to the future. Remember: the outside caller (of
shutdown_waits_for) will be awaiting fut, NOT coro(), due to
the decoupling. However, when coro completes, we need to send its
result over to the fut to make it look *as if* it was just coro
running the whole time. This whole thing is a teeny magic trick.
"""
try:
result = await coro
except (CancelledError, Exception) as e:
set_fut_done = partial(fut.set_exception, e)
else:
set_fut_done = partial(fut.set_result, result)
if not fut.cancelled():
set_fut_done()
new_coro = coro_proxy() # We'll taskify this one instead of coro.
_DO_NOT_CANCEL_COROS.add(new_coro) # The new task must not be cancelled.
loop.create_task(new_coro) # Make the task
# Ok, so we *could* simply return fut. Callers can await it as normal,
# e.g.
#
# async def blah():
# x = await shutdown_waits_for(bleh())
#
# That will work fine. However, callers may *also* want to detach the
# call from the current execution context, e.g.
#
# async def blah():
# loop.create_task(shutdown_waits_for(bleh()))
#
# This will only work if shutdown_waits_for() returns a coroutine.
# Therefore, we just make a new coroutine to wrap the `await fut` and
# return that. Then both things will work.
#
# (Side note: instead of callers using create_tasks, it would also work
# if they used `asyncio.ensure_future()` instead, since that can work
# with futures. But I don't like ensure_future.)
#
# (Another side note: You don't even need `create_task()` or
# `ensure_future()`...If you don't want a result, you can just call
# `shutdown_waits_for()` as a flat function call, no await or anything,
# and it should still work; unfortunately it causes a RuntimeWarning to
# tell you that ``inner()`` was never awaited :/
async def inner():
return await fut
return inner()
|
[
"def",
"shutdown_waits_for",
"(",
"coro",
",",
"loop",
"=",
"None",
")",
":",
"loop",
"=",
"loop",
"or",
"get_event_loop",
"(",
")",
"fut",
"=",
"loop",
".",
"create_future",
"(",
")",
"# This future will connect coro and the caller.",
"async",
"def",
"coro_proxy",
"(",
")",
":",
"\"\"\"This function will await coro, but it will also send the result\n over the the future. Remember: the outside caller (of\n shutdown_waits_for) will be awaiting fut, NOT coro(), due to\n the decoupling. However, when coro completes, we need to send its\n result over to the fut to make it look *as if* it was just coro\n running the whole time. This whole thing is a teeny magic trick.\n \"\"\"",
"try",
":",
"result",
"=",
"await",
"coro",
"except",
"(",
"CancelledError",
",",
"Exception",
")",
"as",
"e",
":",
"set_fut_done",
"=",
"partial",
"(",
"fut",
".",
"set_exception",
",",
"e",
")",
"else",
":",
"set_fut_done",
"=",
"partial",
"(",
"fut",
".",
"set_result",
",",
"result",
")",
"if",
"not",
"fut",
".",
"cancelled",
"(",
")",
":",
"set_fut_done",
"(",
")",
"new_coro",
"=",
"coro_proxy",
"(",
")",
"# We'll taskify this one instead of coro.",
"_DO_NOT_CANCEL_COROS",
".",
"add",
"(",
"new_coro",
")",
"# The new task must not be cancelled.",
"loop",
".",
"create_task",
"(",
"new_coro",
")",
"# Make the task",
"# Ok, so we *could* simply return fut. Callers can await it as normal,",
"# e.g.",
"#",
"# async def blah():",
"# x = await shutdown_waits_for(bleh())",
"#",
"# That will work fine. However, callers may *also* want to detach the",
"# call from the current execution context, e.g.",
"#",
"# async def blah():",
"# loop.create_task(shutdown_waits_for(bleh()))",
"#",
"# This will only work if shutdown_waits_for() returns a coroutine.",
"# Therefore, we just make a new coroutine to wrap the `await fut` and",
"# return that. Then both things will work.",
"#",
"# (Side note: instead of callers using create_tasks, it would also work",
"# if they used `asyncio.ensure_future()` instead, since that can work",
"# with futures. But I don't like ensure_future.)",
"#",
"# (Another side note: You don't even need `create_task()` or",
"# `ensure_future()`...If you don't want a result, you can just call",
"# `shutdown_waits_for()` as a flat function call, no await or anything,",
"# and it should still work; unfortunately it causes a RuntimeWarning to",
"# tell you that ``inner()`` was never awaited :/",
"async",
"def",
"inner",
"(",
")",
":",
"return",
"await",
"fut",
"return",
"inner",
"(",
")"
] |
Prevent coro from being cancelled during the shutdown sequence.
The trick here is that we add this coro to the global
"DO_NOT_CANCEL" collection, and then later during the shutdown
sequence we make sure that the task that wraps this coro will NOT
be cancelled.
To make this work, we have to create a super-secret task, below, that
communicates with the caller (which "awaits" us) via a Future. Using
a Future in this way allows us to avoid awaiting the Task, which
decouples the Task from the normal exception propagation which would
normally happen when the outer Task gets cancelled. We get the
result of coro back to the caller via Future.set_result.
NOTE that during the shutdown sequence, the caller WILL NOT be able
to receive a result, since the caller will likely have been
cancelled. So you should probably not rely on capturing results
via this function.
|
[
"Prevent",
"coro",
"from",
"being",
"cancelled",
"during",
"the",
"shutdown",
"sequence",
"."
] |
23c73318447f578a4a24845c5f43574ac7b414e4
|
https://github.com/cjrh/aiorun/blob/23c73318447f578a4a24845c5f43574ac7b414e4/aiorun.py#L43-L117
|
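A small usage sketch, assuming aiorun's public run() entry point (shown in the next record) and an invented critical_section() coroutine. The point is that work wrapped in shutdown_waits_for() is allowed to finish even after SIGINT/SIGTERM starts the shutdown sequence, while the caller should not rely on receiving the result during shutdown.

import asyncio
from aiorun import run, shutdown_waits_for

async def critical_section():
    # Stand-in for work that must not be interrupted mid-flight.
    await asyncio.sleep(1)
    return "done"

async def main():
    result = await shutdown_waits_for(critical_section())
    print(result)

run(main())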
10,028
|
cjrh/aiorun
|
aiorun.py
|
run
|
def run(coro: 'Optional[Coroutine]' = None, *,
loop: Optional[AbstractEventLoop] = None,
shutdown_handler: Optional[Callable[[AbstractEventLoop], None]] = None,
executor_workers: int = 10,
executor: Optional[Executor] = None,
use_uvloop: bool = False) -> None:
"""
Start up the event loop, and wait for a signal to shut down.
:param coro: Optionally supply a coroutine. The loop will still
run if missing. The loop will continue to run after the supplied
coroutine finishes. The supplied coroutine is typically
a "main" coroutine from which all other work is spawned.
:param loop: Optionally supply your own loop. If missing, the
default loop attached to the current thread context will
be used, i.e., whatever ``asyncio.get_event_loop()`` returns.
:param shutdown_handler: By default, SIGINT and SIGTERM will be
handled and will stop the loop, thereby invoking the shutdown
sequence. Alternatively you can supply your own shutdown
handler function. It should conform to the type spec as shown
in the function signature.
:param executor_workers: The number of workers in the executor.
(NOTE: ``run()`` creates a new executor instance internally,
regardless of whether you supply your own loop.)
:param executor: You can decide to use your own executor instance
if you like.
:param use_uvloop: The loop policy will be set to use uvloop. It
is your responsibility to install uvloop. If missing, an
``ImportError`` will be raised.
"""
logger.debug('Entering run()')
assert not (loop and use_uvloop), (
"'loop' and 'use_uvloop' parameters are mutually "
"exclusive. (Just make your own uvloop and pass it in)."
)
if use_uvloop:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop_was_supplied = bool(loop)
if not loop_was_supplied:
loop = get_event_loop()
if coro:
async def new_coro():
"""During shutdown, run_until_complete() will exit
if a CancelledError bubbles up from anything in the
group. To counteract that, we'll try to handle
any CancelledErrors that bubble up from the given
coro. This isn't fool-proof: if the user doesn't
provide a coro, and instead creates their own with
loop.create_task, that task might bubble
a CancelledError into the run_until_complete()."""
try:
await coro
except asyncio.CancelledError:
pass
loop.create_task(new_coro())
shutdown_handler = shutdown_handler or _shutdown_handler
if WINDOWS: # pragma: no cover
# This is to allow CTRL-C to be detected in a timely fashion,
# see: https://bugs.python.org/issue23057#msg246316
loop.create_task(windows_support_wakeup())
# This is to be able to handle SIGBREAK.
def windows_handler(sig, frame):
# Disable the handler so it won't be called again.
signame = signal.Signals(sig).name
logger.critical('Received signal: %s. Stopping the loop.', signame)
shutdown_handler(loop)
signal.signal(signal.SIGBREAK, windows_handler)
signal.signal(signal.SIGINT, windows_handler)
else:
loop.add_signal_handler(SIGINT, shutdown_handler, loop)
loop.add_signal_handler(SIGTERM, shutdown_handler, loop)
# TODO: We probably don't want to create a different executor if the
# TODO: loop was supplied. (User might have put stuff on that loop's
# TODO: executor).
if not executor:
logger.debug('Creating default executor')
executor = ThreadPoolExecutor(max_workers=executor_workers)
loop.set_default_executor(executor)
try:
loop.run_forever()
except KeyboardInterrupt: # pragma: no cover
logger.info('Got KeyboardInterrupt')
if WINDOWS:
# Windows doesn't do any POSIX signal handling, and no
# abstraction layer for signals is currently implemented in
# asyncio. So we fall back to KeyboardInterrupt (triggered
# by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT
shutdown_handler()
logger.info('Entering shutdown phase.')
def sep():
tasks = all_tasks(loop=loop)
do_not_cancel = set()
for t in tasks:
# TODO: we don't need access to the coro. We could simply
# TODO: store the task itself in the weakset.
if t._coro in _DO_NOT_CANCEL_COROS:
do_not_cancel.add(t)
tasks -= do_not_cancel
logger.info('Cancelling pending tasks.')
for t in tasks:
logger.debug('Cancelling task: %s', t)
t.cancel()
return tasks, do_not_cancel
tasks, do_not_cancel = sep()
# Here's a protip: if you group a bunch of tasks, and some of them
# get cancelled, and they DON'T HANDLE THE CANCELLATION, then the
# raised CancelledError will bubble up to, and stop the
# loop.run_until_complete() line: meaning, not all the tasks in
# the gathered group will actually be complete. You need to
# enable this with the ``return_exceptions`` flag.
group = gather(*tasks, *do_not_cancel, return_exceptions=True)
logger.info('Running pending tasks till complete')
# TODO: obtain all the results, and log any results that are exceptions
# other than CancelledError. Will be useful for troubleshooting.
loop.run_until_complete(group)
logger.info('Waiting for executor shutdown.')
executor.shutdown(wait=True)
# If loop was supplied, it's up to the caller to close!
if not loop_was_supplied:
logger.info('Closing the loop.')
loop.close()
logger.critical('Leaving. Bye!')
|
python
|
def run(coro: 'Optional[Coroutine]' = None, *,
loop: Optional[AbstractEventLoop] = None,
shutdown_handler: Optional[Callable[[AbstractEventLoop], None]] = None,
executor_workers: int = 10,
executor: Optional[Executor] = None,
use_uvloop: bool = False) -> None:
"""
Start up the event loop, and wait for a signal to shut down.
:param coro: Optionally supply a coroutine. The loop will still
run if missing. The loop will continue to run after the supplied
coroutine finishes. The supplied coroutine is typically
a "main" coroutine from which all other work is spawned.
:param loop: Optionally supply your own loop. If missing, the
default loop attached to the current thread context will
be used, i.e., whatever ``asyncio.get_event_loop()`` returns.
:param shutdown_handler: By default, SIGINT and SIGTERM will be
handled and will stop the loop, thereby invoking the shutdown
sequence. Alternatively you can supply your own shutdown
handler function. It should conform to the type spec as shown
in the function signature.
:param executor_workers: The number of workers in the executor.
(NOTE: ``run()`` creates a new executor instance internally,
regardless of whether you supply your own loop.)
:param executor: You can decide to use your own executor instance
if you like.
:param use_uvloop: The loop policy will be set to use uvloop. It
is your responsibility to install uvloop. If missing, an
``ImportError`` will be raised.
"""
logger.debug('Entering run()')
assert not (loop and use_uvloop), (
"'loop' and 'use_uvloop' parameters are mutually "
"exclusive. (Just make your own uvloop and pass it in)."
)
if use_uvloop:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop_was_supplied = bool(loop)
if not loop_was_supplied:
loop = get_event_loop()
if coro:
async def new_coro():
"""During shutdown, run_until_complete() will exit
if a CancelledError bubbles up from anything in the
group. To counteract that, we'll try to handle
any CancelledErrors that bubble up from the given
coro. This isn't fool-proof: if the user doesn't
provide a coro, and instead creates their own with
loop.create_task, that task might bubble
a CancelledError into the run_until_complete()."""
try:
await coro
except asyncio.CancelledError:
pass
loop.create_task(new_coro())
shutdown_handler = shutdown_handler or _shutdown_handler
if WINDOWS: # pragma: no cover
# This is to allow CTRL-C to be detected in a timely fashion,
# see: https://bugs.python.org/issue23057#msg246316
loop.create_task(windows_support_wakeup())
# This is to be able to handle SIGBREAK.
def windows_handler(sig, frame):
# Disable the handler so it won't be called again.
signame = signal.Signals(sig).name
logger.critical('Received signal: %s. Stopping the loop.', signame)
shutdown_handler(loop)
signal.signal(signal.SIGBREAK, windows_handler)
signal.signal(signal.SIGINT, windows_handler)
else:
loop.add_signal_handler(SIGINT, shutdown_handler, loop)
loop.add_signal_handler(SIGTERM, shutdown_handler, loop)
# TODO: We probably don't want to create a different executor if the
# TODO: loop was supplied. (User might have put stuff on that loop's
# TODO: executor).
if not executor:
logger.debug('Creating default executor')
executor = ThreadPoolExecutor(max_workers=executor_workers)
loop.set_default_executor(executor)
try:
loop.run_forever()
except KeyboardInterrupt: # pragma: no cover
logger.info('Got KeyboardInterrupt')
if WINDOWS:
# Windows doesn't do any POSIX signal handling, and no
# abstraction layer for signals is currently implemented in
# asyncio. So we fall back to KeyboardInterrupt (triggered
# by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT
shutdown_handler()
logger.info('Entering shutdown phase.')
def sep():
tasks = all_tasks(loop=loop)
do_not_cancel = set()
for t in tasks:
# TODO: we don't need access to the coro. We could simply
# TODO: store the task itself in the weakset.
if t._coro in _DO_NOT_CANCEL_COROS:
do_not_cancel.add(t)
tasks -= do_not_cancel
logger.info('Cancelling pending tasks.')
for t in tasks:
logger.debug('Cancelling task: %s', t)
t.cancel()
return tasks, do_not_cancel
tasks, do_not_cancel = sep()
# Here's a protip: if you group a bunch of tasks, and some of them
# get cancelled, and they DON'T HANDLE THE CANCELLATION, then the
# raised CancelledError will bubble up to, and stop the
# loop.run_until_complete() line: meaning, not all the tasks in
# the gathered group will actually be complete. You need to
# enable this with the ``return_exceptions`` flag.
group = gather(*tasks, *do_not_cancel, return_exceptions=True)
logger.info('Running pending tasks till complete')
# TODO: obtain all the results, and log any results that are exceptions
# other than CancelledError. Will be useful for troubleshooting.
loop.run_until_complete(group)
logger.info('Waiting for executor shutdown.')
executor.shutdown(wait=True)
# If loop was supplied, it's up to the caller to close!
if not loop_was_supplied:
logger.info('Closing the loop.')
loop.close()
logger.critical('Leaving. Bye!')
|
[
"def",
"run",
"(",
"coro",
":",
"'Optional[Coroutine]'",
"=",
"None",
",",
"*",
",",
"loop",
":",
"Optional",
"[",
"AbstractEventLoop",
"]",
"=",
"None",
",",
"shutdown_handler",
":",
"Optional",
"[",
"Callable",
"[",
"[",
"AbstractEventLoop",
"]",
",",
"None",
"]",
"]",
"=",
"None",
",",
"executor_workers",
":",
"int",
"=",
"10",
",",
"executor",
":",
"Optional",
"[",
"Executor",
"]",
"=",
"None",
",",
"use_uvloop",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"'Entering run()'",
")",
"assert",
"not",
"(",
"loop",
"and",
"use_uvloop",
")",
",",
"(",
"\"'loop' and 'use_uvloop' parameters are mutually \"",
"\"exclusive. (Just make your own uvloop and pass it in).\"",
")",
"if",
"use_uvloop",
":",
"import",
"uvloop",
"asyncio",
".",
"set_event_loop_policy",
"(",
"uvloop",
".",
"EventLoopPolicy",
"(",
")",
")",
"loop_was_supplied",
"=",
"bool",
"(",
"loop",
")",
"if",
"not",
"loop_was_supplied",
":",
"loop",
"=",
"get_event_loop",
"(",
")",
"if",
"coro",
":",
"async",
"def",
"new_coro",
"(",
")",
":",
"\"\"\"During shutdown, run_until_complete() will exit\n if a CancelledError bubbles up from anything in the\n group. To counteract that, we'll try to handle\n any CancelledErrors that bubble up from the given\n coro. This isn't fool-proof: if the user doesn't\n provide a coro, and instead creates their own with\n loop.create_task, that task might bubble\n a CancelledError into the run_until_complete().\"\"\"",
"try",
":",
"await",
"coro",
"except",
"asyncio",
".",
"CancelledError",
":",
"pass",
"loop",
".",
"create_task",
"(",
"new_coro",
"(",
")",
")",
"shutdown_handler",
"=",
"shutdown_handler",
"or",
"_shutdown_handler",
"if",
"WINDOWS",
":",
"# pragma: no cover",
"# This is to allow CTRL-C to be detected in a timely fashion,",
"# see: https://bugs.python.org/issue23057#msg246316",
"loop",
".",
"create_task",
"(",
"windows_support_wakeup",
"(",
")",
")",
"# This is to be able to handle SIGBREAK.",
"def",
"windows_handler",
"(",
"sig",
",",
"frame",
")",
":",
"# Disable the handler so it won't be called again.",
"signame",
"=",
"signal",
".",
"Signals",
"(",
"sig",
")",
".",
"name",
"logger",
".",
"critical",
"(",
"'Received signal: %s. Stopping the loop.'",
",",
"signame",
")",
"shutdown_handler",
"(",
"loop",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGBREAK",
",",
"windows_handler",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"windows_handler",
")",
"else",
":",
"loop",
".",
"add_signal_handler",
"(",
"SIGINT",
",",
"shutdown_handler",
",",
"loop",
")",
"loop",
".",
"add_signal_handler",
"(",
"SIGTERM",
",",
"shutdown_handler",
",",
"loop",
")",
"# TODO: We probably don't want to create a different executor if the",
"# TODO: loop was supplied. (User might have put stuff on that loop's",
"# TODO: executor).",
"if",
"not",
"executor",
":",
"logger",
".",
"debug",
"(",
"'Creating default executor'",
")",
"executor",
"=",
"ThreadPoolExecutor",
"(",
"max_workers",
"=",
"executor_workers",
")",
"loop",
".",
"set_default_executor",
"(",
"executor",
")",
"try",
":",
"loop",
".",
"run_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"# pragma: no cover",
"logger",
".",
"info",
"(",
"'Got KeyboardInterrupt'",
")",
"if",
"WINDOWS",
":",
"# Windows doesn't do any POSIX signal handling, and no",
"# abstraction layer for signals is currently implemented in",
"# asyncio. So we fall back to KeyboardInterrupt (triggered",
"# by the user/environment sending CTRL-C, or signal.CTRL_C_EVENT",
"shutdown_handler",
"(",
")",
"logger",
".",
"info",
"(",
"'Entering shutdown phase.'",
")",
"def",
"sep",
"(",
")",
":",
"tasks",
"=",
"all_tasks",
"(",
"loop",
"=",
"loop",
")",
"do_not_cancel",
"=",
"set",
"(",
")",
"for",
"t",
"in",
"tasks",
":",
"# TODO: we don't need access to the coro. We could simply",
"# TODO: store the task itself in the weakset.",
"if",
"t",
".",
"_coro",
"in",
"_DO_NOT_CANCEL_COROS",
":",
"do_not_cancel",
".",
"add",
"(",
"t",
")",
"tasks",
"-=",
"do_not_cancel",
"logger",
".",
"info",
"(",
"'Cancelling pending tasks.'",
")",
"for",
"t",
"in",
"tasks",
":",
"logger",
".",
"debug",
"(",
"'Cancelling task: %s'",
",",
"t",
")",
"t",
".",
"cancel",
"(",
")",
"return",
"tasks",
",",
"do_not_cancel",
"tasks",
",",
"do_not_cancel",
"=",
"sep",
"(",
")",
"# Here's a protip: if you group a bunch of tasks, and some of them",
"# get cancelled, and they DON'T HANDLE THE CANCELLATION, then the",
"# raised CancelledError will bubble up to, and stop the",
"# loop.run_until_complete() line: meaning, not all the tasks in",
"# the gathered group will actually be complete. You need to",
"# enable this with the ``return_exceptions`` flag.",
"group",
"=",
"gather",
"(",
"*",
"tasks",
",",
"*",
"do_not_cancel",
",",
"return_exceptions",
"=",
"True",
")",
"logger",
".",
"info",
"(",
"'Running pending tasks till complete'",
")",
"# TODO: obtain all the results, and log any results that are exceptions",
"# other than CancelledError. Will be useful for troubleshooting.",
"loop",
".",
"run_until_complete",
"(",
"group",
")",
"logger",
".",
"info",
"(",
"'Waiting for executor shutdown.'",
")",
"executor",
".",
"shutdown",
"(",
"wait",
"=",
"True",
")",
"# If loop was supplied, it's up to the caller to close!",
"if",
"not",
"loop_was_supplied",
":",
"logger",
".",
"info",
"(",
"'Closing the loop.'",
")",
"loop",
".",
"close",
"(",
")",
"logger",
".",
"critical",
"(",
"'Leaving. Bye!'",
")"
] |
Start up the event loop, and wait for a signal to shut down.
:param coro: Optionally supply a coroutine. The loop will still
run if missing. The loop will continue to run after the supplied
coroutine finishes. The supplied coroutine is typically
a "main" coroutine from which all other work is spawned.
:param loop: Optionally supply your own loop. If missing, the
default loop attached to the current thread context will
be used, i.e., whatever ``asyncio.get_event_loop()`` returns.
:param shutdown_handler: By default, SIGINT and SIGTERM will be
handled and will stop the loop, thereby invoking the shutdown
sequence. Alternatively you can supply your own shutdown
handler function. It should conform to the type spec as shown
in the function signature.
:param executor_workers: The number of workers in the executor.
(NOTE: ``run()`` creates a new executor instance internally,
regardless of whether you supply your own loop.)
:param executor: You can decide to use your own executor instance
if you like.
:param use_uvloop: The loop policy will be set to use uvloop. It
is your responsibility to install uvloop. If missing, an
``ImportError`` will be raised.
|
[
"Start",
"up",
"the",
"event",
"loop",
"and",
"wait",
"for",
"a",
"signal",
"to",
"shut",
"down",
"."
] |
23c73318447f578a4a24845c5f43574ac7b414e4
|
https://github.com/cjrh/aiorun/blob/23c73318447f578a4a24845c5f43574ac7b414e4/aiorun.py#L120-L255
|
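A minimal usage sketch for the run() helper in the record above, assuming the module is installed as aiorun and exposes run at top level; the main() coroutine is illustrative, not part of the original code.

# Hedged usage sketch: a "main" coroutine handed to run(), which blocks until SIGINT/SIGTERM.
import asyncio
from aiorun import run   # assumption: the record's module is importable as `aiorun`

async def main():
    while True:                      # illustrative periodic work
        print('tick')
        await asyncio.sleep(1.0)

if __name__ == '__main__':
    run(main())                      # returns only after the shutdown sequence completes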
10,029
|
emre/storm
|
storm/kommandr.py
|
prog.command
|
def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command
|
python
|
def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command
|
[
"def",
"command",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"collections",
".",
"Callable",
")",
":",
"return",
"self",
".",
"_generate_command",
"(",
"args",
"[",
"0",
"]",
")",
"else",
":",
"def",
"_command",
"(",
"func",
")",
":",
"return",
"self",
".",
"_generate_command",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_command"
] |
Convenient decorator simply creates corresponding command
|
[
"Convenient",
"decorator",
"simply",
"creates",
"corresponding",
"command"
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L96-L103
|
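A hedged sketch of how the command decorator above is typically applied; `app` stands for an instance of the surrounding prog class and is an assumption, as are the example commands.

# Illustrative only: `app` is assumed to be a prog() instance exposing .command and .execute.
@app.command
def version():
    """Print the version."""
    print('1.0.0')

@app.command(name='greet')           # explicit command name instead of func.__name__
def greet(name, shout=False):
    """Greet someone by name."""
    message = 'hello %s' % name
    print(message.upper() if shout else message)

# Dispatching later (see the execute record further down):
# app.execute(sys.argv[1:])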
10,030
|
emre/storm
|
storm/kommandr.py
|
prog._generate_command
|
def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func
|
python
|
def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func
|
[
"def",
"_generate_command",
"(",
"self",
",",
"func",
",",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"func_pointer",
"=",
"name",
"or",
"func",
".",
"__name__",
"storm_config",
"=",
"get_storm_config",
"(",
")",
"aliases",
",",
"additional_kwarg",
"=",
"None",
",",
"None",
"if",
"'aliases'",
"in",
"storm_config",
":",
"for",
"command",
",",
"alias_list",
"in",
"six",
".",
"iteritems",
"(",
"storm_config",
".",
"get",
"(",
"\"aliases\"",
")",
")",
":",
"if",
"func_pointer",
"==",
"command",
":",
"aliases",
"=",
"alias_list",
"break",
"func_help",
"=",
"func",
".",
"__doc__",
"and",
"func",
".",
"__doc__",
".",
"strip",
"(",
")",
"subparser",
"=",
"self",
".",
"subparsers",
".",
"add_parser",
"(",
"name",
"or",
"func",
".",
"__name__",
",",
"aliases",
"=",
"aliases",
",",
"help",
"=",
"func_help",
")",
"spec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"opts",
"=",
"reversed",
"(",
"list",
"(",
"izip_longest",
"(",
"reversed",
"(",
"spec",
".",
"args",
"or",
"[",
"]",
")",
",",
"reversed",
"(",
"spec",
".",
"defaults",
"or",
"[",
"]",
")",
",",
"fillvalue",
"=",
"self",
".",
"_POSITIONAL",
"(",
")",
")",
")",
")",
"for",
"k",
",",
"v",
"in",
"opts",
":",
"argopts",
"=",
"getattr",
"(",
"func",
",",
"'argopts'",
",",
"{",
"}",
")",
"args",
",",
"kwargs",
"=",
"argopts",
".",
"get",
"(",
"k",
",",
"(",
"[",
"]",
",",
"{",
"}",
")",
")",
"args",
"=",
"list",
"(",
"args",
")",
"is_positional",
"=",
"isinstance",
"(",
"v",
",",
"self",
".",
"_POSITIONAL",
")",
"options",
"=",
"[",
"arg",
"for",
"arg",
"in",
"args",
"if",
"arg",
".",
"startswith",
"(",
"'-'",
")",
"]",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"kwargs",
".",
"update",
"(",
"{",
"'action'",
":",
"'append'",
",",
"}",
")",
"if",
"is_positional",
":",
"if",
"options",
":",
"args",
"=",
"options",
"kwargs",
".",
"update",
"(",
"{",
"'required'",
":",
"True",
",",
"'dest'",
":",
"k",
"}",
")",
"else",
":",
"args",
"=",
"[",
"k",
"]",
"else",
":",
"args",
"=",
"options",
"or",
"[",
"'--%s'",
"%",
"k",
"]",
"kwargs",
".",
"update",
"(",
"{",
"'default'",
":",
"v",
",",
"'dest'",
":",
"k",
"}",
")",
"arg",
"=",
"subparser",
".",
"add_argument",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"subparser",
".",
"set_defaults",
"(",
"*",
"*",
"{",
"self",
".",
"_COMMAND_FLAG",
":",
"func",
"}",
")",
"return",
"func"
] |
Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
|
[
"Generates",
"a",
"command",
"parser",
"for",
"given",
"func",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L121-L177
|
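A small, self-contained illustration of the argument-pairing trick in _generate_command above: zipping reversed args and defaults with a sentinel fill value so that parameters without defaults are marked as positional. The helper name and sentinel class here are stand-ins, not part of the original code.

from itertools import zip_longest    # izip_longest under Python 2, as in the record

class POSITIONAL:
    """Stand-in for the _POSITIONAL sentinel: a parameter with no default value."""

def pair_args_with_defaults(args, defaults):
    # Reversing both sequences right-aligns the defaults with the last parameters.
    pairs = zip_longest(reversed(args), reversed(defaults), fillvalue=POSITIONAL())
    return list(reversed(list(pairs)))

print(pair_args_with_defaults(['name', 'connection_uri', 'id_file'], ['']))
# -> [('name', <POSITIONAL>), ('connection_uri', <POSITIONAL>), ('id_file', '')]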
10,031
|
emre/storm
|
storm/kommandr.py
|
prog.execute
|
def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map)
|
python
|
def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map)
|
[
"def",
"execute",
"(",
"self",
",",
"arg_list",
")",
":",
"arg_map",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"arg_list",
")",
".",
"__dict__",
"command",
"=",
"arg_map",
".",
"pop",
"(",
"self",
".",
"_COMMAND_FLAG",
")",
"return",
"command",
"(",
"*",
"*",
"arg_map",
")"
] |
Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
|
[
"Main",
"function",
"to",
"parse",
"and",
"dispatch",
"commands",
"by",
"given",
"arg_list"
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L179-L188
|
10,032
|
emre/storm
|
storm/__main__.py
|
add
|
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"add",
"(",
"name",
",",
"connection_uri",
",",
"id_file",
"=",
"\"\"",
",",
"o",
"=",
"[",
"]",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"# validate name",
"if",
"'@'",
"in",
"name",
":",
"raise",
"ValueError",
"(",
"'invalid value: \"@\" cannot be used in name.'",
")",
"user",
",",
"host",
",",
"port",
"=",
"parse",
"(",
"connection_uri",
",",
"user",
"=",
"get_default",
"(",
"\"user\"",
",",
"storm_",
".",
"defaults",
")",
",",
"port",
"=",
"get_default",
"(",
"\"port\"",
",",
"storm_",
".",
"defaults",
")",
")",
"storm_",
".",
"add_entry",
"(",
"name",
",",
"host",
",",
"user",
",",
"port",
",",
"id_file",
",",
"o",
")",
"print",
"(",
"get_formatted_message",
"(",
"'{0} added to your ssh config. you can connect '",
"'it by typing \"ssh {0}\".'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Adds a new entry to sshconfig.
|
[
"Adds",
"a",
"new",
"entry",
"to",
"sshconfig",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L34-L63
|
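A hedged, call-level sketch of the add command above; the alias, connection URI and key path are invented, and the "key=value" format of the -o options is an assumption based on how the rest of the module treats them.

# Hypothetical direct call: "deploy@example.com:2222" is split by parse() into user, host, port.
add('web1', 'deploy@example.com:2222',
    id_file='~/.ssh/id_rsa',
    o=['ForwardAgent=yes'])
# If it succeeds, the entry is reachable afterwards with:  ssh web1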
10,033
|
emre/storm
|
storm/__main__.py
|
clone
|
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"clone",
"(",
"name",
",",
"clone_name",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"# validate name",
"if",
"'@'",
"in",
"name",
":",
"raise",
"ValueError",
"(",
"'invalid value: \"@\" cannot be used in name.'",
")",
"storm_",
".",
"clone_entry",
"(",
"name",
",",
"clone_name",
")",
"print",
"(",
"get_formatted_message",
"(",
"'{0} added to your ssh config. you can connect '",
"'it by typing \"ssh {0}\".'",
".",
"format",
"(",
"clone_name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Clone an entry to the sshconfig.
|
[
"Clone",
"an",
"entry",
"to",
"the",
"sshconfig",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L67-L90
|
10,034
|
emre/storm
|
storm/__main__.py
|
move
|
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"move",
"(",
"name",
",",
"entry_name",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"if",
"'@'",
"in",
"name",
":",
"raise",
"ValueError",
"(",
"'invalid value: \"@\" cannot be used in name.'",
")",
"storm_",
".",
"clone_entry",
"(",
"name",
",",
"entry_name",
",",
"keep_original",
"=",
"False",
")",
"print",
"(",
"get_formatted_message",
"(",
"'{0} moved in ssh config. you can '",
"'connect it by typing \"ssh {0}\".'",
".",
"format",
"(",
"entry_name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Move an entry to the sshconfig.
|
[
"Move",
"an",
"entry",
"to",
"the",
"sshconfig",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L93-L117
|
10,035
|
emre/storm
|
storm/__main__.py
|
edit
|
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"edit",
"(",
"name",
",",
"connection_uri",
",",
"id_file",
"=",
"\"\"",
",",
"o",
"=",
"[",
"]",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"if",
"','",
"in",
"name",
":",
"name",
"=",
"\" \"",
".",
"join",
"(",
"name",
".",
"split",
"(",
"\",\"",
")",
")",
"user",
",",
"host",
",",
"port",
"=",
"parse",
"(",
"connection_uri",
",",
"user",
"=",
"get_default",
"(",
"\"user\"",
",",
"storm_",
".",
"defaults",
")",
",",
"port",
"=",
"get_default",
"(",
"\"port\"",
",",
"storm_",
".",
"defaults",
")",
")",
"storm_",
".",
"edit_entry",
"(",
"name",
",",
"host",
",",
"user",
",",
"port",
",",
"id_file",
",",
"o",
")",
"print",
"(",
"get_formatted_message",
"(",
"'\"{0}\" updated successfully.'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Edits the related entry in ssh config.
|
[
"Edits",
"the",
"related",
"entry",
"in",
"ssh",
"config",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L120-L143
|
10,036
|
emre/storm
|
storm/__main__.py
|
update
|
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"update",
"(",
"name",
",",
"connection_uri",
"=",
"\"\"",
",",
"id_file",
"=",
"\"\"",
",",
"o",
"=",
"[",
"]",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"settings",
"=",
"{",
"}",
"if",
"id_file",
"!=",
"\"\"",
":",
"settings",
"[",
"'identityfile'",
"]",
"=",
"id_file",
"for",
"option",
"in",
"o",
":",
"k",
",",
"v",
"=",
"option",
".",
"split",
"(",
"\"=\"",
")",
"settings",
"[",
"k",
"]",
"=",
"v",
"try",
":",
"storm_",
".",
"update_entry",
"(",
"name",
",",
"*",
"*",
"settings",
")",
"print",
"(",
"get_formatted_message",
"(",
"'\"{0}\" updated successfully.'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
|
[
"Enhanced",
"version",
"of",
"the",
"edit",
"command",
"featuring",
"multiple",
"edits",
"using",
"regular",
"expressions",
"to",
"match",
"entries"
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L146-L169
|
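A short sketch of how the option list is folded into keyword arguments by the update command above; the host name and values are invented.

# Each "key=value" string in `o` becomes one entry of `settings`, so this call is
# equivalent to storm_.update_entry('web1', identityfile='~/.ssh/id_rsa',
#                                   port='2222', user='deploy').
update('web1', id_file='~/.ssh/id_rsa', o=['port=2222', 'user=deploy'])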
10,037
|
emre/storm
|
storm/__main__.py
|
delete
|
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"delete",
"(",
"name",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"delete_entry",
"(",
"name",
")",
"print",
"(",
"get_formatted_message",
"(",
"'hostname \"{0}\" deleted successfully.'",
".",
"format",
"(",
"name",
")",
",",
"'success'",
")",
")",
"except",
"ValueError",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"error",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Deletes a single host.
|
[
"Deletes",
"a",
"single",
"host",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L172-L187
|
10,038
|
emre/storm
|
storm/__main__.py
|
list
|
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"list",
"(",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"result",
"=",
"colored",
"(",
"'Listing entries:'",
",",
"'white'",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
"+",
"\"\\n\\n\"",
"result_stack",
"=",
"\"\"",
"for",
"host",
"in",
"storm_",
".",
"list_entries",
"(",
"True",
")",
":",
"if",
"host",
".",
"get",
"(",
"\"type\"",
")",
"==",
"'entry'",
":",
"if",
"not",
"host",
".",
"get",
"(",
"\"host\"",
")",
"==",
"\"*\"",
":",
"result",
"+=",
"\" {0} -> {1}@{2}:{3}\"",
".",
"format",
"(",
"colored",
"(",
"host",
"[",
"\"host\"",
"]",
",",
"'green'",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
",",
"host",
".",
"get",
"(",
"\"options\"",
")",
".",
"get",
"(",
"\"user\"",
",",
"get_default",
"(",
"\"user\"",
",",
"storm_",
".",
"defaults",
")",
")",
",",
"host",
".",
"get",
"(",
"\"options\"",
")",
".",
"get",
"(",
"\"hostname\"",
",",
"\"[hostname_not_specified]\"",
")",
",",
"host",
".",
"get",
"(",
"\"options\"",
")",
".",
"get",
"(",
"\"port\"",
",",
"get_default",
"(",
"\"port\"",
",",
"storm_",
".",
"defaults",
")",
")",
")",
"extra",
"=",
"False",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"host",
".",
"get",
"(",
"\"options\"",
")",
")",
":",
"if",
"not",
"key",
"in",
"[",
"\"user\"",
",",
"\"hostname\"",
",",
"\"port\"",
"]",
":",
"if",
"not",
"extra",
":",
"custom_options",
"=",
"colored",
"(",
"'\\n\\t[custom options] '",
",",
"'white'",
")",
"result",
"+=",
"\" {0}\"",
".",
"format",
"(",
"custom_options",
")",
"extra",
"=",
"True",
"if",
"isinstance",
"(",
"value",
",",
"collections",
".",
"Sequence",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"builtins",
".",
"list",
")",
":",
"value",
"=",
"\",\"",
".",
"join",
"(",
"value",
")",
"result",
"+=",
"\"{0}={1} \"",
".",
"format",
"(",
"key",
",",
"value",
")",
"if",
"extra",
":",
"result",
"=",
"result",
"[",
"0",
":",
"-",
"1",
"]",
"result",
"+=",
"\"\\n\\n\"",
"else",
":",
"result_stack",
"=",
"colored",
"(",
"\" (*) General options: \\n\"",
",",
"\"green\"",
",",
"attrs",
"=",
"[",
"\"bold\"",
",",
"]",
")",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"host",
".",
"get",
"(",
"\"options\"",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"type",
"(",
"[",
"]",
")",
")",
":",
"result_stack",
"+=",
"\"\\t {0}: \"",
".",
"format",
"(",
"colored",
"(",
"key",
",",
"\"magenta\"",
")",
")",
"result_stack",
"+=",
"', '",
".",
"join",
"(",
"value",
")",
"result_stack",
"+=",
"\"\\n\"",
"else",
":",
"result_stack",
"+=",
"\"\\t {0}: {1}\\n\"",
".",
"format",
"(",
"colored",
"(",
"key",
",",
"\"magenta\"",
")",
",",
"value",
",",
")",
"result_stack",
"=",
"result_stack",
"[",
"0",
":",
"-",
"1",
"]",
"+",
"\"\\n\"",
"result",
"+=",
"result_stack",
"print",
"(",
"get_formatted_message",
"(",
"result",
",",
"\"\"",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Lists all hosts from ssh config.
|
[
"Lists",
"all",
"hosts",
"from",
"ssh",
"config",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L190-L258
|
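For readability, a hedged sketch of the entry shape the list command above appears to consume from list_entries(True); the keys are taken from the snippet, the concrete values are invented.

# Invented sample data matching the keys accessed in the snippet above.
sample_entries = [
    {'type': 'entry', 'host': 'web1',
     'options': {'user': 'deploy', 'hostname': 'example.com', 'port': '2222',
                 'identityfile': ['~/.ssh/id_rsa']}},
    {'type': 'entry', 'host': '*',        # rendered as the "(*) General options" block
     'options': {'serveraliveinterval': '60'}},
]
# The first entry renders roughly as:
#   web1 -> deploy@example.com:2222   [custom options] identityfile=~/.ssh/id_rsa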
10,039
|
emre/storm
|
storm/__main__.py
|
search
|
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"search",
"(",
"search_text",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"results",
"=",
"storm_",
".",
"search_host",
"(",
"search_text",
")",
"if",
"len",
"(",
"results",
")",
"==",
"0",
":",
"print",
"(",
"'no results found.'",
")",
"if",
"len",
"(",
"results",
")",
">",
"0",
":",
"message",
"=",
"'Listing results for {0}:\\n'",
".",
"format",
"(",
"search_text",
")",
"message",
"+=",
"\"\"",
".",
"join",
"(",
"results",
")",
"print",
"(",
"message",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Searches entries by given search text.
|
[
"Searches",
"entries",
"by",
"given",
"search",
"text",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L261-L278
|
10,040
|
emre/storm
|
storm/__main__.py
|
delete_all
|
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"delete_all",
"(",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"delete_all_entries",
"(",
")",
"print",
"(",
"get_formatted_message",
"(",
"'all entries deleted.'",
",",
"'success'",
")",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Deletes all hosts from ssh config.
|
[
"Deletes",
"all",
"hosts",
"from",
"ssh",
"config",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L281-L292
|
10,041
|
emre/storm
|
storm/__main__.py
|
backup
|
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
python
|
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
[
"def",
"backup",
"(",
"target_file",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"backup",
"(",
"target_file",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Backups the main ssh configuration into target file.
|
[
"Backups",
"the",
"main",
"ssh",
"configuration",
"into",
"target",
"file",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L295-L304
|
10,042
|
emre/storm
|
storm/__main__.py
|
web
|
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
|
python
|
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
|
[
"def",
"web",
"(",
"port",
",",
"debug",
"=",
"False",
",",
"theme",
"=",
"\"modern\"",
",",
"ssh_config",
"=",
"None",
")",
":",
"from",
"storm",
"import",
"web",
"as",
"_web",
"_web",
".",
"run",
"(",
"port",
",",
"debug",
",",
"theme",
",",
"ssh_config",
")"
] |
Starts the web UI.
|
[
"Starts",
"the",
"web",
"UI",
"."
] |
c752defc1b718cfffbf0e0e15532fa1d7840bf6d
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L310-L313
|
10,043
|
diging/tethne
|
tethne/writers/collection.py
|
_strip_list_attributes
|
def _strip_list_attributes(graph_):
"""Converts lists attributes to strings for all nodes and edges in G."""
for n_ in graph_.nodes(data=True):
for k,v in n_[1].iteritems():
if type(v) is list:
graph_.node[n_[0]][k] = unicode(v)
for e_ in graph_.edges(data=True):
for k,v in e_[2].iteritems():
if type(v) is list:
graph_.edge[e_[0]][e_[1]][k] = unicode(v)
return graph_
|
python
|
def _strip_list_attributes(graph_):
"""Converts lists attributes to strings for all nodes and edges in G."""
for n_ in graph_.nodes(data=True):
for k,v in n_[1].iteritems():
if type(v) is list:
graph_.node[n_[0]][k] = unicode(v)
for e_ in graph_.edges(data=True):
for k,v in e_[2].iteritems():
if type(v) is list:
graph_.edge[e_[0]][e_[1]][k] = unicode(v)
return graph_
|
[
"def",
"_strip_list_attributes",
"(",
"graph_",
")",
":",
"for",
"n_",
"in",
"graph_",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"for",
"k",
",",
"v",
"in",
"n_",
"[",
"1",
"]",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"v",
")",
"is",
"list",
":",
"graph_",
".",
"node",
"[",
"n_",
"[",
"0",
"]",
"]",
"[",
"k",
"]",
"=",
"unicode",
"(",
"v",
")",
"for",
"e_",
"in",
"graph_",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"for",
"k",
",",
"v",
"in",
"e_",
"[",
"2",
"]",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"v",
")",
"is",
"list",
":",
"graph_",
".",
"edge",
"[",
"e_",
"[",
"0",
"]",
"]",
"[",
"e_",
"[",
"1",
"]",
"]",
"[",
"k",
"]",
"=",
"unicode",
"(",
"v",
")",
"return",
"graph_"
] |
Converts lists attributes to strings for all nodes and edges in G.
|
[
"Converts",
"lists",
"attributes",
"to",
"strings",
"for",
"all",
"nodes",
"and",
"edges",
"in",
"G",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/writers/collection.py#L189-L200
|
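The record above is Python 2 era code (iteritems, unicode, graph_.node / graph_.edge). As a point of comparison only, a hedged Python 3 / current-networkx sketch of the same idea:

import networkx as nx

def strip_list_attributes_py3(graph):
    # Same intent as the snippet above: stringify list attributes so XGMML can hold them.
    for _, data in graph.nodes(data=True):
        for key, value in data.items():
            if isinstance(value, list):
                data[key] = str(value)
    for _, _, data in graph.edges(data=True):
        for key, value in data.items():
            if isinstance(value, list):
                data[key] = str(value)
    return graph

g = nx.Graph()
g.add_node('a', topics=['x', 'y'])
g.add_edge('a', 'b', weights=[1, 2])
strip_list_attributes_py3(g)
# g.nodes['a']['topics'] == "['x', 'y']" and g.edges['a', 'b']['weights'] == "[1, 2]"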
10,044
|
diging/tethne
|
tethne/writers/collection.py
|
_safe_type
|
def _safe_type(value):
"""Converts Python type names to XGMML-safe type names."""
if type(value) is str: dtype = 'string'
if type(value) is unicode: dtype = 'string'
if type(value) is int: dtype = 'integer'
if type(value) is float: dtype = 'real'
return dtype
|
python
|
def _safe_type(value):
"""Converts Python type names to XGMML-safe type names."""
if type(value) is str: dtype = 'string'
if type(value) is unicode: dtype = 'string'
if type(value) is int: dtype = 'integer'
if type(value) is float: dtype = 'real'
return dtype
|
[
"def",
"_safe_type",
"(",
"value",
")",
":",
"if",
"type",
"(",
"value",
")",
"is",
"str",
":",
"dtype",
"=",
"'string'",
"if",
"type",
"(",
"value",
")",
"is",
"unicode",
":",
"dtype",
"=",
"'string'",
"if",
"type",
"(",
"value",
")",
"is",
"int",
":",
"dtype",
"=",
"'integer'",
"if",
"type",
"(",
"value",
")",
"is",
"float",
":",
"dtype",
"=",
"'real'",
"return",
"dtype"
] |
Converts Python type names to XGMML-safe type names.
|
[
"Converts",
"Python",
"type",
"names",
"to",
"XGMML",
"-",
"safe",
"type",
"names",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/writers/collection.py#L202-L210
|
10,045
|
diging/tethne
|
tethne/readers/wos.py
|
read
|
def read(path, corpus=True, index_by='wosid', streaming=False, parse_only=None,
corpus_class=Corpus, **kwargs):
"""
Parse one or more WoS field-tagged data files.
Examples
--------
.. code-block:: python
>>> from tethne.readers import wos
>>> corpus = wos.read("/path/to/some/wos/data")
>>> corpus
<tethne.classes.corpus.Corpus object at 0x10057c2d0>
Parameters
----------
path : str
Path to WoS field-tagged data. Can be a path directly to a single data
file, or to a directory containing several data files.
corpus : bool
If True (default), returns a :class:`.Corpus`\. If False, will return
only a list of :class:`.Paper`\s.
Returns
-------
:class:`.Corpus` or :class:`.Paper`
"""
if not os.path.exists(path):
raise ValueError('No such file or directory')
# We need the primary index field in the parse results.
if parse_only:
parse_only.append(index_by)
if streaming:
return streaming_read(path, corpus=corpus, index_by=index_by,
parse_only=parse_only, **kwargs)
if os.path.isdir(path): # Directory containing 1+ WoS data files.
papers = []
for sname in os.listdir(path):
if sname.endswith('txt') and not sname.startswith('.'):
papers += read(os.path.join(path, sname),
corpus=False,
parse_only=parse_only)
else: # A single data file.
papers = WoSParser(path).parse(parse_only=parse_only)
if corpus:
return corpus_class(papers, index_by=index_by, **kwargs)
return papers
|
python
|
def read(path, corpus=True, index_by='wosid', streaming=False, parse_only=None,
corpus_class=Corpus, **kwargs):
"""
Parse one or more WoS field-tagged data files.
Examples
--------
.. code-block:: python
>>> from tethne.readers import wos
>>> corpus = wos.read("/path/to/some/wos/data")
>>> corpus
<tethne.classes.corpus.Corpus object at 0x10057c2d0>
Parameters
----------
path : str
Path to WoS field-tagged data. Can be a path directly to a single data
file, or to a directory containing several data files.
corpus : bool
If True (default), returns a :class:`.Corpus`\. If False, will return
only a list of :class:`.Paper`\s.
Returns
-------
:class:`.Corpus` or :class:`.Paper`
"""
if not os.path.exists(path):
raise ValueError('No such file or directory')
# We need the primary index field in the parse results.
if parse_only:
parse_only.append(index_by)
if streaming:
return streaming_read(path, corpus=corpus, index_by=index_by,
parse_only=parse_only, **kwargs)
if os.path.isdir(path): # Directory containing 1+ WoS data files.
papers = []
for sname in os.listdir(path):
if sname.endswith('txt') and not sname.startswith('.'):
papers += read(os.path.join(path, sname),
corpus=False,
parse_only=parse_only)
else: # A single data file.
papers = WoSParser(path).parse(parse_only=parse_only)
if corpus:
return corpus_class(papers, index_by=index_by, **kwargs)
return papers
|
[
"def",
"read",
"(",
"path",
",",
"corpus",
"=",
"True",
",",
"index_by",
"=",
"'wosid'",
",",
"streaming",
"=",
"False",
",",
"parse_only",
"=",
"None",
",",
"corpus_class",
"=",
"Corpus",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'No such file or directory'",
")",
"# We need the primary index field in the parse results.",
"if",
"parse_only",
":",
"parse_only",
".",
"append",
"(",
"index_by",
")",
"if",
"streaming",
":",
"return",
"streaming_read",
"(",
"path",
",",
"corpus",
"=",
"corpus",
",",
"index_by",
"=",
"index_by",
",",
"parse_only",
"=",
"parse_only",
",",
"*",
"*",
"kwargs",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"# Directory containing 1+ WoS data files.",
"papers",
"=",
"[",
"]",
"for",
"sname",
"in",
"os",
".",
"listdir",
"(",
"path",
")",
":",
"if",
"sname",
".",
"endswith",
"(",
"'txt'",
")",
"and",
"not",
"sname",
".",
"startswith",
"(",
"'.'",
")",
":",
"papers",
"+=",
"read",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"sname",
")",
",",
"corpus",
"=",
"False",
",",
"parse_only",
"=",
"parse_only",
")",
"else",
":",
"# A single data file.",
"papers",
"=",
"WoSParser",
"(",
"path",
")",
".",
"parse",
"(",
"parse_only",
"=",
"parse_only",
")",
"if",
"corpus",
":",
"return",
"corpus_class",
"(",
"papers",
",",
"index_by",
"=",
"index_by",
",",
"*",
"*",
"kwargs",
")",
"return",
"papers"
] |
Parse one or more WoS field-tagged data files.
Examples
--------
.. code-block:: python
>>> from tethne.readers import wos
>>> corpus = wos.read("/path/to/some/wos/data")
>>> corpus
<tethne.classes.corpus.Corpus object at 0x10057c2d0>
Parameters
----------
path : str
Path to WoS field-tagged data. Can be a path directly to a single data
file, or to a directory containing several data files.
corpus : bool
If True (default), returns a :class:`.Corpus`\. If False, will return
only a list of :class:`.Paper`\s.
Returns
-------
:class:`.Corpus` or :class:`.Paper`
|
[
"Parse",
"one",
"or",
"more",
"WoS",
"field",
"-",
"tagged",
"data",
"files",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L350-L401
|
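A hedged sketch of two call patterns for the read() function above, beyond the docstring example; the path is made up and the field names in parse_only are illustrative guesses, since valid field names depend on the parser's data model.

# parse_only is extended internally with the index field ('wosid' by default).
corpus = read('/path/to/wos/data', parse_only=['title', 'date'])

# corpus=False returns the plain list of Paper objects instead of a Corpus.
papers = read('/path/to/wos/data', corpus=False)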
10,046
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.parse_author
|
def parse_author(self, value):
"""
Attempts to split an author name into last and first parts.
"""
tokens = tuple([t.upper().strip() for t in value.split(',')])
if len(tokens) == 1:
tokens = value.split(' ')
if len(tokens) > 0:
if len(tokens) > 1:
aulast, auinit = tokens[0:2] # Ignore JR, II, III, etc.
else:
aulast = tokens[0]
auinit = ''
else:
aulast, auinit = tokens[0], ''
aulast = _strip_punctuation(aulast).upper()
auinit = _strip_punctuation(auinit).upper()
return aulast, auinit
|
python
|
def parse_author(self, value):
"""
Attempts to split an author name into last and first parts.
"""
tokens = tuple([t.upper().strip() for t in value.split(',')])
if len(tokens) == 1:
tokens = value.split(' ')
if len(tokens) > 0:
if len(tokens) > 1:
aulast, auinit = tokens[0:2] # Ignore JR, II, III, etc.
else:
aulast = tokens[0]
auinit = ''
else:
aulast, auinit = tokens[0], ''
aulast = _strip_punctuation(aulast).upper()
auinit = _strip_punctuation(auinit).upper()
return aulast, auinit
|
[
"def",
"parse_author",
"(",
"self",
",",
"value",
")",
":",
"tokens",
"=",
"tuple",
"(",
"[",
"t",
".",
"upper",
"(",
")",
".",
"strip",
"(",
")",
"for",
"t",
"in",
"value",
".",
"split",
"(",
"','",
")",
"]",
")",
"if",
"len",
"(",
"tokens",
")",
"==",
"1",
":",
"tokens",
"=",
"value",
".",
"split",
"(",
"' '",
")",
"if",
"len",
"(",
"tokens",
")",
">",
"0",
":",
"if",
"len",
"(",
"tokens",
")",
">",
"1",
":",
"aulast",
",",
"auinit",
"=",
"tokens",
"[",
"0",
":",
"2",
"]",
"# Ignore JR, II, III, etc.",
"else",
":",
"aulast",
"=",
"tokens",
"[",
"0",
"]",
"auinit",
"=",
"''",
"else",
":",
"aulast",
",",
"auinit",
"=",
"tokens",
"[",
"0",
"]",
",",
"''",
"aulast",
"=",
"_strip_punctuation",
"(",
"aulast",
")",
".",
"upper",
"(",
")",
"auinit",
"=",
"_strip_punctuation",
"(",
"auinit",
")",
".",
"upper",
"(",
")",
"return",
"aulast",
",",
"auinit"
] |
Attempts to split an author name into last and first parts.
|
[
"Attempts",
"to",
"split",
"an",
"author",
"name",
"into",
"last",
"and",
"first",
"parts",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L112-L129
|
10,047
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.handle_CR
|
def handle_CR(self, value):
"""
Parses cited references.
"""
citation = self.entry_class()
value = strip_tags(value)
# First-author name and publication date.
ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)'
ny_match = re.match(ptn, value, flags=re.U)
nj_match = re.match('([\w\s\W]+),\s([\w\s]+)',
value, flags=re.U)
if ny_match is not None:
name_raw, date, journal = ny_match.groups()
elif nj_match is not None:
name_raw, journal = nj_match.groups()
date = None
else:
return
datematch = re.match('([0-9]{4})', value)
if datematch:
date = datematch.group(1)
name_raw = None
if name_raw:
name_tokens = [t.replace('.', '') for t in name_raw.split(' ')]
if len(name_tokens) > 4 or value.startswith('*'): # Probably not a person.
proc = lambda x: _strip_punctuation(x)
aulast = ' '.join([proc(n) for n in name_tokens]).upper()
auinit = ''
elif len(name_tokens) > 0:
aulast = name_tokens[0].upper()
proc = lambda x: _space_sep(_strip_punctuation(x))
auinit = ' '.join([proc(n) for n in name_tokens[1:]]).upper()
else:
aulast = name_tokens[0].upper()
auinit = ''
setattr(citation, 'authors_init', [(aulast, auinit)])
if date:
date = int(date)
setattr(citation, 'date', date)
setattr(citation, 'journal', journal)
# Volume.
v_match = re.search('\,\s+V([0-9A-Za-z]+)', value)
if v_match is not None:
volume = v_match.group(1)
else:
volume = None
setattr(citation, 'volume', volume)
# Start page.
p_match = re.search('\,\s+[Pp]([0-9A-Za-z]+)', value)
if p_match is not None:
page = p_match.group(1)
else:
page = None
setattr(citation, 'pageStart', page)
# DOI.
doi_match = re.search('DOI\s(.*)', value)
if doi_match is not None:
doi = doi_match.group(1)
else:
doi = None
setattr(citation, 'doi', doi)
return citation
|
python
|
def handle_CR(self, value):
"""
Parses cited references.
"""
citation = self.entry_class()
value = strip_tags(value)
# First-author name and publication date.
ptn = '([\w\s\W]+),\s([0-9]{4}),\s([\w\s]+)'
ny_match = re.match(ptn, value, flags=re.U)
nj_match = re.match('([\w\s\W]+),\s([\w\s]+)',
value, flags=re.U)
if ny_match is not None:
name_raw, date, journal = ny_match.groups()
elif nj_match is not None:
name_raw, journal = nj_match.groups()
date = None
else:
return
datematch = re.match('([0-9]{4})', value)
if datematch:
date = datematch.group(1)
name_raw = None
if name_raw:
name_tokens = [t.replace('.', '') for t in name_raw.split(' ')]
if len(name_tokens) > 4 or value.startswith('*'): # Probably not a person.
proc = lambda x: _strip_punctuation(x)
aulast = ' '.join([proc(n) for n in name_tokens]).upper()
auinit = ''
elif len(name_tokens) > 0:
aulast = name_tokens[0].upper()
proc = lambda x: _space_sep(_strip_punctuation(x))
auinit = ' '.join([proc(n) for n in name_tokens[1:]]).upper()
else:
aulast = name_tokens[0].upper()
auinit = ''
setattr(citation, 'authors_init', [(aulast, auinit)])
if date:
date = int(date)
setattr(citation, 'date', date)
setattr(citation, 'journal', journal)
# Volume.
v_match = re.search('\,\s+V([0-9A-Za-z]+)', value)
if v_match is not None:
volume = v_match.group(1)
else:
volume = None
setattr(citation, 'volume', volume)
# Start page.
p_match = re.search('\,\s+[Pp]([0-9A-Za-z]+)', value)
if p_match is not None:
page = p_match.group(1)
else:
page = None
setattr(citation, 'pageStart', page)
# DOI.
doi_match = re.search('DOI\s(.*)', value)
if doi_match is not None:
doi = doi_match.group(1)
else:
doi = None
setattr(citation, 'doi', doi)
return citation
|
[
"def",
"handle_CR",
"(",
"self",
",",
"value",
")",
":",
"citation",
"=",
"self",
".",
"entry_class",
"(",
")",
"value",
"=",
"strip_tags",
"(",
"value",
")",
"# First-author name and publication date.",
"ptn",
"=",
"'([\\w\\s\\W]+),\\s([0-9]{4}),\\s([\\w\\s]+)'",
"ny_match",
"=",
"re",
".",
"match",
"(",
"ptn",
",",
"value",
",",
"flags",
"=",
"re",
".",
"U",
")",
"nj_match",
"=",
"re",
".",
"match",
"(",
"'([\\w\\s\\W]+),\\s([\\w\\s]+)'",
",",
"value",
",",
"flags",
"=",
"re",
".",
"U",
")",
"if",
"ny_match",
"is",
"not",
"None",
":",
"name_raw",
",",
"date",
",",
"journal",
"=",
"ny_match",
".",
"groups",
"(",
")",
"elif",
"nj_match",
"is",
"not",
"None",
":",
"name_raw",
",",
"journal",
"=",
"nj_match",
".",
"groups",
"(",
")",
"date",
"=",
"None",
"else",
":",
"return",
"datematch",
"=",
"re",
".",
"match",
"(",
"'([0-9]{4})'",
",",
"value",
")",
"if",
"datematch",
":",
"date",
"=",
"datematch",
".",
"group",
"(",
"1",
")",
"name_raw",
"=",
"None",
"if",
"name_raw",
":",
"name_tokens",
"=",
"[",
"t",
".",
"replace",
"(",
"'.'",
",",
"''",
")",
"for",
"t",
"in",
"name_raw",
".",
"split",
"(",
"' '",
")",
"]",
"if",
"len",
"(",
"name_tokens",
")",
">",
"4",
"or",
"value",
".",
"startswith",
"(",
"'*'",
")",
":",
"# Probably not a person.",
"proc",
"=",
"lambda",
"x",
":",
"_strip_punctuation",
"(",
"x",
")",
"aulast",
"=",
"' '",
".",
"join",
"(",
"[",
"proc",
"(",
"n",
")",
"for",
"n",
"in",
"name_tokens",
"]",
")",
".",
"upper",
"(",
")",
"auinit",
"=",
"''",
"elif",
"len",
"(",
"name_tokens",
")",
">",
"0",
":",
"aulast",
"=",
"name_tokens",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"proc",
"=",
"lambda",
"x",
":",
"_space_sep",
"(",
"_strip_punctuation",
"(",
"x",
")",
")",
"auinit",
"=",
"' '",
".",
"join",
"(",
"[",
"proc",
"(",
"n",
")",
"for",
"n",
"in",
"name_tokens",
"[",
"1",
":",
"]",
"]",
")",
".",
"upper",
"(",
")",
"else",
":",
"aulast",
"=",
"name_tokens",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"auinit",
"=",
"''",
"setattr",
"(",
"citation",
",",
"'authors_init'",
",",
"[",
"(",
"aulast",
",",
"auinit",
")",
"]",
")",
"if",
"date",
":",
"date",
"=",
"int",
"(",
"date",
")",
"setattr",
"(",
"citation",
",",
"'date'",
",",
"date",
")",
"setattr",
"(",
"citation",
",",
"'journal'",
",",
"journal",
")",
"# Volume.",
"v_match",
"=",
"re",
".",
"search",
"(",
"'\\,\\s+V([0-9A-Za-z]+)'",
",",
"value",
")",
"if",
"v_match",
"is",
"not",
"None",
":",
"volume",
"=",
"v_match",
".",
"group",
"(",
"1",
")",
"else",
":",
"volume",
"=",
"None",
"setattr",
"(",
"citation",
",",
"'volume'",
",",
"volume",
")",
"# Start page.",
"p_match",
"=",
"re",
".",
"search",
"(",
"'\\,\\s+[Pp]([0-9A-Za-z]+)'",
",",
"value",
")",
"if",
"p_match",
"is",
"not",
"None",
":",
"page",
"=",
"p_match",
".",
"group",
"(",
"1",
")",
"else",
":",
"page",
"=",
"None",
"setattr",
"(",
"citation",
",",
"'pageStart'",
",",
"page",
")",
"# DOI.",
"doi_match",
"=",
"re",
".",
"search",
"(",
"'DOI\\s(.*)'",
",",
"value",
")",
"if",
"doi_match",
"is",
"not",
"None",
":",
"doi",
"=",
"doi_match",
".",
"group",
"(",
"1",
")",
"else",
":",
"doi",
"=",
"None",
"setattr",
"(",
"citation",
",",
"'doi'",
",",
"doi",
")",
"return",
"citation"
] |
Parses cited references.
|
[
"Parses",
"cited",
"references",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L157-L227
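Tracing the regexes in this record against an invented cited-reference string (the real WoS ``CR`` format varies) gives the attribute values noted in the comments; ``parser`` is again an existing ``WoSParser`` instance.

cr = "NIXON R, 1974, J POLIT, V36, P112, DOI 10.1000/example"   # illustrative only
citation = parser.handle_CR(cr)
# citation.authors_init == [('NIXON', 'R')]
# citation.date == 1974, citation.journal == 'J POLIT'
# citation.volume == '36', citation.pageStart == '112'
# citation.doi == '10.1000/example'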
|
10,048
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_WC
|
def postprocess_WC(self, entry):
"""
Parse WC keywords.
Subject keywords are usually semicolon-delimited.
"""
if type(entry.WC) not in [str, unicode]:
WC= u' '.join([unicode(k) for k in entry.WC])
else:
WC= entry.WC
entry.WC= [k.strip().upper() for k in WC.split(';')]
|
python
|
def postprocess_WC(self, entry):
"""
Parse WC keywords.
Subject keywords are usually semicolon-delimited.
"""
if type(entry.WC) not in [str, unicode]:
WC= u' '.join([unicode(k) for k in entry.WC])
else:
WC= entry.WC
entry.WC= [k.strip().upper() for k in WC.split(';')]
|
[
"def",
"postprocess_WC",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"WC",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"WC",
"=",
"u' '",
".",
"join",
"(",
"[",
"unicode",
"(",
"k",
")",
"for",
"k",
"in",
"entry",
".",
"WC",
"]",
")",
"else",
":",
"WC",
"=",
"entry",
".",
"WC",
"entry",
".",
"WC",
"=",
"[",
"k",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"k",
"in",
"WC",
".",
"split",
"(",
"';'",
")",
"]"
] |
Parse WC keywords.
Subject keywords are usually semicolon-delimited.
|
[
"Parse",
"WC",
"keywords",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L229-L240
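This record and the two keyword records that follow (``postprocess_subject``, ``postprocess_authorKeywords``) apply the same split-on-';', strip, uppercase transformation; in isolation the pattern is simply:

raw = "History & Philosophy Of Science; Sociology"
keywords = [k.strip().upper() for k in raw.split(';')]
# ['HISTORY & PHILOSOPHY OF SCIENCE', 'SOCIOLOGY']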
|
10,049
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_subject
|
def postprocess_subject(self, entry):
"""
Parse subject keywords.
Subject keywords are usually semicolon-delimited.
"""
if type(entry.subject) not in [str, unicode]:
subject = u' '.join([unicode(k) for k in entry.subject])
else:
subject = entry.subject
entry.subject = [k.strip().upper() for k in subject.split(';')]
|
python
|
def postprocess_subject(self, entry):
"""
Parse subject keywords.
Subject keywords are usually semicolon-delimited.
"""
if type(entry.subject) not in [str, unicode]:
subject = u' '.join([unicode(k) for k in entry.subject])
else:
subject = entry.subject
entry.subject = [k.strip().upper() for k in subject.split(';')]
|
[
"def",
"postprocess_subject",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"subject",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"subject",
"=",
"u' '",
".",
"join",
"(",
"[",
"unicode",
"(",
"k",
")",
"for",
"k",
"in",
"entry",
".",
"subject",
"]",
")",
"else",
":",
"subject",
"=",
"entry",
".",
"subject",
"entry",
".",
"subject",
"=",
"[",
"k",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"k",
"in",
"subject",
".",
"split",
"(",
"';'",
")",
"]"
] |
Parse subject keywords.
Subject keywords are usually semicolon-delimited.
|
[
"Parse",
"subject",
"keywords",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L242-L253
|
10,050
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_authorKeywords
|
def postprocess_authorKeywords(self, entry):
"""
Parse author keywords.
Author keywords are usually semicolon-delimited.
"""
if type(entry.authorKeywords) not in [str, unicode]:
aK = u' '.join([unicode(k) for k in entry.authorKeywords])
else:
aK = entry.authorKeywords
entry.authorKeywords = [k.strip().upper() for k in aK.split(';')]
|
python
|
def postprocess_authorKeywords(self, entry):
"""
Parse author keywords.
Author keywords are usually semicolon-delimited.
"""
if type(entry.authorKeywords) not in [str, unicode]:
aK = u' '.join([unicode(k) for k in entry.authorKeywords])
else:
aK = entry.authorKeywords
entry.authorKeywords = [k.strip().upper() for k in aK.split(';')]
|
[
"def",
"postprocess_authorKeywords",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"authorKeywords",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"aK",
"=",
"u' '",
".",
"join",
"(",
"[",
"unicode",
"(",
"k",
")",
"for",
"k",
"in",
"entry",
".",
"authorKeywords",
"]",
")",
"else",
":",
"aK",
"=",
"entry",
".",
"authorKeywords",
"entry",
".",
"authorKeywords",
"=",
"[",
"k",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"k",
"in",
"aK",
".",
"split",
"(",
"';'",
")",
"]"
] |
Parse author keywords.
Author keywords are usually semicolon-delimited.
|
[
"Parse",
"author",
"keywords",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L255-L266
|
10,051
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_keywordsPlus
|
def postprocess_keywordsPlus(self, entry):
"""
Parse WoS "Keyword Plus" keywords.
Keyword Plus keywords are usually semicolon-delimited.
"""
if type(entry.keywordsPlus) in [str, unicode]:
entry.keywordsPlus = [k.strip().upper() for k
in entry.keywordsPlus.split(';')]
|
python
|
def postprocess_keywordsPlus(self, entry):
"""
Parse WoS "Keyword Plus" keywords.
Keyword Plus keywords are usually semicolon-delimited.
"""
if type(entry.keywordsPlus) in [str, unicode]:
entry.keywordsPlus = [k.strip().upper() for k
in entry.keywordsPlus.split(';')]
|
[
"def",
"postprocess_keywordsPlus",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"keywordsPlus",
")",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"entry",
".",
"keywordsPlus",
"=",
"[",
"k",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"k",
"in",
"entry",
".",
"keywordsPlus",
".",
"split",
"(",
"';'",
")",
"]"
] |
Parse WoS "Keyword Plus" keywords.
Keyword Plus keywords are usually semicolon-delimited.
|
[
"Parse",
"WoS",
"Keyword",
"Plus",
"keywords",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L268-L277
|
10,052
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_funding
|
def postprocess_funding(self, entry):
"""
Separates funding agency from grant numbers.
"""
if type(entry.funding) not in [str, unicode]:
return
sources = [fu.strip() for fu in entry.funding.split(';')]
sources_processed = []
for source in sources:
m = re.search('(.*)?\s+\[(.+)\]', source)
if m:
agency, grant = m.groups()
else:
agency, grant = source, None
sources_processed.append((agency, grant))
entry.funding = sources_processed
|
python
|
def postprocess_funding(self, entry):
"""
Separates funding agency from grant numbers.
"""
if type(entry.funding) not in [str, unicode]:
return
sources = [fu.strip() for fu in entry.funding.split(';')]
sources_processed = []
for source in sources:
m = re.search('(.*)?\s+\[(.+)\]', source)
if m:
agency, grant = m.groups()
else:
agency, grant = source, None
sources_processed.append((agency, grant))
entry.funding = sources_processed
|
[
"def",
"postprocess_funding",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"funding",
")",
"not",
"in",
"[",
"str",
",",
"unicode",
"]",
":",
"return",
"sources",
"=",
"[",
"fu",
".",
"strip",
"(",
")",
"for",
"fu",
"in",
"entry",
".",
"funding",
".",
"split",
"(",
"';'",
")",
"]",
"sources_processed",
"=",
"[",
"]",
"for",
"source",
"in",
"sources",
":",
"m",
"=",
"re",
".",
"search",
"(",
"'(.*)?\\s+\\[(.+)\\]'",
",",
"source",
")",
"if",
"m",
":",
"agency",
",",
"grant",
"=",
"m",
".",
"groups",
"(",
")",
"else",
":",
"agency",
",",
"grant",
"=",
"source",
",",
"None",
"sources_processed",
".",
"append",
"(",
"(",
"agency",
",",
"grant",
")",
")",
"entry",
".",
"funding",
"=",
"sources_processed"
] |
Separates funding agency from grant numbers.
|
[
"Separates",
"funding",
"agency",
"from",
"grant",
"numbers",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L279-L296
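A standalone check of the agency/grant regex used above; the funding string and grant number are invented for illustration, and the raw-string form of the same pattern is used to avoid escape warnings.

import re

source = "National Science Foundation [SES-1059884]"   # hypothetical funding string
agency, grant = re.search(r'(.*)?\s+\[(.+)\]', source).groups()
# agency == 'National Science Foundation', grant == 'SES-1059884'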
|
10,053
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_authors_full
|
def postprocess_authors_full(self, entry):
"""
If only a single author was found, ensure that ``authors_full`` is
nonetheless a list.
"""
if type(entry.authors_full) is not list:
entry.authors_full = [entry.authors_full]
|
python
|
def postprocess_authors_full(self, entry):
"""
If only a single author was found, ensure that ``authors_full`` is
nonetheless a list.
"""
if type(entry.authors_full) is not list:
entry.authors_full = [entry.authors_full]
|
[
"def",
"postprocess_authors_full",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"authors_full",
")",
"is",
"not",
"list",
":",
"entry",
".",
"authors_full",
"=",
"[",
"entry",
".",
"authors_full",
"]"
] |
If only a single author was found, ensure that ``authors_full`` is
nonetheless a list.
|
[
"If",
"only",
"a",
"single",
"author",
"was",
"found",
"ensure",
"that",
"authors_full",
"is",
"nonetheless",
"a",
"list",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L298-L304
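This record and the two that follow (``authors_init``, ``citedReferences``) guard against a lone value arriving as a scalar rather than a list; the pattern in isolation:

value = "NIXON R"               # a single author parsed as a bare string
if type(value) is not list:
    value = [value]
# value == ['NIXON R']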
|
10,054
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_authors_init
|
def postprocess_authors_init(self, entry):
"""
If only a single author was found, ensure that ``authors_init`` is
nonetheless a list.
"""
if type(entry.authors_init) is not list:
entry.authors_init = [entry.authors_init]
|
python
|
def postprocess_authors_init(self, entry):
"""
If only a single author was found, ensure that ``authors_init`` is
nonetheless a list.
"""
if type(entry.authors_init) is not list:
entry.authors_init = [entry.authors_init]
|
[
"def",
"postprocess_authors_init",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"authors_init",
")",
"is",
"not",
"list",
":",
"entry",
".",
"authors_init",
"=",
"[",
"entry",
".",
"authors_init",
"]"
] |
If only a single author was found, ensure that ``authors_init`` is
nonetheless a list.
|
[
"If",
"only",
"a",
"single",
"author",
"was",
"found",
"ensure",
"that",
"authors_init",
"is",
"nonetheless",
"a",
"list",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L306-L312
|
10,055
|
diging/tethne
|
tethne/readers/wos.py
|
WoSParser.postprocess_citedReferences
|
def postprocess_citedReferences(self, entry):
"""
If only a single cited reference was found, ensure that
``citedReferences`` is nonetheless a list.
"""
if type(entry.citedReferences) is not list:
entry.citedReferences = [entry.citedReferences]
|
python
|
def postprocess_citedReferences(self, entry):
"""
If only a single cited reference was found, ensure that
``citedReferences`` is nonetheless a list.
"""
if type(entry.citedReferences) is not list:
entry.citedReferences = [entry.citedReferences]
|
[
"def",
"postprocess_citedReferences",
"(",
"self",
",",
"entry",
")",
":",
"if",
"type",
"(",
"entry",
".",
"citedReferences",
")",
"is",
"not",
"list",
":",
"entry",
".",
"citedReferences",
"=",
"[",
"entry",
".",
"citedReferences",
"]"
] |
If only a single cited reference was found, ensure that
``citedReferences`` is nonetheless a list.
|
[
"If",
"only",
"a",
"single",
"cited",
"reference",
"was",
"found",
"ensure",
"that",
"citedReferences",
"is",
"nonetheless",
"a",
"list",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/wos.py#L314-L320
|
10,056
|
diging/tethne
|
tethne/plot/__init__.py
|
plot_burstness
|
def plot_burstness(corpus, B, **kwargs):
"""
Generate a figure depicting burstness profiles for ``feature``.
Parameters
----------
B
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Examples
--------
.. code-block:: python
>>> from tethne.analyze.corpus import burstness
>>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True)
>>> fig.savefig('~/burstness.png')
Years prior to the first occurrence of each feature are grayed out. Periods
in which the feature was bursty are depicted by colored blocks, the opacity
of which indicates burstness intensity.
.. figure:: _static/images/burstness.png
:width: 600
:align: center
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
except ImportError:
raise RuntimeError('This method requires the package matplotlib.')
color = kwargs.get('color', 'red')
# Get width based on slices.
years = sorted(corpus.indices['date'].keys())
width = years[1] - years[0]
height = 1.0
fig = plt.figure(figsize=(10,len(B)/4.))
f = 1
axes = {}
for key, value in B.iteritems():
x,y = value
ax = fig.add_subplot(len(B),1,f)
f+=1
ax.set_yticks([])
ax.set_xbound(min(years), max(years) + 1)
if not f == len(B)+1: # Only show xticks on the bottom subplot.
ax.set_xticklabels([])
# Block out years until first occurrence of feature.
rect = mpatches.Rectangle((min(years), 0), sorted(x)[0]-min(years),
height, fill=True, linewidth=0.0)
rect.set_facecolor('black')
rect.set_alpha(0.3)
ax.add_patch(rect)
# Add a rectangle for each year, shaded according to burstness state.
for d in xrange(min(x), max(x)):
try:
i = x.index(d)
except ValueError:
continue
xy = (d, 0.)
state = y[i]
rect = mpatches.Rectangle(xy, width, height,
fill=True, linewidth=0.0)
rect.set_facecolor(color)
rect.set_alpha(state)
ax.add_patch(rect)
ax.set_ylabel(key, rotation=0,
horizontalalignment='right',
verticalalignment='center')
plt.subplots_adjust(left=0.5)
fig.tight_layout(h_pad=0.25)
plt.show()
|
python
|
def plot_burstness(corpus, B, **kwargs):
"""
Generate a figure depicting burstness profiles for ``feature``.
Parameters
----------
B
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Examples
--------
.. code-block:: python
>>> from tethne.analyze.corpus import burstness
>>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True)
>>> fig.savefig('~/burstness.png')
Years prior to the first occurrence of each feature are grayed out. Periods
in which the feature was bursty are depicted by colored blocks, the opacity
of which indicates burstness intensity.
.. figure:: _static/images/burstness.png
:width: 600
:align: center
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
except ImportError:
raise RuntimeError('This method requires the package matplotlib.')
color = kwargs.get('color', 'red')
# Get width based on slices.
years = sorted(corpus.indices['date'].keys())
width = years[1] - years[0]
height = 1.0
fig = plt.figure(figsize=(10,len(B)/4.))
f = 1
axes = {}
for key, value in B.iteritems():
x,y = value
ax = fig.add_subplot(len(B),1,f)
f+=1
ax.set_yticks([])
ax.set_xbound(min(years), max(years) + 1)
if not f == len(B)+1: # Only show xticks on the bottom subplot.
ax.set_xticklabels([])
# Block out years until first occurrence of feature.
rect = mpatches.Rectangle((min(years), 0), sorted(x)[0]-min(years),
height, fill=True, linewidth=0.0)
rect.set_facecolor('black')
rect.set_alpha(0.3)
ax.add_patch(rect)
# Add a rectangle for each year, shaded according to burstness state.
for d in xrange(min(x), max(x)):
try:
i = x.index(d)
except ValueError:
continue
xy = (d, 0.)
state = y[i]
rect = mpatches.Rectangle(xy, width, height,
fill=True, linewidth=0.0)
rect.set_facecolor(color)
rect.set_alpha(state)
ax.add_patch(rect)
ax.set_ylabel(key, rotation=0,
horizontalalignment='right',
verticalalignment='center')
plt.subplots_adjust(left=0.5)
fig.tight_layout(h_pad=0.25)
plt.show()
|
[
"def",
"plot_burstness",
"(",
"corpus",
",",
"B",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"matplotlib",
".",
"patches",
"as",
"mpatches",
"except",
"ImportError",
":",
"raise",
"RuntimeError",
"(",
"'This method requires the package matplotlib.'",
")",
"color",
"=",
"kwargs",
".",
"get",
"(",
"'color'",
",",
"'red'",
")",
"# Get width based on slices.",
"years",
"=",
"sorted",
"(",
"corpus",
".",
"indices",
"[",
"'date'",
"]",
".",
"keys",
"(",
")",
")",
"width",
"=",
"years",
"[",
"1",
"]",
"-",
"years",
"[",
"0",
"]",
"height",
"=",
"1.0",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"len",
"(",
"B",
")",
"/",
"4.",
")",
")",
"f",
"=",
"1",
"axes",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"B",
".",
"iteritems",
"(",
")",
":",
"x",
",",
"y",
"=",
"value",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"len",
"(",
"B",
")",
",",
"1",
",",
"f",
")",
"f",
"+=",
"1",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_xbound",
"(",
"min",
"(",
"years",
")",
",",
"max",
"(",
"years",
")",
"+",
"1",
")",
"if",
"not",
"f",
"==",
"len",
"(",
"B",
")",
"+",
"1",
":",
"# Only show xticks on the bottom subplot.",
"ax",
".",
"set_xticklabels",
"(",
"[",
"]",
")",
"# Block out years until first occurrence of feature.",
"rect",
"=",
"mpatches",
".",
"Rectangle",
"(",
"(",
"min",
"(",
"years",
")",
",",
"0",
")",
",",
"sorted",
"(",
"x",
")",
"[",
"0",
"]",
"-",
"min",
"(",
"years",
")",
",",
"height",
",",
"fill",
"=",
"True",
",",
"linewidth",
"=",
"0.0",
")",
"rect",
".",
"set_facecolor",
"(",
"'black'",
")",
"rect",
".",
"set_alpha",
"(",
"0.3",
")",
"ax",
".",
"add_patch",
"(",
"rect",
")",
"# Add a rectangle for each year, shaded according to burstness state.",
"for",
"d",
"in",
"xrange",
"(",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
")",
":",
"try",
":",
"i",
"=",
"x",
".",
"index",
"(",
"d",
")",
"except",
"ValueError",
":",
"continue",
"xy",
"=",
"(",
"d",
",",
"0.",
")",
"state",
"=",
"y",
"[",
"i",
"]",
"rect",
"=",
"mpatches",
".",
"Rectangle",
"(",
"xy",
",",
"width",
",",
"height",
",",
"fill",
"=",
"True",
",",
"linewidth",
"=",
"0.0",
")",
"rect",
".",
"set_facecolor",
"(",
"color",
")",
"rect",
".",
"set_alpha",
"(",
"state",
")",
"ax",
".",
"add_patch",
"(",
"rect",
")",
"ax",
".",
"set_ylabel",
"(",
"key",
",",
"rotation",
"=",
"0",
",",
"horizontalalignment",
"=",
"'right'",
",",
"verticalalignment",
"=",
"'center'",
")",
"plt",
".",
"subplots_adjust",
"(",
"left",
"=",
"0.5",
")",
"fig",
".",
"tight_layout",
"(",
"h_pad",
"=",
"0.25",
")",
"plt",
".",
"show",
"(",
")"
] |
Generate a figure depicting burstness profiles for ``feature``.
Parameters
----------
B
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Examples
--------
.. code-block:: python
>>> from tethne.analyze.corpus import burstness
>>> fig = plot_burstness(corpus, 'citations', topn=2, perslice=True)
>>> fig.savefig('~/burstness.png')
Years prior to the first occurrence of each feature are grayed out. Periods
in which the feature was bursty are depicted by colored blocks, the opacity
of which indicates burstness intensity.
.. figure:: _static/images/burstness.png
:width: 600
:align: center
|
[
"Generate",
"a",
"figure",
"depicting",
"burstness",
"profiles",
"for",
"feature",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/plot/__init__.py#L11-L97
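A sketch of calling ``plot_burstness``; the shape of ``B`` (feature -> (years, burstness states in 0..1)) is inferred from how the record's code unpacks it, and ``corpus`` is assumed to be a date-indexed Corpus as in the earlier ``wos.read`` example.

# Hypothetical burstness values; the states act as alpha levels for the rectangles.
B = {'oscillator': ([1995, 1996, 1997], [0.1, 0.6, 0.9]),
     'heredity':   ([1996, 1997, 1998], [0.2, 0.2, 0.8])}
plot_burstness(corpus, B, color='blue')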
|
10,057
|
diging/tethne
|
tethne/networks/helpers.py
|
simplify_multigraph
|
def simplify_multigraph(multigraph, time=False):
"""
Simplifies a graph by condensing multiple edges between the same node pair
into a single edge, with a weight attribute equal to the number of edges.
Parameters
----------
graph : networkx.MultiGraph
E.g. a coauthorship graph.
time : bool
If True, will generate 'start' and 'end' attributes for each edge,
corresponding to the earliest and latest 'date' values for that edge.
Returns
-------
graph : networkx.Graph
A NetworkX :class:`.graph` .
"""
graph = nx.Graph()
for node in multigraph.nodes(data=True):
u = node[0]
node_attribs = node[1]
graph.add_node(u, node_attribs)
for v in multigraph[u]:
edges = multigraph.get_edge_data(u, v) # Dict.
edge_attribs = { 'weight': len(edges) }
if time: # Look for a date in each edge.
start = 3000
end = 0
found_date = False
for edge in edges.values():
try:
found_date = True
if edge['date'] < start:
start = edge['date']
if edge['date'] > end:
end = edge['date']
except KeyError: # No date to be found.
pass
if found_date: # If no date found, don't add start/end atts.
edge_attribs['start'] = start
edge_attribs['end'] = end
graph.add_edge(u, v, edge_attribs)
return graph
|
python
|
def simplify_multigraph(multigraph, time=False):
"""
Simplifies a graph by condensing multiple edges between the same node pair
into a single edge, with a weight attribute equal to the number of edges.
Parameters
----------
graph : networkx.MultiGraph
E.g. a coauthorship graph.
time : bool
If True, will generate 'start' and 'end' attributes for each edge,
corresponding to the earliest and latest 'date' values for that edge.
Returns
-------
graph : networkx.Graph
A NetworkX :class:`.graph` .
"""
graph = nx.Graph()
for node in multigraph.nodes(data=True):
u = node[0]
node_attribs = node[1]
graph.add_node(u, node_attribs)
for v in multigraph[u]:
edges = multigraph.get_edge_data(u, v) # Dict.
edge_attribs = { 'weight': len(edges) }
if time: # Look for a date in each edge.
start = 3000
end = 0
found_date = False
for edge in edges.values():
try:
found_date = True
if edge['date'] < start:
start = edge['date']
if edge['date'] > end:
end = edge['date']
except KeyError: # No date to be found.
pass
if found_date: # If no date found, don't add start/end atts.
edge_attribs['start'] = start
edge_attribs['end'] = end
graph.add_edge(u, v, edge_attribs)
return graph
|
[
"def",
"simplify_multigraph",
"(",
"multigraph",
",",
"time",
"=",
"False",
")",
":",
"graph",
"=",
"nx",
".",
"Graph",
"(",
")",
"for",
"node",
"in",
"multigraph",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"u",
"=",
"node",
"[",
"0",
"]",
"node_attribs",
"=",
"node",
"[",
"1",
"]",
"graph",
".",
"add_node",
"(",
"u",
",",
"node_attribs",
")",
"for",
"v",
"in",
"multigraph",
"[",
"u",
"]",
":",
"edges",
"=",
"multigraph",
".",
"get_edge_data",
"(",
"u",
",",
"v",
")",
"# Dict.",
"edge_attribs",
"=",
"{",
"'weight'",
":",
"len",
"(",
"edges",
")",
"}",
"if",
"time",
":",
"# Look for a date in each edge.",
"start",
"=",
"3000",
"end",
"=",
"0",
"found_date",
"=",
"False",
"for",
"edge",
"in",
"edges",
".",
"values",
"(",
")",
":",
"try",
":",
"found_date",
"=",
"True",
"if",
"edge",
"[",
"'date'",
"]",
"<",
"start",
":",
"start",
"=",
"edge",
"[",
"'date'",
"]",
"if",
"edge",
"[",
"'date'",
"]",
">",
"end",
":",
"end",
"=",
"edge",
"[",
"'date'",
"]",
"except",
"KeyError",
":",
"# No date to be found.",
"pass",
"if",
"found_date",
":",
"# If no date found, don't add start/end atts.",
"edge_attribs",
"[",
"'start'",
"]",
"=",
"start",
"edge_attribs",
"[",
"'end'",
"]",
"=",
"end",
"graph",
".",
"add_edge",
"(",
"u",
",",
"v",
",",
"edge_attribs",
")",
"return",
"graph"
] |
Simplifies a graph by condensing multiple edges between the same node pair
into a single edge, with a weight attribute equal to the number of edges.
Parameters
----------
graph : networkx.MultiGraph
E.g. a coauthorship graph.
time : bool
If True, will generate 'start' and 'end' attributes for each edge,
corresponding to the earliest and latest 'date' values for that edge.
Returns
-------
graph : networkx.Graph
A NetworkX :class:`.graph` .
|
[
"Simplifies",
"a",
"graph",
"by",
"condensing",
"multiple",
"edges",
"between",
"the",
"same",
"node",
"pair",
"into",
"a",
"single",
"edge",
"with",
"a",
"weight",
"attribute",
"equal",
"to",
"the",
"number",
"of",
"edges",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/helpers.py#L28-L81
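A minimal round trip through ``simplify_multigraph``; networkx 1.x is assumed here, since the record passes attribute dicts positionally to ``add_node``/``add_edge``.

import networkx as nx

mg = nx.MultiGraph()
mg.add_edge('a', 'b', date=1999)
mg.add_edge('a', 'b', date=2003)

g = simplify_multigraph(mg, time=True)
# g['a']['b'] == {'weight': 2, 'start': 1999, 'end': 2003}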
|
10,058
|
diging/tethne
|
tethne/networks/helpers.py
|
citation_count
|
def citation_count(papers, key='ayjid', verbose=False):
"""
Generates citation counts for all of the papers cited by papers.
Parameters
----------
papers : list
A list of :class:`.Paper` instances.
key : str
Property to use as node key. Default is 'ayjid' (recommended).
verbose : bool
If True, prints status messages.
Returns
-------
counts : dict
Citation counts for all papers cited by papers.
"""
if verbose:
print "Generating citation counts for "+unicode(len(papers))+" papers..."
counts = Counter()
for P in papers:
if P['citations'] is not None:
for p in P['citations']:
counts[p[key]] += 1
return counts
|
python
|
def citation_count(papers, key='ayjid', verbose=False):
"""
Generates citation counts for all of the papers cited by papers.
Parameters
----------
papers : list
A list of :class:`.Paper` instances.
key : str
Property to use as node key. Default is 'ayjid' (recommended).
verbose : bool
If True, prints status messages.
Returns
-------
counts : dict
Citation counts for all papers cited by papers.
"""
if verbose:
print "Generating citation counts for "+unicode(len(papers))+" papers..."
counts = Counter()
for P in papers:
if P['citations'] is not None:
for p in P['citations']:
counts[p[key]] += 1
return counts
|
[
"def",
"citation_count",
"(",
"papers",
",",
"key",
"=",
"'ayjid'",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"\"Generating citation counts for \"",
"+",
"unicode",
"(",
"len",
"(",
"papers",
")",
")",
"+",
"\" papers...\"",
"counts",
"=",
"Counter",
"(",
")",
"for",
"P",
"in",
"papers",
":",
"if",
"P",
"[",
"'citations'",
"]",
"is",
"not",
"None",
":",
"for",
"p",
"in",
"P",
"[",
"'citations'",
"]",
":",
"counts",
"[",
"p",
"[",
"key",
"]",
"]",
"+=",
"1",
"return",
"counts"
] |
Generates citation counts for all of the papers cited by papers.
Parameters
----------
papers : list
A list of :class:`.Paper` instances.
key : str
Property to use as node key. Default is 'ayjid' (recommended).
verbose : bool
If True, prints status messages.
Returns
-------
counts : dict
Citation counts for all papers cited by papers.
|
[
"Generates",
"citation",
"counts",
"for",
"all",
"of",
"the",
"papers",
"cited",
"by",
"papers",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/helpers.py#L83-L111
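Plain dicts stand in for :class:`.Paper` instances below, which is enough for the lookups the record performs (``P['citations']`` and ``p['ayjid']``).

papers = [{'citations': [{'ayjid': 'SMITH J 1990 NATURE'},
                         {'ayjid': 'DOE J 1985 SCIENCE'}]},
          {'citations': [{'ayjid': 'SMITH J 1990 NATURE'}]}]
counts = citation_count(papers)
# Counter({'SMITH J 1990 NATURE': 2, 'DOE J 1985 SCIENCE': 1})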
|
10,059
|
diging/tethne
|
tethne/analyze/collection.py
|
connected
|
def connected(G, method_name, **kwargs):
"""
Performs analysis methods from networkx.connected on each graph in the
collection.
Parameters
----------
G : :class:`.GraphCollection`
The :class:`.GraphCollection` to analyze. The specified method will be
applied to each graph in ``G``.
method : string
Name of method in networkx.connected.
**kwargs : kwargs
Keyword arguments, passed directly to method.
Returns
-------
results : dict
Keys are graph indices, values are output of method for that graph.
Raises
------
ValueError
If name is not in networkx.connected, or if no such method exists.
"""
warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.",
DeprecationWarning)
return G.analyze(['connected', method_name], **kwargs)
|
python
|
def connected(G, method_name, **kwargs):
"""
Performs analysis methods from networkx.connected on each graph in the
collection.
Parameters
----------
G : :class:`.GraphCollection`
The :class:`.GraphCollection` to analyze. The specified method will be
applied to each graph in ``G``.
method : string
Name of method in networkx.connected.
**kwargs : kwargs
Keyword arguments, passed directly to method.
Returns
-------
results : dict
Keys are graph indices, values are output of method for that graph.
Raises
------
ValueError
If name is not in networkx.connected, or if no such method exists.
"""
warnings.warn("To be removed in 0.8. Use GraphCollection.analyze instead.",
DeprecationWarning)
return G.analyze(['connected', method_name], **kwargs)
|
[
"def",
"connected",
"(",
"G",
",",
"method_name",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"To be removed in 0.8. Use GraphCollection.analyze instead.\"",
",",
"DeprecationWarning",
")",
"return",
"G",
".",
"analyze",
"(",
"[",
"'connected'",
",",
"method_name",
"]",
",",
"*",
"*",
"kwargs",
")"
] |
Performs analysis methods from networkx.connected on each graph in the
collection.
Parameters
----------
G : :class:`.GraphCollection`
The :class:`.GraphCollection` to analyze. The specified method will be
applied to each graph in ``G``.
method : string
Name of method in networkx.connected.
**kwargs : kwargs
Keyword arguments, passed directly to method.
Returns
-------
results : dict
Keys are graph indices, values are output of method for that graph.
Raises
------
ValueError
If name is not in networkx.connected, or if no such method exists.
|
[
"Performs",
"analysis",
"methods",
"from",
"networkx",
".",
"connected",
"on",
"each",
"graph",
"in",
"the",
"collection",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/collection.py#L72-L101
|
10,060
|
diging/tethne
|
tethne/analyze/collection.py
|
attachment_probability
|
def attachment_probability(G):
"""
Calculates the observed attachment probability for each node at each
time-step.
Attachment probability is calculated based on the observed new edges in the
next time-step. So if a node acquires new edges at time t, this will accrue
to the node's attachment probability at time t-1. Thus at a given time,
one can ask whether degree and attachment probability are related.
Parameters
----------
G : :class:`.GraphCollection`
Must be sliced by 'date'. See :func:`.GraphCollection.slice`\.
Returns
-------
probs : dict
Keyed by index in G.graphs, and then by node.
"""
warnings.warn("Removed in 0.8. Too domain-specific.")
probs = {}
G_ = None
k_ = None
for k,g in G.graphs.iteritems():
new_edges = {}
if G_ is not None:
for n in g.nodes():
try:
old_neighbors = set(G_[n].keys())
if len(old_neighbors) > 0:
new_neighbors = set(g[n].keys()) - old_neighbors
new_edges[n] = float(len(new_neighbors))
else:
new_edges[n] = 0.
except KeyError:
pass
N = sum( new_edges.values() )
probs[k_] = { n:0. for n in G_.nodes() }
if N > 0.:
for n in G.nodes():
try:
probs[k_][n] = new_edges[n]/N
except KeyError:
pass
if probs[k_] is not None:
networkx.set_node_attributes(G.graphs[k_],
'attachment_probability',
probs[k_])
G_ = G
k_ = k
# Handle last graph (no values).
key = G.graphs.keys()[-1]
zprobs = { n:0. for n in G.graphs[key].nodes() }
networkx.set_node_attributes(G.graphs[key], 'attachment_probability', zprobs)
return probs
|
python
|
def attachment_probability(G):
"""
Calculates the observed attachment probability for each node at each
time-step.
Attachment probability is calculated based on the observed new edges in the
next time-step. So if a node acquires new edges at time t, this will accrue
to the node's attachment probability at time t-1. Thus at a given time,
one can ask whether degree and attachment probability are related.
Parameters
----------
G : :class:`.GraphCollection`
Must be sliced by 'date'. See :func:`.GraphCollection.slice`\.
Returns
-------
probs : dict
Keyed by index in G.graphs, and then by node.
"""
warnings.warn("Removed in 0.8. Too domain-specific.")
probs = {}
G_ = None
k_ = None
for k,g in G.graphs.iteritems():
new_edges = {}
if G_ is not None:
for n in g.nodes():
try:
old_neighbors = set(G_[n].keys())
if len(old_neighbors) > 0:
new_neighbors = set(g[n].keys()) - old_neighbors
new_edges[n] = float(len(new_neighbors))
else:
new_edges[n] = 0.
except KeyError:
pass
N = sum( new_edges.values() )
probs[k_] = { n:0. for n in G_.nodes() }
if N > 0.:
for n in G.nodes():
try:
probs[k_][n] = new_edges[n]/N
except KeyError:
pass
if probs[k_] is not None:
networkx.set_node_attributes(G.graphs[k_],
'attachment_probability',
probs[k_])
G_ = G
k_ = k
# Handle last graph (no values).
key = G.graphs.keys()[-1]
zprobs = { n:0. for n in G.graphs[key].nodes() }
networkx.set_node_attributes(G.graphs[key], 'attachment_probability', zprobs)
return probs
|
[
"def",
"attachment_probability",
"(",
"G",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Removed in 0.8. Too domain-specific.\"",
")",
"probs",
"=",
"{",
"}",
"G_",
"=",
"None",
"k_",
"=",
"None",
"for",
"k",
",",
"g",
"in",
"G",
".",
"graphs",
".",
"iteritems",
"(",
")",
":",
"new_edges",
"=",
"{",
"}",
"if",
"G_",
"is",
"not",
"None",
":",
"for",
"n",
"in",
"g",
".",
"nodes",
"(",
")",
":",
"try",
":",
"old_neighbors",
"=",
"set",
"(",
"G_",
"[",
"n",
"]",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"old_neighbors",
")",
">",
"0",
":",
"new_neighbors",
"=",
"set",
"(",
"g",
"[",
"n",
"]",
".",
"keys",
"(",
")",
")",
"-",
"old_neighbors",
"new_edges",
"[",
"n",
"]",
"=",
"float",
"(",
"len",
"(",
"new_neighbors",
")",
")",
"else",
":",
"new_edges",
"[",
"n",
"]",
"=",
"0.",
"except",
"KeyError",
":",
"pass",
"N",
"=",
"sum",
"(",
"new_edges",
".",
"values",
"(",
")",
")",
"probs",
"[",
"k_",
"]",
"=",
"{",
"n",
":",
"0.",
"for",
"n",
"in",
"G_",
".",
"nodes",
"(",
")",
"}",
"if",
"N",
">",
"0.",
":",
"for",
"n",
"in",
"G",
".",
"nodes",
"(",
")",
":",
"try",
":",
"probs",
"[",
"k_",
"]",
"[",
"n",
"]",
"=",
"new_edges",
"[",
"n",
"]",
"/",
"N",
"except",
"KeyError",
":",
"pass",
"if",
"probs",
"[",
"k_",
"]",
"is",
"not",
"None",
":",
"networkx",
".",
"set_node_attributes",
"(",
"G",
".",
"graphs",
"[",
"k_",
"]",
",",
"'attachment_probability'",
",",
"probs",
"[",
"k_",
"]",
")",
"G_",
"=",
"G",
"k_",
"=",
"k",
"# Handle last graph (no values).",
"key",
"=",
"G",
".",
"graphs",
".",
"keys",
"(",
")",
"[",
"-",
"1",
"]",
"zprobs",
"=",
"{",
"n",
":",
"0.",
"for",
"n",
"in",
"G",
".",
"graphs",
"[",
"key",
"]",
".",
"nodes",
"(",
")",
"}",
"networkx",
".",
"set_node_attributes",
"(",
"G",
".",
"graphs",
"[",
"key",
"]",
",",
"'attachment_probability'",
",",
"zprobs",
")",
"return",
"probs"
] |
Calculates the observed attachment probability for each node at each
time-step.
Attachment probability is calculated based on the observed new edges in the
next time-step. So if a node acquires new edges at time t, this will accrue
to the node's attachment probability at time t-1. Thus at a given time,
one can ask whether degree and attachment probability are related.
Parameters
----------
G : :class:`.GraphCollection`
Must be sliced by 'date'. See :func:`.GraphCollection.slice`\.
Returns
-------
probs : dict
Keyed by index in G.graphs, and then by node.
|
[
"Calculates",
"the",
"observed",
"attachment",
"probability",
"for",
"each",
"node",
"at",
"each",
"time",
"-",
"step",
".",
"Attachment",
"probability",
"is",
"calculated",
"based",
"on",
"the",
"observed",
"new",
"edges",
"in",
"the",
"next",
"time",
"-",
"step",
".",
"So",
"if",
"a",
"node",
"acquires",
"new",
"edges",
"at",
"time",
"t",
"this",
"will",
"accrue",
"to",
"the",
"node",
"s",
"attachment",
"probability",
"at",
"time",
"t",
"-",
"1",
".",
"Thus",
"at",
"a",
"given",
"time",
"one",
"can",
"ask",
"whether",
"degree",
"and",
"attachment",
"probability",
"are",
"related",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/collection.py#L104-L166
|
10,061
|
diging/tethne
|
tethne/analyze/graph.py
|
global_closeness_centrality
|
def global_closeness_centrality(g, node=None, normalize=True):
"""
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
"""
if not node:
C = {}
for node in g.nodes():
C[node] = global_closeness_centrality(g, node, normalize=normalize)
return C
values = nx.shortest_path_length(g, node).values()
c = sum([1./pl for pl in values if pl != 0.]) / len(g)
if normalize:
ac = 0
for sg in nx.connected_component_subgraphs(g):
if len(sg.nodes()) > 1:
aspl = nx.average_shortest_path_length(sg)
ac += (1./aspl) * (float(len(sg)) / float(len(g))**2 )
c = c/ac
return c
|
python
|
def global_closeness_centrality(g, node=None, normalize=True):
"""
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
"""
if not node:
C = {}
for node in g.nodes():
C[node] = global_closeness_centrality(g, node, normalize=normalize)
return C
values = nx.shortest_path_length(g, node).values()
c = sum([1./pl for pl in values if pl != 0.]) / len(g)
if normalize:
ac = 0
for sg in nx.connected_component_subgraphs(g):
if len(sg.nodes()) > 1:
aspl = nx.average_shortest_path_length(sg)
ac += (1./aspl) * (float(len(sg)) / float(len(g))**2 )
c = c/ac
return c
|
[
"def",
"global_closeness_centrality",
"(",
"g",
",",
"node",
"=",
"None",
",",
"normalize",
"=",
"True",
")",
":",
"if",
"not",
"node",
":",
"C",
"=",
"{",
"}",
"for",
"node",
"in",
"g",
".",
"nodes",
"(",
")",
":",
"C",
"[",
"node",
"]",
"=",
"global_closeness_centrality",
"(",
"g",
",",
"node",
",",
"normalize",
"=",
"normalize",
")",
"return",
"C",
"values",
"=",
"nx",
".",
"shortest_path_length",
"(",
"g",
",",
"node",
")",
".",
"values",
"(",
")",
"c",
"=",
"sum",
"(",
"[",
"1.",
"/",
"pl",
"for",
"pl",
"in",
"values",
"if",
"pl",
"!=",
"0.",
"]",
")",
"/",
"len",
"(",
"g",
")",
"if",
"normalize",
":",
"ac",
"=",
"0",
"for",
"sg",
"in",
"nx",
".",
"connected_component_subgraphs",
"(",
"g",
")",
":",
"if",
"len",
"(",
"sg",
".",
"nodes",
"(",
")",
")",
">",
"1",
":",
"aspl",
"=",
"nx",
".",
"average_shortest_path_length",
"(",
"sg",
")",
"ac",
"+=",
"(",
"1.",
"/",
"aspl",
")",
"*",
"(",
"float",
"(",
"len",
"(",
"sg",
")",
")",
"/",
"float",
"(",
"len",
"(",
"g",
")",
")",
"**",
"2",
")",
"c",
"=",
"c",
"/",
"ac",
"return",
"c"
] |
Calculates global closeness centrality for one or all nodes in the network.
See :func:`.node_global_closeness_centrality` for more information.
Parameters
----------
g : networkx.Graph
normalize : boolean
If True, normalizes centrality based on the average shortest path
length. Default is True.
Returns
-------
C : dict
Dictionary of results, with node identifiers as keys and gcc as values.
|
[
"Calculates",
"global",
"closeness",
"centrality",
"for",
"one",
"or",
"all",
"nodes",
"in",
"the",
"network",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/graph.py#L13-L49
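A toy graph for the record above; an older networkx (one that still ships ``connected_component_subgraphs``) is assumed, and string node labels are used because the ``if not node`` guard would misread a node named ``0`` as a request to compute all nodes.

import networkx as nx

g = nx.Graph()
g.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd')])

C = global_closeness_centrality(g)             # dict of node -> value
c_b = global_closeness_centrality(g, node='b') # single node's value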
|
10,062
|
diging/tethne
|
tethne/readers/dfr.py
|
ngrams
|
def ngrams(path, elem, ignore_hash=True):
"""
Yields N-grams from a JSTOR DfR dataset.
Parameters
----------
path : string
Path to unzipped JSTOR DfR folder containing N-grams.
elem : string
Name of subdirectory containing N-grams. (e.g. 'bigrams').
ignore_hash : bool
If True, will exclude all N-grams that contain the hash '#' character.
Returns
-------
ngrams : :class:`.FeatureSet`
"""
grams = GramGenerator(path, elem, ignore_hash=ignore_hash)
return FeatureSet({k: Feature(f) for k, f in grams})
|
python
|
def ngrams(path, elem, ignore_hash=True):
"""
Yields N-grams from a JSTOR DfR dataset.
Parameters
----------
path : string
Path to unzipped JSTOR DfR folder containing N-grams.
elem : string
Name of subdirectory containing N-grams. (e.g. 'bigrams').
ignore_hash : bool
If True, will exclude all N-grams that contain the hash '#' character.
Returns
-------
ngrams : :class:`.FeatureSet`
"""
grams = GramGenerator(path, elem, ignore_hash=ignore_hash)
return FeatureSet({k: Feature(f) for k, f in grams})
|
[
"def",
"ngrams",
"(",
"path",
",",
"elem",
",",
"ignore_hash",
"=",
"True",
")",
":",
"grams",
"=",
"GramGenerator",
"(",
"path",
",",
"elem",
",",
"ignore_hash",
"=",
"ignore_hash",
")",
"return",
"FeatureSet",
"(",
"{",
"k",
":",
"Feature",
"(",
"f",
")",
"for",
"k",
",",
"f",
"in",
"grams",
"}",
")"
] |
Yields N-grams from a JSTOR DfR dataset.
Parameters
----------
path : string
Path to unzipped JSTOR DfR folder containing N-grams.
elem : string
Name of subdirectory containing N-grams. (e.g. 'bigrams').
ignore_hash : bool
If True, will exclude all N-grams that contain the hash '#' character.
Returns
-------
ngrams : :class:`.FeatureSet`
|
[
"Yields",
"N",
"-",
"grams",
"from",
"a",
"JSTOR",
"DfR",
"dataset",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L294-L314
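A usage sketch for the DfR ``ngrams`` record; the path and subdirectory name are hypothetical, and the keys of the returned :class:`.FeatureSet` are the document identifiers (DOIs) read from the XML, as the ``GramGenerator._get`` record further down suggests.

# Hypothetical unzipped DfR download containing a 'bigrams' subdirectory.
featureset = ngrams("/path/to/dfr/dataset", "bigrams", ignore_hash=True)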
|
10,063
|
diging/tethne
|
tethne/readers/dfr.py
|
tokenize
|
def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False):
"""
Builds a vocabulary, and replaces words with vocab indices.
Parameters
----------
ngrams : dict
Keys are paper DOIs, values are lists of (Ngram, frequency) tuples.
apply_stoplist : bool
If True, will exclude all N-grams that contain words in the NLTK
stoplist.
Returns
-------
t_ngrams : dict
Tokenized ngrams, as doi:{i:count}.
vocab : dict
Vocabulary as i:term.
token_tf : :class:`.Counter`
Term counts for corpus, as i:count.
"""
vocab = {}
vocab_ = {}
word_tf = Counter()
word_df = Counter()
token_tf = Counter()
token_df = Counter()
t_ngrams = {}
# Get global word counts, first.
for grams in ngrams.values():
for g,c in grams:
word_tf[g] += c
word_df[g] += 1
if apply_stoplist:
stoplist = stopwords.words()
# Now tokenize.
for doi, grams in ngrams.iteritems():
t_ngrams[doi] = []
for g,c in grams:
ignore = False
# Ignore extremely rare words (probably garbage).
if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len:
ignore = True
# Stoplist.
elif apply_stoplist:
for w in g.split():
if w in stoplist:
ignore = True
if not ignore:
# Coerce unicode to string.
if type(g) is str:
g = unicode(g)
g = unidecode(g)
if g not in vocab.values():
i = len(vocab)
vocab[i] = g
vocab_[g] = i
else:
i = vocab_[g]
token_tf[i] += c
token_df[i] += 1
t_ngrams[doi].append( (i,c) )
return t_ngrams, vocab, token_tf
|
python
|
def tokenize(ngrams, min_tf=2, min_df=2, min_len=3, apply_stoplist=False):
"""
Builds a vocabulary, and replaces words with vocab indices.
Parameters
----------
ngrams : dict
Keys are paper DOIs, values are lists of (Ngram, frequency) tuples.
apply_stoplist : bool
If True, will exclude all N-grams that contain words in the NLTK
stoplist.
Returns
-------
t_ngrams : dict
Tokenized ngrams, as doi:{i:count}.
vocab : dict
Vocabulary as i:term.
token_tf : :class:`.Counter`
Term counts for corpus, as i:count.
"""
vocab = {}
vocab_ = {}
word_tf = Counter()
word_df = Counter()
token_tf = Counter()
token_df = Counter()
t_ngrams = {}
# Get global word counts, first.
for grams in ngrams.values():
for g,c in grams:
word_tf[g] += c
word_df[g] += 1
if apply_stoplist:
stoplist = stopwords.words()
# Now tokenize.
for doi, grams in ngrams.iteritems():
t_ngrams[doi] = []
for g,c in grams:
ignore = False
# Ignore extremely rare words (probably garbage).
if word_tf[g] < min_tf or word_df[g] < min_df or len(g) < min_len:
ignore = True
# Stoplist.
elif apply_stoplist:
for w in g.split():
if w in stoplist:
ignore = True
if not ignore:
# Coerce unicode to string.
if type(g) is str:
g = unicode(g)
g = unidecode(g)
if g not in vocab.values():
i = len(vocab)
vocab[i] = g
vocab_[g] = i
else:
i = vocab_[g]
token_tf[i] += c
token_df[i] += 1
t_ngrams[doi].append( (i,c) )
return t_ngrams, vocab, token_tf
|
[
"def",
"tokenize",
"(",
"ngrams",
",",
"min_tf",
"=",
"2",
",",
"min_df",
"=",
"2",
",",
"min_len",
"=",
"3",
",",
"apply_stoplist",
"=",
"False",
")",
":",
"vocab",
"=",
"{",
"}",
"vocab_",
"=",
"{",
"}",
"word_tf",
"=",
"Counter",
"(",
")",
"word_df",
"=",
"Counter",
"(",
")",
"token_tf",
"=",
"Counter",
"(",
")",
"token_df",
"=",
"Counter",
"(",
")",
"t_ngrams",
"=",
"{",
"}",
"# Get global word counts, first.",
"for",
"grams",
"in",
"ngrams",
".",
"values",
"(",
")",
":",
"for",
"g",
",",
"c",
"in",
"grams",
":",
"word_tf",
"[",
"g",
"]",
"+=",
"c",
"word_df",
"[",
"g",
"]",
"+=",
"1",
"if",
"apply_stoplist",
":",
"stoplist",
"=",
"stopwords",
".",
"words",
"(",
")",
"# Now tokenize.",
"for",
"doi",
",",
"grams",
"in",
"ngrams",
".",
"iteritems",
"(",
")",
":",
"t_ngrams",
"[",
"doi",
"]",
"=",
"[",
"]",
"for",
"g",
",",
"c",
"in",
"grams",
":",
"ignore",
"=",
"False",
"# Ignore extremely rare words (probably garbage).",
"if",
"word_tf",
"[",
"g",
"]",
"<",
"min_tf",
"or",
"word_df",
"[",
"g",
"]",
"<",
"min_df",
"or",
"len",
"(",
"g",
")",
"<",
"min_len",
":",
"ignore",
"=",
"True",
"# Stoplist.",
"elif",
"apply_stoplist",
":",
"for",
"w",
"in",
"g",
".",
"split",
"(",
")",
":",
"if",
"w",
"in",
"stoplist",
":",
"ignore",
"=",
"True",
"if",
"not",
"ignore",
":",
"# Coerce unicode to string.",
"if",
"type",
"(",
"g",
")",
"is",
"str",
":",
"g",
"=",
"unicode",
"(",
"g",
")",
"g",
"=",
"unidecode",
"(",
"g",
")",
"if",
"g",
"not",
"in",
"vocab",
".",
"values",
"(",
")",
":",
"i",
"=",
"len",
"(",
"vocab",
")",
"vocab",
"[",
"i",
"]",
"=",
"g",
"vocab_",
"[",
"g",
"]",
"=",
"i",
"else",
":",
"i",
"=",
"vocab_",
"[",
"g",
"]",
"token_tf",
"[",
"i",
"]",
"+=",
"c",
"token_df",
"[",
"i",
"]",
"+=",
"1",
"t_ngrams",
"[",
"doi",
"]",
".",
"append",
"(",
"(",
"i",
",",
"c",
")",
")",
"return",
"t_ngrams",
",",
"vocab",
",",
"token_tf"
] |
Builds a vocabulary, and replaces words with vocab indices.
Parameters
----------
ngrams : dict
Keys are paper DOIs, values are lists of (Ngram, frequency) tuples.
apply_stoplist : bool
If True, will exclude all N-grams that contain words in the NLTK
stoplist.
Returns
-------
t_ngrams : dict
Tokenized ngrams, as doi:{i:count}.
vocab : dict
Vocabulary as i:term.
token_tf : :class:`.Counter`
Term counts for corpus, as i:count.
|
[
"Builds",
"a",
"vocabulary",
"and",
"replaces",
"words",
"with",
"vocab",
"indices",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L317-L390
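Tracing the thresholds in the ``tokenize`` record on a tiny invented input (Python 2, as the record's use of ``iteritems``/``unicode`` requires): 'the' and 'gene' fall below ``min_df`` and are dropped, leaving a one-term vocabulary.

raw = {'10.2307/0001': [('natural selection', 4), ('the', 12)],
       '10.2307/0002': [('natural selection', 2), ('gene', 3)]}
t_ngrams, vocab, token_tf = tokenize(raw, min_tf=2, min_df=2, min_len=3)
# vocab == {0: 'natural selection'}
# t_ngrams == {'10.2307/0001': [(0, 4)], '10.2307/0002': [(0, 2)]}
# token_tf == Counter({0: 6})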
|
10,064
|
diging/tethne
|
tethne/readers/dfr.py
|
_handle_pagerange
|
def _handle_pagerange(pagerange):
"""
Yields start and end pages from DfR pagerange field.
Parameters
----------
pagerange : str or unicode
DfR-style pagerange, e.g. "pp. 435-444".
Returns
-------
start : str
Start page.
end : str
End page.
"""
try:
pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)")
start, end = re.findall(pr, pagerange)[0]
except IndexError:
start = end = 0
return unicode(start), unicode(end)
|
python
|
def _handle_pagerange(pagerange):
"""
Yields start and end pages from DfR pagerange field.
Parameters
----------
pagerange : str or unicode
DfR-style pagerange, e.g. "pp. 435-444".
Returns
-------
start : str
Start page.
end : str
End page.
"""
try:
pr = re.compile("pp\.\s([0-9]+)\-([0-9]+)")
start, end = re.findall(pr, pagerange)[0]
except IndexError:
start = end = 0
return unicode(start), unicode(end)
|
[
"def",
"_handle_pagerange",
"(",
"pagerange",
")",
":",
"try",
":",
"pr",
"=",
"re",
".",
"compile",
"(",
"\"pp\\.\\s([0-9]+)\\-([0-9]+)\"",
")",
"start",
",",
"end",
"=",
"re",
".",
"findall",
"(",
"pr",
",",
"pagerange",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"start",
"=",
"end",
"=",
"0",
"return",
"unicode",
"(",
"start",
")",
",",
"unicode",
"(",
"end",
")"
] |
Yields start and end pages from DfR pagerange field.
Parameters
----------
pagerange : str or unicode
DfR-style pagerange, e.g. "pp. 435-444".
Returns
-------
start : str
Start page.
end : str
End page.
|
[
"Yields",
"start",
"and",
"end",
"pages",
"from",
"DfR",
"pagerange",
"field",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L430-L453
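Two quick traces of the helper above: one string that matches the DfR "pp. X-Y" form, and one that falls through to the zero default.

_handle_pagerange("pp. 435-444")   # (u'435', u'444')
_handle_pagerange("n/a")           # (u'0', u'0')  -- IndexError fallback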
|
10,065
|
diging/tethne
|
tethne/readers/dfr.py
|
_handle_authors
|
def _handle_authors(authors):
"""
Yields aulast and auinit lists from value of authors node.
Parameters
----------
authors : list, str, or unicode
Value or values of 'author' element in DfR XML.
Returns
-------
aulast : list
A list of author surnames (string).
auinit : list
A list of author first-initials (string).
"""
aulast = []
auinit = []
if type(authors) is list:
for author in authors:
if type(author) is str:
author = unicode(author)
author = unidecode(author)
try:
l,i = _handle_author(author)
aulast.append(l)
auinit.append(i)
except ValueError:
pass
elif type(authors) is str or type(authors) is unicode:
if type(authors) is str:
authors = unicode(authors)
author = unidecode(authors)
try:
l,i = _handle_author(author)
aulast.append(l)
auinit.append(i)
except ValueError:
pass
else:
raise ValueError("authors must be a list or a string")
return aulast, auinit
|
python
|
def _handle_authors(authors):
"""
Yields aulast and auinit lists from value of authors node.
Parameters
----------
authors : list, str, or unicode
Value or values of 'author' element in DfR XML.
Returns
-------
aulast : list
A list of author surnames (string).
auinit : list
A list of author first-initials (string).
"""
aulast = []
auinit = []
if type(authors) is list:
for author in authors:
if type(author) is str:
author = unicode(author)
author = unidecode(author)
try:
l,i = _handle_author(author)
aulast.append(l)
auinit.append(i)
except ValueError:
pass
elif type(authors) is str or type(authors) is unicode:
if type(authors) is str:
authors = unicode(authors)
author = unidecode(authors)
try:
l,i = _handle_author(author)
aulast.append(l)
auinit.append(i)
except ValueError:
pass
else:
raise ValueError("authors must be a list or a string")
return aulast, auinit
|
[
"def",
"_handle_authors",
"(",
"authors",
")",
":",
"aulast",
"=",
"[",
"]",
"auinit",
"=",
"[",
"]",
"if",
"type",
"(",
"authors",
")",
"is",
"list",
":",
"for",
"author",
"in",
"authors",
":",
"if",
"type",
"(",
"author",
")",
"is",
"str",
":",
"author",
"=",
"unicode",
"(",
"author",
")",
"author",
"=",
"unidecode",
"(",
"author",
")",
"try",
":",
"l",
",",
"i",
"=",
"_handle_author",
"(",
"author",
")",
"aulast",
".",
"append",
"(",
"l",
")",
"auinit",
".",
"append",
"(",
"i",
")",
"except",
"ValueError",
":",
"pass",
"elif",
"type",
"(",
"authors",
")",
"is",
"str",
"or",
"type",
"(",
"authors",
")",
"is",
"unicode",
":",
"if",
"type",
"(",
"authors",
")",
"is",
"str",
":",
"authors",
"=",
"unicode",
"(",
"authors",
")",
"author",
"=",
"unidecode",
"(",
"authors",
")",
"try",
":",
"l",
",",
"i",
"=",
"_handle_author",
"(",
"author",
")",
"aulast",
".",
"append",
"(",
"l",
")",
"auinit",
".",
"append",
"(",
"i",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"\"authors must be a list or a string\"",
")",
"return",
"aulast",
",",
"auinit"
] |
Yields aulast and auinit lists from value of authors node.
Parameters
----------
authors : list, str, or unicode
Value or values of 'author' element in DfR XML.
Returns
-------
aulast : list
A list of author surnames (string).
auinit : list
A list of author first-initials (string).
|
[
"Yields",
"aulast",
"and",
"auinit",
"lists",
"from",
"value",
"of",
"authors",
"node",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L462-L505
|
10,066
|
diging/tethne
|
tethne/readers/dfr.py
|
_handle_author
|
def _handle_author(author):
"""
Yields aulast and auinit from an author's full name.
Parameters
----------
author : str or unicode
Author fullname, e.g. "Richard L. Nixon".
Returns
-------
aulast : str
Author surname.
auinit : str
Author first-initial.
"""
lname = author.split(' ')
try:
auinit = lname[0][0]
final = lname[-1].upper()
if final in ['JR.', 'III']:
aulast = lname[-2].upper() + " " + final.strip(".")
else:
aulast = final
except IndexError:
raise ValueError("malformed author name")
return aulast, auinit
|
python
|
def _handle_author(author):
"""
Yields aulast and auinit from an author's full name.
Parameters
----------
author : str or unicode
Author fullname, e.g. "Richard L. Nixon".
Returns
-------
aulast : str
Author surname.
auinit : str
Author first-initial.
"""
lname = author.split(' ')
try:
auinit = lname[0][0]
final = lname[-1].upper()
if final in ['JR.', 'III']:
aulast = lname[-2].upper() + " " + final.strip(".")
else:
aulast = final
except IndexError:
raise ValueError("malformed author name")
return aulast, auinit
|
[
"def",
"_handle_author",
"(",
"author",
")",
":",
"lname",
"=",
"author",
".",
"split",
"(",
"' '",
")",
"try",
":",
"auinit",
"=",
"lname",
"[",
"0",
"]",
"[",
"0",
"]",
"final",
"=",
"lname",
"[",
"-",
"1",
"]",
".",
"upper",
"(",
")",
"if",
"final",
"in",
"[",
"'JR.'",
",",
"'III'",
"]",
":",
"aulast",
"=",
"lname",
"[",
"-",
"2",
"]",
".",
"upper",
"(",
")",
"+",
"\" \"",
"+",
"final",
".",
"strip",
"(",
"\".\"",
")",
"else",
":",
"aulast",
"=",
"final",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"\"malformed author name\"",
")",
"return",
"aulast",
",",
"auinit"
] |
Yields aulast and auinit from an author's full name.
Parameters
----------
author : str or unicode
Author fullname, e.g. "Richard L. Nixon".
Returns
-------
aulast : str
Author surname.
auinit : str
Author first-initial.
|
[
"Yields",
"aulast",
"and",
"auinit",
"from",
"an",
"author",
"s",
"full",
"name",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L507-L536
|
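The surname/initial split used by `_handle_author` (and mapped over each name by `_handle_authors`) can be exercised on its own. A Python 3 sketch of that logic with invented example names; the original's Python 2 ``unicode``/``unidecode`` normalisation is omitted:

.. code-block:: python

    def handle_author(author):
        # Split a full name into (surname, first initial), keeping
        # generational suffixes such as "JR." or "III" attached to the
        # surname, as in the record above.
        parts = author.split(' ')
        try:
            auinit = parts[0][0]
            final = parts[-1].upper()
            if final in ('JR.', 'III'):
                aulast = parts[-2].upper() + ' ' + final.strip('.')
            else:
                aulast = final
        except IndexError:
            raise ValueError("malformed author name")
        return aulast, auinit

    print(handle_author("Richard L. Nixon"))  # ('NIXON', 'R')
    print(handle_author("Sammy Davis Jr."))   # ('DAVIS JR', 'S')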
10,067
|
diging/tethne
|
tethne/readers/dfr.py
|
GramGenerator._get
|
def _get(self, i):
"""
Retrieve data for the ith file in the dataset.
"""
with open(os.path.join(self.path, self.elem, self.files[i]), 'r') as f:
# JSTOR hasn't always produced valid XML.
            contents = re.sub('(&)(?!amp;)', lambda match: '&amp;', f.read())
root = ET.fromstring(contents)
doi = root.attrib['id']
if self.K: # Keys only.
return doi
grams = []
for gram in root.findall(self.elem_xml):
text = unidecode(unicode(gram.text.strip()))
if ( not self.ignore_hash or '#' not in list(text) ):
c = ( text, number(gram.attrib['weight']) )
grams.append(c)
if self.V: # Values only.
return grams
return doi, grams
|
python
|
def _get(self, i):
"""
Retrieve data for the ith file in the dataset.
"""
with open(os.path.join(self.path, self.elem, self.files[i]), 'r') as f:
# JSTOR hasn't always produced valid XML.
            contents = re.sub('(&)(?!amp;)', lambda match: '&amp;', f.read())
root = ET.fromstring(contents)
doi = root.attrib['id']
if self.K: # Keys only.
return doi
grams = []
for gram in root.findall(self.elem_xml):
text = unidecode(unicode(gram.text.strip()))
if ( not self.ignore_hash or '#' not in list(text) ):
c = ( text, number(gram.attrib['weight']) )
grams.append(c)
if self.V: # Values only.
return grams
return doi, grams
|
[
"def",
"_get",
"(",
"self",
",",
"i",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"path",
",",
"self",
".",
"elem",
",",
"self",
".",
"files",
"[",
"i",
"]",
")",
",",
"'r'",
")",
"as",
"f",
":",
"# JSTOR hasn't always produced valid XML.",
"contents",
"=",
"re",
".",
"sub",
"(",
"'(&)(?!amp;)'",
",",
"lambda",
"match",
":",
"'&'",
",",
"f",
".",
"read",
"(",
")",
")",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"contents",
")",
"doi",
"=",
"root",
".",
"attrib",
"[",
"'id'",
"]",
"if",
"self",
".",
"K",
":",
"# Keys only.",
"return",
"doi",
"grams",
"=",
"[",
"]",
"for",
"gram",
"in",
"root",
".",
"findall",
"(",
"self",
".",
"elem_xml",
")",
":",
"text",
"=",
"unidecode",
"(",
"unicode",
"(",
"gram",
".",
"text",
".",
"strip",
"(",
")",
")",
")",
"if",
"(",
"not",
"self",
".",
"ignore_hash",
"or",
"'#'",
"not",
"in",
"list",
"(",
"text",
")",
")",
":",
"c",
"=",
"(",
"text",
",",
"number",
"(",
"gram",
".",
"attrib",
"[",
"'weight'",
"]",
")",
")",
"grams",
".",
"append",
"(",
"c",
")",
"if",
"self",
".",
"V",
":",
"# Values only.",
"return",
"grams",
"return",
"doi",
",",
"grams"
] |
Retrieve data for the ith file in the dataset.
|
[
"Retrieve",
"data",
"for",
"the",
"ith",
"file",
"in",
"the",
"dataset",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/readers/dfr.py#L175-L198
|
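The non-obvious step in `GramGenerator._get` is escaping bare ampersands before handing the document to ElementTree, since some JSTOR/DfR exports are not valid XML. A self-contained Python 3 sketch of that cleanup and the subsequent weight extraction; the inline XML string is a made-up stand-in for a DfR wordcounts file, and ``float()`` stands in for the original's ``number()`` helper:

.. code-block:: python

    import re
    import xml.etree.ElementTree as ET

    # A bare '&' is invalid XML; escape it unless it already begins an
    # '&amp;' entity, then parse as usual.
    raw = ('<article id="10.2307/12345">'
           '<wordcount weight="3">fish & chips</wordcount>'
           '</article>')
    cleaned = re.sub(r'(&)(?!amp;)', '&amp;', raw)

    root = ET.fromstring(cleaned)
    doi = root.attrib['id']
    grams = [(el.text.strip(), float(el.attrib['weight']))
             for el in root.findall('wordcount')]
    print(doi, grams)   # 10.2307/12345 [('fish & chips', 3.0)]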
10,068
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel._generate_corpus
|
def _generate_corpus(self):
"""
Writes a corpus to disk amenable to MALLET topic modeling.
"""
target = self.temp + 'mallet'
paths = write_documents(self.corpus, target, self.featureset_name,
['date', 'title'])
self.corpus_path, self.metapath = paths
self._export_corpus()
|
python
|
def _generate_corpus(self):
"""
Writes a corpus to disk amenable to MALLET topic modeling.
"""
target = self.temp + 'mallet'
paths = write_documents(self.corpus, target, self.featureset_name,
['date', 'title'])
self.corpus_path, self.metapath = paths
self._export_corpus()
|
[
"def",
"_generate_corpus",
"(",
"self",
")",
":",
"target",
"=",
"self",
".",
"temp",
"+",
"'mallet'",
"paths",
"=",
"write_documents",
"(",
"self",
".",
"corpus",
",",
"target",
",",
"self",
".",
"featureset_name",
",",
"[",
"'date'",
",",
"'title'",
"]",
")",
"self",
".",
"corpus_path",
",",
"self",
".",
"metapath",
"=",
"paths",
"self",
".",
"_export_corpus",
"(",
")"
] |
Writes a corpus to disk amenable to MALLET topic modeling.
|
[
"Writes",
"a",
"corpus",
"to",
"disk",
"amenable",
"to",
"MALLET",
"topic",
"modeling",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L151-L161
|
10,069
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel._export_corpus
|
def _export_corpus(self):
"""
Calls MALLET's `import-file` method.
"""
# bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt
# --output mytopic-input.mallet --keep-sequence --remove-stopwords
if not os.path.exists(self.mallet_bin):
raise IOError("MALLET path invalid or non-existent.")
self.input_path = os.path.join(self.temp, "input.mallet")
exit = subprocess.call([
self.mallet_bin,
'import-file',
'--input', self.corpus_path,
'--output', self.input_path,
'--keep-sequence', # Required for LDA.
'--remove-stopwords']) # Probably redundant.
if exit != 0:
msg = "MALLET import-file failed with exit code {0}.".format(exit)
raise RuntimeError(msg)
|
python
|
def _export_corpus(self):
"""
Calls MALLET's `import-file` method.
"""
# bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt
# --output mytopic-input.mallet --keep-sequence --remove-stopwords
if not os.path.exists(self.mallet_bin):
raise IOError("MALLET path invalid or non-existent.")
self.input_path = os.path.join(self.temp, "input.mallet")
exit = subprocess.call([
self.mallet_bin,
'import-file',
'--input', self.corpus_path,
'--output', self.input_path,
'--keep-sequence', # Required for LDA.
'--remove-stopwords']) # Probably redundant.
if exit != 0:
msg = "MALLET import-file failed with exit code {0}.".format(exit)
raise RuntimeError(msg)
|
[
"def",
"_export_corpus",
"(",
"self",
")",
":",
"# bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt",
"# --output mytopic-input.mallet --keep-sequence --remove-stopwords",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"mallet_bin",
")",
":",
"raise",
"IOError",
"(",
"\"MALLET path invalid or non-existent.\"",
")",
"self",
".",
"input_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"temp",
",",
"\"input.mallet\"",
")",
"exit",
"=",
"subprocess",
".",
"call",
"(",
"[",
"self",
".",
"mallet_bin",
",",
"'import-file'",
",",
"'--input'",
",",
"self",
".",
"corpus_path",
",",
"'--output'",
",",
"self",
".",
"input_path",
",",
"'--keep-sequence'",
",",
"# Required for LDA.",
"'--remove-stopwords'",
"]",
")",
"# Probably redundant.",
"if",
"exit",
"!=",
"0",
":",
"msg",
"=",
"\"MALLET import-file failed with exit code {0}.\"",
".",
"format",
"(",
"exit",
")",
"raise",
"RuntimeError",
"(",
"msg",
")"
] |
Calls MALLET's `import-file` method.
|
[
"Calls",
"MALLET",
"s",
"import",
"-",
"file",
"method",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L163-L184
|
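`LDAModel._export_corpus` follows a guard-then-call-then-check pattern around MALLET's ``import-file`` command. A sketch of that pattern with placeholder paths; it assumes MALLET is installed locally, so the example call at the bottom is left commented out:

.. code-block:: python

    import os
    import subprocess

    def import_file(mallet_bin, corpus_path, input_path):
        # Guard, call, check: the same pattern as _export_corpus above.
        # All three paths here are placeholders.
        if not os.path.exists(mallet_bin):
            raise IOError("MALLET path invalid or non-existent.")
        exit_code = subprocess.call([
            mallet_bin, 'import-file',
            '--input', corpus_path,
            '--output', input_path,
            '--keep-sequence',        # Required for LDA.
            '--remove-stopwords'])
        if exit_code != 0:
            raise RuntimeError(
                "MALLET import-file failed with exit code {0}.".format(exit_code))

    # Example (requires a local MALLET install):
    # import_file('/usr/local/bin/mallet', 'mallet_docs.txt', 'input.mallet')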
10,070
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel.run
|
def run(self, **kwargs):
"""
Calls MALLET's `train-topic` method.
"""
#$ bin/mallet train-topics --input mytopic-input.mallet
#> --num-topics 100
#> --output-doc-topics /Users/erickpeirson/doc_top
#> --word-topic-counts-file /Users/erickpeirson/word_top
#> --output-topic-keys /Users/erickpeirson/topic_keys
if not os.path.exists(self.mallet_bin):
raise IOError("MALLET path invalid or non-existent.")
for attr in ['Z', 'max_iter']:
if not hasattr(self, attr):
raise AttributeError('Please set {0}'.format(attr))
self.ll = []
self.num_iters = 0
logger.debug('run() with k={0} for {1} iterations'.format(self.Z, self.max_iter))
prog = re.compile(u'\<([^\)]+)\>')
ll_prog = re.compile(r'(\d+)')
p = subprocess.Popen([
self.mallet_bin,
'train-topics',
'--input', self.input_path,
'--num-topics', unicode(self.Z),
'--num-iterations', unicode(self.max_iter),
'--output-doc-topics', self.dt,
'--word-topic-counts-file', self.wt,
'--output-model', self.om],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Handle output of MALLET in real time.
while p.poll() is None:
l = p.stderr.readline()
# Keep track of LL/topic.
try:
this_ll = float(re.findall(u'([-+]\d+\.\d+)', l)[0])
self.ll.append(this_ll)
except IndexError: # Not every line will match.
pass
# Keep track of modeling progress.
try:
this_iter = float(prog.match(l).groups()[0])
progress = int(100. * this_iter/self.max_iter)
print 'Modeling progress: {0}%.\r'.format(progress),
except AttributeError: # Not every line will match.
pass
self.num_iters += self.max_iter
self.load()
|
python
|
def run(self, **kwargs):
"""
Calls MALLET's `train-topic` method.
"""
#$ bin/mallet train-topics --input mytopic-input.mallet
#> --num-topics 100
#> --output-doc-topics /Users/erickpeirson/doc_top
#> --word-topic-counts-file /Users/erickpeirson/word_top
#> --output-topic-keys /Users/erickpeirson/topic_keys
if not os.path.exists(self.mallet_bin):
raise IOError("MALLET path invalid or non-existent.")
for attr in ['Z', 'max_iter']:
if not hasattr(self, attr):
raise AttributeError('Please set {0}'.format(attr))
self.ll = []
self.num_iters = 0
logger.debug('run() with k={0} for {1} iterations'.format(self.Z, self.max_iter))
prog = re.compile(u'\<([^\)]+)\>')
ll_prog = re.compile(r'(\d+)')
p = subprocess.Popen([
self.mallet_bin,
'train-topics',
'--input', self.input_path,
'--num-topics', unicode(self.Z),
'--num-iterations', unicode(self.max_iter),
'--output-doc-topics', self.dt,
'--word-topic-counts-file', self.wt,
'--output-model', self.om],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Handle output of MALLET in real time.
while p.poll() is None:
l = p.stderr.readline()
# Keep track of LL/topic.
try:
this_ll = float(re.findall(u'([-+]\d+\.\d+)', l)[0])
self.ll.append(this_ll)
except IndexError: # Not every line will match.
pass
# Keep track of modeling progress.
try:
this_iter = float(prog.match(l).groups()[0])
progress = int(100. * this_iter/self.max_iter)
print 'Modeling progress: {0}%.\r'.format(progress),
except AttributeError: # Not every line will match.
pass
self.num_iters += self.max_iter
self.load()
|
[
"def",
"run",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"#$ bin/mallet train-topics --input mytopic-input.mallet",
"#> --num-topics 100",
"#> --output-doc-topics /Users/erickpeirson/doc_top",
"#> --word-topic-counts-file /Users/erickpeirson/word_top",
"#> --output-topic-keys /Users/erickpeirson/topic_keys",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"mallet_bin",
")",
":",
"raise",
"IOError",
"(",
"\"MALLET path invalid or non-existent.\"",
")",
"for",
"attr",
"in",
"[",
"'Z'",
",",
"'max_iter'",
"]",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"attr",
")",
":",
"raise",
"AttributeError",
"(",
"'Please set {0}'",
".",
"format",
"(",
"attr",
")",
")",
"self",
".",
"ll",
"=",
"[",
"]",
"self",
".",
"num_iters",
"=",
"0",
"logger",
".",
"debug",
"(",
"'run() with k={0} for {1} iterations'",
".",
"format",
"(",
"self",
".",
"Z",
",",
"self",
".",
"max_iter",
")",
")",
"prog",
"=",
"re",
".",
"compile",
"(",
"u'\\<([^\\)]+)\\>'",
")",
"ll_prog",
"=",
"re",
".",
"compile",
"(",
"r'(\\d+)'",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"self",
".",
"mallet_bin",
",",
"'train-topics'",
",",
"'--input'",
",",
"self",
".",
"input_path",
",",
"'--num-topics'",
",",
"unicode",
"(",
"self",
".",
"Z",
")",
",",
"'--num-iterations'",
",",
"unicode",
"(",
"self",
".",
"max_iter",
")",
",",
"'--output-doc-topics'",
",",
"self",
".",
"dt",
",",
"'--word-topic-counts-file'",
",",
"self",
".",
"wt",
",",
"'--output-model'",
",",
"self",
".",
"om",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"# Handle output of MALLET in real time.",
"while",
"p",
".",
"poll",
"(",
")",
"is",
"None",
":",
"l",
"=",
"p",
".",
"stderr",
".",
"readline",
"(",
")",
"# Keep track of LL/topic.",
"try",
":",
"this_ll",
"=",
"float",
"(",
"re",
".",
"findall",
"(",
"u'([-+]\\d+\\.\\d+)'",
",",
"l",
")",
"[",
"0",
"]",
")",
"self",
".",
"ll",
".",
"append",
"(",
"this_ll",
")",
"except",
"IndexError",
":",
"# Not every line will match.",
"pass",
"# Keep track of modeling progress.",
"try",
":",
"this_iter",
"=",
"float",
"(",
"prog",
".",
"match",
"(",
"l",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"progress",
"=",
"int",
"(",
"100.",
"*",
"this_iter",
"/",
"self",
".",
"max_iter",
")",
"print",
"'Modeling progress: {0}%.\\r'",
".",
"format",
"(",
"progress",
")",
",",
"except",
"AttributeError",
":",
"# Not every line will match.",
"pass",
"self",
".",
"num_iters",
"+=",
"self",
".",
"max_iter",
"self",
".",
"load",
"(",
")"
] |
Calls MALLET's `train-topic` method.
|
[
"Calls",
"MALLET",
"s",
"train",
"-",
"topic",
"method",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L186-L241
|
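Most of `LDAModel.run` is plumbing around MALLET's ``train-topics`` command; the interesting part is scraping its stderr for the per-token log-likelihood and the iteration counter. A Python 3 sketch of that parsing applied to canned lines written in the same style (the lines themselves are invented, not captured MALLET output):

.. code-block:: python

    import re

    # Invented lines in the style MALLET writes to stderr while training.
    log_lines = [
        "<10> LL/token: -9.1234",
        "<20> LL/token: -8.7654",
        "Total time: 3 seconds",
    ]

    iter_pattern = re.compile(r'\<([^\)]+)\>')
    ll_values = []
    max_iter = 20

    for line in log_lines:
        # Log-likelihood per token, when the line reports one.
        try:
            ll_values.append(float(re.findall(r'([-+]\d+\.\d+)', line)[0]))
        except IndexError:      # Not every line will match.
            pass
        # Iteration number, turned into a crude progress percentage.
        try:
            this_iter = float(iter_pattern.match(line).groups()[0])
            print('Modeling progress: {0}%'.format(int(100. * this_iter / max_iter)))
        except AttributeError:  # Not every line will match.
            pass

    print(ll_values)   # [-9.1234, -8.7654]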
10,071
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel.topics_in
|
def topics_in(self, d, topn=5):
"""
List the top ``topn`` topics in document ``d``.
"""
return self.theta.features[d].top(topn)
|
python
|
def topics_in(self, d, topn=5):
"""
List the top ``topn`` topics in document ``d``.
"""
return self.theta.features[d].top(topn)
|
[
"def",
"topics_in",
"(",
"self",
",",
"d",
",",
"topn",
"=",
"5",
")",
":",
"return",
"self",
".",
"theta",
".",
"features",
"[",
"d",
"]",
".",
"top",
"(",
"topn",
")"
] |
List the top ``topn`` topics in document ``d``.
|
[
"List",
"the",
"top",
"topn",
"topics",
"in",
"document",
"d",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L307-L311
|
10,072
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel.list_topic
|
def list_topic(self, k, Nwords=10):
"""
List the top ``topn`` words for topic ``k``.
Examples
--------
.. code-block:: python
>>> model.list_topic(1, Nwords=5)
[ 'opposed', 'terminates', 'trichinosis', 'cistus', 'acaule' ]
"""
return [(self.vocabulary[w], p) for w, p
in self.phi.features[k].top(Nwords)]
|
python
|
def list_topic(self, k, Nwords=10):
"""
List the top ``topn`` words for topic ``k``.
Examples
--------
.. code-block:: python
>>> model.list_topic(1, Nwords=5)
[ 'opposed', 'terminates', 'trichinosis', 'cistus', 'acaule' ]
"""
return [(self.vocabulary[w], p) for w, p
in self.phi.features[k].top(Nwords)]
|
[
"def",
"list_topic",
"(",
"self",
",",
"k",
",",
"Nwords",
"=",
"10",
")",
":",
"return",
"[",
"(",
"self",
".",
"vocabulary",
"[",
"w",
"]",
",",
"p",
")",
"for",
"w",
",",
"p",
"in",
"self",
".",
"phi",
".",
"features",
"[",
"k",
"]",
".",
"top",
"(",
"Nwords",
")",
"]"
] |
List the top ``topn`` words for topic ``k``.
Examples
--------
.. code-block:: python
>>> model.list_topic(1, Nwords=5)
[ 'opposed', 'terminates', 'trichinosis', 'cistus', 'acaule' ]
|
[
"List",
"the",
"top",
"topn",
"words",
"for",
"topic",
"k",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L313-L329
|
10,073
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel.list_topics
|
def list_topics(self, Nwords=10):
"""
List the top ``Nwords`` words for each topic.
"""
return [(k, self.list_topic(k, Nwords)) for k in xrange(len(self.phi))]
|
python
|
def list_topics(self, Nwords=10):
"""
List the top ``Nwords`` words for each topic.
"""
return [(k, self.list_topic(k, Nwords)) for k in xrange(len(self.phi))]
|
[
"def",
"list_topics",
"(",
"self",
",",
"Nwords",
"=",
"10",
")",
":",
"return",
"[",
"(",
"k",
",",
"self",
".",
"list_topic",
"(",
"k",
",",
"Nwords",
")",
")",
"for",
"k",
"in",
"xrange",
"(",
"len",
"(",
"self",
".",
"phi",
")",
")",
"]"
] |
List the top ``Nwords`` words for each topic.
|
[
"List",
"the",
"top",
"Nwords",
"words",
"for",
"each",
"topic",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L331-L335
|
10,074
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel.print_topics
|
def print_topics(self, Nwords=10):
"""
Print the top ``Nwords`` words for each topic.
"""
print('Topic\tTop %i words' % Nwords)
for k, words in self.list_topics(Nwords):
print(unicode(k).ljust(3) + '\t' + ' '.join(list(zip(*words))[0]))
|
python
|
def print_topics(self, Nwords=10):
"""
Print the top ``Nwords`` words for each topic.
"""
print('Topic\tTop %i words' % Nwords)
for k, words in self.list_topics(Nwords):
print(unicode(k).ljust(3) + '\t' + ' '.join(list(zip(*words))[0]))
|
[
"def",
"print_topics",
"(",
"self",
",",
"Nwords",
"=",
"10",
")",
":",
"print",
"(",
"'Topic\\tTop %i words'",
"%",
"Nwords",
")",
"for",
"k",
",",
"words",
"in",
"self",
".",
"list_topics",
"(",
"Nwords",
")",
":",
"print",
"(",
"unicode",
"(",
"k",
")",
".",
"ljust",
"(",
"3",
")",
"+",
"'\\t'",
"+",
"' '",
".",
"join",
"(",
"list",
"(",
"zip",
"(",
"*",
"words",
")",
")",
"[",
"0",
"]",
")",
")"
] |
Print the top ``Nwords`` words for each topic.
|
[
"Print",
"the",
"top",
"Nwords",
"words",
"for",
"each",
"topic",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L338-L344
|
10,075
|
diging/tethne
|
tethne/model/corpus/mallet.py
|
LDAModel.topic_over_time
|
def topic_over_time(self, k, mode='counts', slice_kwargs={}):
"""
Calculate the representation of topic ``k`` in the corpus over time.
"""
return self.corpus.feature_distribution('topics', k, mode=mode,
**slice_kwargs)
|
python
|
def topic_over_time(self, k, mode='counts', slice_kwargs={}):
"""
Calculate the representation of topic ``k`` in the corpus over time.
"""
return self.corpus.feature_distribution('topics', k, mode=mode,
**slice_kwargs)
|
[
"def",
"topic_over_time",
"(",
"self",
",",
"k",
",",
"mode",
"=",
"'counts'",
",",
"slice_kwargs",
"=",
"{",
"}",
")",
":",
"return",
"self",
".",
"corpus",
".",
"feature_distribution",
"(",
"'topics'",
",",
"k",
",",
"mode",
"=",
"mode",
",",
"*",
"*",
"slice_kwargs",
")"
] |
Calculate the representation of topic ``k`` in the corpus over time.
|
[
"Calculate",
"the",
"representation",
"of",
"topic",
"k",
"in",
"the",
"corpus",
"over",
"time",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/model/corpus/mallet.py#L347-L353
|
10,076
|
diging/tethne
|
tethne/classes/corpus.py
|
Corpus.distribution
|
def distribution(self, **slice_kwargs):
"""
Calculates the number of papers in each slice, as defined by
``slice_kwargs``.
Examples
--------
.. code-block:: python
>>> corpus.distribution(step_size=1, window_size=1)
[5, 5]
Parameters
----------
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
"""
values = []
keys = []
for key, size in self.slice(count_only=True, **slice_kwargs):
values.append(size)
keys.append(key)
return keys, values
|
python
|
def distribution(self, **slice_kwargs):
"""
Calculates the number of papers in each slice, as defined by
``slice_kwargs``.
Examples
--------
.. code-block:: python
>>> corpus.distribution(step_size=1, window_size=1)
[5, 5]
Parameters
----------
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
"""
values = []
keys = []
for key, size in self.slice(count_only=True, **slice_kwargs):
values.append(size)
keys.append(key)
return keys, values
|
[
"def",
"distribution",
"(",
"self",
",",
"*",
"*",
"slice_kwargs",
")",
":",
"values",
"=",
"[",
"]",
"keys",
"=",
"[",
"]",
"for",
"key",
",",
"size",
"in",
"self",
".",
"slice",
"(",
"count_only",
"=",
"True",
",",
"*",
"*",
"slice_kwargs",
")",
":",
"values",
".",
"append",
"(",
"size",
")",
"keys",
".",
"append",
"(",
"key",
")",
"return",
"keys",
",",
"values"
] |
Calculates the number of papers in each slice, as defined by
``slice_kwargs``.
Examples
--------
.. code-block:: python
>>> corpus.distribution(step_size=1, window_size=1)
[5, 5]
Parameters
----------
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
|
[
"Calculates",
"the",
"number",
"of",
"papers",
"in",
"each",
"slice",
"as",
"defined",
"by",
"slice_kwargs",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L595-L622
|
10,077
|
diging/tethne
|
tethne/classes/corpus.py
|
Corpus.feature_distribution
|
def feature_distribution(self, featureset_name, feature, mode='counts',
**slice_kwargs):
"""
Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
"""
values = []
keys = []
fset = self.features[featureset_name]
for key, papers in self.slice(subcorpus=False, **slice_kwargs):
allfeatures = [v for v
in chain(*[fset.features[self._generate_index(p)]
for p in papers
if self._generate_index(p)
in fset.features])]
if len(allfeatures) < 1:
keys.append(key)
values.append(0.)
continue
count = 0.
for elem, v in allfeatures:
if elem != feature:
continue
if mode == 'counts':
count += v
else:
count += 1.
values.append(count)
keys.append(key)
return keys, values
|
python
|
def feature_distribution(self, featureset_name, feature, mode='counts',
**slice_kwargs):
"""
Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
"""
values = []
keys = []
fset = self.features[featureset_name]
for key, papers in self.slice(subcorpus=False, **slice_kwargs):
allfeatures = [v for v
in chain(*[fset.features[self._generate_index(p)]
for p in papers
if self._generate_index(p)
in fset.features])]
if len(allfeatures) < 1:
keys.append(key)
values.append(0.)
continue
count = 0.
for elem, v in allfeatures:
if elem != feature:
continue
if mode == 'counts':
count += v
else:
count += 1.
values.append(count)
keys.append(key)
return keys, values
|
[
"def",
"feature_distribution",
"(",
"self",
",",
"featureset_name",
",",
"feature",
",",
"mode",
"=",
"'counts'",
",",
"*",
"*",
"slice_kwargs",
")",
":",
"values",
"=",
"[",
"]",
"keys",
"=",
"[",
"]",
"fset",
"=",
"self",
".",
"features",
"[",
"featureset_name",
"]",
"for",
"key",
",",
"papers",
"in",
"self",
".",
"slice",
"(",
"subcorpus",
"=",
"False",
",",
"*",
"*",
"slice_kwargs",
")",
":",
"allfeatures",
"=",
"[",
"v",
"for",
"v",
"in",
"chain",
"(",
"*",
"[",
"fset",
".",
"features",
"[",
"self",
".",
"_generate_index",
"(",
"p",
")",
"]",
"for",
"p",
"in",
"papers",
"if",
"self",
".",
"_generate_index",
"(",
"p",
")",
"in",
"fset",
".",
"features",
"]",
")",
"]",
"if",
"len",
"(",
"allfeatures",
")",
"<",
"1",
":",
"keys",
".",
"append",
"(",
"key",
")",
"values",
".",
"append",
"(",
"0.",
")",
"continue",
"count",
"=",
"0.",
"for",
"elem",
",",
"v",
"in",
"allfeatures",
":",
"if",
"elem",
"!=",
"feature",
":",
"continue",
"if",
"mode",
"==",
"'counts'",
":",
"count",
"+=",
"v",
"else",
":",
"count",
"+=",
"1.",
"values",
".",
"append",
"(",
"count",
")",
"keys",
".",
"append",
"(",
"key",
")",
"return",
"keys",
",",
"values"
] |
Calculates the distribution of a feature across slices of the corpus.
Examples
--------
.. code-block:: python
>>> corpus.feature_distribution(featureset_name='citations', \
... feature='DOLE RJ 1965 CELL', \
... step_size=1, window_size=1)
[2, 15, 25, 1]
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
feature : str
Name of the specific feature of interest. E.g. if
``featureset_name='citations'``, then ``feature`` could be
something like ``'DOLE RJ 1965 CELL'``.
mode : str
(default: ``'counts'``) If set to ``'counts'``, values will be the
sum of all count values for the feature in each slice. If set to
``'documentCounts'``, values will be the number of papers in which
the feature occurs in each slice.
slice_kwargs : kwargs
Keyword arguments to be passed to :meth:`.Corpus.slice`\.
Returns
-------
list
|
[
"Calculates",
"the",
"distribution",
"of",
"a",
"feature",
"across",
"slices",
"of",
"the",
"corpus",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L624-L685
|
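The core of `Corpus.feature_distribution` is a per-slice aggregation: sum a feature's count values (``mode='counts'``) or count the papers containing it (``'documentCounts'``). A standalone sketch over a toy year-to-papers mapping; the data and the flattened layout are invented for illustration and skip the ``Corpus.slice`` machinery:

.. code-block:: python

    # Invented per-paper feature counts, grouped by publication year.
    papers_by_year = {
        1965: [[('DOLE RJ 1965 CELL', 2), ('OTHER REF', 1)]],
        1966: [[('DOLE RJ 1965 CELL', 1)], [('DOLE RJ 1965 CELL', 3)]],
        1967: [[('OTHER REF', 4)]],
    }

    def feature_distribution(feature, mode='counts'):
        keys, values = [], []
        for year in sorted(papers_by_year):
            count = 0.
            for paper in papers_by_year[year]:
                for elem, v in paper:
                    if elem != feature:
                        continue
                    # Sum raw counts, or count one per containing paper.
                    count += v if mode == 'counts' else 1.
            keys.append(year)
            values.append(count)
        return keys, values

    print(feature_distribution('DOLE RJ 1965 CELL'))
    # ([1965, 1966, 1967], [2.0, 4.0, 0.0])
    print(feature_distribution('DOLE RJ 1965 CELL', mode='documentCounts'))
    # ([1965, 1966, 1967], [1.0, 2.0, 0.0])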
10,078
|
diging/tethne
|
tethne/classes/corpus.py
|
Corpus.top_features
|
def top_features(self, featureset_name, topn=20, by='counts',
perslice=False, slice_kwargs={}):
"""
Retrieves the top ``topn`` most numerous features in the corpus.
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
topn : int
(default: ``20``) Number of features to return.
by : str
(default: ``'counts'``) If ``'counts'``, uses the sum of feature
count values to rank features. If ``'documentCounts'``, uses the
number of papers in which features occur.
perslice : bool
(default: False) If True, retrieves the top ``topn`` features in
each slice.
slice_kwargs : kwargs
If ``perslice=True``, these keyword arguments are passed to
:meth:`.Corpus.slice`\.
"""
if perslice:
return [(k, subcorpus.features[featureset_name].top(topn, by=by))
for k, subcorpus in self.slice(**slice_kwargs)]
return self.features[featureset_name].top(topn, by=by)
|
python
|
def top_features(self, featureset_name, topn=20, by='counts',
perslice=False, slice_kwargs={}):
"""
Retrieves the top ``topn`` most numerous features in the corpus.
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
topn : int
(default: ``20``) Number of features to return.
by : str
(default: ``'counts'``) If ``'counts'``, uses the sum of feature
count values to rank features. If ``'documentCounts'``, uses the
number of papers in which features occur.
perslice : bool
(default: False) If True, retrieves the top ``topn`` features in
each slice.
slice_kwargs : kwargs
If ``perslice=True``, these keyword arguments are passed to
:meth:`.Corpus.slice`\.
"""
if perslice:
return [(k, subcorpus.features[featureset_name].top(topn, by=by))
for k, subcorpus in self.slice(**slice_kwargs)]
return self.features[featureset_name].top(topn, by=by)
|
[
"def",
"top_features",
"(",
"self",
",",
"featureset_name",
",",
"topn",
"=",
"20",
",",
"by",
"=",
"'counts'",
",",
"perslice",
"=",
"False",
",",
"slice_kwargs",
"=",
"{",
"}",
")",
":",
"if",
"perslice",
":",
"return",
"[",
"(",
"k",
",",
"subcorpus",
".",
"features",
"[",
"featureset_name",
"]",
".",
"top",
"(",
"topn",
",",
"by",
"=",
"by",
")",
")",
"for",
"k",
",",
"subcorpus",
"in",
"self",
".",
"slice",
"(",
"*",
"*",
"slice_kwargs",
")",
"]",
"return",
"self",
".",
"features",
"[",
"featureset_name",
"]",
".",
"top",
"(",
"topn",
",",
"by",
"=",
"by",
")"
] |
Retrieves the top ``topn`` most numerous features in the corpus.
Parameters
----------
featureset_name : str
Name of a :class:`.FeatureSet` in the :class:`.Corpus`\.
topn : int
(default: ``20``) Number of features to return.
by : str
(default: ``'counts'``) If ``'counts'``, uses the sum of feature
count values to rank features. If ``'documentCounts'``, uses the
number of papers in which features occur.
perslice : bool
(default: False) If True, retrieves the top ``topn`` features in
each slice.
slice_kwargs : kwargs
If ``perslice=True``, these keyword arguments are passed to
:meth:`.Corpus.slice`\.
|
[
"Retrieves",
"the",
"top",
"topn",
"most",
"numerous",
"features",
"in",
"the",
"corpus",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L687-L713
|
10,079
|
diging/tethne
|
tethne/analyze/corpus.py
|
feature_burstness
|
def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True,
s=1.1, gamma=1., **slice_kwargs):
"""
Estimate burstness profile for a feature over the ``'date'`` axis.
Parameters
----------
corpus : :class:`.Corpus`
feature : str
Name of featureset in ``corpus``. E.g. ``'citations'``.
findex : int
Index of ``feature`` in ``corpus``.
k : int
(default: 5) Number of burst states.
normalize : bool
        (default: True) If True, burstness is expressed relative to the highest
possible state (``k-1``). Otherwise, states themselves are returned.
kwargs : kwargs
Parameters for burstness automaton HMM.
"""
if featureset_name not in corpus.features:
corpus.index_feature(featureset_name)
if 'date' not in corpus.indices:
corpus.index('date')
# Get time-intervals between occurrences.
dates = [min(corpus.indices['date'].keys()) - 1] # Pad start.
X_ = [1.]
years, values = corpus.feature_distribution(featureset_name, feature)
for year, N in izip(years, values):
if N == 0:
continue
if N > 1:
if year == dates[-1] + 1:
for n in xrange(int(N)):
X_.append(1./N)
dates.append(year)
else:
X_.append(float(year - dates[-1]))
dates.append(year)
for n in xrange(int(N) - 1):
X_.append(1./(N - 1))
dates.append(year)
else:
X_.append(float(year - dates[-1]))
dates.append(year)
# Get optimum state sequence.
st = _forward(map(lambda x: x*100, X_), s=s, gamma=gamma, k=k)
# Bin by date.
A = defaultdict(list)
for i in xrange(len(X_)):
A[dates[i]].append(st[i])
# Normalize.
if normalize:
A = {key: mean(values)/k for key, values in A.items()}
else:
A = {key: mean(values) for key, values in A.items()}
D = sorted(A.keys())
return D[1:], [A[d] for d in D[1:]]
|
python
|
def feature_burstness(corpus, featureset_name, feature, k=5, normalize=True,
s=1.1, gamma=1., **slice_kwargs):
"""
Estimate burstness profile for a feature over the ``'date'`` axis.
Parameters
----------
corpus : :class:`.Corpus`
feature : str
Name of featureset in ``corpus``. E.g. ``'citations'``.
findex : int
Index of ``feature`` in ``corpus``.
k : int
(default: 5) Number of burst states.
normalize : bool
        (default: True) If True, burstness is expressed relative to the highest
possible state (``k-1``). Otherwise, states themselves are returned.
kwargs : kwargs
Parameters for burstness automaton HMM.
"""
if featureset_name not in corpus.features:
corpus.index_feature(featureset_name)
if 'date' not in corpus.indices:
corpus.index('date')
# Get time-intervals between occurrences.
dates = [min(corpus.indices['date'].keys()) - 1] # Pad start.
X_ = [1.]
years, values = corpus.feature_distribution(featureset_name, feature)
for year, N in izip(years, values):
if N == 0:
continue
if N > 1:
if year == dates[-1] + 1:
for n in xrange(int(N)):
X_.append(1./N)
dates.append(year)
else:
X_.append(float(year - dates[-1]))
dates.append(year)
for n in xrange(int(N) - 1):
X_.append(1./(N - 1))
dates.append(year)
else:
X_.append(float(year - dates[-1]))
dates.append(year)
# Get optimum state sequence.
st = _forward(map(lambda x: x*100, X_), s=s, gamma=gamma, k=k)
# Bin by date.
A = defaultdict(list)
for i in xrange(len(X_)):
A[dates[i]].append(st[i])
# Normalize.
if normalize:
A = {key: mean(values)/k for key, values in A.items()}
else:
A = {key: mean(values) for key, values in A.items()}
D = sorted(A.keys())
return D[1:], [A[d] for d in D[1:]]
|
[
"def",
"feature_burstness",
"(",
"corpus",
",",
"featureset_name",
",",
"feature",
",",
"k",
"=",
"5",
",",
"normalize",
"=",
"True",
",",
"s",
"=",
"1.1",
",",
"gamma",
"=",
"1.",
",",
"*",
"*",
"slice_kwargs",
")",
":",
"if",
"featureset_name",
"not",
"in",
"corpus",
".",
"features",
":",
"corpus",
".",
"index_feature",
"(",
"featureset_name",
")",
"if",
"'date'",
"not",
"in",
"corpus",
".",
"indices",
":",
"corpus",
".",
"index",
"(",
"'date'",
")",
"# Get time-intervals between occurrences.",
"dates",
"=",
"[",
"min",
"(",
"corpus",
".",
"indices",
"[",
"'date'",
"]",
".",
"keys",
"(",
")",
")",
"-",
"1",
"]",
"# Pad start.",
"X_",
"=",
"[",
"1.",
"]",
"years",
",",
"values",
"=",
"corpus",
".",
"feature_distribution",
"(",
"featureset_name",
",",
"feature",
")",
"for",
"year",
",",
"N",
"in",
"izip",
"(",
"years",
",",
"values",
")",
":",
"if",
"N",
"==",
"0",
":",
"continue",
"if",
"N",
">",
"1",
":",
"if",
"year",
"==",
"dates",
"[",
"-",
"1",
"]",
"+",
"1",
":",
"for",
"n",
"in",
"xrange",
"(",
"int",
"(",
"N",
")",
")",
":",
"X_",
".",
"append",
"(",
"1.",
"/",
"N",
")",
"dates",
".",
"append",
"(",
"year",
")",
"else",
":",
"X_",
".",
"append",
"(",
"float",
"(",
"year",
"-",
"dates",
"[",
"-",
"1",
"]",
")",
")",
"dates",
".",
"append",
"(",
"year",
")",
"for",
"n",
"in",
"xrange",
"(",
"int",
"(",
"N",
")",
"-",
"1",
")",
":",
"X_",
".",
"append",
"(",
"1.",
"/",
"(",
"N",
"-",
"1",
")",
")",
"dates",
".",
"append",
"(",
"year",
")",
"else",
":",
"X_",
".",
"append",
"(",
"float",
"(",
"year",
"-",
"dates",
"[",
"-",
"1",
"]",
")",
")",
"dates",
".",
"append",
"(",
"year",
")",
"# Get optimum state sequence.",
"st",
"=",
"_forward",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
"*",
"100",
",",
"X_",
")",
",",
"s",
"=",
"s",
",",
"gamma",
"=",
"gamma",
",",
"k",
"=",
"k",
")",
"# Bin by date.",
"A",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"X_",
")",
")",
":",
"A",
"[",
"dates",
"[",
"i",
"]",
"]",
".",
"append",
"(",
"st",
"[",
"i",
"]",
")",
"# Normalize.",
"if",
"normalize",
":",
"A",
"=",
"{",
"key",
":",
"mean",
"(",
"values",
")",
"/",
"k",
"for",
"key",
",",
"values",
"in",
"A",
".",
"items",
"(",
")",
"}",
"else",
":",
"A",
"=",
"{",
"key",
":",
"mean",
"(",
"values",
")",
"for",
"key",
",",
"values",
"in",
"A",
".",
"items",
"(",
")",
"}",
"D",
"=",
"sorted",
"(",
"A",
".",
"keys",
"(",
")",
")",
"return",
"D",
"[",
"1",
":",
"]",
",",
"[",
"A",
"[",
"d",
"]",
"for",
"d",
"in",
"D",
"[",
"1",
":",
"]",
"]"
] |
Estimate burstness profile for a feature over the ``'date'`` axis.
Parameters
----------
corpus : :class:`.Corpus`
feature : str
Name of featureset in ``corpus``. E.g. ``'citations'``.
findex : int
Index of ``feature`` in ``corpus``.
k : int
(default: 5) Number of burst states.
normalize : bool
        (default: True) If True, burstness is expressed relative to the highest
possible state (``k-1``). Otherwise, states themselves are returned.
kwargs : kwargs
Parameters for burstness automaton HMM.
|
[
"Estimate",
"burstness",
"profile",
"for",
"a",
"feature",
"over",
"the",
"date",
"axis",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/corpus.py#L157-L224
|
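Before handing anything to the burst automaton, `feature_burstness` converts a per-year count series into a sequence of inter-event gaps (``X_``) with a padded start year. The sketch below reproduces just that preprocessing on toy counts and stops before ``_forward``, the HMM step, which is not shown in this record:

.. code-block:: python

    # Build the inter-event gap sequence fed to the burst automaton.
    # `years` and `values` stand in for Corpus.feature_distribution output.
    years = [1990, 1991, 1993]
    values = [1, 2, 1]

    dates = [min(years) - 1]   # Pad start.
    X_ = [1.]
    for year, N in zip(years, values):
        if N == 0:
            continue
        if N > 1:
            if year == dates[-1] + 1:
                for _ in range(int(N)):
                    X_.append(1. / N)
                    dates.append(year)
            else:
                X_.append(float(year - dates[-1]))
                dates.append(year)
                for _ in range(int(N) - 1):
                    X_.append(1. / (N - 1))
                    dates.append(year)
        else:
            X_.append(float(year - dates[-1]))
            dates.append(year)

    print(dates)   # [1989, 1990, 1991, 1991, 1993]
    print(X_)      # [1.0, 1.0, 0.5, 0.5, 2.0]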
10,080
|
diging/tethne
|
tethne/networks/papers.py
|
cocitation
|
def cocitation(corpus, min_weight=1, edge_attrs=['ayjid', 'date'], **kwargs):
"""
Generate a cocitation network.
A **cocitation network** is a network in which vertices are papers, and
edges indicate that two papers were cited by the same third paper.
`CiteSpace
<http://cluster.cis.drexel.edu/~cchen/citespace/doc/jasist2006.pdf>`_
is a popular desktop application for co-citation analysis, and you can read
about the theory behind it
`here <http://cluster.cis.drexel.edu/~cchen/citespace/>`_.
"""
return cooccurrence(corpus, 'citations', min_weight=min_weight,
edge_attrs=edge_attrs, **kwargs)
|
python
|
def cocitation(corpus, min_weight=1, edge_attrs=['ayjid', 'date'], **kwargs):
"""
Generate a cocitation network.
A **cocitation network** is a network in which vertices are papers, and
edges indicate that two papers were cited by the same third paper.
`CiteSpace
<http://cluster.cis.drexel.edu/~cchen/citespace/doc/jasist2006.pdf>`_
is a popular desktop application for co-citation analysis, and you can read
about the theory behind it
`here <http://cluster.cis.drexel.edu/~cchen/citespace/>`_.
"""
return cooccurrence(corpus, 'citations', min_weight=min_weight,
edge_attrs=edge_attrs, **kwargs)
|
[
"def",
"cocitation",
"(",
"corpus",
",",
"min_weight",
"=",
"1",
",",
"edge_attrs",
"=",
"[",
"'ayjid'",
",",
"'date'",
"]",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"cooccurrence",
"(",
"corpus",
",",
"'citations'",
",",
"min_weight",
"=",
"min_weight",
",",
"edge_attrs",
"=",
"edge_attrs",
",",
"*",
"*",
"kwargs",
")"
] |
Generate a cocitation network.
A **cocitation network** is a network in which vertices are papers, and
edges indicate that two papers were cited by the same third paper.
`CiteSpace
<http://cluster.cis.drexel.edu/~cchen/citespace/doc/jasist2006.pdf>`_
is a popular desktop application for co-citation analysis, and you can read
about the theory behind it
`here <http://cluster.cis.drexel.edu/~cchen/citespace/>`_.
|
[
"Generate",
"a",
"cocitation",
"network",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/papers.py#L43-L56
|
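`cocitation` is a thin wrapper around ``cooccurrence`` over the ``'citations'`` featureset; the underlying computation is counting unordered pairs of works cited by the same paper. A stdlib-only sketch of that pair counting with invented reference lists, thresholded by ``min_weight`` (the full implementation additionally builds a ``networkx`` graph with edge attributes):

.. code-block:: python

    from collections import Counter
    from itertools import combinations

    # Invented reference lists: paper -> works it cites.
    citations = {
        'p1': ['DOLE RJ 1965 CELL', 'SMITH A 1970 NATURE', 'JONES B 1972 SCIENCE'],
        'p2': ['DOLE RJ 1965 CELL', 'SMITH A 1970 NATURE'],
        'p3': ['DOLE RJ 1965 CELL', 'JONES B 1972 SCIENCE'],
    }

    pairs = Counter()
    for refs in citations.values():
        # Each unordered pair of works cited together is one cocitation event.
        for combo in combinations(sorted(set(refs)), 2):
            pairs[combo] += 1

    min_weight = 2
    edges = {pair: w for pair, w in pairs.items() if w >= min_weight}
    print(edges)
    # {('DOLE RJ 1965 CELL', 'JONES B 1972 SCIENCE'): 2,
    #  ('DOLE RJ 1965 CELL', 'SMITH A 1970 NATURE'): 2}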
10,081
|
diging/tethne
|
tethne/classes/feature.py
|
StructuredFeature.context_chunk
|
def context_chunk(self, context, j):
"""
Retrieve the tokens in the ``j``th chunk of context ``context``.
Parameters
----------
context : str
Context name.
j : int
Index of a context chunk.
Returns
-------
chunk : list
List of tokens in the selected chunk.
"""
N_chunks = len(self.contexts[context])
start = self.contexts[context][j]
if j == N_chunks - 1:
end = len(self)
else:
end = self.contexts[context][j+1]
return [self[i] for i in xrange(start, end)]
|
python
|
def context_chunk(self, context, j):
"""
Retrieve the tokens in the ``j``th chunk of context ``context``.
Parameters
----------
context : str
Context name.
j : int
Index of a context chunk.
Returns
-------
chunk : list
List of tokens in the selected chunk.
"""
N_chunks = len(self.contexts[context])
start = self.contexts[context][j]
if j == N_chunks - 1:
end = len(self)
else:
end = self.contexts[context][j+1]
return [self[i] for i in xrange(start, end)]
|
[
"def",
"context_chunk",
"(",
"self",
",",
"context",
",",
"j",
")",
":",
"N_chunks",
"=",
"len",
"(",
"self",
".",
"contexts",
"[",
"context",
"]",
")",
"start",
"=",
"self",
".",
"contexts",
"[",
"context",
"]",
"[",
"j",
"]",
"if",
"j",
"==",
"N_chunks",
"-",
"1",
":",
"end",
"=",
"len",
"(",
"self",
")",
"else",
":",
"end",
"=",
"self",
".",
"contexts",
"[",
"context",
"]",
"[",
"j",
"+",
"1",
"]",
"return",
"[",
"self",
"[",
"i",
"]",
"for",
"i",
"in",
"xrange",
"(",
"start",
",",
"end",
")",
"]"
] |
Retrieve the tokens in the ``j``th chunk of context ``context``.
Parameters
----------
context : str
Context name.
j : int
Index of a context chunk.
Returns
-------
chunk : list
List of tokens in the selected chunk.
|
[
"Retrieve",
"the",
"tokens",
"in",
"the",
"j",
"th",
"chunk",
"of",
"context",
"context",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/feature.py#L108-L131
|
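`StructuredFeature.context_chunk` turns a list of chunk start indices into a slice, letting the final chunk run to the end of the token sequence. A standalone sketch with an invented token list and a hypothetical ``'sentence'`` context:

.. code-block:: python

    tokens = ['the', 'cat', 'sat', '.', 'dogs', 'bark', '.']
    contexts = {'sentence': [0, 4]}   # Index at which each chunk begins.

    def context_chunk(context, j):
        # Slice from this chunk's start index up to the next chunk's start,
        # or to the end of the token list for the final chunk.
        starts = contexts[context]
        start = starts[j]
        end = len(tokens) if j == len(starts) - 1 else starts[j + 1]
        return tokens[start:end]

    print(context_chunk('sentence', 0))   # ['the', 'cat', 'sat', '.']
    print(context_chunk('sentence', 1))   # ['dogs', 'bark', '.']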
10,082
|
diging/tethne
|
tethne/classes/feature.py
|
StructuredFeature.add_context
|
def add_context(self, name, indices, level=None):
"""
Add a new context level to the hierarchy.
By default, new contexts are added to the lowest level of the hierarchy.
To insert the context elsewhere in the hierarchy, use the ``level``
argument. For example, ``level=0`` would insert the context at the
highest level of the hierarchy.
Parameters
----------
name : str
indices : list
Token indices at which each chunk in the context begins.
level : int
Level in the hierarchy at which to insert the context. By default,
inserts context at the lowest level of the hierarchy
"""
self._validate_context((name, indices))
if level is None:
level = len(self.contexts_ranked)
self.contexts_ranked.insert(level, name)
self.contexts[name] = indices
|
python
|
def add_context(self, name, indices, level=None):
"""
Add a new context level to the hierarchy.
By default, new contexts are added to the lowest level of the hierarchy.
To insert the context elsewhere in the hierarchy, use the ``level``
argument. For example, ``level=0`` would insert the context at the
highest level of the hierarchy.
Parameters
----------
name : str
indices : list
Token indices at which each chunk in the context begins.
level : int
Level in the hierarchy at which to insert the context. By default,
inserts context at the lowest level of the hierarchy
"""
self._validate_context((name, indices))
if level is None:
level = len(self.contexts_ranked)
self.contexts_ranked.insert(level, name)
self.contexts[name] = indices
|
[
"def",
"add_context",
"(",
"self",
",",
"name",
",",
"indices",
",",
"level",
"=",
"None",
")",
":",
"self",
".",
"_validate_context",
"(",
"(",
"name",
",",
"indices",
")",
")",
"if",
"level",
"is",
"None",
":",
"level",
"=",
"len",
"(",
"self",
".",
"contexts_ranked",
")",
"self",
".",
"contexts_ranked",
".",
"insert",
"(",
"level",
",",
"name",
")",
"self",
".",
"contexts",
"[",
"name",
"]",
"=",
"indices"
] |
Add a new context level to the hierarchy.
By default, new contexts are added to the lowest level of the hierarchy.
To insert the context elsewhere in the hierarchy, use the ``level``
argument. For example, ``level=0`` would insert the context at the
highest level of the hierarchy.
Parameters
----------
name : str
indices : list
Token indices at which each chunk in the context begins.
level : int
Level in the hierarchy at which to insert the context. By default,
inserts context at the lowest level of the hierarchy
|
[
"Add",
"a",
"new",
"context",
"level",
"to",
"the",
"hierarchy",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/feature.py#L170-L195
|
10,083
|
diging/tethne
|
tethne/classes/graphcollection.py
|
GraphCollection.index
|
def index(self, name, graph):
"""
Index any new nodes in `graph`, and relabel the nodes in `graph` using
the index.
Parameters
----------
name : hashable
Unique name used to identify the `graph`.
graph : networkx.Graph
Returns
-------
indexed_graph : networkx.Graph
"""
nodes = graph.nodes()
# Index new nodes.
new_nodes = list(set(nodes) - set(self.node_index.values()))
start = max(len(self.node_index) - 1, max(self.node_index.keys()))
for i in xrange(start, start + len(new_nodes)):
n = new_nodes.pop()
self.node_index[i], self.node_lookup[n] = n, i
self.graphs_containing[n].append(name)
# Relabel nodes in `graph`.
new_labels = {n: self.node_lookup[n] for n in nodes}
indexed_graph = nx.relabel.relabel_nodes(graph, new_labels, copy=True)
return indexed_graph
|
python
|
def index(self, name, graph):
"""
Index any new nodes in `graph`, and relabel the nodes in `graph` using
the index.
Parameters
----------
name : hashable
Unique name used to identify the `graph`.
graph : networkx.Graph
Returns
-------
indexed_graph : networkx.Graph
"""
nodes = graph.nodes()
# Index new nodes.
new_nodes = list(set(nodes) - set(self.node_index.values()))
start = max(len(self.node_index) - 1, max(self.node_index.keys()))
for i in xrange(start, start + len(new_nodes)):
n = new_nodes.pop()
self.node_index[i], self.node_lookup[n] = n, i
self.graphs_containing[n].append(name)
# Relabel nodes in `graph`.
new_labels = {n: self.node_lookup[n] for n in nodes}
indexed_graph = nx.relabel.relabel_nodes(graph, new_labels, copy=True)
return indexed_graph
|
[
"def",
"index",
"(",
"self",
",",
"name",
",",
"graph",
")",
":",
"nodes",
"=",
"graph",
".",
"nodes",
"(",
")",
"# Index new nodes.",
"new_nodes",
"=",
"list",
"(",
"set",
"(",
"nodes",
")",
"-",
"set",
"(",
"self",
".",
"node_index",
".",
"values",
"(",
")",
")",
")",
"start",
"=",
"max",
"(",
"len",
"(",
"self",
".",
"node_index",
")",
"-",
"1",
",",
"max",
"(",
"self",
".",
"node_index",
".",
"keys",
"(",
")",
")",
")",
"for",
"i",
"in",
"xrange",
"(",
"start",
",",
"start",
"+",
"len",
"(",
"new_nodes",
")",
")",
":",
"n",
"=",
"new_nodes",
".",
"pop",
"(",
")",
"self",
".",
"node_index",
"[",
"i",
"]",
",",
"self",
".",
"node_lookup",
"[",
"n",
"]",
"=",
"n",
",",
"i",
"self",
".",
"graphs_containing",
"[",
"n",
"]",
".",
"append",
"(",
"name",
")",
"# Relabel nodes in `graph`.",
"new_labels",
"=",
"{",
"n",
":",
"self",
".",
"node_lookup",
"[",
"n",
"]",
"for",
"n",
"in",
"nodes",
"}",
"indexed_graph",
"=",
"nx",
".",
"relabel",
".",
"relabel_nodes",
"(",
"graph",
",",
"new_labels",
",",
"copy",
"=",
"True",
")",
"return",
"indexed_graph"
] |
Index any new nodes in `graph`, and relabel the nodes in `graph` using
the index.
Parameters
----------
name : hashable
Unique name used to identify the `graph`.
graph : networkx.Graph
Returns
-------
indexed_graph : networkx.Graph
|
[
"Index",
"any",
"new",
"nodes",
"in",
"graph",
"and",
"relabel",
"the",
"nodes",
"in",
"graph",
"using",
"the",
"index",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/graphcollection.py#L159-L188
|
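`GraphCollection.index` maps node labels to integers and relabels each incoming graph accordingly. A simplified sketch of the same idea using ``networkx`` (assumed installed, as in the original); it skips the collection bookkeeping (``graphs_containing``, the start-offset arithmetic) and simply assigns consecutive integers to unseen labels:

.. code-block:: python

    import networkx as nx

    node_index = {}    # int -> original label
    node_lookup = {}   # original label -> int

    def index_graph(graph):
        # Assign an integer to any label not seen before, then relabel a
        # copy of the graph with those integers.
        for label in graph.nodes():
            if label not in node_lookup:
                i = len(node_index)
                node_index[i], node_lookup[label] = label, i
        mapping = {label: node_lookup[label] for label in graph.nodes()}
        return nx.relabel_nodes(graph, mapping, copy=True)

    g = nx.Graph([('BRADSHAW 1965', 'DOLE 1965'), ('DOLE 1965', 'SMITH 1970')])
    print(sorted(index_graph(g).edges()))   # [(0, 1), (1, 2)]
    print(node_index)   # {0: 'BRADSHAW 1965', 1: 'DOLE 1965', 2: 'SMITH 1970'}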
10,084
|
diging/tethne
|
tethne/networks/topics.py
|
terms
|
def terms(model, threshold=0.01, **kwargs):
"""
Two terms are coupled if the posterior probability for both terms is
    greater than ``threshold`` for the same topic.
Parameters
----------
model : :class:`.LDAModel`
threshold : float
Default: 0.01
kwargs : kwargs
Passed on to :func:`.cooccurrence`\.
Returns
-------
:ref:`networkx.Graph <networkx:graph>`
"""
select = lambda f, v, c, dc: v > threshold
graph = cooccurrence(model.phi, filter=select, **kwargs)
# Only include labels for terms that are actually in the graph.
label_map = {k: v for k, v in model.vocabulary.items()
if k in graph.nodes()}
graph.name = ''
return networkx.relabel_nodes(graph, label_map)
|
python
|
def terms(model, threshold=0.01, **kwargs):
"""
Two terms are coupled if the posterior probability for both terms is
    greater than ``threshold`` for the same topic.
Parameters
----------
model : :class:`.LDAModel`
threshold : float
Default: 0.01
kwargs : kwargs
Passed on to :func:`.cooccurrence`\.
Returns
-------
:ref:`networkx.Graph <networkx:graph>`
"""
select = lambda f, v, c, dc: v > threshold
graph = cooccurrence(model.phi, filter=select, **kwargs)
# Only include labels for terms that are actually in the graph.
label_map = {k: v for k, v in model.vocabulary.items()
if k in graph.nodes()}
graph.name = ''
return networkx.relabel_nodes(graph, label_map)
|
[
"def",
"terms",
"(",
"model",
",",
"threshold",
"=",
"0.01",
",",
"*",
"*",
"kwargs",
")",
":",
"select",
"=",
"lambda",
"f",
",",
"v",
",",
"c",
",",
"dc",
":",
"v",
">",
"threshold",
"graph",
"=",
"cooccurrence",
"(",
"model",
".",
"phi",
",",
"filter",
"=",
"select",
",",
"*",
"*",
"kwargs",
")",
"# Only include labels for terms that are actually in the graph.",
"label_map",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"model",
".",
"vocabulary",
".",
"items",
"(",
")",
"if",
"k",
"in",
"graph",
".",
"nodes",
"(",
")",
"}",
"graph",
".",
"name",
"=",
"''",
"return",
"networkx",
".",
"relabel_nodes",
"(",
"graph",
",",
"label_map",
")"
] |
Two terms are coupled if the posterior probability for both terms is
    greater than ``threshold`` for the same topic.
Parameters
----------
model : :class:`.LDAModel`
threshold : float
Default: 0.01
kwargs : kwargs
Passed on to :func:`.cooccurrence`\.
Returns
-------
:ref:`networkx.Graph <networkx:graph>`
|
[
"Two",
"terms",
"are",
"coupled",
"if",
"the",
"posterior",
"probability",
"for",
"both",
"terms",
"is",
"greather",
"than",
"threshold",
"for",
"the",
"same",
"topic",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/topics.py#L24-L50
|
10,085
|
diging/tethne
|
tethne/networks/topics.py
|
topic_coupling
|
def topic_coupling(model, threshold=None, **kwargs):
"""
Two papers are coupled if they both contain a shared topic above a
``threshold``.
Parameters
----------
model : :class:`.LDAModel`
threshold : float
Default: ``3./model.Z``
kwargs : kwargs
Passed on to :func:`.coupling`\.
Returns
-------
:ref:`networkx.Graph <networkx:graph>`
"""
if not threshold:
threshold = 3./model.Z
select = lambda f, v, c, dc: v > threshold
graph = coupling(model.corpus, 'topics', filter=select, **kwargs)
graph.name = ''
return graph
|
python
|
def topic_coupling(model, threshold=None, **kwargs):
"""
Two papers are coupled if they both contain a shared topic above a
``threshold``.
Parameters
----------
model : :class:`.LDAModel`
threshold : float
Default: ``3./model.Z``
kwargs : kwargs
Passed on to :func:`.coupling`\.
Returns
-------
:ref:`networkx.Graph <networkx:graph>`
"""
if not threshold:
threshold = 3./model.Z
select = lambda f, v, c, dc: v > threshold
graph = coupling(model.corpus, 'topics', filter=select, **kwargs)
graph.name = ''
return graph
|
[
"def",
"topic_coupling",
"(",
"model",
",",
"threshold",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"threshold",
":",
"threshold",
"=",
"3.",
"/",
"model",
".",
"Z",
"select",
"=",
"lambda",
"f",
",",
"v",
",",
"c",
",",
"dc",
":",
"v",
">",
"threshold",
"graph",
"=",
"coupling",
"(",
"model",
".",
"corpus",
",",
"'topics'",
",",
"filter",
"=",
"select",
",",
"*",
"*",
"kwargs",
")",
"graph",
".",
"name",
"=",
"''",
"return",
"graph"
] |
Two papers are coupled if they both contain a shared topic above a
``threshold``.
Parameters
----------
model : :class:`.LDAModel`
threshold : float
Default: ``3./model.Z``
kwargs : kwargs
Passed on to :func:`.coupling`\.
Returns
-------
:ref:`networkx.Graph <networkx:graph>`
|
[
"Two",
"papers",
"are",
"coupled",
"if",
"they",
"both",
"contain",
"a",
"shared",
"topic",
"above",
"a",
"threshold",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/topics.py#L53-L77
|
10,086
|
diging/tethne
|
tethne/analyze/features.py
|
kl_divergence
|
def kl_divergence(V_a, V_b):
"""
Calculate Kullback-Leibler distance.
Uses the smoothing method described in `Bigi 2003
<http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_
to facilitate better comparisons between vectors describing wordcounts.
Parameters
----------
V_a : list
V_b : list
Returns
-------
divergence : float
KL divergence.
"""
# Find shared features.
Ndiff = _shared_features(V_a, V_b)
# aprob and bprob should each sum to 1.0
aprob = map(lambda v: float(v)/sum(V_a), V_a)
bprob = map(lambda v: float(v)/sum(V_b), V_b)
# Smooth according to Bigi 2003.
aprob, bprob = _smooth(aprob, bprob, Ndiff)
return sum(map(lambda a, b: (a-b)*log(a/b), aprob, bprob))
|
python
|
def kl_divergence(V_a, V_b):
"""
Calculate Kullback-Leibler distance.
Uses the smoothing method described in `Bigi 2003
<http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_
to facilitate better comparisons between vectors describing wordcounts.
Parameters
----------
V_a : list
V_b : list
Returns
-------
divergence : float
KL divergence.
"""
# Find shared features.
Ndiff = _shared_features(V_a, V_b)
# aprob and bprob should each sum to 1.0
aprob = map(lambda v: float(v)/sum(V_a), V_a)
bprob = map(lambda v: float(v)/sum(V_b), V_b)
# Smooth according to Bigi 2003.
aprob, bprob = _smooth(aprob, bprob, Ndiff)
return sum(map(lambda a, b: (a-b)*log(a/b), aprob, bprob))
|
[
"def",
"kl_divergence",
"(",
"V_a",
",",
"V_b",
")",
":",
"# Find shared features.",
"Ndiff",
"=",
"_shared_features",
"(",
"V_a",
",",
"V_b",
")",
"# aprob and bprob should each sum to 1.0",
"aprob",
"=",
"map",
"(",
"lambda",
"v",
":",
"float",
"(",
"v",
")",
"/",
"sum",
"(",
"V_a",
")",
",",
"V_a",
")",
"bprob",
"=",
"map",
"(",
"lambda",
"v",
":",
"float",
"(",
"v",
")",
"/",
"sum",
"(",
"V_b",
")",
",",
"V_b",
")",
"# Smooth according to Bigi 2003.",
"aprob",
",",
"bprob",
"=",
"_smooth",
"(",
"aprob",
",",
"bprob",
",",
"Ndiff",
")",
"return",
"sum",
"(",
"map",
"(",
"lambda",
"a",
",",
"b",
":",
"(",
"a",
"-",
"b",
")",
"*",
"log",
"(",
"a",
"/",
"b",
")",
",",
"aprob",
",",
"bprob",
")",
")"
] |
Calculate Kullback-Leibler distance.
Uses the smoothing method described in `Bigi 2003
<http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_
to facilitate better comparisons between vectors describing wordcounts.
Parameters
----------
V_a : list
V_b : list
Returns
-------
divergence : float
KL divergence.
|
[
"Calculate",
"Kullback",
"-",
"Leibler",
"distance",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/features.py#L18-L47
|
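`kl_divergence` depends on the ``_smooth`` and ``_shared_features`` helpers, only the latter of which appears in this dump, so the sketch below shows just the unsmoothed core: normalise both count vectors to probabilities, then apply the same ``sum((a - b) * log(a / b))`` form. Values will therefore differ from the Bigi-smoothed output, and both toy vectors must be strictly positive:

.. code-block:: python

    from math import log

    def kl_divergence_unsmoothed(V_a, V_b):
        # Normalise raw counts to probabilities, then apply the same
        # sum((a - b) * log(a / b)) form as above, without smoothing,
        # so every entry in both vectors must be nonzero.
        aprob = [float(v) / sum(V_a) for v in V_a]
        bprob = [float(v) / sum(V_b) for v in V_b]
        return sum((a - b) * log(a / b) for a, b in zip(aprob, bprob))

    print(kl_divergence_unsmoothed([3, 2, 5], [4, 4, 2]))   # ~0.44 on these toy counts
    print(kl_divergence_unsmoothed([1, 1, 1], [1, 1, 1]))   # 0.0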
10,087
|
diging/tethne
|
tethne/analyze/features.py
|
_shared_features
|
def _shared_features(adense, bdense):
"""
Number of features in ``adense`` that are also in ``bdense``.
"""
a_indices = set(nonzero(adense))
b_indices = set(nonzero(bdense))
shared = list(a_indices & b_indices)
diff = list(a_indices - b_indices)
Ndiff = len(diff)
return Ndiff
|
python
|
def _shared_features(adense, bdense):
"""
Number of features in ``adense`` that are also in ``bdense``.
"""
a_indices = set(nonzero(adense))
b_indices = set(nonzero(bdense))
shared = list(a_indices & b_indices)
diff = list(a_indices - b_indices)
Ndiff = len(diff)
return Ndiff
|
[
"def",
"_shared_features",
"(",
"adense",
",",
"bdense",
")",
":",
"a_indices",
"=",
"set",
"(",
"nonzero",
"(",
"adense",
")",
")",
"b_indices",
"=",
"set",
"(",
"nonzero",
"(",
"bdense",
")",
")",
"shared",
"=",
"list",
"(",
"a_indices",
"&",
"b_indices",
")",
"diff",
"=",
"list",
"(",
"a_indices",
"-",
"b_indices",
")",
"Ndiff",
"=",
"len",
"(",
"diff",
")",
"return",
"Ndiff"
] |
Number of features in ``adense`` that are also in ``bdense``.
|
[
"Number",
"of",
"features",
"in",
"adense",
"that",
"are",
"also",
"in",
"bdense",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/analyze/features.py#L100-L111
|
10,088
|
diging/tethne
|
tethne/networks/base.py
|
cooccurrence
|
def cooccurrence(corpus_or_featureset, featureset_name=None, min_weight=1,
edge_attrs=['ayjid', 'date'],
filter=None):
"""
A network of feature elements linked by their joint occurrence in papers.
"""
if not filter:
filter = lambda f, v, c, dc: dc >= min_weight
featureset = _get_featureset(corpus_or_featureset, featureset_name)
if type(corpus_or_featureset) in [Corpus, StreamingCorpus]:
attributes = {i: {a: corpus_or_featureset.indices_lookup[i][a] for a in edge_attrs}
for i in corpus_or_featureset.indexed_papers.keys()}
c = lambda f: featureset.count(f) # Overall count.
dc = lambda f: featureset.documentCount(f) # Document count.
attributes = {}
# select applies filter to the elements in a (Structured)Feature. The
# iteration behavior of Feature and StructuredFeature are different, as is
# the manner in which the count for an element in each (Structured)Feature.
if type(featureset) is FeatureSet:
select = lambda feature: [f for f, v in feature
if filter(f, v, c(f), dc(f))]
elif type(featureset) is StructuredFeatureSet:
select = lambda feature: [f for f in feature
if filter(f, feature.count(f), c(f), dc(f))]
pairs = Counter()
eattrs = defaultdict(dict)
nattrs = defaultdict(dict)
nset = set()
for paper, feature in featureset.iteritems():
if len(feature) == 0:
continue
selected = select(feature)
nset |= set(selected)
for combo in combinations(selected, 2):
combo = tuple(sorted(combo))
pairs[combo] += 1
if paper in attributes:
eattrs[combo] = attributes[paper]
# Generate node attributes.
for n in list(nset):
nattrs[n]['count'] = featureset.count(n)
nattrs[n]['documentCount'] = featureset.documentCount(n)
return _generate_graph(nx.Graph, pairs, edge_attrs=eattrs,
node_attrs=nattrs, min_weight=min_weight)
|
python
|
def cooccurrence(corpus_or_featureset, featureset_name=None, min_weight=1,
edge_attrs=['ayjid', 'date'],
filter=None):
"""
A network of feature elements linked by their joint occurrence in papers.
"""
if not filter:
filter = lambda f, v, c, dc: dc >= min_weight
featureset = _get_featureset(corpus_or_featureset, featureset_name)
if type(corpus_or_featureset) in [Corpus, StreamingCorpus]:
attributes = {i: {a: corpus_or_featureset.indices_lookup[i][a] for a in edge_attrs}
for i in corpus_or_featureset.indexed_papers.keys()}
c = lambda f: featureset.count(f) # Overall count.
dc = lambda f: featureset.documentCount(f) # Document count.
attributes = {}
# select applies filter to the elements in a (Structured)Feature. The
# iteration behavior of Feature and StructuredFeature are different, as is
# the manner in which the count for an element in each (Structured)Feature.
if type(featureset) is FeatureSet:
select = lambda feature: [f for f, v in feature
if filter(f, v, c(f), dc(f))]
elif type(featureset) is StructuredFeatureSet:
select = lambda feature: [f for f in feature
if filter(f, feature.count(f), c(f), dc(f))]
pairs = Counter()
eattrs = defaultdict(dict)
nattrs = defaultdict(dict)
nset = set()
for paper, feature in featureset.iteritems():
if len(feature) == 0:
continue
selected = select(feature)
nset |= set(selected)
for combo in combinations(selected, 2):
combo = tuple(sorted(combo))
pairs[combo] += 1
if paper in attributes:
eattrs[combo] = attributes[paper]
# Generate node attributes.
for n in list(nset):
nattrs[n]['count'] = featureset.count(n)
nattrs[n]['documentCount'] = featureset.documentCount(n)
return _generate_graph(nx.Graph, pairs, edge_attrs=eattrs,
node_attrs=nattrs, min_weight=min_weight)
|
[
"def",
"cooccurrence",
"(",
"corpus_or_featureset",
",",
"featureset_name",
"=",
"None",
",",
"min_weight",
"=",
"1",
",",
"edge_attrs",
"=",
"[",
"'ayjid'",
",",
"'date'",
"]",
",",
"filter",
"=",
"None",
")",
":",
"if",
"not",
"filter",
":",
"filter",
"=",
"lambda",
"f",
",",
"v",
",",
"c",
",",
"dc",
":",
"dc",
">=",
"min_weight",
"featureset",
"=",
"_get_featureset",
"(",
"corpus_or_featureset",
",",
"featureset_name",
")",
"if",
"type",
"(",
"corpus_or_featureset",
")",
"in",
"[",
"Corpus",
",",
"StreamingCorpus",
"]",
":",
"attributes",
"=",
"{",
"i",
":",
"{",
"a",
":",
"corpus_or_featureset",
".",
"indices_lookup",
"[",
"i",
"]",
"[",
"a",
"]",
"for",
"a",
"in",
"edge_attrs",
"}",
"for",
"i",
"in",
"corpus_or_featureset",
".",
"indexed_papers",
".",
"keys",
"(",
")",
"}",
"c",
"=",
"lambda",
"f",
":",
"featureset",
".",
"count",
"(",
"f",
")",
"# Overall count.",
"dc",
"=",
"lambda",
"f",
":",
"featureset",
".",
"documentCount",
"(",
"f",
")",
"# Document count.",
"attributes",
"=",
"{",
"}",
"# select applies filter to the elements in a (Structured)Feature. The",
"# iteration behavior of Feature and StructuredFeature are different, as is",
"# the manner in which the count for an element in each (Structured)Feature.",
"if",
"type",
"(",
"featureset",
")",
"is",
"FeatureSet",
":",
"select",
"=",
"lambda",
"feature",
":",
"[",
"f",
"for",
"f",
",",
"v",
"in",
"feature",
"if",
"filter",
"(",
"f",
",",
"v",
",",
"c",
"(",
"f",
")",
",",
"dc",
"(",
"f",
")",
")",
"]",
"elif",
"type",
"(",
"featureset",
")",
"is",
"StructuredFeatureSet",
":",
"select",
"=",
"lambda",
"feature",
":",
"[",
"f",
"for",
"f",
"in",
"feature",
"if",
"filter",
"(",
"f",
",",
"feature",
".",
"count",
"(",
"f",
")",
",",
"c",
"(",
"f",
")",
",",
"dc",
"(",
"f",
")",
")",
"]",
"pairs",
"=",
"Counter",
"(",
")",
"eattrs",
"=",
"defaultdict",
"(",
"dict",
")",
"nattrs",
"=",
"defaultdict",
"(",
"dict",
")",
"nset",
"=",
"set",
"(",
")",
"for",
"paper",
",",
"feature",
"in",
"featureset",
".",
"iteritems",
"(",
")",
":",
"if",
"len",
"(",
"feature",
")",
"==",
"0",
":",
"continue",
"selected",
"=",
"select",
"(",
"feature",
")",
"nset",
"|=",
"set",
"(",
"selected",
")",
"for",
"combo",
"in",
"combinations",
"(",
"selected",
",",
"2",
")",
":",
"combo",
"=",
"tuple",
"(",
"sorted",
"(",
"combo",
")",
")",
"pairs",
"[",
"combo",
"]",
"+=",
"1",
"if",
"paper",
"in",
"attributes",
":",
"eattrs",
"[",
"combo",
"]",
"=",
"attributes",
"[",
"paper",
"]",
"# Generate node attributes.",
"for",
"n",
"in",
"list",
"(",
"nset",
")",
":",
"nattrs",
"[",
"n",
"]",
"[",
"'count'",
"]",
"=",
"featureset",
".",
"count",
"(",
"n",
")",
"nattrs",
"[",
"n",
"]",
"[",
"'documentCount'",
"]",
"=",
"featureset",
".",
"documentCount",
"(",
"n",
")",
"return",
"_generate_graph",
"(",
"nx",
".",
"Graph",
",",
"pairs",
",",
"edge_attrs",
"=",
"eattrs",
",",
"node_attrs",
"=",
"nattrs",
",",
"min_weight",
"=",
"min_weight",
")"
] |
A network of feature elements linked by their joint occurrence in papers.
|
[
"A",
"network",
"of",
"feature",
"elements",
"linked",
"by",
"their",
"joint",
"occurrence",
"in",
"papers",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L39-L93
|
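The heart of the cooccurrence record above is its pair-counting loop; a self-contained sketch of just that step, using a hypothetical mapping from papers to their selected feature elements.

from collections import Counter
from itertools import combinations

selected_by_paper = {  # hypothetical paper -> selected feature elements
    'p1': ['network', 'graph', 'topic'],
    'p2': ['graph', 'topic'],
    'p3': ['network', 'graph'],
}

pairs = Counter()
for paper, selected in selected_by_paper.items():
    for combo in combinations(selected, 2):
        pairs[tuple(sorted(combo))] += 1  # unordered pair, counted once per paper

print(pairs.most_common())  # e.g. [(('graph', 'network'), 2), (('graph', 'topic'), 2), ...]
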
10,089
|
diging/tethne
|
tethne/networks/base.py
|
coupling
|
def coupling(corpus_or_featureset, featureset_name=None,
min_weight=1, filter=lambda f, v, c, dc: True,
node_attrs=[]):
"""
A network of papers linked by their joint posession of features.
"""
featureset = _get_featureset(corpus_or_featureset, featureset_name)
c = lambda f: featureset.count(f) # Overall count.
dc = lambda f: featureset.documentCount(f) # Document count.
f = lambda elem: featureset.index[elem]
v = lambda p, f: featureset.features[p].value(f)
select = lambda p, elem: filter(f(elem), v(p, f(elem)), c(f(elem)), dc(f(elem)))
pairs = defaultdict(list)
for elem, papers in featureset.with_feature.iteritems():
selected = [p for p in papers if select(p, elem)]
for combo in combinations(selected, 2):
combo = tuple(sorted(combo))
pairs[combo].append(featureset.index[elem])
graph = nx.Graph()
for combo, features in pairs.iteritems():
count = len(features)
if count >= min_weight:
graph.add_edge(combo[0], combo[1], features=features, weight=count)
# Add node attributes.
for attr in node_attrs:
for node in graph.nodes():
value = ''
if node in corpus_or_featureset:
paper = corpus_or_featureset[node]
if hasattr(paper, attr):
value = getattr(paper, attr)
if value is None:
value = ''
elif callable(value):
value = value()
graph.node[node][attr] = value
return graph
|
python
|
def coupling(corpus_or_featureset, featureset_name=None,
min_weight=1, filter=lambda f, v, c, dc: True,
node_attrs=[]):
"""
A network of papers linked by their joint posession of features.
"""
featureset = _get_featureset(corpus_or_featureset, featureset_name)
c = lambda f: featureset.count(f) # Overall count.
dc = lambda f: featureset.documentCount(f) # Document count.
f = lambda elem: featureset.index[elem]
v = lambda p, f: featureset.features[p].value(f)
select = lambda p, elem: filter(f(elem), v(p, f(elem)), c(f(elem)), dc(f(elem)))
pairs = defaultdict(list)
for elem, papers in featureset.with_feature.iteritems():
selected = [p for p in papers if select(p, elem)]
for combo in combinations(selected, 2):
combo = tuple(sorted(combo))
pairs[combo].append(featureset.index[elem])
graph = nx.Graph()
for combo, features in pairs.iteritems():
count = len(features)
if count >= min_weight:
graph.add_edge(combo[0], combo[1], features=features, weight=count)
# Add node attributes.
for attr in node_attrs:
for node in graph.nodes():
value = ''
if node in corpus_or_featureset:
paper = corpus_or_featureset[node]
if hasattr(paper, attr):
value = getattr(paper, attr)
if value is None:
value = ''
elif callable(value):
value = value()
graph.node[node][attr] = value
return graph
|
[
"def",
"coupling",
"(",
"corpus_or_featureset",
",",
"featureset_name",
"=",
"None",
",",
"min_weight",
"=",
"1",
",",
"filter",
"=",
"lambda",
"f",
",",
"v",
",",
"c",
",",
"dc",
":",
"True",
",",
"node_attrs",
"=",
"[",
"]",
")",
":",
"featureset",
"=",
"_get_featureset",
"(",
"corpus_or_featureset",
",",
"featureset_name",
")",
"c",
"=",
"lambda",
"f",
":",
"featureset",
".",
"count",
"(",
"f",
")",
"# Overall count.",
"dc",
"=",
"lambda",
"f",
":",
"featureset",
".",
"documentCount",
"(",
"f",
")",
"# Document count.",
"f",
"=",
"lambda",
"elem",
":",
"featureset",
".",
"index",
"[",
"elem",
"]",
"v",
"=",
"lambda",
"p",
",",
"f",
":",
"featureset",
".",
"features",
"[",
"p",
"]",
".",
"value",
"(",
"f",
")",
"select",
"=",
"lambda",
"p",
",",
"elem",
":",
"filter",
"(",
"f",
"(",
"elem",
")",
",",
"v",
"(",
"p",
",",
"f",
"(",
"elem",
")",
")",
",",
"c",
"(",
"f",
"(",
"elem",
")",
")",
",",
"dc",
"(",
"f",
"(",
"elem",
")",
")",
")",
"pairs",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"elem",
",",
"papers",
"in",
"featureset",
".",
"with_feature",
".",
"iteritems",
"(",
")",
":",
"selected",
"=",
"[",
"p",
"for",
"p",
"in",
"papers",
"if",
"select",
"(",
"p",
",",
"elem",
")",
"]",
"for",
"combo",
"in",
"combinations",
"(",
"selected",
",",
"2",
")",
":",
"combo",
"=",
"tuple",
"(",
"sorted",
"(",
"combo",
")",
")",
"pairs",
"[",
"combo",
"]",
".",
"append",
"(",
"featureset",
".",
"index",
"[",
"elem",
"]",
")",
"graph",
"=",
"nx",
".",
"Graph",
"(",
")",
"for",
"combo",
",",
"features",
"in",
"pairs",
".",
"iteritems",
"(",
")",
":",
"count",
"=",
"len",
"(",
"features",
")",
"if",
"count",
">=",
"min_weight",
":",
"graph",
".",
"add_edge",
"(",
"combo",
"[",
"0",
"]",
",",
"combo",
"[",
"1",
"]",
",",
"features",
"=",
"features",
",",
"weight",
"=",
"count",
")",
"# Add node attributes.",
"for",
"attr",
"in",
"node_attrs",
":",
"for",
"node",
"in",
"graph",
".",
"nodes",
"(",
")",
":",
"value",
"=",
"''",
"if",
"node",
"in",
"corpus_or_featureset",
":",
"paper",
"=",
"corpus_or_featureset",
"[",
"node",
"]",
"if",
"hasattr",
"(",
"paper",
",",
"attr",
")",
":",
"value",
"=",
"getattr",
"(",
"paper",
",",
"attr",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"''",
"elif",
"callable",
"(",
"value",
")",
":",
"value",
"=",
"value",
"(",
")",
"graph",
".",
"node",
"[",
"node",
"]",
"[",
"attr",
"]",
"=",
"value",
"return",
"graph"
] |
A network of papers linked by their joint posession of features.
|
[
"A",
"network",
"of",
"papers",
"linked",
"by",
"their",
"joint",
"posession",
"of",
"features",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L97-L140
|
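The coupling record above links papers through an inverted index (feature element -> papers); a self-contained sketch of that pairing on hypothetical data, where the edge weight is the number of shared elements.

from collections import defaultdict
from itertools import combinations

with_feature = {  # hypothetical feature element -> papers containing it
    'citationA': ['p1', 'p2', 'p3'],
    'citationB': ['p1', 'p2'],
}

pairs = defaultdict(list)
for elem, papers in with_feature.items():
    for combo in combinations(sorted(papers), 2):
        pairs[combo].append(elem)

edges = [(a, b, len(feats)) for (a, b), feats in pairs.items()]
print(sorted(edges))  # [('p1', 'p2', 2), ('p1', 'p3', 1), ('p2', 'p3', 1)]
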
10,090
|
diging/tethne
|
tethne/networks/base.py
|
multipartite
|
def multipartite(corpus, featureset_names, min_weight=1, filters={}):
"""
A network of papers and one or more featuresets.
"""
pairs = Counter()
node_type = {corpus._generate_index(p): {'type': 'paper'}
for p in corpus.papers}
for featureset_name in featureset_names:
ftypes = {}
featureset = _get_featureset(corpus, featureset_name)
for paper, feature in featureset.iteritems():
if featureset_name in filters:
if not filters[featureset_name](featureset, feature):
continue
if len(feature) < 1:
continue
for f in list(zip(*feature))[0]:
ftypes[f] = {'type': featureset_name}
pairs[(paper, f)] += 1
node_type.update(ftypes)
return _generate_graph(nx.DiGraph, pairs, node_attrs=node_type,
min_weight=min_weight)
|
python
|
def multipartite(corpus, featureset_names, min_weight=1, filters={}):
"""
A network of papers and one or more featuresets.
"""
pairs = Counter()
node_type = {corpus._generate_index(p): {'type': 'paper'}
for p in corpus.papers}
for featureset_name in featureset_names:
ftypes = {}
featureset = _get_featureset(corpus, featureset_name)
for paper, feature in featureset.iteritems():
if featureset_name in filters:
if not filters[featureset_name](featureset, feature):
continue
if len(feature) < 1:
continue
for f in list(zip(*feature))[0]:
ftypes[f] = {'type': featureset_name}
pairs[(paper, f)] += 1
node_type.update(ftypes)
return _generate_graph(nx.DiGraph, pairs, node_attrs=node_type,
min_weight=min_weight)
|
[
"def",
"multipartite",
"(",
"corpus",
",",
"featureset_names",
",",
"min_weight",
"=",
"1",
",",
"filters",
"=",
"{",
"}",
")",
":",
"pairs",
"=",
"Counter",
"(",
")",
"node_type",
"=",
"{",
"corpus",
".",
"_generate_index",
"(",
"p",
")",
":",
"{",
"'type'",
":",
"'paper'",
"}",
"for",
"p",
"in",
"corpus",
".",
"papers",
"}",
"for",
"featureset_name",
"in",
"featureset_names",
":",
"ftypes",
"=",
"{",
"}",
"featureset",
"=",
"_get_featureset",
"(",
"corpus",
",",
"featureset_name",
")",
"for",
"paper",
",",
"feature",
"in",
"featureset",
".",
"iteritems",
"(",
")",
":",
"if",
"featureset_name",
"in",
"filters",
":",
"if",
"not",
"filters",
"[",
"featureset_name",
"]",
"(",
"featureset",
",",
"feature",
")",
":",
"continue",
"if",
"len",
"(",
"feature",
")",
"<",
"1",
":",
"continue",
"for",
"f",
"in",
"list",
"(",
"zip",
"(",
"*",
"feature",
")",
")",
"[",
"0",
"]",
":",
"ftypes",
"[",
"f",
"]",
"=",
"{",
"'type'",
":",
"featureset_name",
"}",
"pairs",
"[",
"(",
"paper",
",",
"f",
")",
"]",
"+=",
"1",
"node_type",
".",
"update",
"(",
"ftypes",
")",
"return",
"_generate_graph",
"(",
"nx",
".",
"DiGraph",
",",
"pairs",
",",
"node_attrs",
"=",
"node_type",
",",
"min_weight",
"=",
"min_weight",
")"
] |
A network of papers and one or more featuresets.
|
[
"A",
"network",
"of",
"papers",
"and",
"one",
"or",
"more",
"featuresets",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/networks/base.py#L143-L167
|
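A sketch of the paper -> feature edges that the multipartite record above hands to _generate_graph, rebuilt directly with networkx on hypothetical data; 'authors' stands in for a featureset_name and is not taken from the record.

import networkx as nx

paper_features = {'p1': ['smith j', 'jones k'], 'p2': ['smith j']}  # hypothetical

graph = nx.DiGraph()
for paper, elems in paper_features.items():
    graph.add_node(paper, type='paper')
    for f in elems:
        graph.add_node(f, type='authors')   # node typed by its featureset, as in the record
        graph.add_edge(paper, f, weight=1)

print(graph.number_of_nodes(), graph.number_of_edges())  # 4 3
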
10,091
|
diging/tethne
|
tethne/utilities.py
|
_strip_punctuation
|
def _strip_punctuation(s):
"""
Removes all punctuation characters from a string.
"""
if type(s) is str and not PYTHON_3: # Bytestring (default in Python 2.x).
return s.translate(string.maketrans("",""), string.punctuation)
else: # Unicode string (default in Python 3.x).
translate_table = dict((ord(char), u'') for char in u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~')
return s.translate(translate_table)
|
python
|
def _strip_punctuation(s):
"""
Removes all punctuation characters from a string.
"""
if type(s) is str and not PYTHON_3: # Bytestring (default in Python 2.x).
return s.translate(string.maketrans("",""), string.punctuation)
else: # Unicode string (default in Python 3.x).
translate_table = dict((ord(char), u'') for char in u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~')
return s.translate(translate_table)
|
[
"def",
"_strip_punctuation",
"(",
"s",
")",
":",
"if",
"type",
"(",
"s",
")",
"is",
"str",
"and",
"not",
"PYTHON_3",
":",
"# Bytestring (default in Python 2.x).",
"return",
"s",
".",
"translate",
"(",
"string",
".",
"maketrans",
"(",
"\"\"",
",",
"\"\"",
")",
",",
"string",
".",
"punctuation",
")",
"else",
":",
"# Unicode string (default in Python 3.x).",
"translate_table",
"=",
"dict",
"(",
"(",
"ord",
"(",
"char",
")",
",",
"u''",
")",
"for",
"char",
"in",
"u'!\"#%\\'()*+,-./:;<=>?@[\\]^_`{|}~'",
")",
"return",
"s",
".",
"translate",
"(",
"translate_table",
")"
] |
Removes all punctuation characters from a string.
|
[
"Removes",
"all",
"punctuation",
"characters",
"from",
"a",
"string",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L115-L123
|
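For the _strip_punctuation record above, the Python 3 branch amounts to a str.translate table mapping punctuation code points to the empty string; a self-contained check on a hypothetical string.

table = {ord(ch): '' for ch in '!"#%\'()*+,-./:;<=>?@[\\]^_`{|}~'}  # same characters as the record
print('co-word (network), 2015!'.translate(table))  # coword network 2015
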
10,092
|
diging/tethne
|
tethne/utilities.py
|
overlap
|
def overlap(listA, listB):
"""
Return list of objects shared by listA, listB.
"""
if (listA is None) or (listB is None):
return []
else:
return list(set(listA) & set(listB))
|
python
|
def overlap(listA, listB):
"""
Return list of objects shared by listA, listB.
"""
if (listA is None) or (listB is None):
return []
else:
return list(set(listA) & set(listB))
|
[
"def",
"overlap",
"(",
"listA",
",",
"listB",
")",
":",
"if",
"(",
"listA",
"is",
"None",
")",
"or",
"(",
"listB",
"is",
"None",
")",
":",
"return",
"[",
"]",
"else",
":",
"return",
"list",
"(",
"set",
"(",
"listA",
")",
"&",
"set",
"(",
"listB",
")",
")"
] |
Return list of objects shared by listA, listB.
|
[
"Return",
"list",
"of",
"objects",
"shared",
"by",
"listA",
"listB",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L174-L181
|
10,093
|
diging/tethne
|
tethne/utilities.py
|
subdict
|
def subdict(super_dict, keys):
"""
Returns a subset of the super_dict with the specified keys.
"""
sub_dict = {}
valid_keys = super_dict.keys()
for key in keys:
if key in valid_keys:
sub_dict[key] = super_dict[key]
return sub_dict
|
python
|
def subdict(super_dict, keys):
"""
Returns a subset of the super_dict with the specified keys.
"""
sub_dict = {}
valid_keys = super_dict.keys()
for key in keys:
if key in valid_keys:
sub_dict[key] = super_dict[key]
return sub_dict
|
[
"def",
"subdict",
"(",
"super_dict",
",",
"keys",
")",
":",
"sub_dict",
"=",
"{",
"}",
"valid_keys",
"=",
"super_dict",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"in",
"valid_keys",
":",
"sub_dict",
"[",
"key",
"]",
"=",
"super_dict",
"[",
"key",
"]",
"return",
"sub_dict"
] |
Returns a subset of the super_dict with the specified keys.
|
[
"Returns",
"a",
"subset",
"of",
"the",
"super_dict",
"with",
"the",
"specified",
"keys",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L184-L194
|
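A hedged note on the subdict record above: in modern Python the same key selection is a one-line dict comprehension (inputs here are hypothetical).

super_dict = {'title': 'On Graphs', 'date': 1999, 'volume': 12}  # hypothetical
keys = ['title', 'date', 'doi']
print({k: super_dict[k] for k in keys if k in super_dict})  # {'title': 'On Graphs', 'date': 1999}
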
10,094
|
diging/tethne
|
tethne/utilities.py
|
concat_list
|
def concat_list(listA, listB, delim=' '):
"""
Concatenate list elements pair-wise with the delim character
Returns the concatenated list
Raises index error if lists are not parallel
"""
# Lists must be of equal length.
if len(listA) != len(listB):
raise IndexError('Input lists are not parallel.')
# Concatenate lists.
listC = []
for i in xrange(len(listA)):
app = listA[i] + delim + listB[i]
listC.append(app)
return listC
|
python
|
def concat_list(listA, listB, delim=' '):
"""
Concatenate list elements pair-wise with the delim character
Returns the concatenated list
Raises index error if lists are not parallel
"""
# Lists must be of equal length.
if len(listA) != len(listB):
raise IndexError('Input lists are not parallel.')
# Concatenate lists.
listC = []
for i in xrange(len(listA)):
app = listA[i] + delim + listB[i]
listC.append(app)
return listC
|
[
"def",
"concat_list",
"(",
"listA",
",",
"listB",
",",
"delim",
"=",
"' '",
")",
":",
"# Lists must be of equal length.",
"if",
"len",
"(",
"listA",
")",
"!=",
"len",
"(",
"listB",
")",
":",
"raise",
"IndexError",
"(",
"'Input lists are not parallel.'",
")",
"# Concatenate lists.",
"listC",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"listA",
")",
")",
":",
"app",
"=",
"listA",
"[",
"i",
"]",
"+",
"delim",
"+",
"listB",
"[",
"i",
"]",
"listC",
".",
"append",
"(",
"app",
")",
"return",
"listC"
] |
Concatenate list elements pair-wise with the delim character
Returns the concatenated list
Raises index error if lists are not parallel
|
[
"Concatenate",
"list",
"elements",
"pair",
"-",
"wise",
"with",
"the",
"delim",
"character",
"Returns",
"the",
"concatenated",
"list",
"Raises",
"index",
"error",
"if",
"lists",
"are",
"not",
"parallel"
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L212-L229
|
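The concat_list record above relies on Python 2's xrange; a hedged note showing the equivalent pair-wise concatenation in Python 3 with zip (hypothetical parallel lists).

listA, listB = ['smith', 'jones'], ['2001', '2005']  # hypothetical parallel lists
listC = [a + ' ' + b for a, b in zip(listA, listB)]
print(listC)  # ['smith 2001', 'jones 2005']
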
10,095
|
diging/tethne
|
tethne/utilities.py
|
strip_non_ascii
|
def strip_non_ascii(s):
"""
Returns the string without non-ASCII characters.
Parameters
----------
string : string
A string that may contain non-ASCII characters.
Returns
-------
clean_string : string
A string that does not contain non-ASCII characters.
"""
stripped = (c for c in s if 0 < ord(c) < 127)
clean_string = u''.join(stripped)
return clean_string
|
python
|
def strip_non_ascii(s):
"""
Returns the string without non-ASCII characters.
Parameters
----------
string : string
A string that may contain non-ASCII characters.
Returns
-------
clean_string : string
A string that does not contain non-ASCII characters.
"""
stripped = (c for c in s if 0 < ord(c) < 127)
clean_string = u''.join(stripped)
return clean_string
|
[
"def",
"strip_non_ascii",
"(",
"s",
")",
":",
"stripped",
"=",
"(",
"c",
"for",
"c",
"in",
"s",
"if",
"0",
"<",
"ord",
"(",
"c",
")",
"<",
"127",
")",
"clean_string",
"=",
"u''",
".",
"join",
"(",
"stripped",
")",
"return",
"clean_string"
] |
Returns the string without non-ASCII characters.
Parameters
----------
string : string
A string that may contain non-ASCII characters.
Returns
-------
clean_string : string
A string that does not contain non-ASCII characters.
|
[
"Returns",
"the",
"string",
"without",
"non",
"-",
"ASCII",
"characters",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L231-L248
|
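For the strip_non_ascii record above, a quick self-contained check of the 0 < ord(c) < 127 filter on a hypothetical string with accented characters.

s = u'Montr\u00e9al caf\u00e9'  # hypothetical input
print(u''.join(c for c in s if 0 < ord(c) < 127))  # Montral caf
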
10,096
|
diging/tethne
|
tethne/utilities.py
|
dict_from_node
|
def dict_from_node(node, recursive=False):
"""
Converts ElementTree node to a dictionary.
Parameters
----------
node : ElementTree node
recursive : boolean
If recursive=False, the value of any field with children will be the
number of children.
Returns
-------
dict : nested dictionary.
Tags as keys and values as values. Sub-elements that occur multiple
times in an element are contained in a list.
"""
dict = {}
for snode in node:
if len(snode) > 0:
if recursive:
# Will drill down until len(snode) <= 0.
value = dict_from_node(snode, True)
else:
value = len(snode)
elif snode.text is not None:
value = snode.text
else:
value = u''
if snode.tag in dict.keys(): # If there are multiple subelements
# with the same tag, then the value
# of the element should be a list
# rather than a dict.
if type(dict[snode.tag]) is list: # If a list has already been
# started, just append to
# it.
dict[snode.tag].append(value)
else:
dict[snode.tag] = [ dict[snode.tag], value ]
else:
dict[snode.tag] = value # Default behavior.
return dict
|
python
|
def dict_from_node(node, recursive=False):
"""
Converts ElementTree node to a dictionary.
Parameters
----------
node : ElementTree node
recursive : boolean
If recursive=False, the value of any field with children will be the
number of children.
Returns
-------
dict : nested dictionary.
Tags as keys and values as values. Sub-elements that occur multiple
times in an element are contained in a list.
"""
dict = {}
for snode in node:
if len(snode) > 0:
if recursive:
# Will drill down until len(snode) <= 0.
value = dict_from_node(snode, True)
else:
value = len(snode)
elif snode.text is not None:
value = snode.text
else:
value = u''
if snode.tag in dict.keys(): # If there are multiple subelements
# with the same tag, then the value
# of the element should be a list
# rather than a dict.
if type(dict[snode.tag]) is list: # If a list has already been
# started, just append to
# it.
dict[snode.tag].append(value)
else:
dict[snode.tag] = [ dict[snode.tag], value ]
else:
dict[snode.tag] = value # Default behavior.
return dict
|
[
"def",
"dict_from_node",
"(",
"node",
",",
"recursive",
"=",
"False",
")",
":",
"dict",
"=",
"{",
"}",
"for",
"snode",
"in",
"node",
":",
"if",
"len",
"(",
"snode",
")",
">",
"0",
":",
"if",
"recursive",
":",
"# Will drill down until len(snode) <= 0.",
"value",
"=",
"dict_from_node",
"(",
"snode",
",",
"True",
")",
"else",
":",
"value",
"=",
"len",
"(",
"snode",
")",
"elif",
"snode",
".",
"text",
"is",
"not",
"None",
":",
"value",
"=",
"snode",
".",
"text",
"else",
":",
"value",
"=",
"u''",
"if",
"snode",
".",
"tag",
"in",
"dict",
".",
"keys",
"(",
")",
":",
"# If there are multiple subelements",
"# with the same tag, then the value",
"# of the element should be a list",
"# rather than a dict.",
"if",
"type",
"(",
"dict",
"[",
"snode",
".",
"tag",
"]",
")",
"is",
"list",
":",
"# If a list has already been",
"# started, just append to",
"# it.",
"dict",
"[",
"snode",
".",
"tag",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"dict",
"[",
"snode",
".",
"tag",
"]",
"=",
"[",
"dict",
"[",
"snode",
".",
"tag",
"]",
",",
"value",
"]",
"else",
":",
"dict",
"[",
"snode",
".",
"tag",
"]",
"=",
"value",
"# Default behavior.",
"return",
"dict"
] |
Converts ElementTree node to a dictionary.
Parameters
----------
node : ElementTree node
recursive : boolean
If recursive=False, the value of any field with children will be the
number of children.
Returns
-------
dict : nested dictionary.
Tags as keys and values as values. Sub-elements that occur multiple
times in an element are contained in a list.
|
[
"Converts",
"ElementTree",
"node",
"to",
"a",
"dictionary",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L255-L298
|
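A usage sketch for the dict_from_node record above on a hypothetical ElementTree node; the import path is assumed from the record's path field (tethne/utilities.py), and the expected shapes follow the record's docstring.

import xml.etree.ElementTree as ET
from tethne.utilities import dict_from_node  # import path assumed from the record above

node = ET.fromstring(
    '<paper><title>On Graphs</title>'
    '<author><name>Smith</name></author>'
    '<author><name>Jones</name></author></paper>'
)
print(dict_from_node(node))        # {'title': 'On Graphs', 'author': [1, 1]}
print(dict_from_node(node, True))  # {'title': 'On Graphs', 'author': [{'name': 'Smith'}, {'name': 'Jones'}]}
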
10,097
|
diging/tethne
|
tethne/utilities.py
|
MLStripper.feed
|
def feed(self, data):
"""
added this check as sometimes we are getting the data in integer format instead of string
"""
try:
self.rawdata = self.rawdata + data
except TypeError:
data = unicode(data)
self.rawdata = self.rawdata + data
self.goahead(0)
|
python
|
def feed(self, data):
"""
added this check as sometimes we are getting the data in integer format instead of string
"""
try:
self.rawdata = self.rawdata + data
except TypeError:
data = unicode(data)
self.rawdata = self.rawdata + data
self.goahead(0)
|
[
"def",
"feed",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"self",
".",
"rawdata",
"=",
"self",
".",
"rawdata",
"+",
"data",
"except",
"TypeError",
":",
"data",
"=",
"unicode",
"(",
"data",
")",
"self",
".",
"rawdata",
"=",
"self",
".",
"rawdata",
"+",
"data",
"self",
".",
"goahead",
"(",
"0",
")"
] |
added this check as sometimes we are getting the data in integer format instead of string
|
[
"added",
"this",
"check",
"as",
"sometimes",
"we",
"are",
"getting",
"the",
"data",
"in",
"integer",
"format",
"instead",
"of",
"string"
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/utilities.py#L50-L60
|
10,098
|
diging/tethne
|
tethne/serialize/paper.py
|
Serialize.serializePaper
|
def serializePaper(self):
"""
This method creates a fixture for the "django-tethne_paper" model.
Returns
-------
paper_details in JSON format, which can written to a file.
"""
pid = tethnedao.getMaxPaperID();
papers_details = []
for paper in self.corpus:
pid = pid + 1
paper_key = getattr(paper, Serialize.paper_source_map[self.source])
self.paperIdMap[paper_key] = pid
paper_data = {
"model": "django-tethne.paper",
"pk": self.paperIdMap[paper_key],
"fields": {
"paper_id": paper_key,
"corpus":self.corpus_id,
"pub_date": getattr(paper, 'date', ''),
"volume": getattr(paper, 'volume', ''),
"title": getattr(paper, 'title', ''),
"abstract": getattr(paper, 'abstract', ''),
}
}
papers_details.append(paper_data)
return papers_details
|
python
|
def serializePaper(self):
"""
This method creates a fixture for the "django-tethne_paper" model.
Returns
-------
paper_details in JSON format, which can written to a file.
"""
pid = tethnedao.getMaxPaperID();
papers_details = []
for paper in self.corpus:
pid = pid + 1
paper_key = getattr(paper, Serialize.paper_source_map[self.source])
self.paperIdMap[paper_key] = pid
paper_data = {
"model": "django-tethne.paper",
"pk": self.paperIdMap[paper_key],
"fields": {
"paper_id": paper_key,
"corpus":self.corpus_id,
"pub_date": getattr(paper, 'date', ''),
"volume": getattr(paper, 'volume', ''),
"title": getattr(paper, 'title', ''),
"abstract": getattr(paper, 'abstract', ''),
}
}
papers_details.append(paper_data)
return papers_details
|
[
"def",
"serializePaper",
"(",
"self",
")",
":",
"pid",
"=",
"tethnedao",
".",
"getMaxPaperID",
"(",
")",
"papers_details",
"=",
"[",
"]",
"for",
"paper",
"in",
"self",
".",
"corpus",
":",
"pid",
"=",
"pid",
"+",
"1",
"paper_key",
"=",
"getattr",
"(",
"paper",
",",
"Serialize",
".",
"paper_source_map",
"[",
"self",
".",
"source",
"]",
")",
"self",
".",
"paperIdMap",
"[",
"paper_key",
"]",
"=",
"pid",
"paper_data",
"=",
"{",
"\"model\"",
":",
"\"django-tethne.paper\"",
",",
"\"pk\"",
":",
"self",
".",
"paperIdMap",
"[",
"paper_key",
"]",
",",
"\"fields\"",
":",
"{",
"\"paper_id\"",
":",
"paper_key",
",",
"\"corpus\"",
":",
"self",
".",
"corpus_id",
",",
"\"pub_date\"",
":",
"getattr",
"(",
"paper",
",",
"'date'",
",",
"''",
")",
",",
"\"volume\"",
":",
"getattr",
"(",
"paper",
",",
"'volume'",
",",
"''",
")",
",",
"\"title\"",
":",
"getattr",
"(",
"paper",
",",
"'title'",
",",
"''",
")",
",",
"\"abstract\"",
":",
"getattr",
"(",
"paper",
",",
"'abstract'",
",",
"''",
")",
",",
"}",
"}",
"papers_details",
".",
"append",
"(",
"paper_data",
")",
"return",
"papers_details"
] |
This method creates a fixture for the "django-tethne_paper" model.
Returns
-------
paper_details in JSON format, which can written to a file.
|
[
"This",
"method",
"creates",
"a",
"fixture",
"for",
"the",
"django",
"-",
"tethne_paper",
"model",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L108-L137
|
10,099
|
diging/tethne
|
tethne/serialize/paper.py
|
Serialize.serializeCitation
|
def serializeCitation(self):
"""
This method creates a fixture for the "django-tethne_citation" model.
Returns
-------
citation details which can be written to a file
"""
citation_details = []
citation_id = tethnedao.getMaxCitationID()
for citation in self.corpus.features['citations'].index.values():
date_match = re.search(r'(\d+)', citation)
if date_match is not None:
date = date_match.group(1)
if date_match is None:
date_match = re.search(r"NONE", citation)
date = date_match.group()
first_author = citation.replace('_', ' ').split(date)[0].rstrip()
journal = citation.replace('_', ' ').split(date)[1].lstrip()
citation_key = citation
if citation_key not in self.citationIdMap:
citation_id += 1
self.citationIdMap[citation_key] = citation_id
citation_data = {
"model": "django-tethne.citation",
"pk": citation_id,
"fields": {
"literal": citation,
"journal": journal,
"first_author": first_author,
"date": date
}
}
citation_details.append(citation_data)
return citation_details
|
python
|
def serializeCitation(self):
"""
This method creates a fixture for the "django-tethne_citation" model.
Returns
-------
citation details which can be written to a file
"""
citation_details = []
citation_id = tethnedao.getMaxCitationID()
for citation in self.corpus.features['citations'].index.values():
date_match = re.search(r'(\d+)', citation)
if date_match is not None:
date = date_match.group(1)
if date_match is None:
date_match = re.search(r"NONE", citation)
date = date_match.group()
first_author = citation.replace('_', ' ').split(date)[0].rstrip()
journal = citation.replace('_', ' ').split(date)[1].lstrip()
citation_key = citation
if citation_key not in self.citationIdMap:
citation_id += 1
self.citationIdMap[citation_key] = citation_id
citation_data = {
"model": "django-tethne.citation",
"pk": citation_id,
"fields": {
"literal": citation,
"journal": journal,
"first_author": first_author,
"date": date
}
}
citation_details.append(citation_data)
return citation_details
|
[
"def",
"serializeCitation",
"(",
"self",
")",
":",
"citation_details",
"=",
"[",
"]",
"citation_id",
"=",
"tethnedao",
".",
"getMaxCitationID",
"(",
")",
"for",
"citation",
"in",
"self",
".",
"corpus",
".",
"features",
"[",
"'citations'",
"]",
".",
"index",
".",
"values",
"(",
")",
":",
"date_match",
"=",
"re",
".",
"search",
"(",
"r'(\\d+)'",
",",
"citation",
")",
"if",
"date_match",
"is",
"not",
"None",
":",
"date",
"=",
"date_match",
".",
"group",
"(",
"1",
")",
"if",
"date_match",
"is",
"None",
":",
"date_match",
"=",
"re",
".",
"search",
"(",
"r\"NONE\"",
",",
"citation",
")",
"date",
"=",
"date_match",
".",
"group",
"(",
")",
"first_author",
"=",
"citation",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"split",
"(",
"date",
")",
"[",
"0",
"]",
".",
"rstrip",
"(",
")",
"journal",
"=",
"citation",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
".",
"split",
"(",
"date",
")",
"[",
"1",
"]",
".",
"lstrip",
"(",
")",
"citation_key",
"=",
"citation",
"if",
"citation_key",
"not",
"in",
"self",
".",
"citationIdMap",
":",
"citation_id",
"+=",
"1",
"self",
".",
"citationIdMap",
"[",
"citation_key",
"]",
"=",
"citation_id",
"citation_data",
"=",
"{",
"\"model\"",
":",
"\"django-tethne.citation\"",
",",
"\"pk\"",
":",
"citation_id",
",",
"\"fields\"",
":",
"{",
"\"literal\"",
":",
"citation",
",",
"\"journal\"",
":",
"journal",
",",
"\"first_author\"",
":",
"first_author",
",",
"\"date\"",
":",
"date",
"}",
"}",
"citation_details",
".",
"append",
"(",
"citation_data",
")",
"return",
"citation_details"
] |
This method creates a fixture for the "django-tethne_citation" model.
Returns
-------
citation details which can be written to a file
|
[
"This",
"method",
"creates",
"a",
"fixture",
"for",
"the",
"django",
"-",
"tethne_citation",
"model",
"."
] |
ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f
|
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/serialize/paper.py#L210-L246
|
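A self-contained sketch of the date/author/journal split that serializeCitation above performs on each citation literal; the literal shown is hypothetical.

import re

citation = 'SMITH_J_1999_J_INFORMETR'  # hypothetical citation literal
date_match = re.search(r'(\d+)', citation)
date = date_match.group(1) if date_match else 'NONE'
first_author = citation.replace('_', ' ').split(date)[0].rstrip()
journal = citation.replace('_', ' ').split(date)[1].lstrip()
print(date, '|', first_author, '|', journal)  # 1999 | SMITH J | J INFORMETR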