| column | type | notes |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | length 7 to 55 |
| path | string | length 4 to 127 |
| func_name | string | length 1 to 88 |
| original_string | string | length 75 to 19.8k |
| language | string | 1 class: python |
| code | string | length 75 to 19.8k (copy of original_string) |
| code_tokens | list | tokens of code |
| docstring | string | length 3 to 17.3k |
| docstring_tokens | list | tokens of docstring |
| sha | string | length 40 |
| url | string | length 87 to 242 |
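
As a minimal sketch of how rows with this schema could be consumed, assuming a JSON Lines export with these column names (the dump itself does not specify the storage format, so the filename below is hypothetical):

```python
import json

# Hypothetical file name; the dump does not say how the rows are stored.
with open("codesearchnet_python.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # In every record shown below, `code` is a verbatim copy of
        # `original_string`, and the token lists are derived from them.
        assert row["code"] == row["original_string"]
        assert row["language"] == "python"
        print(row["id"], row["repo"], row["func_name"])
```
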
id: 241,200 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: ListExportView.items | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L304-L320

```python
def items(self):
    """Get all list items"""
    query = self.get_queryset()
    fields = self.get_model_config().get_list_fields()
    for item in query.iterator():
        row = OrderedDict()
        for field_name in self.get_current_fields():
            field = fields.get(field_name)
            if not field:
                # No field config: leave the cell empty
                row[field_name] = ''
            elif hasattr(item, field['field']):
                row[field_name] = getattr(item, field['field'])
            else:
                row[field_name] = ''  # TODO Maybe render field and strip html?
        yield row
```

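The pattern is worth noting: yielding one OrderedDict per object keeps column order stable for the CSV writer downstream while never materialising the whole queryset. A minimal stand-alone sketch of the same shape, using plain objects instead of a Django queryset (all names here are illustrative):

```python
from collections import OrderedDict

def export_rows(objects, field_map, current_fields):
    """Yield one ordered row per object, blank for unknown/missing fields."""
    for obj in objects:
        row = OrderedDict()
        for name in current_fields:
            field = field_map.get(name)
            # getattr with a default covers both the missing-config
            # and missing-attribute cases from the method above
            row[name] = getattr(obj, field['field'], '') if field else ''
        yield row

class Item:
    def __init__(self, pk, title):
        self.pk, self.title = pk, title

fields = {'id': {'field': 'pk'}, 'title': {'field': 'title'}}
for row in export_rows([Item(1, 'first')], fields, ['id', 'title', 'missing']):
    print(row)  # OrderedDict([('id', 1), ('title', 'first'), ('missing', '')])
```
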
id: 241,201 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: ListExportView.csv_response | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L322-L341

```python
def csv_response(self):
    """Get csv response"""
    def stream():
        """Create data stream generator"""
        stream_file = io.StringIO()
        csvwriter = csv.writer(stream_file, delimiter=',', quotechar='"')
        csvwriter.writerow(self.get_current_fields())
        for index, item in enumerate(self.items()):
            csvwriter.writerow([value for index, value in item.items()])
            stream_file.seek(0)
            data = stream_file.read()
            stream_file.seek(0)
            stream_file.truncate()
            yield data
    response = StreamingHttpResponse(stream(), content_type="text/csv")
    response["Content-Disposition"] = "attachment; filename={}.csv".format(self.get_model_config().model_name.lower())
    return response
```

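The reuse of a single StringIO buffer (write a row, read it back, truncate, yield) is what keeps memory flat while streaming. A minimal sketch of the same pattern without Django, where the `rows` argument stands in for `self.items()`:

```python
import csv
import io

def stream_csv(fieldnames, rows):
    """Yield one CSV chunk per row, reusing a single in-memory buffer."""
    buffer = io.StringIO()
    writer = csv.writer(buffer, delimiter=',', quotechar='"')
    writer.writerow(fieldnames)
    for row in rows:
        writer.writerow([row.get(name, '') for name in fieldnames])
        buffer.seek(0)
        data = buffer.read()
        # Reset the buffer so it never grows beyond roughly one row
        buffer.seek(0)
        buffer.truncate()
        yield data

chunks = stream_csv(['id', 'name'], [{'id': 1, 'name': 'a'}])
print(''.join(chunks))
```
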
id: 241,202 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DetailTabView.get_delete_url | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L398-L404

```python
def get_delete_url(self):
    """Get model object delete url"""
    return reverse('trionyx:model-delete', kwargs={
        'app': self.get_app_label(),
        'model': self.get_model_name(),
        'pk': self.object.id
    })
```

id: 241,203 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DetailTabView.get_edit_url | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L406-L412

```python
def get_edit_url(self):
    """Get model object edit url"""
    return reverse('trionyx:model-edit', kwargs={
        'app': self.get_app_label(),
        'model': self.get_model_name(),
        'pk': self.object.id
    })
```

id: 241,204 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DetailTabView.get_model_alias | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L414-L418

```python
def get_model_alias(self):
    """Get model alias"""
    if self.model_alias:
        return self.model_alias
    return '{}.{}'.format(self.get_app_label(), self.get_model_name())
```

id: 241,205 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DetailTabJsendView.handle_request | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L442-L455

```python
def handle_request(self, request, app, model, pk):
    """Render and return tab"""
    ModelClass = self.get_model_class()
    object = ModelClass.objects.get(id=pk)
    tab_code = request.GET.get('tab')
    model_alias = request.GET.get('model_alias')
    model_alias = model_alias if model_alias else '{}.{}'.format(app, model)
    # TODO permission check
    item = tabs.get_tab(model_alias, object, tab_code)
    return item.get_layout(object).render(request)
```

id: 241,206 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: UpdateView.get_form | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L490-L499

```python
def get_form(self, form_class=None):
    """Get form for model"""
    form = super().get_form(form_class)
    if not getattr(form, 'helper', None):
        form.helper = FormHelper()
        form.helper.form_tag = False
    else:
        form.helper.form_tag = False
    return form
```

id: 241,207 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: CreateView.get_cancel_url | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L559-L568

```python
def get_cancel_url(self):
    """Get cancel url"""
    if self.cancel_url:
        return self.cancel_url
    ModelClass = self.get_model_class()
    return reverse('trionyx:model-list', kwargs={
        'app': ModelClass._meta.app_label,
        'model': ModelClass._meta.model_name,
    })
```

id: 241,208 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: CreateView.form_valid | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L582-L586

```python
def form_valid(self, form):
    """Add success message"""
    response = super().form_valid(form)
    messages.success(self.request, "Successfully created ({})".format(self.object))
    return response
```

id: 241,209 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DeleteView.get_success_url | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L622-L634

```python
def get_success_url(self):
    """Get success url"""
    messages.success(self.request, "Successfully deleted ({})".format(self.object))
    if self.success_url:
        return reverse(self.success_url)
    if 'app' in self.kwargs and 'model' in self.kwargs:
        return reverse('trionyx:model-list', kwargs={
            'app': self.kwargs.get('app'),
            'model': self.kwargs.get('model'),
        })
    return '/'
```

id: 241,210 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DialogView.get | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L682-L695

```python
def get(self, request, *args, **kwargs):
    """Handle get request"""
    try:
        kwargs = self.load_object(kwargs)
    except Exception as e:
        return self.render_te_response({
            'title': str(e),
        })
    if not self.has_permission(request):
        return self.render_te_response({
            'title': 'No access',
        })
    return self.render_te_response(self.display_dialog(*args, **kwargs))
```

id: 241,211 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DialogView.post | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L697-L710

```python
def post(self, request, *args, **kwargs):
    """Handle post request"""
    try:
        kwargs = self.load_object(kwargs)
    except Exception as e:
        return self.render_te_response({
            'title': str(e),
        })
    if not self.has_permission(request):
        return self.render_te_response({
            'title': 'No access',
        })
    return self.render_te_response(self.handle_dialog(*args, **kwargs))
```

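`get` and `post` differ only in the final callback, so the shared load/permission flow could be factored out. A hedged sketch of one way to do that inside DialogView (the `_dispatch` helper is hypothetical, not part of Trionyx):

```python
def _dispatch(self, request, handler, *args, **kwargs):
    """Shared load/permission flow for get() and post() (hypothetical helper)."""
    try:
        kwargs = self.load_object(kwargs)
    except Exception as e:
        return self.render_te_response({'title': str(e)})
    if not self.has_permission(request):
        return self.render_te_response({'title': 'No access'})
    return self.render_te_response(handler(*args, **kwargs))

def get(self, request, *args, **kwargs):
    """Handle get request"""
    return self._dispatch(request, self.display_dialog, *args, **kwargs)

def post(self, request, *args, **kwargs):
    """Handle post request"""
    return self._dispatch(request, self.handle_dialog, *args, **kwargs)
```
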
id: 241,212 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DialogView.load_object | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L712-L727

```python
def load_object(self, kwargs):
    """Load object and model config and remove pk from kwargs"""
    self.object = None
    self.config = None
    self.model = self.get_model_class()
    kwargs.pop('app', None)
    kwargs.pop('model', None)
    if self.model and kwargs.get('pk', False):
        try:
            self.object = self.model.objects.get(pk=kwargs.pop('pk'))
        except Exception:
            raise Exception("Could not load {}".format(self.model.__name__.lower()))
        setattr(self, self.model.__name__.lower(), self.object)
    return kwargs
```

id: 241,213 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DialogView.has_permission | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L729-L740

```python
def has_permission(self, request):
    """Check if user has permission"""
    if not self.object and not self.permission:
        return True
    if not self.permission:
        return request.user.has_perm('{}_{}'.format(
            self.model_permission,
            self.object.__class__.__name__.lower()), self.object
        )
    return request.user.has_perm(self.permission)
```

id: 241,214 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DialogView.render_to_string | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L742-L748

```python
def render_to_string(self, template_file, context):
    """Render given template to string and add object to context"""
    context = context if context else {}
    if self.object:
        context['object'] = self.object
        context[self.object.__class__.__name__.lower()] = self.object
    return render_to_string(template_file, context, self.request)
```

id: 241,215 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: DialogView.render_te_response | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L774-L779

```python
def render_te_response(self, data):
    """Render data to JsonResponse"""
    if 'submit_label' in data and 'url' not in data:
        data['url'] = self.request.get_full_path()
    return JsonResponse(data)
```

id: 241,216 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: UpdateDialog.display_dialog | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L802-L825

```python
def display_dialog(self, *args, **kwargs):
    """Display form and success message when set"""
    form = kwargs.pop('form_instance', None)
    success_message = kwargs.pop('success_message', None)
    if not form:
        form = self.get_form_class()(initial=kwargs, instance=self.object)
    if not hasattr(form, "helper"):
        form.helper = FormHelper()
        form.helper.form_tag = False
    return {
        'title': self.title.format(
            model_name=self.get_model_config().model_name,
            object=str(self.object) if self.object else '',
        ),
        'content': self.render_to_string(self.template, {
            'form': form,
            'success_message': success_message,
        }),
        'submit_label': self.submit_label,
        'success': bool(success_message),
    }
```

id: 241,217 | repo: krukas/Trionyx | path: trionyx/trionyx/views/core.py | func_name: UpdateDialog.handle_dialog | language: python
sha: edac132cc0797190153f2e60bc7e88cb50e80da6
url: https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/trionyx/views/core.py#L827-L838

```python
def handle_dialog(self, *args, **kwargs):
    """Handle form and save and set success message on valid form"""
    form = self.get_form_class()(self.request.POST, initial=kwargs, instance=self.object)
    success_message = None
    if form.is_valid():
        obj = form.save()
        success_message = self.success_message.format(
            model_name=self.get_model_config().model_name.capitalize(),
            object=str(obj),
        )
    return self.display_dialog(*args, form_instance=form, success_message=success_message, **kwargs)
```

id: 241,218 | repo: neshkatrapati/pypresenter | path: pypresenter/pypresenter.py | func_name: SlideDeck.get_term_size | language: python
sha: bc4cccb17523972dd60de49a34e0ed050b788ad4
url: https://github.com/neshkatrapati/pypresenter/blob/bc4cccb17523972dd60de49a34e0ed050b788ad4/pypresenter/pypresenter.py#L280-L283

```python
def get_term_size():
    '''Gets the size of your terminal. May not work everywhere. YMMV.'''
    rows, columns = os.popen('stty size', 'r').read().split()
    return int(rows), int(columns)
```

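`stty size` only works when the process is attached to a real terminal, which is why the docstring hedges. Since Python 3.3 the standard library offers a portable alternative; a drop-in sketch (not the author's code) that keeps the (rows, columns) order used above:

```python
import shutil

def get_term_size():
    """Portable terminal size with a fallback when no terminal is attached."""
    size = shutil.get_terminal_size(fallback=(80, 24))  # fallback is (columns, lines)
    # shutil reports (columns, lines); return (rows, columns) as above
    return size.lines, size.columns

print(get_term_size())
```
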
id: 241,219 | repo: hobson/pug-dj | path: pug/dj/crawlnmine/management/__init__.py | func_name: find_commands | language: python
sha: 55678b08755a55366ce18e7d3b8ea8fa4491ab04
url: https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/management/__init__.py#L23-L35

```python
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.
    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    try:
        return [f[:-3] for f in os.listdir(command_dir)
                if not f.startswith('_') and f.endswith('.py')]
    except OSError:
        return []
```

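find_commands is easy to exercise in isolation, since it only reads a directory listing. A self-contained demo with a throwaway directory, assuming the same filename convention (`__init__.py` is skipped, `.py` files become command names):

```python
import os
import tempfile

def find_commands(management_dir):
    command_dir = os.path.join(management_dir, 'commands')
    try:
        return [f[:-3] for f in os.listdir(command_dir)
                if not f.startswith('_') and f.endswith('.py')]
    except OSError:
        return []

with tempfile.TemporaryDirectory() as tmp:
    cmd_dir = os.path.join(tmp, 'commands')
    os.mkdir(cmd_dir)
    for name in ('__init__.py', 'syncdb.py', 'crawl.py'):
        open(os.path.join(cmd_dir, name), 'w').close()
    print(sorted(find_commands(tmp)))   # ['crawl', 'syncdb']
```
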
id: 241,220 | repo: hobson/pug-dj | path: pug/dj/crawlnmine/management/__init__.py | func_name: get_commands | language: python
sha: 55678b08755a55366ce18e7d3b8ea8fa4491ab04
url: https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/management/__init__.py#L49-L80

```python
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.
    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.
    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included.
    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)
    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.
    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    commands = dict((name, 'pug.crawlnmine') for name in find_commands(__path__[0]))
    if not settings.configured:
        return commands
    for app_config in reversed(list(apps.get_app_configs())):
        path = os.path.join(app_config.path, 'management')
        commands.update(dict((name, app_config.name) for name in find_commands(path)))
    return commands
```

id: 241,221 | repo: hobson/pug-dj | path: pug/dj/crawlnmine/management/__init__.py | func_name: ManagementUtility.autocomplete | language: python
sha: 55678b08755a55366ce18e7d3b8ea8fa4491ab04
url: https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/management/__init__.py#L189-L267

```python
def autocomplete(self):
    """
    Output completion suggestions for BASH.
    The output of this function is passed to BASH's `COMREPLY` variable and
    treated as completion suggestions. `COMREPLY` expects a space
    separated string as the result.
    The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
    to get information about the cli input. Please refer to the BASH
    man-page for more information about these variables.
    Subcommand options are saved as pairs. A pair consists of
    the long option string (e.g. '--exclude') and a boolean
    value indicating if the option requires arguments. When printing to
    stdout, an equal sign is appended to options which require arguments.
    Note: If debugging this function, it is recommended to write the debug
    output in a separate file. Otherwise the debug output will be treated
    and formatted as potential completion suggestions.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'DJANGO_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        curr = cwords[cword - 1]
    except IndexError:
        curr = ''
    subcommands = list(get_commands()) + ['help']
    options = [('--help', None)]
    # subcommand
    if cword == 1:
        print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
    # subcommand options
    # special case: the 'help' subcommand has no options
    elif cwords[0] in subcommands and cwords[0] != 'help':
        subcommand_cls = self.fetch_command(cwords[0])
        # special case: 'runfcgi' stores additional options as
        # 'key=value' pairs
        if cwords[0] == 'runfcgi':
            from django.core.servers.fastcgi import FASTCGI_OPTIONS
            options += [(k, 1) for k in FASTCGI_OPTIONS]
        # special case: add the names of installed apps to options
        elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
                           'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
            try:
                app_configs = apps.get_app_configs()
                # Get the last part of the dotted path as the app name.
                options += [(app_config.label, 0) for app_config in app_configs]
            except ImportError:
                # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                # user will find out once they execute the command.
                pass
        parser = subcommand_cls.create_parser('', cwords[0])
        if subcommand_cls.use_argparse:
            options += [(sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
                        parser._actions if s_opt.option_strings]
        else:
            options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                        parser.option_list]
        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [opt for opt in options if opt[0] not in prev_opts]
        # filter options by current input
        options = sorted((k, v) for k, v in options if k.startswith(curr))
        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1]:
                opt_label += '='
            print(opt_label)
    sys.exit(1)
```

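The protocol is driven entirely by two environment variables that bash sets before re-invoking the program: COMP_WORDS (the words typed so far) and COMP_CWORD (the index of the word being completed). A stripped-down sketch of the same handshake outside Django, with a hard-coded subcommand list standing in for get_commands():

```python
import os
import sys

SUBCOMMANDS = ['help', 'runserver', 'shell', 'test']

def autocomplete():
    """Print space-separated suggestions for bash, then exit (as above)."""
    if 'DJANGO_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        curr = cwords[cword - 1]
    except IndexError:
        curr = ''
    print(' '.join(sorted(c for c in SUBCOMMANDS if c.startswith(curr))))
    sys.exit(1)

# Simulate what bash would set up before calling the program:
os.environ.update(DJANGO_AUTO_COMPLETE='1', COMP_WORDS='manage.py te', COMP_CWORD='1')
autocomplete()   # prints: test (then exits with status 1)
```
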
id: 241,222 | repo: maxweisspoker/simplebitcoinfuncs | path: simplebitcoinfuncs/miscbitcoinfuncs.py | func_name: genkeyhex | language: python
sha: ad332433dfcc067e86d2e77fa0c8f1a27daffb63
url: https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/miscbitcoinfuncs.py#L38-L56

```python
def genkeyhex():
    '''
    Generate new random Bitcoin private key, using os.urandom and
    double-sha256. Hex format.
    '''
    while True:
        key = hash256(
            hexlify(os.urandom(40) + str(datetime.datetime.now())
                    .encode("utf-8")))
        # 40 bytes used instead of 32, as a buffer for any slight
        # lack of entropy in urandom
        # Double-sha256 used instead of single hash, for entropy
        # reasons as well.
        # I know, it's nit-picking, but better safe than sorry.
        if int(key, 16) > 1 and int(key, 16) < N:
            break
    return key
```

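The range check against N (the order of the secp256k1 group) is what makes the hash a valid private key. A self-contained sketch of the same idea using only hashlib, with hash256 spelled out as double-SHA256 (the helper names mirror the module's but are re-implemented here for illustration):

```python
import datetime
import hashlib
import os
from binascii import hexlify

# Order of the secp256k1 curve group
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141

def hash256(data):
    """Double-SHA256, hex digest (stand-in for the module's helper)."""
    return hashlib.sha256(hashlib.sha256(data).digest()).hexdigest()

def genkeyhex():
    while True:
        key = hash256(hexlify(os.urandom(40) +
                              str(datetime.datetime.now()).encode("utf-8")))
        # Reject the (astronomically unlikely) out-of-range values
        if 1 < int(key, 16) < N:
            return key

print(genkeyhex())
```
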
id: 241,223 | repo: maxweisspoker/simplebitcoinfuncs | path: simplebitcoinfuncs/miscbitcoinfuncs.py | func_name: genkey | language: python
sha: ad332433dfcc067e86d2e77fa0c8f1a27daffb63
url: https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/miscbitcoinfuncs.py#L59-L68

```python
def genkey(outcompressed=True, prefix='80'):
    '''
    Generate new random Bitcoin private key, using os.urandom and
    double-sha256.
    '''
    key = prefix + genkeyhex()
    if outcompressed:
        key = key + '01'
    return b58e(key)
```

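b58e is the module's Base58Check encoder (prefix 0x80 plus the optional 0x01 suffix is the standard WIF layout). For reference, a minimal sketch of Base58Check, the standard alphabet plus a 4-byte double-SHA256 checksum; this illustrates what b58e presumably does, it is not the module's implementation:

```python
import hashlib
from binascii import unhexlify

B58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def b58check_encode(hexstring):
    """Base58Check-encode a hex payload (e.g. '80' + key hex [+ '01'])."""
    raw = unhexlify(hexstring)
    checksum = hashlib.sha256(hashlib.sha256(raw).digest()).digest()[:4]
    payload = raw + checksum
    n = int.from_bytes(payload, 'big')
    out = ''
    while n:
        n, rem = divmod(n, 58)
        out = B58_ALPHABET[rem] + out
    # Each leading zero byte is encoded as a literal '1'
    pad = len(payload) - len(payload.lstrip(b'\x00'))
    return '1' * pad + out

print(b58check_encode('80' + '11' * 32 + '01'))  # a WIF-style string
```
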
id: 241,224 | repo: maxweisspoker/simplebitcoinfuncs | path: simplebitcoinfuncs/miscbitcoinfuncs.py | func_name: getandstrip_varintdata | language: python
sha: ad332433dfcc067e86d2e77fa0c8f1a27daffb63
url: https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/miscbitcoinfuncs.py#L147-L187

```python
def getandstrip_varintdata(data):
    '''
    Takes a hex string that begins with varint data, and has extra at
    the end, and gets the varint integer, strips the varint bytes, and
    returns the integer and the remaining data. So rather than having
    to manually read the varint prefix, count, and strip, you can do
    it in one function. This function will return a tuple of the data
    and the leftover.
    For example, let's say you are parsing a transaction from
    beginning to end, and you know the next byte is a varint byte.
    Here's an example:
    fd5d010048304502200187af928e9d155c4b1ac9c1c9118153239aba76774f77
    5d7c1f9c3e106ff33c0221008822b0f658edec22274d0b6ae9de10ebf2da06b1
    bbdaaba4e50eb078f39e3d78014730440220795f0f4f5941a77ae032ecb9e337
    53788d7eb5cb0c78d805575d6b00a1d9bfed02203e1f4ad9332d1416ae01e270
    38e945bc9db59c732728a383a6f1ed2fb99da7a4014cc952410491bba2510912
    a5bd37da1fb5b1673010e43d2c6d812c514e91bfa9f2eb129e1c183329db55bd
    868e209aac2fbc02cb33d98fe74bf23f0c235d6126b1d8334f864104865c4029
    3a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac09ef122b1
    a986818a7cb624532f062c1d1f8722084861c5c3291ccffef4ec687441048d24
    55d2403e08708fc1f556002f1b6cd83f992d085097f9974ab08a28838f07896f
    bab08f39495e15fa6fad6edbfb1e754e35fa1c7844c41f322a1863d4621353ae
    ffffffff0140420f00000000001976a914ae56b4db13554d321c402db3961187
    aed1bbed5b88ac00000000
    If the above tx fragment is input as a single long string with no
    white-space, this function will return the tuple:
    ('004830...53ae', 'ffffffff...00000000')
    See _doctester.py for that example in action.
    '''
    data = strlify(data)
    numbytes = numvarintbytes(data[:2])
    varint = data[:2*numbytes]
    data = data[2*numbytes:]
    tostrip = fromvarint(varint) * 2
    return data[:tostrip], data[tostrip:]
```

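For reference, Bitcoin's variable-length integer prefix works like this: a first byte below 0xfd is the value itself, while 0xfd, 0xfe and 0xff mean the value follows in the next 2, 4 or 8 little-endian bytes. numvarintbytes and fromvarint are the module's helpers; this stand-in sketch reimplements the rule and decodes the prefix from the docstring's example:

```python
def decode_varint(hexstring):
    """Return (value, hex chars consumed) for a leading Bitcoin varint."""
    first = int(hexstring[:2], 16)
    if first < 0xfd:
        return first, 2
    nbytes = {0xfd: 2, 0xfe: 4, 0xff: 8}[first]
    payload = bytes.fromhex(hexstring[2:2 + 2 * nbytes])
    return int.from_bytes(payload, 'little'), 2 + 2 * nbytes

# 'fd5d01' -> 0x015d = 349 bytes of script data follow (698 hex chars)
print(decode_varint('fd5d010048'))   # (349, 6)
```
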
241,225
|
maxweisspoker/simplebitcoinfuncs
|
simplebitcoinfuncs/miscbitcoinfuncs.py
|
LEB128toint
|
def LEB128toint(LEBinput):
'''
Convert unsigned LEB128 hex to integer
'''
reversedbytes = hexreverse(LEBinput)
binstr = ""
for i in range(len(LEBinput) // 2):
if i == 0:
assert int(reversedbytes[2*i:(2*i + 2)],16) < 128
else:
assert int(reversedbytes[2*i:(2*i + 2)],16) >= 128
tempbin = str(bin(int(reversedbytes[2*i:(2*i + 2)],16))) \
.lstrip("0b").replace("b","").replace("L","") \
.replace("'","").replace('"',"") \
.zfill(8)
binstr += tempbin[1:]
return int(binstr,2)
|
python
|
def LEB128toint(LEBinput):
'''
Convert unsigned LEB128 hex to integer
'''
reversedbytes = hexreverse(LEBinput)
binstr = ""
for i in range(len(LEBinput) // 2):
if i == 0:
assert int(reversedbytes[2*i:(2*i + 2)],16) < 128
else:
assert int(reversedbytes[2*i:(2*i + 2)],16) >= 128
tempbin = str(bin(int(reversedbytes[2*i:(2*i + 2)],16))) \
.lstrip("0b").replace("b","").replace("L","") \
.replace("'","").replace('"',"") \
.zfill(8)
binstr += tempbin[1:]
return int(binstr,2)
|
[
"def",
"LEB128toint",
"(",
"LEBinput",
")",
":",
"reversedbytes",
"=",
"hexreverse",
"(",
"LEBinput",
")",
"binstr",
"=",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"LEBinput",
")",
"//",
"2",
")",
":",
"if",
"i",
"==",
"0",
":",
"assert",
"int",
"(",
"reversedbytes",
"[",
"2",
"*",
"i",
":",
"(",
"2",
"*",
"i",
"+",
"2",
")",
"]",
",",
"16",
")",
"<",
"128",
"else",
":",
"assert",
"int",
"(",
"reversedbytes",
"[",
"2",
"*",
"i",
":",
"(",
"2",
"*",
"i",
"+",
"2",
")",
"]",
",",
"16",
")",
">=",
"128",
"tempbin",
"=",
"str",
"(",
"bin",
"(",
"int",
"(",
"reversedbytes",
"[",
"2",
"*",
"i",
":",
"(",
"2",
"*",
"i",
"+",
"2",
")",
"]",
",",
"16",
")",
")",
")",
".",
"lstrip",
"(",
"\"0b\"",
")",
".",
"replace",
"(",
"\"b\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"L\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"'\"'",
",",
"\"\"",
")",
".",
"zfill",
"(",
"8",
")",
"binstr",
"+=",
"tempbin",
"[",
"1",
":",
"]",
"return",
"int",
"(",
"binstr",
",",
"2",
")"
] |
Convert unsigned LEB128 hex to integer
|
[
"Convert",
"unsigned",
"LEB128",
"hex",
"to",
"integer"
] |
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
|
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/miscbitcoinfuncs.py#L226-L243
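The bin()/string round-trip above works, but unsigned LEB128 also decodes arithmetically in a few lines; a hedged standalone equivalent (it skips the continuation-bit assertions the original performs):
def leb128_to_int(hexstr):
    result, shift = 0, 0
    for byte in bytes.fromhex(hexstr):
        result |= (byte & 0x7f) << shift  # low 7 bits, least-significant group first
        shift += 7
    return result

assert leb128_to_int('e58e26') == 624485  # the classic LEB128 example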
|
241,226
|
Nekroze/librarian
|
librarian/card.py
|
Card.add_attribute
|
def add_attribute(self, attribute):
"""
Add the given attribute to this Card. Returns the length of
attributes after addition.
"""
self.attributes.append(attribute)
return len(self.attributes)
|
python
|
def add_attribute(self, attribute):
"""
Add the given attribute to this Card. Returns the length of
attributes after addition.
"""
self.attributes.append(attribute)
return len(self.attributes)
|
[
"def",
"add_attribute",
"(",
"self",
",",
"attribute",
")",
":",
"self",
".",
"attributes",
".",
"append",
"(",
"attribute",
")",
"return",
"len",
"(",
"self",
".",
"attributes",
")"
] |
Add the given attribute to this Card. Returns the length of
attributes after addition.
|
[
"Add",
"the",
"given",
"attribute",
"to",
"this",
"Card",
".",
"Returns",
"the",
"length",
"of",
"attributes",
"after",
"addition",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/card.py#L42-L48
|
241,227
|
Nekroze/librarian
|
librarian/card.py
|
Card.add_ability
|
def add_ability(self, phase, ability):
"""Add the given ability to this Card under the given phase. Returns
the length of the abilities for the given phase after the addition.
"""
if phase not in self.abilities:
self.abilities[phase] = []
self.abilities[phase].append(ability)
return len(self.abilities[phase])
|
python
|
def add_ability(self, phase, ability):
"""Add the given ability to this Card under the given phase. Returns
the length of the abilities for the given phase after the addition.
"""
if phase not in self.abilities:
self.abilities[phase] = []
self.abilities[phase].append(ability)
return len(self.abilities[phase])
|
[
"def",
"add_ability",
"(",
"self",
",",
"phase",
",",
"ability",
")",
":",
"if",
"phase",
"not",
"in",
"self",
".",
"abilities",
":",
"self",
".",
"abilities",
"[",
"phase",
"]",
"=",
"[",
"]",
"self",
".",
"abilities",
"[",
"phase",
"]",
".",
"append",
"(",
"ability",
")",
"return",
"len",
"(",
"self",
".",
"abilities",
"[",
"phase",
"]",
")"
] |
Add the given ability to this Card under the given phase. Returns
the length of the abilities for the given phase after the addition.
|
[
"Add",
"the",
"given",
"ability",
"to",
"this",
"Card",
"under",
"the",
"given",
"phase",
".",
"Returns",
"the",
"length",
"of",
"the",
"abilities",
"for",
"the",
"given",
"phase",
"after",
"the",
"addition",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/card.py#L54-L61
|
241,228
|
Nekroze/librarian
|
librarian/card.py
|
Card.set_info
|
def set_info(self, key, value, append=True):
"""
Set any special info you wish to the given key. Each info is stored in
a list and will be appended to rather than overridden unless append is
False.
"""
if append:
if key not in self.info:
self.info[key] = []
self.info[key].append(value)
else:
self.info[key] = value
|
python
|
def set_info(self, key, value, append=True):
"""
Set any special info you wish to the given key. Each info is stored in
a list and will be appended to rather than overridden unless append is
False.
"""
if append:
if key not in self.info:
self.info[key] = []
self.info[key].append(value)
else:
self.info[key] = value
|
[
"def",
"set_info",
"(",
"self",
",",
"key",
",",
"value",
",",
"append",
"=",
"True",
")",
":",
"if",
"append",
":",
"if",
"key",
"not",
"in",
"self",
".",
"info",
":",
"self",
".",
"info",
"[",
"key",
"]",
"=",
"[",
"]",
"self",
".",
"info",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"else",
":",
"self",
".",
"info",
"[",
"key",
"]",
"=",
"value"
] |
Set any special info you wish to the given key. Each info is stored in
a list and will be appended to rather than overridden unless append is
False.
|
[
"Set",
"any",
"special",
"info",
"you",
"wish",
"to",
"the",
"given",
"key",
".",
"Each",
"info",
"is",
"stored",
"in",
"a",
"list",
"and",
"will",
"be",
"appended",
"to",
"rather",
"than",
"overridden",
"unless",
"append",
"is",
"False",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/card.py#L67-L78
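The append semantics in a quick illustration (card stands in for any Card instance):
card.set_info('tags', 'rare')                    # info['tags'] == ['rare']
card.set_info('tags', 'foil')                    # info['tags'] == ['rare', 'foil']
card.set_info('tags', ['promo'], append=False)   # replaces the list outright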
|
241,229
|
Nekroze/librarian
|
librarian/card.py
|
Card.save
|
def save(self):
"""
Converts the Card as is into a dictionary capable of reconstructing the
card with ``Card.load`` or serialized to a string for storage.
"""
return dict(code=self.code, name=self.name, abilities=self.abilities,
attributes=self.attributes, info=self.info)
|
python
|
def save(self):
"""
Converts the Card as is into a dictionary capable of reconstructing the
card with ``Card.load`` or serialized to a string for storage.
"""
return dict(code=self.code, name=self.name, abilities=self.abilities,
attributes=self.attributes, info=self.info)
|
[
"def",
"save",
"(",
"self",
")",
":",
"return",
"dict",
"(",
"code",
"=",
"self",
".",
"code",
",",
"name",
"=",
"self",
".",
"name",
",",
"abilities",
"=",
"self",
".",
"abilities",
",",
"attributes",
"=",
"self",
".",
"attributes",
",",
"info",
"=",
"self",
".",
"info",
")"
] |
Converts the Card as is into a dictionary capable of reconstructing the
card with ``Card.load`` or serialized to a string for storage.
|
[
"Converts",
"the",
"Card",
"as",
"is",
"into",
"a",
"dictionary",
"capable",
"of",
"reconstructing",
"the",
"card",
"with",
"Card",
".",
"load",
"or",
"serialized",
"to",
"a",
"string",
"for",
"storage",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/card.py#L80-L86
|
241,230
|
Nekroze/librarian
|
librarian/card.py
|
Card.load
|
def load(self, carddict):
"""
Takes a carddict as produced by ``Card.save`` and sets this card
instances information to the previously saved cards information.
"""
self.code = carddict["code"]
if isinstance(self.code, text_type):
self.code = eval(self.code)
self.name = carddict["name"]
self.abilities = carddict["abilities"]
if isinstance(self.abilities, text_type):
self.abilities = eval(self.abilities)
self.attributes = carddict["attributes"]
if isinstance(self.attributes, text_type):
self.attributes = eval(self.attributes)
self.info = carddict["info"]
if isinstance(self.info, text_type):
self.info = eval(self.info)
return self
|
python
|
def load(self, carddict):
"""
Takes a carddict as produced by ``Card.save`` and sets this card
instances information to the previously saved cards information.
"""
self.code = carddict["code"]
if isinstance(self.code, text_type):
self.code = eval(self.code)
self.name = carddict["name"]
self.abilities = carddict["abilities"]
if isinstance(self.abilities, text_type):
self.abilities = eval(self.abilities)
self.attributes = carddict["attributes"]
if isinstance(self.attributes, text_type):
self.attributes = eval(self.attributes)
self.info = carddict["info"]
if isinstance(self.info, text_type):
self.info = eval(self.info)
return self
|
[
"def",
"load",
"(",
"self",
",",
"carddict",
")",
":",
"self",
".",
"code",
"=",
"carddict",
"[",
"\"code\"",
"]",
"if",
"isinstance",
"(",
"self",
".",
"code",
",",
"text_type",
")",
":",
"self",
".",
"code",
"=",
"eval",
"(",
"self",
".",
"code",
")",
"self",
".",
"name",
"=",
"carddict",
"[",
"\"name\"",
"]",
"self",
".",
"abilities",
"=",
"carddict",
"[",
"\"abilities\"",
"]",
"if",
"isinstance",
"(",
"self",
".",
"abilities",
",",
"text_type",
")",
":",
"self",
".",
"abilities",
"=",
"eval",
"(",
"self",
".",
"abilities",
")",
"self",
".",
"attributes",
"=",
"carddict",
"[",
"\"attributes\"",
"]",
"if",
"isinstance",
"(",
"self",
".",
"attributes",
",",
"text_type",
")",
":",
"self",
".",
"attributes",
"=",
"eval",
"(",
"self",
".",
"attributes",
")",
"self",
".",
"info",
"=",
"carddict",
"[",
"\"info\"",
"]",
"if",
"isinstance",
"(",
"self",
".",
"info",
",",
"text_type",
")",
":",
"self",
".",
"info",
"=",
"eval",
"(",
"self",
".",
"info",
")",
"return",
"self"
] |
Takes a carddict as produced by ``Card.save`` and sets this card
instances information to the previously saved cards information.
|
[
"Takes",
"a",
"carddict",
"as",
"produced",
"by",
"Card",
".",
"save",
"and",
"sets",
"this",
"card",
"instances",
"information",
"to",
"the",
"previously",
"saved",
"cards",
"information",
"."
] |
5d3da2980d91a637f80ad7164fbf204a2dd2bd58
|
https://github.com/Nekroze/librarian/blob/5d3da2980d91a637f80ad7164fbf204a2dd2bd58/librarian/card.py#L88-L106
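Note that eval() here will execute arbitrary expressions found in the saved dict. A hedged, safer variant of the same string-to-object step (not part of the library) is ast.literal_eval, which only accepts Python literals such as lists, dicts, strings, and numbers:
import ast

def _parse_field(value):
    # Raises ValueError/SyntaxError on anything that is not a plain literal
    return ast.literal_eval(value) if isinstance(value, str) else value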
|
241,231
|
andreycizov/python-xrpc
|
xrpc/examples/exceptional.py
|
Exceptional.ep
|
def ep(self, exc: Exception) -> bool:
"""Return False if the exception had not been handled gracefully"""
if not isinstance(exc, ConnectionAbortedError):
return False
if len(exc.args) != 2:
return False
origin, reason = exc.args
logging.getLogger(__name__).warning('Exited')
return True
|
python
|
def ep(self, exc: Exception) -> bool:
"""Return False if the exception had not been handled gracefully"""
if not isinstance(exc, ConnectionAbortedError):
return False
if len(exc.args) != 2:
return False
origin, reason = exc.args
logging.getLogger(__name__).warning('Exited')
return True
|
[
"def",
"ep",
"(",
"self",
",",
"exc",
":",
"Exception",
")",
"->",
"bool",
":",
"if",
"not",
"isinstance",
"(",
"exc",
",",
"ConnectionAbortedError",
")",
":",
"return",
"False",
"if",
"len",
"(",
"exc",
".",
"args",
")",
"!=",
"2",
":",
"return",
"False",
"origin",
",",
"reason",
"=",
"exc",
".",
"args",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
".",
"warning",
"(",
"'Exited'",
")",
"return",
"True"
] |
Return False if the exception had not been handled gracefully
|
[
"Return",
"False",
"if",
"the",
"exception",
"had",
"not",
"been",
"handled",
"gracefully"
] |
4f916383cda7de3272962f3ba07a64f7ec451098
|
https://github.com/andreycizov/python-xrpc/blob/4f916383cda7de3272962f3ba07a64f7ec451098/xrpc/examples/exceptional.py#L19-L31
|
241,232
|
edeposit/edeposit.amqp.ltp
|
src/edeposit/amqp/ltp/ltp.py
|
_get_package_name
|
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):
"""
Return package path. Use uuid to generate package's directory name.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Returns:
str: Path to the root directory.
"""
if book_id is None:
book_id = str(uuid.uuid4())
return os.path.join(prefix, book_id)
|
python
|
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None):
"""
Return package path. Use uuid to generate package's directory name.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Returns:
str: Path to the root directory.
"""
if book_id is None:
book_id = str(uuid.uuid4())
return os.path.join(prefix, book_id)
|
[
"def",
"_get_package_name",
"(",
"prefix",
"=",
"settings",
".",
"TEMP_DIR",
",",
"book_id",
"=",
"None",
")",
":",
"if",
"book_id",
"is",
"None",
":",
"book_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"book_id",
")"
] |
Return package path. Use uuid to generate package's directory name.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Returns:
str: Path to the root directory.
|
[
"Return",
"package",
"path",
".",
"Use",
"uuid",
"to",
"generate",
"package",
"s",
"directory",
"name",
"."
] |
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
|
https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/ltp.py#L22-L37
|
241,233
|
edeposit/edeposit.amqp.ltp
|
src/edeposit/amqp/ltp/ltp.py
|
_create_package_hierarchy
|
def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):
"""
Create hierarchy of directories, as it is required in the specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir
"""
root_dir = _get_package_name(book_id=book_id, prefix=prefix)
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
os.mkdir(root_dir)
original_dir = os.path.join(root_dir, "original")
metadata_dir = os.path.join(root_dir, "metadata")
os.mkdir(original_dir)
os.mkdir(metadata_dir)
return root_dir, original_dir, metadata_dir
|
python
|
def _create_package_hierarchy(prefix=settings.TEMP_DIR, book_id=None):
"""
Create hierarchy of directories, as it is required in the specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir
"""
root_dir = _get_package_name(book_id=book_id, prefix=prefix)
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
os.mkdir(root_dir)
original_dir = os.path.join(root_dir, "original")
metadata_dir = os.path.join(root_dir, "metadata")
os.mkdir(original_dir)
os.mkdir(metadata_dir)
return root_dir, original_dir, metadata_dir
|
[
"def",
"_create_package_hierarchy",
"(",
"prefix",
"=",
"settings",
".",
"TEMP_DIR",
",",
"book_id",
"=",
"None",
")",
":",
"root_dir",
"=",
"_get_package_name",
"(",
"book_id",
"=",
"book_id",
",",
"prefix",
"=",
"prefix",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"root_dir",
")",
":",
"shutil",
".",
"rmtree",
"(",
"root_dir",
")",
"os",
".",
"mkdir",
"(",
"root_dir",
")",
"original_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"original\"",
")",
"metadata_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"metadata\"",
")",
"os",
".",
"mkdir",
"(",
"original_dir",
")",
"os",
".",
"mkdir",
"(",
"metadata_dir",
")",
"return",
"root_dir",
",",
"original_dir",
",",
"metadata_dir"
] |
Create hierarchy of directories, as it is required in the specification.
`root_dir` is root of the package generated using :attr:`settings.TEMP_DIR`
and :func:`_get_package_name`.
`orig_dir` is path to the directory, where the data files are stored.
`metadata_dir` is path to the directory with MODS metadata.
Args:
book_id (str, default None): UUID of the book.
prefix (str, default settings.TEMP_DIR): Where the package will be
stored. Default :attr:`settings.TEMP_DIR`.
Warning:
If the `root_dir` exists, it is REMOVED!
Returns:
list of str: root_dir, orig_dir, metadata_dir
|
[
"Create",
"hierarchy",
"of",
"directories",
"as",
"it",
"is",
"required",
"in",
"the",
"specification",
"."
] |
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
|
https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/ltp.py#L40-L75
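A hypothetical usage sketch (the book id and payload below are placeholders, not values from the project):
import os

ebook_bytes = b'(placeholder payload)'
root_dir, orig_dir, meta_dir = _create_package_hierarchy(
    prefix='/tmp',
    book_id='5f0e4a6e-example',
)
with open(os.path.join(orig_dir, 'book.epub'), 'wb') as f:
    f.write(ebook_bytes)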
|
241,234
|
edeposit/edeposit.amqp.ltp
|
src/edeposit/amqp/ltp/ltp.py
|
create_ltp_package
|
def create_ltp_package(aleph_record, book_id, ebook_fn, data, url,
urn_nbn=None):
"""
Create LTP package as it is specified in specification v1.0 as I understand
it.
Args:
aleph_record (str): XML containing full aleph record.
book_id (str): UUID of the book.
ebook_fn (str): Original filename of the ebook.
data (str/bytes): Ebook's content.
url (str): URL of the publication used when the URL can't be found in
`aleph_record`.
urn_nbn (str, default None): URN:NBN.
Returns:
str: Name of the package's directory in ``/tmp``.
"""
root_dir, orig_dir, meta_dir = _create_package_hierarchy(book_id=book_id)
# create original file
original_fn = os.path.join(
orig_dir,
fn_composers.original_fn(book_id, ebook_fn)
)
with open(original_fn, "wb") as f:
f.write(data)
# create metadata files
metadata_filenames = []
records = marcxml2mods(marc_xml=aleph_record, uuid=book_id, url=url)
for cnt, mods_record in enumerate(records):
fn = os.path.join(
meta_dir,
fn_composers.volume_fn(cnt)
)
with open(fn, "w") as f:
f.write(mods_record)
metadata_filenames.append(fn)
# collect md5 sums
md5_fn = os.path.join(root_dir, fn_composers.checksum_fn(book_id))
checksums = checksum_generator.generate_hashfile(root_dir)
with open(md5_fn, "w") as f:
f.write(checksums)
# create info file
info_fn = os.path.join(root_dir, fn_composers.info_fn(book_id))
with open(info_fn, "w") as f:
f.write(
info_composer.compose_info(
root_dir=root_dir,
files=[original_fn] + metadata_filenames,
hash_fn=md5_fn,
aleph_record=aleph_record,
urn_nbn=urn_nbn,
)
)
return root_dir
|
python
|
def create_ltp_package(aleph_record, book_id, ebook_fn, data, url,
urn_nbn=None):
"""
Create LTP package as it is specified in specification v1.0 as I understand
it.
Args:
aleph_record (str): XML containing full aleph record.
book_id (str): UUID of the book.
ebook_fn (str): Original filename of the ebook.
data (str/bytes): Ebook's content.
url (str): URL of the publication used when the URL can't be found in
`aleph_record`.
urn_nbn (str, default None): URN:NBN.
Returns:
str: Name of the package's directory in ``/tmp``.
"""
root_dir, orig_dir, meta_dir = _create_package_hierarchy(book_id=book_id)
# create original file
original_fn = os.path.join(
orig_dir,
fn_composers.original_fn(book_id, ebook_fn)
)
with open(original_fn, "wb") as f:
f.write(data)
# create metadata files
metadata_filenames = []
records = marcxml2mods(marc_xml=aleph_record, uuid=book_id, url=url)
for cnt, mods_record in enumerate(records):
fn = os.path.join(
meta_dir,
fn_composers.volume_fn(cnt)
)
with open(fn, "w") as f:
f.write(mods_record)
metadata_filenames.append(fn)
# collect md5 sums
md5_fn = os.path.join(root_dir, fn_composers.checksum_fn(book_id))
checksums = checksum_generator.generate_hashfile(root_dir)
with open(md5_fn, "w") as f:
f.write(checksums)
# create info file
info_fn = os.path.join(root_dir, fn_composers.info_fn(book_id))
with open(info_fn, "w") as f:
f.write(
info_composer.compose_info(
root_dir=root_dir,
files=[original_fn] + metadata_filenames,
hash_fn=md5_fn,
aleph_record=aleph_record,
urn_nbn=urn_nbn,
)
)
return root_dir
|
[
"def",
"create_ltp_package",
"(",
"aleph_record",
",",
"book_id",
",",
"ebook_fn",
",",
"data",
",",
"url",
",",
"urn_nbn",
"=",
"None",
")",
":",
"root_dir",
",",
"orig_dir",
",",
"meta_dir",
"=",
"_create_package_hierarchy",
"(",
"book_id",
"=",
"book_id",
")",
"# create original file",
"original_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"orig_dir",
",",
"fn_composers",
".",
"original_fn",
"(",
"book_id",
",",
"ebook_fn",
")",
")",
"with",
"open",
"(",
"original_fn",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
")",
"# create metadata files",
"metadata_filenames",
"=",
"[",
"]",
"records",
"=",
"marcxml2mods",
"(",
"marc_xml",
"=",
"aleph_record",
",",
"uuid",
"=",
"book_id",
",",
"url",
"=",
"url",
")",
"for",
"cnt",
",",
"mods_record",
"in",
"enumerate",
"(",
"records",
")",
":",
"fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"meta_dir",
",",
"fn_composers",
".",
"volume_fn",
"(",
"cnt",
")",
")",
"with",
"open",
"(",
"fn",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"mods_record",
")",
"metadata_filenames",
".",
"append",
"(",
"fn",
")",
"# collect md5 sums",
"md5_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"fn_composers",
".",
"checksum_fn",
"(",
"book_id",
")",
")",
"checksums",
"=",
"checksum_generator",
".",
"generate_hashfile",
"(",
"root_dir",
")",
"with",
"open",
"(",
"md5_fn",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"checksums",
")",
"# create info file",
"info_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"fn_composers",
".",
"info_fn",
"(",
"book_id",
")",
")",
"with",
"open",
"(",
"info_fn",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"info_composer",
".",
"compose_info",
"(",
"root_dir",
"=",
"root_dir",
",",
"files",
"=",
"[",
"original_fn",
"]",
"+",
"metadata_filenames",
",",
"hash_fn",
"=",
"md5_fn",
",",
"aleph_record",
"=",
"aleph_record",
",",
"urn_nbn",
"=",
"urn_nbn",
",",
")",
")",
"return",
"root_dir"
] |
Create LTP package as it is specified in specification v1.0 as I understand
it.
Args:
aleph_record (str): XML containing full aleph record.
book_id (str): UUID of the book.
ebook_fn (str): Original filename of the ebook.
data (str/bytes): Ebook's content.
url (str): URL of the publication used when the URL can't be found in
`aleph_record`.
urn_nbn (str, default None): URN:NBN.
Returns:
str: Name of the package's directory in ``/tmp``.
|
[
"Create",
"LTP",
"package",
"as",
"it",
"is",
"specified",
"in",
"specification",
"v1",
".",
"0",
"as",
"I",
"understand",
"it",
"."
] |
df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e
|
https://github.com/edeposit/edeposit.amqp.ltp/blob/df9ac7ec6cbdbeaaeed438ca66df75ea967b6d8e/src/edeposit/amqp/ltp/ltp.py#L78-L139
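A hypothetical call sketch; aleph_xml and ebook_bytes stand in for a real Aleph record and ebook payload:
import uuid

aleph_xml = '<record>(placeholder Aleph XML)</record>'
ebook_bytes = b'(placeholder payload)'
package_dir = create_ltp_package(
    aleph_record=aleph_xml,
    book_id=str(uuid.uuid4()),
    ebook_fn='book.epub',
    data=ebook_bytes,
    url='http://example.com/book',
)
print(package_dir)  # e.g. the package directory under /tmp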
|
241,235
|
ArtoLabs/SimpleSteem
|
simplesteem/steemconnectutil.py
|
SteemConnect.steemconnect
|
def steemconnect(self, accesstoken=None):
''' Initializes the SteemConnect Client
class
'''
if self.sc is not None:
return self.sc
if accesstoken is not None:
self.accesstoken = accesstoken
if self.accesstoken is None:
self.sc = Client(client_id=self.client_id,
client_secret=self.client_secret)
else:
self.sc = Client(access_token=self.accesstoken,
client_id=self.client_id,
client_secret=self.client_secret)
return self.sc
|
python
|
def steemconnect(self, accesstoken=None):
''' Initializes the SteemConnect Client
class
'''
if self.sc is not None:
return self.sc
if accesstoken is not None:
self.accesstoken = accesstoken
if self.accesstoken is None:
self.sc = Client(client_id=self.client_id,
client_secret=self.client_secret)
else:
self.sc = Client(access_token=self.accesstoken,
client_id=self.client_id,
client_secret=self.client_secret)
return self.sc
|
[
"def",
"steemconnect",
"(",
"self",
",",
"accesstoken",
"=",
"None",
")",
":",
"if",
"self",
".",
"sc",
"is",
"not",
"None",
":",
"return",
"self",
".",
"sc",
"if",
"accesstoken",
"is",
"not",
"None",
":",
"self",
".",
"accesstoken",
"=",
"accesstoken",
"if",
"self",
".",
"accesstoken",
"is",
"None",
":",
"self",
".",
"sc",
"=",
"Client",
"(",
"client_id",
"=",
"self",
".",
"client_id",
",",
"client_secret",
"=",
"self",
".",
"client_secret",
")",
"else",
":",
"self",
".",
"sc",
"=",
"Client",
"(",
"access_token",
"=",
"self",
".",
"accesstoken",
",",
"client_id",
"=",
"self",
".",
"client_id",
",",
"client_secret",
"=",
"self",
".",
"client_secret",
")",
"return",
"self",
".",
"sc"
] |
Initializes the SteemConnect Client
class
|
[
"Initializes",
"the",
"SteemConnect",
"Client",
"class"
] |
ce8be0ae81f8878b460bc156693f1957f7dd34a3
|
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/steemconnectutil.py#L24-L39
|
241,236
|
ArtoLabs/SimpleSteem
|
simplesteem/steemconnectutil.py
|
SteemConnect.get_token
|
def get_token(self, code=None):
''' Uses a SteemConnect refresh token
to retrieve an access token
'''
tokenobj = self.steemconnect().get_access_token(code)
for t in tokenobj:
if t == 'error':
self.msg.error_message(str(tokenobj[t]))
return False
elif t == 'access_token':
self.username = tokenobj['username']
self.refresh_token = tokenobj['refresh_token']
return tokenobj[t]
|
python
|
def get_token(self, code=None):
''' Uses a SteemConnect refresh token
to retrieve an access token
'''
tokenobj = self.steemconnect().get_access_token(code)
for t in tokenobj:
if t == 'error':
self.msg.error_message(str(tokenobj[t]))
return False
elif t == 'access_token':
self.username = tokenobj['username']
self.refresh_token = tokenobj['refresh_token']
return tokenobj[t]
|
[
"def",
"get_token",
"(",
"self",
",",
"code",
"=",
"None",
")",
":",
"tokenobj",
"=",
"self",
".",
"steemconnect",
"(",
")",
".",
"get_access_token",
"(",
"code",
")",
"for",
"t",
"in",
"tokenobj",
":",
"if",
"t",
"==",
"'error'",
":",
"self",
".",
"msg",
".",
"error_message",
"(",
"str",
"(",
"tokenobj",
"[",
"t",
"]",
")",
")",
"return",
"False",
"elif",
"t",
"==",
"'access_token'",
":",
"self",
".",
"username",
"=",
"tokenobj",
"[",
"'username'",
"]",
"self",
".",
"refresh_token",
"=",
"tokenobj",
"[",
"'refresh_token'",
"]",
"return",
"tokenobj",
"[",
"t",
"]"
] |
Uses a SteemConnect refresh token
to retrieve an access token
|
[
"Uses",
"a",
"SteemConnect",
"refresh",
"token",
"to",
"retrieve",
"an",
"access",
"token"
] |
ce8be0ae81f8878b460bc156693f1957f7dd34a3
|
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/steemconnectutil.py#L42-L54
|
241,237
|
ArtoLabs/SimpleSteem
|
simplesteem/steemconnectutil.py
|
SteemConnect.vote
|
def vote(self, voter, author, permlink, voteweight):
''' Uses a SteemConnect access token
to vote.
'''
vote = Vote(voter, author, permlink, voteweight)
result = self.steemconnect().broadcast(
[vote.to_operation_structure()])
return result
|
python
|
def vote(self, voter, author, permlink, voteweight):
''' Uses a SteemConnect access token
to vote.
'''
vote = Vote(voter, author, permlink, voteweight)
result = self.steemconnect().broadcast(
[vote.to_operation_structure()])
return result
|
[
"def",
"vote",
"(",
"self",
",",
"voter",
",",
"author",
",",
"permlink",
",",
"voteweight",
")",
":",
"vote",
"=",
"Vote",
"(",
"voter",
",",
"author",
",",
"permlink",
",",
"voteweight",
")",
"result",
"=",
"self",
".",
"steemconnect",
"(",
")",
".",
"broadcast",
"(",
"[",
"vote",
".",
"to_operation_structure",
"(",
")",
"]",
")",
"return",
"result"
] |
Uses a SteemConnect access token
to vote.
|
[
"Uses",
"a",
"SteemConnect",
"access",
"token",
"to",
"vote",
"."
] |
ce8be0ae81f8878b460bc156693f1957f7dd34a3
|
https://github.com/ArtoLabs/SimpleSteem/blob/ce8be0ae81f8878b460bc156693f1957f7dd34a3/simplesteem/steemconnectutil.py#L67-L74
|
241,238
|
the01/python-paps
|
paps/si/sensorClientAdapter.py
|
SensorClientAdapter.on_person_new
|
def on_person_new(self, people):
"""
Add new people
All people supported need to be added simultaneously,
since on every call an unjoin() followed by a join() is issued
:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.on_person_leave([])
except:
# Already caught and logged
pass
try:
self.sensor_client.join(people)
except:
self.exception("Failed to join audience")
raise Exception("Joining audience failed")
|
python
|
def on_person_new(self, people):
"""
Add new people
All people supported need to be added simultaneously,
since on every call an unjoin() followed by a join() is issued
:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.on_person_leave([])
except:
# Already caught and logged
pass
try:
self.sensor_client.join(people)
except:
self.exception("Failed to join audience")
raise Exception("Joining audience failed")
|
[
"def",
"on_person_new",
"(",
"self",
",",
"people",
")",
":",
"try",
":",
"self",
".",
"on_person_leave",
"(",
"[",
"]",
")",
"except",
":",
"# Already caught and logged",
"pass",
"try",
":",
"self",
".",
"sensor_client",
".",
"join",
"(",
"people",
")",
"except",
":",
"self",
".",
"exception",
"(",
"\"Failed to join audience\"",
")",
"raise",
"Exception",
"(",
"\"Joining audience failed\"",
")"
] |
Add new people
All people supported need to be added simultaneously,
since on every call an unjoin() followed by a join() is issued
:param people: People to add
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
|
[
"Add",
"new",
"people"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/sensorClientAdapter.py#L48-L70
|
241,239
|
the01/python-paps
|
paps/si/sensorClientAdapter.py
|
SensorClientAdapter.on_person_update
|
def on_person_update(self, people):
"""
People have changed
Should always include all people
(all that were added via on_person_new)
:param people: People to update
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.sensor_client.person_update(people)
except:
self.exception("Failed to update people")
raise Exception("Updating people failed")
|
python
|
def on_person_update(self, people):
"""
People have changed
Should always include all people
(all that were added via on_person_new)
:param people: People to update
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
"""
try:
self.sensor_client.person_update(people)
except:
self.exception("Failed to update people")
raise Exception("Updating people failed")
|
[
"def",
"on_person_update",
"(",
"self",
",",
"people",
")",
":",
"try",
":",
"self",
".",
"sensor_client",
".",
"person_update",
"(",
"people",
")",
"except",
":",
"self",
".",
"exception",
"(",
"\"Failed to update people\"",
")",
"raise",
"Exception",
"(",
"\"Updating people failed\"",
")"
] |
People have changed
Should always include all people
(all that were added via on_person_new)
:param people: People to update
:type people: list[paps.people.People]
:rtype: None
:raises Exception: On error (for now just an exception)
|
[
"People",
"have",
"changed"
] |
2dde5a71913e4c7b22901cf05c6ecedd890919c4
|
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/sensorClientAdapter.py#L90-L106
|
241,240
|
diffeo/rejester
|
rejester/_redis.py
|
RedisBase.delete_namespace
|
def delete_namespace(self):
'''Remove all keys from the namespace
'''
conn = redis.Redis(connection_pool=self.pool)
keys = conn.keys("%s*" % self._namespace_str)
for i in xrange(0, len(keys), 10000):
conn.delete(*keys[i:i+10000])
logger.debug('tearing down %r', self._namespace_str)
|
python
|
def delete_namespace(self):
'''Remove all keys from the namespace
'''
conn = redis.Redis(connection_pool=self.pool)
keys = conn.keys("%s*" % self._namespace_str)
for i in xrange(0, len(keys), 10000):
conn.delete(*keys[i:i+10000])
logger.debug('tearing down %r', self._namespace_str)
|
[
"def",
"delete_namespace",
"(",
"self",
")",
":",
"conn",
"=",
"redis",
".",
"Redis",
"(",
"connection_pool",
"=",
"self",
".",
"pool",
")",
"keys",
"=",
"conn",
".",
"keys",
"(",
"\"%s*\"",
"%",
"self",
".",
"_namespace_str",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"len",
"(",
"keys",
")",
",",
"10000",
")",
":",
"conn",
".",
"delete",
"(",
"*",
"keys",
"[",
"i",
":",
"i",
"+",
"10000",
"]",
")",
"logger",
".",
"debug",
"(",
"'tearing down %r'",
",",
"self",
".",
"_namespace_str",
")"
] |
Remove all keys from the namespace
|
[
"Remove",
"all",
"keys",
"from",
"the",
"namespace"
] |
5438a4a18be2801d7826c46e2079ba9639d2ecb4
|
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_redis.py#L67-L75
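One caveat: KEYS blocks the Redis server while it walks the whole keyspace. A hedged alternative with the same 10000-key batching, built on the incremental SCAN iterator instead (a sketch, not the library's code):
import redis

def delete_namespace_scan(pool, namespace_str):
    conn = redis.Redis(connection_pool=pool)
    batch = []
    for key in conn.scan_iter('%s*' % namespace_str, count=10000):
        batch.append(key)
        if len(batch) >= 10000:
            conn.delete(*batch)  # delete in chunks, as the original does
            batch = []
    if batch:
        conn.delete(*batch)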
|
241,241
|
unitedstack/steth
|
stetho/agent/common/utils.py
|
get_interface
|
def get_interface(interface):
"""Support CentOS standard physical interface,
such as eth0.
"""
# Supported CentOS Version
supported_dists = ['7.0', '6.5']
def format_centos_7_0(inf):
pattern = r'<([A-Z]+)'
state = re.search(pattern, stdout[0]).groups()[0]
state = 'UP' if not cmp(state, 'UP') else 'DOWN'
inf.state = state
stdout.pop(0)
pattern = r'inet\s(.*)\s\snetmask\s(.*)\s\sbroadcast\s(.*)'
for line in stdout:
if line.startswith('inet '):
tmp = re.search(pattern, line).groups()
(inf.inet, inf.netmask, inf.broadcast) = tmp
stdout.remove(line)
break
for line in stdout:
if line.startswith('ether'):
inf.ether = line[6:23]
break
return stdcode, '', inf.make_dict()
def format_centos_6_5(inf):
pattern = r'HWaddr\s(.*)'
inf.ether = re.search(pattern, stdout[0]).groups()[0]
stdout.pop(0)
pattern = r'addr:(.*)\s\sBcast:(.*)\s\sMask:(.*)'
for line in stdout:
if line.startswith('inet '):
tmp = re.search(pattern, line).groups()
(inf.inet, inf.broadcast, inf.netmask) = tmp
stdout.remove(line)
break
inf.state = 'DOWN'
for line in stdout:
if 'RUNNING' in line:
state = line[:2]
state = 'UP' if not cmp(state, 'UP') else 'DOWN'
inf.state = state
break
return stdcode, '', inf.make_dict()
linux_dist = platform.linux_distribution()[1][:3]
if linux_dist in supported_dists:
try:
cmd = ['ifconfig', interface]
stdcode, stdout = execute(cmd)
inf = resource.Interface(interface)
if not cmp(linux_dist, '6.5'):
return format_centos_6_5(inf)
elif not cmp(linux_dist, '7.0'):
return format_centos_7_0(inf)
except Exception as e:
message = stdout.pop(0)
return stdcode, message, None
# Unsupported OS distribution
message = 'Unsupported OS distribution %s, only supported for CentOS %s.'
message = message % (linux_dist, str(supported_dists))
return 1, message, None
|
python
|
def get_interface(interface):
"""Support CentOS standard physical interface,
such as eth0.
"""
# Supported CentOS Version
supported_dists = ['7.0', '6.5']
def format_centos_7_0(inf):
pattern = r'<([A-Z]+)'
state = re.search(pattern, stdout[0]).groups()[0]
state = 'UP' if not cmp(state, 'UP') else 'DOWN'
inf.state = state
stdout.pop(0)
pattern = r'inet\s(.*)\s\snetmask\s(.*)\s\sbroadcast\s(.*)'
for line in stdout:
if line.startswith('inet '):
tmp = re.search(pattern, line).groups()
(inf.inet, inf.netmask, inf.broadcast) = tmp
stdout.remove(line)
break
for line in stdout:
if line.startswith('ether'):
inf.ether = line[6:23]
break
return stdcode, '', inf.make_dict()
def format_centos_6_5(inf):
pattern = r'HWaddr\s(.*)'
inf.ether = re.search(pattern, stdout[0]).groups()[0]
stdout.pop(0)
pattern = r'addr:(.*)\s\sBcast:(.*)\s\sMask:(.*)'
for line in stdout:
if line.startswith('inet '):
tmp = re.search(pattern, line).groups()
(inf.inet, inf.broadcast, inf.netmask) = tmp
stdout.remove(line)
break
inf.state = 'DOWN'
for line in stdout:
if 'RUNNING' in line:
state = line[:2]
state = 'UP' if not cmp(state, 'UP') else 'DOWN'
inf.state = state
break
return stdcode, '', inf.make_dict()
linux_dist = platform.linux_distribution()[1][:3]
if linux_dist in supported_dists:
try:
cmd = ['ifconfig', interface]
stdcode, stdout = execute(cmd)
inf = resource.Interface(interface)
if not cmp(linux_dist, '6.5'):
return format_centos_6_5(inf)
elif not cmp(linux_dist, '7.0'):
return format_centos_7_0(inf)
except Exception as e:
message = stdout.pop(0)
return stdcode, message, None
# Unsupported OS distribution
message = 'Unsupported OS distribution %s, only supported for CentOS %s.'
message = message % (linux_dist, str(supported_dists))
return 1, message, None
|
[
"def",
"get_interface",
"(",
"interface",
")",
":",
"# Supported CentOS Version",
"supported_dists",
"=",
"[",
"'7.0'",
",",
"'6.5'",
"]",
"def",
"format_centos_7_0",
"(",
"inf",
")",
":",
"pattern",
"=",
"r'<([A-Z]+)'",
"state",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"stdout",
"[",
"0",
"]",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"state",
"=",
"'UP'",
"if",
"not",
"cmp",
"(",
"state",
",",
"'UP'",
")",
"else",
"'DOWN'",
"inf",
".",
"state",
"=",
"state",
"stdout",
".",
"pop",
"(",
"0",
")",
"pattern",
"=",
"r'inet\\s(.*)\\s\\snetmask\\s(.*)\\s\\sbroadcast\\s(.*)'",
"for",
"line",
"in",
"stdout",
":",
"if",
"line",
".",
"startswith",
"(",
"'inet '",
")",
":",
"tmp",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"line",
")",
".",
"groups",
"(",
")",
"(",
"inf",
".",
"inet",
",",
"inf",
".",
"netmask",
",",
"inf",
".",
"broadcast",
")",
"=",
"tmp",
"stdout",
".",
"remove",
"(",
"line",
")",
"break",
"for",
"line",
"in",
"stdout",
":",
"if",
"line",
".",
"startswith",
"(",
"'ether'",
")",
":",
"inf",
".",
"ether",
"=",
"line",
"[",
"6",
":",
"23",
"]",
"break",
"return",
"stdcode",
",",
"''",
",",
"inf",
".",
"make_dict",
"(",
")",
"def",
"format_centos_6_5",
"(",
"inf",
")",
":",
"pattern",
"=",
"r'HWaddr\\s(.*)'",
"inf",
".",
"ether",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"stdout",
"[",
"0",
"]",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"stdout",
".",
"pop",
"(",
"0",
")",
"pattern",
"=",
"r'addr:(.*)\\s\\sBcast:(.*)\\s\\sMask:(.*)'",
"for",
"line",
"in",
"stdout",
":",
"if",
"line",
".",
"startswith",
"(",
"'inet '",
")",
":",
"tmp",
"=",
"re",
".",
"search",
"(",
"pattern",
",",
"line",
")",
".",
"groups",
"(",
")",
"(",
"inf",
".",
"inet",
",",
"inf",
".",
"broadcast",
",",
"inf",
".",
"netmask",
")",
"=",
"tmp",
"stdout",
".",
"remove",
"(",
"line",
")",
"break",
"inf",
".",
"state",
"=",
"'DOWN'",
"for",
"line",
"in",
"stdout",
":",
"if",
"'RUNNING'",
"in",
"line",
":",
"state",
"=",
"line",
"[",
":",
"2",
"]",
"state",
"=",
"'UP'",
"if",
"not",
"cmp",
"(",
"state",
",",
"'UP'",
")",
"else",
"'DOWN'",
"inf",
".",
"state",
"=",
"state",
"break",
"return",
"stdcode",
",",
"''",
",",
"inf",
".",
"make_dict",
"(",
")",
"linux_dist",
"=",
"platform",
".",
"linux_distribution",
"(",
")",
"[",
"1",
"]",
"[",
":",
"3",
"]",
"if",
"linux_dist",
"in",
"supported_dists",
":",
"try",
":",
"cmd",
"=",
"[",
"'ifconfig'",
",",
"interface",
"]",
"stdcode",
",",
"stdout",
"=",
"execute",
"(",
"cmd",
")",
"inf",
"=",
"resource",
".",
"Interface",
"(",
"interface",
")",
"if",
"not",
"cmp",
"(",
"linux_dist",
",",
"'6.5'",
")",
":",
"return",
"format_centos_6_5",
"(",
"inf",
")",
"elif",
"not",
"cmp",
"(",
"linux_dist",
",",
"'7.0'",
")",
":",
"return",
"format_centos_7_0",
"(",
"inf",
")",
"except",
"Exception",
"as",
"e",
":",
"message",
"=",
"stdout",
".",
"pop",
"(",
"0",
")",
"return",
"stdcode",
",",
"message",
",",
"None",
"# Unsupported OS distribute",
"message",
"=",
"'Unsupported OS distribute %s, only support for CentOS %s.'",
"message",
"=",
"message",
"%",
"(",
"linux_dist",
",",
"str",
"(",
"supported_dists",
")",
")",
"return",
"1",
",",
"message",
",",
"None"
] |
Support CentOS standard physical interface,
such as eth0.
|
[
"Support",
"CentOS",
"standard",
"physical",
"interface",
"such",
"as",
"eth0",
"."
] |
955884ceebf3bdc474c93cc5cf555e67d16458f1
|
https://github.com/unitedstack/steth/blob/955884ceebf3bdc474c93cc5cf555e67d16458f1/stetho/agent/common/utils.py#L84-L147
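Two Python 2 leftovers to note: cmp(a, b) == 0 simply means a == b, and platform.linux_distribution() was removed in Python 3.8. A hedged Python 3 sketch of the same version probe, using the third-party distro package as the usual replacement:
import distro  # pip install distro

def centos_version():
    # distro.version() returns the distribution version string;
    # sliced to three characters to mirror the original's [:3]
    return distro.version()[:3]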
|
241,242
|
armstrong/armstrong.apps.images
|
setup.py
|
convert_to_str
|
def convert_to_str(d):
"""
Recursively convert all values in a dictionary to strings
This is required because setup() does not like unicode in
the values it is supplied with.
"""
d2 = {}
for k, v in d.items():
k = str(k)
if type(v) in [list, tuple]:
d2[k] = [str(a) for a in v]
elif type(v) is dict:
d2[k] = convert_to_str(v)
else:
d2[k] = str(v)
return d2
|
python
|
def convert_to_str(d):
"""
Recursively convert all values in a dictionary to strings
This is required because setup() does not like unicode in
the values it is supplied with.
"""
d2 = {}
for k, v in d.items():
k = str(k)
if type(v) in [list, tuple]:
d2[k] = [str(a) for a in v]
elif type(v) is dict:
d2[k] = convert_to_str(v)
else:
d2[k] = str(v)
return d2
|
[
"def",
"convert_to_str",
"(",
"d",
")",
":",
"d2",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"k",
"=",
"str",
"(",
"k",
")",
"if",
"type",
"(",
"v",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
":",
"d2",
"[",
"k",
"]",
"=",
"[",
"str",
"(",
"a",
")",
"for",
"a",
"in",
"v",
"]",
"elif",
"type",
"(",
"v",
")",
"is",
"dict",
":",
"d2",
"[",
"k",
"]",
"=",
"convert_to_str",
"(",
"v",
")",
"else",
":",
"d2",
"[",
"k",
"]",
"=",
"str",
"(",
"v",
")",
"return",
"d2"
] |
Recursively convert all values in a dictionary to strings
This is required because setup() does not like unicode in
the values it is supplied with.
|
[
"Recursively",
"convert",
"all",
"values",
"in",
"a",
"dictionary",
"to",
"strings"
] |
f334697ee6e2273deac12092069d02119d913e67
|
https://github.com/armstrong/armstrong.apps.images/blob/f334697ee6e2273deac12092069d02119d913e67/setup.py#L15-L31
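A quick illustration of the recursion (the exact type() checks mean subclasses of list or dict fall through to str()):
meta = {u'name': u'armstrong', u'deps': [u'django'], u'extra': {u'count': 1}}
convert_to_str(meta)
# -> {'name': 'armstrong', 'deps': ['django'], 'extra': {'count': '1'}}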
|
241,243
|
racker/torment
|
torment/contexts/docker/compose.py
|
up
|
def up(services: Iterable[str] = ()) -> int:
'''Start the specified docker-compose services.
Parameters
----------
:``services``: a list of docker-compose service names to start (must be
defined in docker-compose.yml)
Return Value(s)
---------------
The integer status of ``docker-compose up``.
'''
services = list(services)
if not len(services):
raise ValueError('empty iterable passed to up(): {0}'.format(services))
return _call('docker-compose up --no-color -d ' + ' '.join(services), shell = True)
|
python
|
def up(services: Iterable[str] = ()) -> int:
'''Start the specified docker-compose services.
Parameters
----------
:``services``: a list of docker-compose service names to start (must be
defined in docker-compose.yml)
Return Value(s)
---------------
The integer status of ``docker-compose up``.
'''
services = list(services)
if not len(services):
raise ValueError('empty iterable passed to up(): {0}'.format(services))
return _call('docker-compose up --no-color -d ' + ' '.join(services), shell = True)
|
[
"def",
"up",
"(",
"services",
":",
"Iterable",
"[",
"str",
"]",
"=",
"(",
")",
")",
"->",
"int",
":",
"services",
"=",
"list",
"(",
"services",
")",
"if",
"not",
"len",
"(",
"services",
")",
":",
"raise",
"ValueError",
"(",
"'empty iterable passed to up(): {0}'",
".",
"format",
"(",
"services",
")",
")",
"return",
"_call",
"(",
"'docker-compose up --no-color -d '",
"+",
"' '",
".",
"join",
"(",
"services",
")",
",",
"shell",
"=",
"True",
")"
] |
Start the specified docker-compose services.
Parameters
----------
:``services``: a list of docker-compose service names to start (must be
defined in docker-compose.yml)
Return Value(s)
---------------
The integer status of ``docker-compose up``.
|
[
"Start",
"the",
"specified",
"docker",
"-",
"compose",
"services",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/contexts/docker/compose.py#L54-L75
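Hypothetical usage: start two services defined in docker-compose.yml and check the exit status:
status = up(['web', 'db'])
if status != 0:
    raise RuntimeError('docker-compose up exited with %d' % status)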
|
241,244
|
racker/torment
|
torment/contexts/docker/compose.py
|
_call
|
def _call(command: str, *args, **kwargs) -> int:
'''Wrapper around ``subprocess.Popen`` that sends command output to logger.
.. seealso::
``subprocess.Popen``_
Parameters
----------
:``command``: string form of the command to execute
All other parameters are passed directly to ``subprocess.Popen``.
Return Value(s)
---------------
The integer status of command.
'''
child = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, *args, **kwargs)
def log():
'''Send the process's stdout and stderr to the logger.'''
for fh in select.select(( child.stdout, child.stderr, ), (), (), 0)[0]:
line = fh.readline()[:-1]
if len(line):
getattr(logger, {
child.stdout: 'debug',
child.stderr: 'error',
}[fh])('%s: %s', command, line)
while child.poll() is None:
log()
log()
return child.wait()
|
python
|
def _call(command: str, *args, **kwargs) -> int:
'''Wrapper around ``subprocess.Popen`` that sends command output to logger.
.. seealso::
``subprocess.Popen``_
Parameters
----------
:``command``: string form of the command to execute
All other parameters are passed directly to ``subprocess.Popen``.
Return Value(s)
---------------
The integer status of command.
'''
child = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, *args, **kwargs)
def log():
'''Send the process's stdout and stderr to the logger.'''
for fh in select.select(( child.stdout, child.stderr, ), (), (), 0)[0]:
line = fh.readline()[:-1]
if len(line):
getattr(logger, {
child.stdout: 'debug',
child.stderr: 'error',
}[fh])('%s: %s', command, line)
while child.poll() is None:
log()
log()
return child.wait()
|
[
"def",
"_call",
"(",
"command",
":",
"str",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"int",
":",
"child",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"def",
"log",
"(",
")",
":",
"'''Send processes stdout and stderr to logger.'''",
"for",
"fh",
"in",
"select",
".",
"select",
"(",
"(",
"child",
".",
"stdout",
",",
"child",
".",
"stderr",
",",
")",
",",
"(",
")",
",",
"(",
")",
",",
"0",
")",
"[",
"0",
"]",
":",
"line",
"=",
"fh",
".",
"readline",
"(",
")",
"[",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"line",
")",
":",
"getattr",
"(",
"logger",
",",
"{",
"child",
".",
"stdout",
":",
"'debug'",
",",
"child",
".",
"stderr",
":",
"'error'",
",",
"}",
"[",
"fh",
"]",
")",
"(",
"'%s: %s'",
",",
"command",
",",
"line",
")",
"while",
"child",
".",
"poll",
"(",
")",
"is",
"None",
":",
"log",
"(",
")",
"log",
"(",
")",
"return",
"child",
".",
"wait",
"(",
")"
] |
Wrapper around ``subprocess.Popen`` that sends command output to logger.
.. seealso::
``subprocess.Popen``_
Parameters
----------
:``command``: string form of the command to execute
All other parameters are passed directly to ``subprocess.Popen``.
Return Value(s)
---------------
The integer status of command.
|
[
"Wrapper",
"around",
"subprocess",
".",
"Popen",
"that",
"sends",
"command",
"output",
"to",
"logger",
"."
] |
bd5d2f978324bf9b7360edfae76d853b226c63e1
|
https://github.com/racker/torment/blob/bd5d2f978324bf9b7360edfae76d853b226c63e1/torment/contexts/docker/compose.py#L78-L118
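Usage is a drop-in for subprocess.call, with the child's output routed per stream (stdout lines to logger.debug, stderr lines to logger.error); the command below is hypothetical:
status = _call('docker-compose ps', shell=True)
The zero timeout passed to select() makes the readiness check a pure poll, so each log() pass only reads streams that already have data and the wait loop never stalls on a quiet stream.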
|
241,245
|
rmed/flask-waffleconf
|
flask_waffleconf/watcher.py
|
_file_watcher
|
def _file_watcher(state):
"""Watch for file changes and reload config when needed.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
conf = state.app.config
file_path = conf.get('WAFFLE_WATCHER_FILE', '/tmp/waffleconf.txt')
if not os.path.isfile(file_path):
# Create watch file
open(file_path, 'a').close()
while True:
tstamp = os.path.getmtime(file_path)
# Compare timestamps and update config if needed
if tstamp > state._tstamp:
state.update_conf()
state._tstamp = tstamp
# Not too critical
time.sleep(10)
|
python
|
def _file_watcher(state):
"""Watch for file changes and reload config when needed.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
conf = state.app.config
file_path = conf.get('WAFFLE_WATCHER_FILE', '/tmp/waffleconf.txt')
if not os.path.isfile(file_path):
# Create watch file
open(file_path, 'a').close()
while True:
tstamp = os.path.getmtime(file_path)
# Compare timestamps and update config if needed
if tstamp > state._tstamp:
state.update_conf()
state._tstamp = tstamp
# Not too critical
time.sleep(10)
|
[
"def",
"_file_watcher",
"(",
"state",
")",
":",
"conf",
"=",
"state",
".",
"app",
".",
"config",
"file_path",
"=",
"conf",
".",
"get",
"(",
"'WAFFLE_WATCHER_FILE'",
",",
"'/tmp/waffleconf.txt'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"# Create watch file",
"open",
"(",
"file_path",
",",
"'a'",
")",
".",
"close",
"(",
")",
"while",
"True",
":",
"tstamp",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"file_path",
")",
"# Compare timestamps and update config if needed",
"if",
"tstamp",
">",
"state",
".",
"_tstamp",
":",
"state",
".",
"update_conf",
"(",
")",
"state",
".",
"_tstamp",
"=",
"tstamp",
"# Not too critical",
"time",
".",
"sleep",
"(",
"10",
")"
] |
Watch for file changes and reload config when needed.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
|
[
"Watch",
"for",
"file",
"changes",
"and",
"reload",
"config",
"when",
"needed",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/watcher.py#L50-L74
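Because the watcher only compares mtimes, any process can trigger a reload just by touching the watch file, which is exactly what _file_notifier (further down) does via os.utime. A manual equivalent:
import os, time

ts = time.time()
os.utime('/tmp/waffleconf.txt', (ts, ts))  # the default WAFFLE_WATCHER_FILE path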
|
241,246
|
rmed/flask-waffleconf
|
flask_waffleconf/watcher.py
|
_redis_watcher
|
def _redis_watcher(state):
"""Listen to a redis channel for configuration update notifications.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
conf = state.app.config
r = redis.client.StrictRedis(
host=conf.get('WAFFLE_REDIS_HOST', 'localhost'),
port=conf.get('WAFFLE_REDIS_PORT', 6379))
sub = r.pubsub(ignore_subscribe_messages=True)
sub.subscribe(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'))
while True:
for msg in sub.listen():
# Skip non-messages
if not msg['type'] == 'message':
continue
tstamp = float(msg['data'])
# Compare timestamps and update config if needed
if tstamp > state._tstamp:
state.update_conf()
state._tstamp = tstamp
|
python
|
def _redis_watcher(state):
"""Listen to a redis channel for configuration update notifications.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
conf = state.app.config
r = redis.client.StrictRedis(
host=conf.get('WAFFLE_REDIS_HOST', 'localhost'),
port=conf.get('WAFFLE_REDIS_PORT', 6379))
sub = r.pubsub(ignore_subscribe_messages=True)
sub.subscribe(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'))
while True:
for msg in sub.listen():
# Skip non-messages
if not msg['type'] == 'message':
continue
tstamp = float(msg['data'])
# Compare timestamps and update config if needed
if tstamp > state._tstamp:
state.update_conf()
state._tstamp = tstamp
|
[
"def",
"_redis_watcher",
"(",
"state",
")",
":",
"conf",
"=",
"state",
".",
"app",
".",
"config",
"r",
"=",
"redis",
".",
"client",
".",
"StrictRedis",
"(",
"host",
"=",
"conf",
".",
"get",
"(",
"'WAFFLE_REDIS_HOST'",
",",
"'localhost'",
")",
",",
"port",
"=",
"conf",
".",
"get",
"(",
"'WAFFLE_REDIS_PORT'",
",",
"6379",
")",
")",
"sub",
"=",
"r",
".",
"pubsub",
"(",
"ignore_subscribe_messages",
"=",
"True",
")",
"sub",
".",
"subscribe",
"(",
"conf",
".",
"get",
"(",
"'WAFFLE_REDIS_CHANNEL'",
",",
"'waffleconf'",
")",
")",
"while",
"True",
":",
"for",
"msg",
"in",
"sub",
".",
"listen",
"(",
")",
":",
"# Skip non-messages",
"if",
"not",
"msg",
"[",
"'type'",
"]",
"==",
"'message'",
":",
"continue",
"tstamp",
"=",
"float",
"(",
"msg",
"[",
"'data'",
"]",
")",
"# Compare timestamps and update config if needed",
"if",
"tstamp",
">",
"state",
".",
"_tstamp",
":",
"state",
".",
"update_conf",
"(",
")",
"state",
".",
"_tstamp",
"=",
"tstamp"
] |
Listen to a redis channel for configuration update notifications.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
|
[
"Listen",
"to",
"a",
"redis",
"channel",
"for",
"configuration",
"update",
"notifications",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/watcher.py#L76-L103
|
241,247
|
rmed/flask-waffleconf
|
flask_waffleconf/watcher.py
|
_file_notifier
|
def _file_notifier(state):
"""Notify of configuration update through file.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
tstamp = time.time()
state._tstamp = tstamp
conf = state.app.config
file_path = conf.get('WAFFLE_WATCHER_FILE', '/tmp/waffleconf.txt')
if not os.path.isfile(file_path):
# Create watch file
open(file_path, 'a').close()
# Update timestamp
os.utime(file_path, (tstamp, tstamp))
|
python
|
def _file_notifier(state):
"""Notify of configuration update through file.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
tstamp = time.time()
state._tstamp = tstamp
conf = state.app.config
file_path = conf.get('WAFFLE_WATCHER_FILE', '/tmp/waffleconf.txt')
if not os.path.isfile(file_path):
# Create watch file
open(file_path, 'a').close()
# Update timestamp
os.utime(file_path, (tstamp, tstamp))
|
[
"def",
"_file_notifier",
"(",
"state",
")",
":",
"tstamp",
"=",
"time",
".",
"time",
"(",
")",
"state",
".",
"_tstamp",
"=",
"tstamp",
"conf",
"=",
"state",
".",
"app",
".",
"config",
"file_path",
"=",
"conf",
".",
"get",
"(",
"'WAFFLE_WATCHER_FILE'",
",",
"'/tmp/waffleconf.txt'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"# Create watch file",
"open",
"(",
"file_path",
",",
"'a'",
")",
".",
"close",
"(",
")",
"# Update timestamp",
"os",
".",
"utime",
"(",
"file_path",
",",
"(",
"tstamp",
",",
"tstamp",
")",
")"
] |
Notify of configuration update through file.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
|
[
"Notify",
"of",
"configuration",
"update",
"through",
"file",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/watcher.py#L121-L139
|
241,248
|
rmed/flask-waffleconf
|
flask_waffleconf/watcher.py
|
_redis_notifier
|
def _redis_notifier(state):
"""Notify of configuration update through redis.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
tstamp = time.time()
state._tstamp = tstamp
conf = state.app.config
# Notify timestamp
r = redis.client.StrictRedis()
r.publish(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'), tstamp)
|
python
|
def _redis_notifier(state):
"""Notify of configuration update through redis.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
"""
tstamp = time.time()
state._tstamp = tstamp
conf = state.app.config
# Notify timestamp
r = redis.client.StrictRedis()
r.publish(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'), tstamp)
|
[
"def",
"_redis_notifier",
"(",
"state",
")",
":",
"tstamp",
"=",
"time",
".",
"time",
"(",
")",
"state",
".",
"_tstamp",
"=",
"tstamp",
"conf",
"=",
"state",
".",
"app",
".",
"config",
"# Notify timestamp",
"r",
"=",
"redis",
".",
"client",
".",
"StrictRedis",
"(",
")",
"r",
".",
"publish",
"(",
"conf",
".",
"get",
"(",
"'WAFFLE_REDIS_CHANNEL'",
",",
"'waffleconf'",
")",
",",
"tstamp",
")"
] |
Notify of configuration update through redis.
Arguments:
state (_WaffleState): Object that contains reference to app and its
configstore.
|
[
"Notify",
"of",
"configuration",
"update",
"through",
"redis",
"."
] |
a75ed69101796c9f3f42eff9f91e91dc6dd13869
|
https://github.com/rmed/flask-waffleconf/blob/a75ed69101796c9f3f42eff9f91e91dc6dd13869/flask_waffleconf/watcher.py#L141-L154
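How the watcher/notifier pair is wired together, as a hedged sketch (state stands in for the extension's _WaffleState object):
import threading

watcher = threading.Thread(target=_redis_watcher, args=(state,), daemon=True)
watcher.start()

# later, after persisting new configuration values:
_redis_notifier(state)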
|
241,249
|
jordanncg/Bison
|
bison/libs/common.py
|
Common.make_dir
|
def make_dir(cls, directory_name):
"""Create a directory in the system"""
if not os.path.exists(directory_name):
os.makedirs(directory_name)
|
python
|
def make_dir(cls, directory_name):
"""Create a directory in the system"""
if not os.path.exists(directory_name):
os.makedirs(directory_name)
|
[
"def",
"make_dir",
"(",
"cls",
",",
"directory_name",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"directory_name",
")",
":",
"os",
".",
"makedirs",
"(",
"directory_name",
")"
] |
Create a directory in the system
|
[
"Create",
"a",
"directory",
"in",
"the",
"system"
] |
c7f04fd67d141fe26cd29db3c3fb3fc0fd0c45df
|
https://github.com/jordanncg/Bison/blob/c7f04fd67d141fe26cd29db3c3fb3fc0fd0c45df/bison/libs/common.py#L35-L38
|
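The check-then-create pattern above has a small race window: another process can create the directory between the `exists()` test and `makedirs()`. On Python 3.2+ the idiomatic, race-free equivalent is a one-liner:

import os

os.makedirs(directory_name, exist_ok=True)  # no error if the directory already exists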
241,250
|
edwards-lab/libGWAS
|
libgwas/pedigree_parser.py
|
Parser.load_mapfile
|
def load_mapfile(self, map3=False):
"""Load the marker data
:param map3: When true, ignore the gen. distance column
Builds up the marker list according to the boundary configuration
"""
cols = [0, 1, 3]
if map3:
cols = [0, 1, 2]
markers = numpy.loadtxt(self.mapfile, dtype=str, usecols=cols)
self.snp_mask = numpy.ones(markers.shape[0]*2,
dtype=numpy.int8).reshape(-1, 2)
if DataParser.boundary.NoExclusions():
self.markers = numpy.zeros((markers.shape[0], 2), dtype=int)
# Check for plink's "off" mode
mask = markers[:, 2].astype(int) >= 0
# Turn them all 'on'
self.snp_mask[:,0] = ~mask
self.snp_mask[:,1] = ~mask
snp_count = numpy.sum(self.snp_mask[:, 0] == 0)
self.markers[0:snp_count, 0] = markers[:, 0].astype(int)[mask]
self.markers[0:snp_count, 1] = markers[:, 2].astype(int)[mask]
self.rsids = markers[:, 1][mask]
self.markers = self.markers[0:snp_count]
else:
idx = 0
self.markers = []
self.rsids = []
for locus in markers:
if DataParser.boundary.TestBoundary(int(locus[0]),
int(locus[2]), locus[1]):
self.markers.append([locus[0], locus[2]])
self.rsids.append(locus[1])
self.snp_mask[idx] = 0
idx += 1
self.markers = numpy.array(self.markers, dtype=numpy.int)
self.rsids = numpy.array(self.rsids)
# We don't follow these rules here
DataParser.boundary.beyond_upper_bound = False
self.locus_count = len(self.markers)
|
python
|
def load_mapfile(self, map3=False):
"""Load the marker data
:param map3: When true, ignore the gen. distance column
Builds up the marker list according to the boundary configuration
"""
cols = [0, 1, 3]
if map3:
cols = [0, 1, 2]
markers = numpy.loadtxt(self.mapfile, dtype=str, usecols=cols)
self.snp_mask = numpy.ones(markers.shape[0]*2,
dtype=numpy.int8).reshape(-1, 2)
if DataParser.boundary.NoExclusions():
self.markers = numpy.zeros((markers.shape[0], 2), dtype=int)
# Check for plink's "off" mode
mask = markers[:, 2].astype(int) >= 0
# Turn them all 'on'
self.snp_mask[:,0] = ~mask
self.snp_mask[:,1] = ~mask
snp_count = numpy.sum(self.snp_mask[:, 0] == 0)
self.markers[0:snp_count, 0] = markers[:, 0].astype(int)[mask]
self.markers[0:snp_count, 1] = markers[:, 2].astype(int)[mask]
self.rsids = markers[:, 1][mask]
self.markers = self.markers[0:snp_count]
else:
idx = 0
self.markers = []
self.rsids = []
for locus in markers:
if DataParser.boundary.TestBoundary(int(locus[0]),
int(locus[2]), locus[1]):
self.markers.append([locus[0], locus[2]])
self.rsids.append(locus[1])
self.snp_mask[idx] = 0
idx += 1
self.markers = numpy.array(self.markers, dtype=numpy.int)
self.rsids = numpy.array(self.rsids)
# We don't follow these rules here
DataParser.boundary.beyond_upper_bound = False
self.locus_count = len(self.markers)
|
[
"def",
"load_mapfile",
"(",
"self",
",",
"map3",
"=",
"False",
")",
":",
"cols",
"=",
"[",
"0",
",",
"1",
",",
"3",
"]",
"if",
"map3",
":",
"cols",
"=",
"[",
"0",
",",
"1",
",",
"2",
"]",
"markers",
"=",
"numpy",
".",
"loadtxt",
"(",
"self",
".",
"mapfile",
",",
"dtype",
"=",
"str",
",",
"usecols",
"=",
"cols",
")",
"self",
".",
"snp_mask",
"=",
"numpy",
".",
"ones",
"(",
"markers",
".",
"shape",
"[",
"0",
"]",
"*",
"2",
",",
"dtype",
"=",
"numpy",
".",
"int8",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"2",
")",
"if",
"DataParser",
".",
"boundary",
".",
"NoExclusions",
"(",
")",
":",
"self",
".",
"markers",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"markers",
".",
"shape",
"[",
"0",
"]",
",",
"2",
")",
",",
"dtype",
"=",
"int",
")",
"# Check for plink's \"off\" mode",
"mask",
"=",
"markers",
"[",
":",
",",
"2",
"]",
".",
"astype",
"(",
"int",
")",
">=",
"0",
"# Turn them all 'on'",
"self",
".",
"snp_mask",
"[",
":",
",",
"0",
"]",
"=",
"~",
"mask",
"self",
".",
"snp_mask",
"[",
":",
",",
"1",
"]",
"=",
"~",
"mask",
"snp_count",
"=",
"numpy",
".",
"sum",
"(",
"self",
".",
"snp_mask",
"[",
":",
",",
"0",
"]",
"==",
"0",
")",
"self",
".",
"markers",
"[",
"0",
":",
"snp_count",
",",
"0",
"]",
"=",
"markers",
"[",
":",
",",
"0",
"]",
".",
"astype",
"(",
"int",
")",
"[",
"mask",
"]",
"self",
".",
"markers",
"[",
"0",
":",
"snp_count",
",",
"1",
"]",
"=",
"markers",
"[",
":",
",",
"2",
"]",
".",
"astype",
"(",
"int",
")",
"[",
"mask",
"]",
"self",
".",
"rsids",
"=",
"markers",
"[",
":",
",",
"1",
"]",
"[",
"mask",
"]",
"self",
".",
"markers",
"=",
"self",
".",
"markers",
"[",
"0",
":",
"snp_count",
"]",
"else",
":",
"idx",
"=",
"0",
"self",
".",
"markers",
"=",
"[",
"]",
"self",
".",
"rsids",
"=",
"[",
"]",
"for",
"locus",
"in",
"markers",
":",
"if",
"DataParser",
".",
"boundary",
".",
"TestBoundary",
"(",
"int",
"(",
"locus",
"[",
"0",
"]",
")",
",",
"int",
"(",
"locus",
"[",
"2",
"]",
")",
",",
"locus",
"[",
"1",
"]",
")",
":",
"self",
".",
"markers",
".",
"append",
"(",
"[",
"locus",
"[",
"0",
"]",
",",
"locus",
"[",
"2",
"]",
"]",
")",
"self",
".",
"rsids",
".",
"append",
"(",
"locus",
"[",
"1",
"]",
")",
"self",
".",
"snp_mask",
"[",
"idx",
"]",
"=",
"0",
"idx",
"+=",
"1",
"self",
".",
"markers",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"markers",
",",
"dtype",
"=",
"numpy",
".",
"int",
")",
"self",
".",
"rsids",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"rsids",
")",
"# We don't follow these rules here",
"DataParser",
".",
"boundary",
".",
"beyond_upper_bound",
"=",
"False",
"self",
".",
"locus_count",
"=",
"len",
"(",
"self",
".",
"markers",
")"
] |
Load the marker data
:param map3: When true, ignore the gen. distance column
Builds up the marker list according to the boundary configuration
|
[
"Load",
"the",
"marker",
"data"
] |
d68c9a083d443dfa5d7c5112de29010909cfe23f
|
https://github.com/edwards-lab/libGWAS/blob/d68c9a083d443dfa5d7c5112de29010909cfe23f/libgwas/pedigree_parser.py#L92-L138
|
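For context, a PLINK .map line has four whitespace-separated columns (chromosome, rsid, genetic distance, base-pair position), and the three-column .map3 variant drops the genetic distance; that is what the `usecols` switch above selects. A small self-contained illustration of the column selection (numpy.loadtxt accepts any iterable of lines):

import numpy

# four-column .map row; with map3=False the genetic-distance column (index 2) is skipped
row = numpy.loadtxt(['1 rs123 0 12345'], dtype=str, usecols=[0, 1, 3])
print(row)  # ['1' 'rs123' '12345']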
241,251
|
hobson/pug-dj
|
pug/dj/db.py
|
normalize_values_queryset
|
def normalize_values_queryset(values_queryset, model=None, app=None, verbosity=1):
'''Shoehorn the values from one database table into another
* Remove padding (leading/trailing spaces) from `CharField` and `TextField` values
* Truncate all `CharField`s to the max_length of the destination `model`
    * Substitute blanks ('') for any None values destined for `null=False` fields
Returns a list of unsaved Model objects rather than a queryset
'''
model = model or values_queryset.model
app = app or DEFAULT_APP
new_list = []
for record in values_queryset:
new_record = {}
for k, v in record.iteritems():
field_name = find_field_name(k, model=model, app=app)
field_class = model._meta.get_field(field_name)
# if isinstance(field_class, (djmodels.fields.DateTimeField, djmodels.fields.DateField)):
# new_record[field_name] = unix_timestamp(v)
# try:
if isinstance(field_class, (djmodels.fields.CharField, djmodels.fields.TextField)) or isinstance(v, basestring):
if v is None:
v = ''
else:
v = unicode(v).strip()
if isinstance(field_class, djmodels.fields.CharField):
if len(v) > getattr(field_class, 'max_length', 0):
if verbosity > 0:
print k, v, len(v), '>', field_class.max_length
print 'string = %s' % repr(v)
# truncate strings that are too long for the database field
v = v[:getattr(field_class, 'max_length', 0)]
new_record[field_name] = v
# except:
# pass
if (v is None or new_record[field_name] is None) and not getattr(field_class, 'null'):
new_record[field_name] = ''
if verbosity > 1:
print new_record
new_list += [new_record]
return new_list
|
python
|
def normalize_values_queryset(values_queryset, model=None, app=None, verbosity=1):
'''Shoehorn the values from one database table into another
* Remove padding (leading/trailing spaces) from `CharField` and `TextField` values
* Truncate all `CharField`s to the max_length of the destination `model`
    * Substitute blanks ('') for any None values destined for `null=False` fields
Returns a list of unsaved Model objects rather than a queryset
'''
model = model or values_queryset.model
app = app or DEFAULT_APP
new_list = []
for record in values_queryset:
new_record = {}
for k, v in record.iteritems():
field_name = find_field_name(k, model=model, app=app)
field_class = model._meta.get_field(field_name)
# if isinstance(field_class, (djmodels.fields.DateTimeField, djmodels.fields.DateField)):
# new_record[field_name] = unix_timestamp(v)
# try:
if isinstance(field_class, (djmodels.fields.CharField, djmodels.fields.TextField)) or isinstance(v, basestring):
if v is None:
v = ''
else:
v = unicode(v).strip()
if isinstance(field_class, djmodels.fields.CharField):
if len(v) > getattr(field_class, 'max_length', 0):
if verbosity > 0:
print k, v, len(v), '>', field_class.max_length
print 'string = %s' % repr(v)
# truncate strings that are too long for the database field
v = v[:getattr(field_class, 'max_length', 0)]
new_record[field_name] = v
# except:
# pass
if (v is None or new_record[field_name] is None) and not getattr(field_class, 'null'):
new_record[field_name] = ''
if verbosity > 1:
print new_record
new_list += [new_record]
return new_list
|
[
"def",
"normalize_values_queryset",
"(",
"values_queryset",
",",
"model",
"=",
"None",
",",
"app",
"=",
"None",
",",
"verbosity",
"=",
"1",
")",
":",
"model",
"=",
"model",
"or",
"values_queryset",
".",
"model",
"app",
"=",
"app",
"or",
"DEFAULT_APP",
"new_list",
"=",
"[",
"]",
"for",
"record",
"in",
"values_queryset",
":",
"new_record",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"record",
".",
"iteritems",
"(",
")",
":",
"field_name",
"=",
"find_field_name",
"(",
"k",
",",
"model",
"=",
"model",
",",
"app",
"=",
"app",
")",
"field_class",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"# if isinstance(field_class, (djmodels.fields.DateTimeField, djmodels.fields.DateField)):",
"# new_record[field_name] = unix_timestamp(v)",
"# try:",
"if",
"isinstance",
"(",
"field_class",
",",
"(",
"djmodels",
".",
"fields",
".",
"CharField",
",",
"djmodels",
".",
"fields",
".",
"TextField",
")",
")",
"or",
"isinstance",
"(",
"v",
",",
"basestring",
")",
":",
"if",
"v",
"is",
"None",
":",
"v",
"=",
"''",
"else",
":",
"v",
"=",
"unicode",
"(",
"v",
")",
".",
"strip",
"(",
")",
"if",
"isinstance",
"(",
"field_class",
",",
"djmodels",
".",
"fields",
".",
"CharField",
")",
":",
"if",
"len",
"(",
"v",
")",
">",
"getattr",
"(",
"field_class",
",",
"'max_length'",
",",
"0",
")",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"k",
",",
"v",
",",
"len",
"(",
"v",
")",
",",
"'>'",
",",
"field_class",
".",
"max_length",
"print",
"'string = %s'",
"%",
"repr",
"(",
"v",
")",
"# truncate strings that are too long for the database field",
"v",
"=",
"v",
"[",
":",
"getattr",
"(",
"field_class",
",",
"'max_length'",
",",
"0",
")",
"]",
"new_record",
"[",
"field_name",
"]",
"=",
"v",
"# except:",
"# pass",
"if",
"(",
"v",
"is",
"None",
"or",
"new_record",
"[",
"field_name",
"]",
"is",
"None",
")",
"and",
"not",
"getattr",
"(",
"field_class",
",",
"'null'",
")",
":",
"new_record",
"[",
"field_name",
"]",
"=",
"''",
"if",
"verbosity",
">",
"1",
":",
"print",
"new_record",
"new_list",
"+=",
"[",
"new_record",
"]",
"return",
"new_list"
] |
Shoehorn the values from one database table into another
* Remove padding (leading/trailing spaces) from `CharField` and `TextField` values
* Truncate all `CharField`s to the max_length of the destination `model`
* Substitute blanks ('') for any None values destined for `null=False` fields
Returns a list of unsaved Model objects rather than a queryset
|
[
"Shoehorn",
"the",
"values",
"from",
"one",
"database",
"table",
"into",
"another"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L110-L150
|
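Note that despite the docstring, the function returns a list of cleaned dicts rather than unsaved Model instances, and the `iteritems`/`basestring`/`print`-statement idioms make it Python 2 only. A hypothetical call (the model and field names are placeholders, not from the repo):

rows = SomeModel.objects.values('name', 'price')
clean = normalize_values_queryset(rows, model=SomeModel, verbosity=0)
# each entry is a dict of stripped/truncated values, e.g. {'name': u'Widget', 'price': 9}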
241,252
|
hobson/pug-dj
|
pug/dj/db.py
|
make_choices
|
def make_choices(*args):
"""Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute
>>> make_choices(range(3))
((0, u'0'), (1, u'1'), (2, u'2'))
>>> make_choices(dict(enumerate('abcd')))
((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd'))
>>> make_choices('hello')
(('hello', u'hello'),)
>>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world'))
True
"""
if not args:
return tuple()
if isinstance(args[0], (list, tuple)):
return make_choices(*tuple(args[0]))
elif isinstance(args[0], collections.Mapping):
return tuple((k, unicode(v)) for (k, v) in args[0].iteritems())
elif all(isinstance(arg, (int, float, Decimal, basestring)) for arg in args):
return tuple((k, unicode(k)) for k in args)
|
python
|
def make_choices(*args):
"""Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute
>>> make_choices(range(3))
((0, u'0'), (1, u'1'), (2, u'2'))
>>> make_choices(dict(enumerate('abcd')))
((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd'))
>>> make_choices('hello')
(('hello', u'hello'),)
>>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world'))
True
"""
if not args:
return tuple()
if isinstance(args[0], (list, tuple)):
return make_choices(*tuple(args[0]))
elif isinstance(args[0], collections.Mapping):
return tuple((k, unicode(v)) for (k, v) in args[0].iteritems())
elif all(isinstance(arg, (int, float, Decimal, basestring)) for arg in args):
return tuple((k, unicode(k)) for k in args)
|
[
"def",
"make_choices",
"(",
"*",
"args",
")",
":",
"if",
"not",
"args",
":",
"return",
"tuple",
"(",
")",
"if",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"make_choices",
"(",
"*",
"tuple",
"(",
"args",
"[",
"0",
"]",
")",
")",
"elif",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"collections",
".",
"Mapping",
")",
":",
"return",
"tuple",
"(",
"(",
"k",
",",
"unicode",
"(",
"v",
")",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"args",
"[",
"0",
"]",
".",
"iteritems",
"(",
")",
")",
"elif",
"all",
"(",
"isinstance",
"(",
"arg",
",",
"(",
"int",
",",
"float",
",",
"Decimal",
",",
"basestring",
")",
")",
"for",
"arg",
"in",
"args",
")",
":",
"return",
"tuple",
"(",
"(",
"k",
",",
"unicode",
"(",
"k",
")",
")",
"for",
"k",
"in",
"args",
")"
] |
Convert a 1-D sequence into a 2-D sequence of tuples for use in a Django field choices attribute
>>> make_choices(range(3))
((0, u'0'), (1, u'1'), (2, u'2'))
>>> make_choices(dict(enumerate('abcd')))
((0, u'a'), (1, u'b'), (2, u'c'), (3, u'd'))
>>> make_choices('hello')
(('hello', u'hello'),)
>>> make_choices('hello', 'world') == make_choices(['hello', 'world']) == (('hello', u'hello'), ('world', u'world'))
True
|
[
"Convert",
"a",
"1",
"-",
"D",
"sequence",
"into",
"a",
"2",
"-",
"D",
"sequence",
"of",
"tuples",
"for",
"use",
"in",
"a",
"Django",
"field",
"choices",
"attribute"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L153-L172
|
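The typical destination for this helper is a model field's `choices` argument. A minimal illustrative model (not from the repo; note the function targets Python 2, where `unicode`, `basestring`, and `collections.Mapping` still exist):

from django.db import models

class Shirt(models.Model):  # hypothetical model
    size = models.CharField(max_length=6,
                            choices=make_choices('small', 'medium', 'large'))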
241,253
|
hobson/pug-dj
|
pug/dj/db.py
|
normalize_choices
|
def normalize_choices(db_values, field_name, app=DEFAULT_APP, model_name='', human_readable=True, none_value='Null',
blank_value='Unknown', missing_value='Unknown DB Code'):
'''Output the human-readable strings associated with the list of database values for a model field.
Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`.
In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`.
'''
if app and isinstance(app, basestring):
app = get_app(app)
if not db_values:
return
try:
db_values = dict(db_values)
except:
raise NotImplemented("This function can only handle objects that can be converted to a dict, not lists or querysets returned by django `.values().aggregate()`.")
if not field_name in db_values:
return db_values
if human_readable:
for i, db_value in enumerate(db_values[field_name]):
if db_value in (None, 'None') or app in (None, 'None'):
db_values[field_name][i] = none_value
continue
if isinstance(db_value, basestring):
normalized_code = str(db_value).strip().upper()
# the app is actually the models.py module, NOT the app_name package
# so don't look in app.models, you'll only find django.db.models there (app_name.models.models)
choices = getattr(app, 'CHOICES_%s' % field_name.upper(), [])
normalized_name = None
if choices:
normalized_name = str(choices.get(normalized_code, missing_value)).strip()
elif normalized_code:
normalized_name = 'DB Code: "%s"' % normalized_code
db_values[field_name][i] = normalized_name or blank_value
else:
raise NotImplemented("This function can only convert database choices to human-readable strings.")
return db_values
|
python
|
def normalize_choices(db_values, field_name, app=DEFAULT_APP, model_name='', human_readable=True, none_value='Null',
blank_value='Unknown', missing_value='Unknown DB Code'):
'''Output the human-readable strings associated with the list of database values for a model field.
Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`.
In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`.
'''
if app and isinstance(app, basestring):
app = get_app(app)
if not db_values:
return
try:
db_values = dict(db_values)
except:
raise NotImplemented("This function can only handle objects that can be converted to a dict, not lists or querysets returned by django `.values().aggregate()`.")
if not field_name in db_values:
return db_values
if human_readable:
for i, db_value in enumerate(db_values[field_name]):
if db_value in (None, 'None') or app in (None, 'None'):
db_values[field_name][i] = none_value
continue
if isinstance(db_value, basestring):
normalized_code = str(db_value).strip().upper()
# the app is actually the models.py module, NOT the app_name package
# so don't look in app.models, you'll only find django.db.models there (app_name.models.models)
choices = getattr(app, 'CHOICES_%s' % field_name.upper(), [])
normalized_name = None
if choices:
normalized_name = str(choices.get(normalized_code, missing_value)).strip()
elif normalized_code:
normalized_name = 'DB Code: "%s"' % normalized_code
db_values[field_name][i] = normalized_name or blank_value
else:
raise NotImplemented("This function can only convert database choices to human-readable strings.")
return db_values
|
[
"def",
"normalize_choices",
"(",
"db_values",
",",
"field_name",
",",
"app",
"=",
"DEFAULT_APP",
",",
"model_name",
"=",
"''",
",",
"human_readable",
"=",
"True",
",",
"none_value",
"=",
"'Null'",
",",
"blank_value",
"=",
"'Unknown'",
",",
"missing_value",
"=",
"'Unknown DB Code'",
")",
":",
"if",
"app",
"and",
"isinstance",
"(",
"app",
",",
"basestring",
")",
":",
"app",
"=",
"get_app",
"(",
"app",
")",
"if",
"not",
"db_values",
":",
"return",
"try",
":",
"db_values",
"=",
"dict",
"(",
"db_values",
")",
"except",
":",
"raise",
"NotImplemented",
"(",
"\"This function can only handle objects that can be converted to a dict, not lists or querysets returned by django `.values().aggregate()`.\"",
")",
"if",
"not",
"field_name",
"in",
"db_values",
":",
"return",
"db_values",
"if",
"human_readable",
":",
"for",
"i",
",",
"db_value",
"in",
"enumerate",
"(",
"db_values",
"[",
"field_name",
"]",
")",
":",
"if",
"db_value",
"in",
"(",
"None",
",",
"'None'",
")",
"or",
"app",
"in",
"(",
"None",
",",
"'None'",
")",
":",
"db_values",
"[",
"field_name",
"]",
"[",
"i",
"]",
"=",
"none_value",
"continue",
"if",
"isinstance",
"(",
"db_value",
",",
"basestring",
")",
":",
"normalized_code",
"=",
"str",
"(",
"db_value",
")",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"# the app is actually the models.py module, NOT the app_name package",
"# so don't look in app.models, you'll only find django.db.models there (app_name.models.models)",
"choices",
"=",
"getattr",
"(",
"app",
",",
"'CHOICES_%s'",
"%",
"field_name",
".",
"upper",
"(",
")",
",",
"[",
"]",
")",
"normalized_name",
"=",
"None",
"if",
"choices",
":",
"normalized_name",
"=",
"str",
"(",
"choices",
".",
"get",
"(",
"normalized_code",
",",
"missing_value",
")",
")",
".",
"strip",
"(",
")",
"elif",
"normalized_code",
":",
"normalized_name",
"=",
"'DB Code: \"%s\"'",
"%",
"normalized_code",
"db_values",
"[",
"field_name",
"]",
"[",
"i",
"]",
"=",
"normalized_name",
"or",
"blank_value",
"else",
":",
"raise",
"NotImplemented",
"(",
"\"This function can only convert database choices to human-readable strings.\"",
")",
"return",
"db_values"
] |
Output the human-readable strings associated with the list of database values for a model field.
Uses the translation dictionary `CHOICES_<FIELD_NAME>` attribute for the given `model_name`.
In addition, translate `None` into 'Null', or whatever string is indicated by `none_value`.
|
[
"Output",
"the",
"human",
"-",
"readable",
"strings",
"associated",
"with",
"the",
"list",
"of",
"database",
"values",
"for",
"a",
"model",
"field",
"."
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L177-L213
|
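Two things are worth flagging. First, the lookup convention: the app's models module is expected to carry a `CHOICES_<FIELD_NAME>` dict. Second, `raise NotImplemented(...)` is a bug in the source: `NotImplemented` is a sentinel value, not an exception, so calling it raises a `TypeError`; `NotImplementedError` was almost certainly intended. A hypothetical use of the convention (`some_models_module` is a placeholder):

# translation table defined at module level in the app's models.py
CHOICES_STATUS = {'A': 'Active', 'I': 'Inactive'}

db_values = {'status': ['a', None, 'X']}
normalize_choices(db_values, 'status', app=some_models_module)
# -> {'status': ['Active', 'Null', 'Unknown DB Code']}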
241,254
|
hobson/pug-dj
|
pug/dj/db.py
|
get_app
|
def get_app(app=None, verbosity=0):
"""Uses django.db.djmodels.get_app and fuzzywuzzy to get the models module for a django app
    Retrieve an app module from an app name string, even if misspelled (uses fuzzywuzzy to find the best match)
    To get a list of all the apps use `get_app(None)`, `get_app([])`, or `get_app(())`
To get a single random app use `get_app('')`
>>> get_app('call').__class__.__name__ == 'module'
True
>>> get_app('model').__name__ == 'miner.models'
True
>>> isinstance(get_app('whatever'), ModuleType)
True
>>> isinstance(get_app(''), ModuleType)
True
isinstance(get_app(), ModuleType)
False
isinstance(get_app(), list)
True
"""
# print 'get_app(', app
if not app:
# for an empty list, tuple or None, just get all apps
if isinstance(app, (type(None), list, tuple)):
return [app_class.__package__ for app_class in djmodels.get_apps() if app_class and app_class.__package__]
# for a blank string, get the default app(s)
else:
if get_app.default:
return get_app(get_app.default)
else:
return djmodels.get_apps()[-1]
elif isinstance(app, ModuleType):
return app
elif isinstance(app, basestring):
if app.strip().endswith('.models'):
return get_app(app[:-len('.models')])
elif '.' in app:
return get_app('.'.join(app.split('.')[1:])) # django.db.models only looks at the module name in the INSTALLED_APPS list!
try:
if verbosity > 1:
print 'Attempting django.db.models.get_app(%r)' % app
return djmodels.get_app(app)
except ImproperlyConfigured:
if verbosity > 0:
print 'WARNING: unable to find app = %r' % app
if verbosity > 2:
print 'Trying a fuzzy match on app = %r' % app
app_names = [app_class.__package__ for app_class in djmodels.get_apps() if app_class and app_class.__package__]
fuzzy_app_name = fuzzy.extractOne(str(app), app_names)[0]
if verbosity > 0:
print 'WARNING: Best fuzzy match for app name %r is %s' % (app, fuzzy_app_name)
return djmodels.get_app(fuzzy_app_name.split('.')[-1])
|
python
|
def get_app(app=None, verbosity=0):
"""Uses django.db.djmodels.get_app and fuzzywuzzy to get the models module for a django app
    Retrieve an app module from an app name string, even if misspelled (uses fuzzywuzzy to find the best match)
    To get a list of all the apps use `get_app(None)`, `get_app([])`, or `get_app(())`
To get a single random app use `get_app('')`
>>> get_app('call').__class__.__name__ == 'module'
True
>>> get_app('model').__name__ == 'miner.models'
True
>>> isinstance(get_app('whatever'), ModuleType)
True
>>> isinstance(get_app(''), ModuleType)
True
isinstance(get_app(), ModuleType)
False
isinstance(get_app(), list)
True
"""
# print 'get_app(', app
if not app:
# for an empty list, tuple or None, just get all apps
if isinstance(app, (type(None), list, tuple)):
return [app_class.__package__ for app_class in djmodels.get_apps() if app_class and app_class.__package__]
# for a blank string, get the default app(s)
else:
if get_app.default:
return get_app(get_app.default)
else:
return djmodels.get_apps()[-1]
elif isinstance(app, ModuleType):
return app
elif isinstance(app, basestring):
if app.strip().endswith('.models'):
return get_app(app[:-len('.models')])
elif '.' in app:
return get_app('.'.join(app.split('.')[1:])) # django.db.models only looks at the module name in the INSTALLED_APPS list!
try:
if verbosity > 1:
print 'Attempting django.db.models.get_app(%r)' % app
return djmodels.get_app(app)
except ImproperlyConfigured:
if verbosity > 0:
print 'WARNING: unable to find app = %r' % app
if verbosity > 2:
print 'Trying a fuzzy match on app = %r' % app
app_names = [app_class.__package__ for app_class in djmodels.get_apps() if app_class and app_class.__package__]
fuzzy_app_name = fuzzy.extractOne(str(app), app_names)[0]
if verbosity > 0:
print 'WARNING: Best fuzzy match for app name %r is %s' % (app, fuzzy_app_name)
return djmodels.get_app(fuzzy_app_name.split('.')[-1])
|
[
"def",
"get_app",
"(",
"app",
"=",
"None",
",",
"verbosity",
"=",
"0",
")",
":",
"# print 'get_app(', app",
"if",
"not",
"app",
":",
"# for an empty list, tuple or None, just get all apps",
"if",
"isinstance",
"(",
"app",
",",
"(",
"type",
"(",
"None",
")",
",",
"list",
",",
"tuple",
")",
")",
":",
"return",
"[",
"app_class",
".",
"__package__",
"for",
"app_class",
"in",
"djmodels",
".",
"get_apps",
"(",
")",
"if",
"app_class",
"and",
"app_class",
".",
"__package__",
"]",
"# for a blank string, get the default app(s)",
"else",
":",
"if",
"get_app",
".",
"default",
":",
"return",
"get_app",
"(",
"get_app",
".",
"default",
")",
"else",
":",
"return",
"djmodels",
".",
"get_apps",
"(",
")",
"[",
"-",
"1",
"]",
"elif",
"isinstance",
"(",
"app",
",",
"ModuleType",
")",
":",
"return",
"app",
"elif",
"isinstance",
"(",
"app",
",",
"basestring",
")",
":",
"if",
"app",
".",
"strip",
"(",
")",
".",
"endswith",
"(",
"'.models'",
")",
":",
"return",
"get_app",
"(",
"app",
"[",
":",
"-",
"len",
"(",
"'.models'",
")",
"]",
")",
"elif",
"'.'",
"in",
"app",
":",
"return",
"get_app",
"(",
"'.'",
".",
"join",
"(",
"app",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
")",
")",
"# django.db.models only looks at the module name in the INSTALLED_APPS list! ",
"try",
":",
"if",
"verbosity",
">",
"1",
":",
"print",
"'Attempting django.db.models.get_app(%r)'",
"%",
"app",
"return",
"djmodels",
".",
"get_app",
"(",
"app",
")",
"except",
"ImproperlyConfigured",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"'WARNING: unable to find app = %r'",
"%",
"app",
"if",
"verbosity",
">",
"2",
":",
"print",
"'Trying a fuzzy match on app = %r'",
"%",
"app",
"app_names",
"=",
"[",
"app_class",
".",
"__package__",
"for",
"app_class",
"in",
"djmodels",
".",
"get_apps",
"(",
")",
"if",
"app_class",
"and",
"app_class",
".",
"__package__",
"]",
"fuzzy_app_name",
"=",
"fuzzy",
".",
"extractOne",
"(",
"str",
"(",
"app",
")",
",",
"app_names",
")",
"[",
"0",
"]",
"if",
"verbosity",
">",
"0",
":",
"print",
"'WARNING: Best fuzzy match for app name %r is %s'",
"%",
"(",
"app",
",",
"fuzzy_app_name",
")",
"return",
"djmodels",
".",
"get_app",
"(",
"fuzzy_app_name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
")"
] |
Uses django.db.djmodels.get_app and fuzzywuzzy to get the models module for a django app
Retrieve an app module from an app name string, even if misspelled (uses fuzzywuzzy to find the best match)
To get a list of all the apps use `get_app(None)`, `get_app([])`, or `get_app(())`
To get a single random app use `get_app('')`
>>> get_app('call').__class__.__name__ == 'module'
True
>>> get_app('model').__name__ == 'miner.models'
True
>>> isinstance(get_app('whatever'), ModuleType)
True
>>> isinstance(get_app(''), ModuleType)
True
isinstance(get_app(), ModuleType)
False
isinstance(get_app(), list)
True
|
[
"Uses",
"django",
".",
"db",
".",
"djmodels",
".",
"get_app",
"and",
"fuzzywuzzy",
"to",
"get",
"the",
"models",
"module",
"for",
"a",
"django",
"app"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L216-L267
|
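The blank-string branch reads a `default` attribute off the function object itself, so callers are expected to set it first; the function also depends on `django.db.models.get_app`/`get_apps`, which were removed in Django 1.9, so this is pre-1.9 code. Hypothetical usage (app names are placeholders):

get_app.default = 'miner'        # must be set before calling get_app('')
models_module = get_app('')      # -> the 'miner' models module
all_app_names = get_app(None)    # -> list of installed app package names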
241,255
|
hobson/pug-dj
|
pug/dj/db.py
|
get_field
|
def get_field(field):
"""Return a field object based on a dot-delimited app.model.field name"""
if isinstance(field, djmodels.fields.Field):
return field
elif isinstance(field, basestring):
field = field.split('.')
if len(field) == 3:
model = get_model(app=field[0], model=field[1])
elif len(field) == 2:
model = get_model(app=DEFAULT_APP, model=field[0])
else:
return None
raise NotImplementedError("Unknown default model name. Don't know where to look for field %s" % '.'.join(field))
field = model._meta.get_field(field[-1])
return field
|
python
|
def get_field(field):
"""Return a field object based on a dot-delimited app.model.field name"""
if isinstance(field, djmodels.fields.Field):
return field
elif isinstance(field, basestring):
field = field.split('.')
if len(field) == 3:
model = get_model(app=field[0], model=field[1])
elif len(field) == 2:
model = get_model(app=DEFAULT_APP, model=field[0])
else:
return None
raise NotImplementedError("Unknown default model name. Don't know where to look for field %s" % '.'.join(field))
field = model._meta.get_field(field[-1])
return field
|
[
"def",
"get_field",
"(",
"field",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"djmodels",
".",
"fields",
".",
"Field",
")",
":",
"return",
"field",
"elif",
"isinstance",
"(",
"field",
",",
"basestring",
")",
":",
"field",
"=",
"field",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"field",
")",
"==",
"3",
":",
"model",
"=",
"get_model",
"(",
"app",
"=",
"field",
"[",
"0",
"]",
",",
"model",
"=",
"field",
"[",
"1",
"]",
")",
"elif",
"len",
"(",
"field",
")",
"==",
"2",
":",
"model",
"=",
"get_model",
"(",
"app",
"=",
"DEFAULT_APP",
",",
"model",
"=",
"field",
"[",
"0",
"]",
")",
"else",
":",
"return",
"None",
"raise",
"NotImplementedError",
"(",
"\"Unknown default model name. Don't know where to look for field %s\"",
"%",
"'.'",
".",
"join",
"(",
"field",
")",
")",
"field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field",
"[",
"-",
"1",
"]",
")",
"return",
"field"
] |
Return a field object based on a dot-delimited app.model.field name
|
[
"Return",
"a",
"field",
"object",
"based",
"on",
"a",
"dot",
"-",
"delimited",
"app",
".",
"model",
".",
"field",
"name"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L380-L394
|
241,256
|
hobson/pug-dj
|
pug/dj/db.py
|
get_primary_key
|
def get_primary_key(model):
"""Get the name of the field in a model that has primary_key=True"""
model = get_model(model)
return (field.name for field in model._meta.fields if field.primary_key).next()
|
python
|
def get_primary_key(model):
"""Get the name of the field in a model that has primary_key=True"""
model = get_model(model)
return (field.name for field in model._meta.fields if field.primary_key).next()
|
[
"def",
"get_primary_key",
"(",
"model",
")",
":",
"model",
"=",
"get_model",
"(",
"model",
")",
"return",
"(",
"field",
".",
"name",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
"if",
"field",
".",
"primary_key",
")",
".",
"next",
"(",
")"
] |
Get the name of the field in a model that has primary_key=True
|
[
"Get",
"the",
"name",
"of",
"the",
"field",
"in",
"a",
"model",
"that",
"has",
"primary_key",
"=",
"True"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L397-L400
|
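The generator's `.next()` method is Python 2 only; under Python 3 the same one-liner uses the built-in `next()`:

def get_primary_key(model):
    model = get_model(model)
    # next(...) replaces the Python 2 generator method .next()
    return next(field.name for field in model._meta.fields if field.primary_key)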
241,257
|
hobson/pug-dj
|
pug/dj/db.py
|
querysets_from_title_prefix
|
def querysets_from_title_prefix(title_prefix=None, model=DEFAULT_MODEL, app=DEFAULT_APP):
"""Return a list of Querysets from a list of model numbers"""
if title_prefix is None:
title_prefix = [None]
filter_dicts = []
model_list = []
if isinstance(title_prefix, basestring):
title_prefix = title_prefix.split(',')
elif not isinstance(title_prefix, dict):
title_prefix = title_prefix
if isinstance(title_prefix, (list, tuple)):
for i, title_prefix in enumerate(title_prefix):
if isinstance(title_prefix, basestring):
if title_prefix.lower().endswith('sales'):
title_prefix = title_prefix[:-5].strip('_')
title_prefix += [title_prefix]
model_list += ['WikiItem']
else:
model_list += [DEFAULT_MODEL]
filter_dicts += [{'model__startswith': title_prefix}]
elif isinstance(title_prefix, dict):
filter_dicts = [title_prefix]
elif isinstance(title_prefix, (list, tuple)):
filter_dicts = util.listify(title_prefix)
model = get_model(model, app)
querysets = []
for filter_dict, model in zip(filter_dicts, model_list):
filter_dict = filter_dict or {}
querysets += [model.objects.filter(**filter_dict)]
|
python
|
def querysets_from_title_prefix(title_prefix=None, model=DEFAULT_MODEL, app=DEFAULT_APP):
"""Return a list of Querysets from a list of model numbers"""
if title_prefix is None:
title_prefix = [None]
filter_dicts = []
model_list = []
if isinstance(title_prefix, basestring):
title_prefix = title_prefix.split(',')
elif not isinstance(title_prefix, dict):
title_prefix = title_prefix
if isinstance(title_prefix, (list, tuple)):
for i, title_prefix in enumerate(title_prefix):
if isinstance(title_prefix, basestring):
if title_prefix.lower().endswith('sales'):
title_prefix = title_prefix[:-5].strip('_')
title_prefix += [title_prefix]
model_list += ['WikiItem']
else:
model_list += [DEFAULT_MODEL]
filter_dicts += [{'model__startswith': title_prefix}]
elif isinstance(title_prefix, dict):
filter_dicts = [title_prefix]
elif isinstance(title_prefix, (list, tuple)):
filter_dicts = util.listify(title_prefix)
model = get_model(model, app)
querysets = []
for filter_dict, model in zip(filter_dicts, model_list):
filter_dict = filter_dict or {}
querysets += [model.objects.filter(**filter_dict)]
|
[
"def",
"querysets_from_title_prefix",
"(",
"title_prefix",
"=",
"None",
",",
"model",
"=",
"DEFAULT_MODEL",
",",
"app",
"=",
"DEFAULT_APP",
")",
":",
"if",
"title_prefix",
"is",
"None",
":",
"title_prefix",
"=",
"[",
"None",
"]",
"filter_dicts",
"=",
"[",
"]",
"model_list",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"title_prefix",
",",
"basestring",
")",
":",
"title_prefix",
"=",
"title_prefix",
".",
"split",
"(",
"','",
")",
"elif",
"not",
"isinstance",
"(",
"title_prefix",
",",
"dict",
")",
":",
"title_prefix",
"=",
"title_prefix",
"if",
"isinstance",
"(",
"title_prefix",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"i",
",",
"title_prefix",
"in",
"enumerate",
"(",
"title_prefix",
")",
":",
"if",
"isinstance",
"(",
"title_prefix",
",",
"basestring",
")",
":",
"if",
"title_prefix",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'sales'",
")",
":",
"title_prefix",
"=",
"title_prefix",
"[",
":",
"-",
"5",
"]",
".",
"strip",
"(",
"'_'",
")",
"title_prefix",
"+=",
"[",
"title_prefix",
"]",
"model_list",
"+=",
"[",
"'WikiItem'",
"]",
"else",
":",
"model_list",
"+=",
"[",
"DEFAULT_MODEL",
"]",
"filter_dicts",
"+=",
"[",
"{",
"'model__startswith'",
":",
"title_prefix",
"}",
"]",
"elif",
"isinstance",
"(",
"title_prefix",
",",
"dict",
")",
":",
"filter_dicts",
"=",
"[",
"title_prefix",
"]",
"elif",
"isinstance",
"(",
"title_prefix",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"filter_dicts",
"=",
"util",
".",
"listify",
"(",
"title_prefix",
")",
"model",
"=",
"get_model",
"(",
"model",
",",
"app",
")",
"querysets",
"=",
"[",
"]",
"for",
"filter_dict",
",",
"model",
"in",
"zip",
"(",
"filter_dicts",
",",
"model_list",
")",
":",
"filter_dict",
"=",
"filter_dict",
"or",
"{",
"}",
"querysets",
"+=",
"[",
"model",
".",
"objects",
".",
"filter",
"(",
"*",
"*",
"filter_dict",
")",
"]"
] |
Return a list of Querysets from a list of model numbers
|
[
"Return",
"a",
"list",
"of",
"Querysets",
"from",
"a",
"list",
"of",
"model",
"numbers"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L481-L513
|
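As written the function never returns `querysets`, `title_prefix += [title_prefix]` rebinds the loop variable instead of extending a list, and the final loop calls `.objects` on the strings stored in `model_list`. One plausible repair of the function's tail, under the assumption that the intent was to resolve each model name and return the querysets (a guess, not the repo's code):

    querysets = []
    for filter_dict, model_name in zip(filter_dicts, model_list):
        model_cls = get_model(model_name, app)  # model_list holds names, not classes
        querysets.append(model_cls.objects.filter(**(filter_dict or {})))
    return querysets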
241,258
|
hobson/pug-dj
|
pug/dj/db.py
|
find_field_names
|
def find_field_names(fields, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, pad_with_none=False):
"""Use fuzzy string matching to find similar model field names without consulting a synonyms list
Returns:
    list: A list of model field names (strings) sorted from most likely to least likely.
    [] If no similar field names could be found in the indicated model
    [None] If none found and `pad_with_none` is set
Examples:
>>> find_field_names(['date_time', 'title_prefix', 'sales'], model='WikiItem')
['date', 'model', 'net_sales']
"""
fields = util.listify(fields)
model = get_model(model, app)
available_field_names = model._meta.get_all_field_names()
matched_fields = []
for field_name in fields:
match = fuzzy.extractOne(str(field_name), available_field_names)
if match and match[1] is not None and match[1] >= score_cutoff:
matched_fields += [match[0]]
elif pad_with_none:
matched_fields += [None]
return matched_fields
|
python
|
def find_field_names(fields, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, pad_with_none=False):
"""Use fuzzy string matching to find similar model field names without consulting a synonyms list
Returns:
    list: A list of model field names (strings) sorted from most likely to least likely.
    [] If no similar field names could be found in the indicated model
    [None] If none found and `pad_with_none` is set
Examples:
>>> find_field_names(['date_time', 'title_prefix', 'sales'], model='WikiItem')
['date', 'model', 'net_sales']
"""
fields = util.listify(fields)
model = get_model(model, app)
available_field_names = model._meta.get_all_field_names()
matched_fields = []
for field_name in fields:
match = fuzzy.extractOne(str(field_name), available_field_names)
if match and match[1] is not None and match[1] >= score_cutoff:
matched_fields += [match[0]]
elif pad_with_none:
matched_fields += [None]
return matched_fields
|
[
"def",
"find_field_names",
"(",
"fields",
",",
"model",
"=",
"DEFAULT_MODEL",
",",
"app",
"=",
"DEFAULT_APP",
",",
"score_cutoff",
"=",
"50",
",",
"pad_with_none",
"=",
"False",
")",
":",
"fields",
"=",
"util",
".",
"listify",
"(",
"fields",
")",
"model",
"=",
"get_model",
"(",
"model",
",",
"app",
")",
"available_field_names",
"=",
"model",
".",
"_meta",
".",
"get_all_field_names",
"(",
")",
"matched_fields",
"=",
"[",
"]",
"for",
"field_name",
"in",
"fields",
":",
"match",
"=",
"fuzzy",
".",
"extractOne",
"(",
"str",
"(",
"field_name",
")",
",",
"available_field_names",
")",
"if",
"match",
"and",
"match",
"[",
"1",
"]",
"is",
"not",
"None",
"and",
"match",
"[",
"1",
"]",
">=",
"score_cutoff",
":",
"matched_fields",
"+=",
"[",
"match",
"[",
"0",
"]",
"]",
"elif",
"pad_with_none",
":",
"matched_fields",
"+=",
"[",
"None",
"]",
"return",
"matched_fields"
] |
Use fuzzy string matching to find similar model field names without consulting a synonyms list
Returns:
list: A list of model field names (strings) sorted from most likely to least likely.
[] If no similar field names could be found in the indicated model
[None] If none found and `pad_with_none` is set
Examples:
>>> find_field_names(['date_time', 'title_prefix', 'sales'], model='WikiItem')
['date', 'model', 'net_sales']
|
[
"Use",
"fuzzy",
"string",
"matching",
"to",
"find",
"similar",
"model",
"field",
"names",
"without",
"consulting",
"a",
"synonyms",
"list"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L709-L733
|
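`Model._meta.get_all_field_names()` was removed in Django 1.10, so on modern Django the lookup line needs the documented replacement:

# Django >= 1.10 equivalent of the removed get_all_field_names()
available_field_names = [f.name for f in model._meta.get_fields()]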
241,259
|
hobson/pug-dj
|
pug/dj/db.py
|
model_from_path
|
def model_from_path(model_path, fuzziness=False):
"""Find the model class for a given model path like 'project.app.model'
Args:
    model_path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class
"""
app_name = '.'.join(model_path.split('.')[:-1])
model_name = model_path.split('.')[-1]
if not app_name:
return None
module = importlib.import_module(app_name)
try:
model = getattr(module, model_name)
except AttributeError:
try:
model = getattr(getattr(module, 'models'), model_name)
except AttributeError:
model = get_model(model_name, app_name, fuzziness=fuzziness)
return model
|
python
|
def model_from_path(model_path, fuzziness=False):
"""Find the model class for a given model path like 'project.app.model'
Args:
    model_path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class
"""
app_name = '.'.join(model_path.split('.')[:-1])
model_name = model_path.split('.')[-1]
if not app_name:
return None
module = importlib.import_module(app_name)
try:
model = getattr(module, model_name)
except AttributeError:
try:
model = getattr(getattr(module, 'models'), model_name)
except AttributeError:
model = get_model(model_name, app_name, fuzziness=fuzziness)
return model
|
[
"def",
"model_from_path",
"(",
"model_path",
",",
"fuzziness",
"=",
"False",
")",
":",
"app_name",
"=",
"'.'",
".",
"join",
"(",
"model_path",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
")",
"model_name",
"=",
"model_path",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"not",
"app_name",
":",
"return",
"None",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"app_name",
")",
"try",
":",
"model",
"=",
"getattr",
"(",
"module",
",",
"model_name",
")",
"except",
"AttributeError",
":",
"try",
":",
"model",
"=",
"getattr",
"(",
"getattr",
"(",
"module",
",",
"'models'",
")",
",",
"model_name",
")",
"except",
"AttributeError",
":",
"model",
"=",
"get_model",
"(",
"model_name",
",",
"app_name",
",",
"fuzziness",
"=",
"fuzziness",
")",
"return",
"model"
] |
Find the model class for a given model path like 'project.app.model'
Args:
model_path (str): dot-delimited model path, like 'project.app.model'
Returns:
Django Model-based class
|
[
"Find",
"the",
"model",
"class",
"for",
"a",
"given",
"model",
"path",
"like",
"project",
".",
"app",
".",
"model"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L736-L760
|
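A hypothetical call, reusing the `miner.models.WikiItem` path that appears in this module's doctests: the leading package is imported, and the trailing attribute is looked up on the imported module, falling back to its `models` attribute and then to the fuzzy `get_model`.

WikiItem = model_from_path('miner.models.WikiItem')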
241,260
|
hobson/pug-dj
|
pug/dj/db.py
|
find_synonymous_field
|
def find_synonymous_field(field, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, root_preference=1.02):
"""Use a dictionary of synonyms and fuzzy string matching to find a similarly named field
Returns:
A single model field name (string)
Examples:
>>> find_synonymous_field('date', model='WikiItem')
'end_date_time'
>>> find_synonymous_field('date', model='WikiItem')
'date_time'
>>> find_synonymous_field('time', model='WikiItem')
'date_time'
"""
fields = util.listify(field) + list(synonyms(field))
model = get_model(model, app)
available_field_names = model._meta.get_all_field_names()
best_match, best_ratio = None, None
for i, field_name in enumerate(fields):
match = fuzzy.extractOne(str(field_name), available_field_names)
# print match
if match and match[1] >= score_cutoff:
if not best_match or match[1] > (root_preference * best_ratio):
best_match, best_ratio = match
return best_match
|
python
|
def find_synonymous_field(field, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, root_preference=1.02):
"""Use a dictionary of synonyms and fuzzy string matching to find a similarly named field
Returns:
A single model field name (string)
Examples:
>>> find_synonymous_field('date', model='WikiItem')
'end_date_time'
>>> find_synonymous_field('date', model='WikiItem')
'date_time'
>>> find_synonymous_field('time', model='WikiItem')
'date_time'
"""
fields = util.listify(field) + list(synonyms(field))
model = get_model(model, app)
available_field_names = model._meta.get_all_field_names()
best_match, best_ratio = None, None
for i, field_name in enumerate(fields):
match = fuzzy.extractOne(str(field_name), available_field_names)
# print match
if match and match[1] >= score_cutoff:
if not best_match or match[1] > (root_preference * best_ratio):
best_match, best_ratio = match
return best_match
|
[
"def",
"find_synonymous_field",
"(",
"field",
",",
"model",
"=",
"DEFAULT_MODEL",
",",
"app",
"=",
"DEFAULT_APP",
",",
"score_cutoff",
"=",
"50",
",",
"root_preference",
"=",
"1.02",
")",
":",
"fields",
"=",
"util",
".",
"listify",
"(",
"field",
")",
"+",
"list",
"(",
"synonyms",
"(",
"field",
")",
")",
"model",
"=",
"get_model",
"(",
"model",
",",
"app",
")",
"available_field_names",
"=",
"model",
".",
"_meta",
".",
"get_all_field_names",
"(",
")",
"best_match",
",",
"best_ratio",
"=",
"None",
",",
"None",
"for",
"i",
",",
"field_name",
"in",
"enumerate",
"(",
"fields",
")",
":",
"match",
"=",
"fuzzy",
".",
"extractOne",
"(",
"str",
"(",
"field_name",
")",
",",
"available_field_names",
")",
"# print match",
"if",
"match",
"and",
"match",
"[",
"1",
"]",
">=",
"score_cutoff",
":",
"if",
"not",
"best_match",
"or",
"match",
"[",
"1",
"]",
">",
"(",
"root_preference",
"*",
"best_ratio",
")",
":",
"best_match",
",",
"best_ratio",
"=",
"match",
"return",
"best_match"
] |
Use a dictionary of synonyms and fuzzy string matching to find a similarly named field
Returns:
A single model field name (string)
Examples:
>>> find_synonymous_field('date', model='WikiItem')
'end_date_time'
>>> find_synonymous_field('date', model='WikiItem')
'date_time'
>>> find_synonymous_field('time', model='WikiItem')
'date_time'
|
[
"Use",
"a",
"dictionary",
"of",
"synonyms",
"and",
"fuzzy",
"string",
"matching",
"to",
"find",
"a",
"similarly",
"named",
"field"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L763-L789
|
241,261
|
hobson/pug-dj
|
pug/dj/db.py
|
find_model
|
def find_model(model_name, apps=settings.INSTALLED_APPS, fuzziness=0):
"""Find model_name among indicated Django apps and return Model class
Examples:
To find models in an app called "miner":
>>> find_model('WikiItem', 'miner')
>>> find_model('Connection', 'miner')
>>> find_model('InvalidModelName')
"""
    # if it looks like a file system path rather than django project.app.model path then return it as a string
if '/' in model_name:
return model_name
if not apps and isinstance(model_name, basestring) and '.' in model_name:
apps = [model_name.split('.')[0]]
apps = util.listify(apps or settings.INSTALLED_APPS)
for app in apps:
# print 'getting %r, from app %r' % (model_name, app)
model = get_model(model=model_name, app=app, fuzziness=fuzziness)
if model:
return model
return None
|
python
|
def find_model(model_name, apps=settings.INSTALLED_APPS, fuzziness=0):
"""Find model_name among indicated Django apps and return Model class
Examples:
To find models in an app called "miner":
>>> find_model('WikiItem', 'miner')
>>> find_model('Connection', 'miner')
>>> find_model('InvalidModelName')
"""
    # if it looks like a file system path rather than django project.app.model path then return it as a string
if '/' in model_name:
return model_name
if not apps and isinstance(model_name, basestring) and '.' in model_name:
apps = [model_name.split('.')[0]]
apps = util.listify(apps or settings.INSTALLED_APPS)
for app in apps:
# print 'getting %r, from app %r' % (model_name, app)
model = get_model(model=model_name, app=app, fuzziness=fuzziness)
if model:
return model
return None
|
[
"def",
"find_model",
"(",
"model_name",
",",
"apps",
"=",
"settings",
".",
"INSTALLED_APPS",
",",
"fuzziness",
"=",
"0",
")",
":",
"# if it looks like a file system path rather than django project.app.model path the return it as a string",
"if",
"'/'",
"in",
"model_name",
":",
"return",
"model_name",
"if",
"not",
"apps",
"and",
"isinstance",
"(",
"model_name",
",",
"basestring",
")",
"and",
"'.'",
"in",
"model_name",
":",
"apps",
"=",
"[",
"model_name",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"]",
"apps",
"=",
"util",
".",
"listify",
"(",
"apps",
"or",
"settings",
".",
"INSTALLED_APPS",
")",
"for",
"app",
"in",
"apps",
":",
"# print 'getting %r, from app %r' % (model_name, app)",
"model",
"=",
"get_model",
"(",
"model",
"=",
"model_name",
",",
"app",
"=",
"app",
",",
"fuzziness",
"=",
"fuzziness",
")",
"if",
"model",
":",
"return",
"model",
"return",
"None"
] |
Find model_name among indicated Django apps and return Model class
Examples:
To find models in an app called "miner":
>>> find_model('WikiItem', 'miner')
>>> find_model('Connection', 'miner')
>>> find_model('InvalidModelName')
|
[
"Find",
"model_name",
"among",
"indicated",
"Django",
"apps",
"and",
"return",
"Model",
"class"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L792-L814
|
241,262
|
hobson/pug-dj
|
pug/dj/db.py
|
lagged_in_date
|
def lagged_in_date(x=None, y=None, filter_dict=None, model='WikiItem', app=DEFAULT_APP, sort=True, limit=30000, lag=1, pad=0, truncate=True):
"""
Lag the y values by the specified number of samples.
FIXME: sort has no effect when sequences provided in x, y instead of field names
>>> lagged_in_date(x=[.1,.2,.3,.4], y=[1,2,3,4], limit=4, lag=1)
([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3])
>>> lagged_in_date(x=[.1,.2,.3,.4], y=[1,2,3,4], lag=1, truncate=True)
([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3])
"""
lag = int(lag or 0)
#print 'X, Y:', x, y
if isinstance(x, basestring) and isinstance(y, basestring):
x, y = sequence_from_filter_spec([find_synonymous_field(x), find_synonymous_field(y)], filter_dict, model=model,
app=app, sort=sort, limit=limit)
if y and len(y) == len(x):
if sort:
xy = sorted(zip(x,y), reverse=bool(int(sort) < 0))
x, y = [col1 for col1, col2 in xy], [col2 for col1, col2 in xy]
return x, lagged_seq(y, lag=lag, pad=pad, truncate=truncate)
if x and len(x) and 2 == len(x) <= len(x[0]):
#print 'X:', x
x, y = x[0], lagged_seq(x[1], lag=lag, pad=pad, truncate=truncate)
if truncate:
print truncate, lag
if lag >= 0:
x = x[lag:]
else:
x = x[:lag]
#print x, y
return x, y
|
python
|
def lagged_in_date(x=None, y=None, filter_dict=None, model='WikiItem', app=DEFAULT_APP, sort=True, limit=30000, lag=1, pad=0, truncate=True):
"""
Lag the y values by the specified number of samples.
FIXME: sort has no effect when sequences provided in x, y instead of field names
>>> lagged_in_date(x=[.1,.2,.3,.4], y=[1,2,3,4], limit=4, lag=1)
([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3])
>>> lagged_in_date(x=[.1,.2,.3,.4], y=[1,2,3,4], lag=1, truncate=True)
([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3])
"""
lag = int(lag or 0)
#print 'X, Y:', x, y
if isinstance(x, basestring) and isinstance(y, basestring):
x, y = sequence_from_filter_spec([find_synonymous_field(x), find_synonymous_field(y)], filter_dict, model=model,
app=app, sort=sort, limit=limit)
if y and len(y) == len(x):
if sort:
xy = sorted(zip(x,y), reverse=bool(int(sort) < 0))
x, y = [col1 for col1, col2 in xy], [col2 for col1, col2 in xy]
return x, lagged_seq(y, lag=lag, pad=pad, truncate=truncate)
if x and len(x) and 2 == len(x) <= len(x[0]):
#print 'X:', x
x, y = x[0], lagged_seq(x[1], lag=lag, pad=pad, truncate=truncate)
if truncate:
print truncate, lag
if lag >= 0:
x = x[lag:]
else:
x = x[:lag]
#print x, y
return x, y
|
[
"def",
"lagged_in_date",
"(",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"filter_dict",
"=",
"None",
",",
"model",
"=",
"'WikiItem'",
",",
"app",
"=",
"DEFAULT_APP",
",",
"sort",
"=",
"True",
",",
"limit",
"=",
"30000",
",",
"lag",
"=",
"1",
",",
"pad",
"=",
"0",
",",
"truncate",
"=",
"True",
")",
":",
"lag",
"=",
"int",
"(",
"lag",
"or",
"0",
")",
"#print 'X, Y:', x, y",
"if",
"isinstance",
"(",
"x",
",",
"basestring",
")",
"and",
"isinstance",
"(",
"y",
",",
"basestring",
")",
":",
"x",
",",
"y",
"=",
"sequence_from_filter_spec",
"(",
"[",
"find_synonymous_field",
"(",
"x",
")",
",",
"find_synonymous_field",
"(",
"y",
")",
"]",
",",
"filter_dict",
",",
"model",
"=",
"model",
",",
"app",
"=",
"app",
",",
"sort",
"=",
"sort",
",",
"limit",
"=",
"limit",
")",
"if",
"y",
"and",
"len",
"(",
"y",
")",
"==",
"len",
"(",
"x",
")",
":",
"if",
"sort",
":",
"xy",
"=",
"sorted",
"(",
"zip",
"(",
"x",
",",
"y",
")",
",",
"reverse",
"=",
"bool",
"(",
"int",
"(",
"sort",
")",
"<",
"0",
")",
")",
"x",
",",
"y",
"=",
"[",
"col1",
"for",
"col1",
",",
"col2",
"in",
"xy",
"]",
",",
"[",
"col2",
"for",
"col1",
",",
"col2",
"in",
"xy",
"]",
"return",
"x",
",",
"lagged_seq",
"(",
"y",
",",
"lag",
"=",
"lag",
",",
"pad",
"=",
"pad",
",",
"truncate",
"=",
"truncate",
")",
"if",
"x",
"and",
"len",
"(",
"x",
")",
"and",
"2",
"==",
"len",
"(",
"x",
")",
"<=",
"len",
"(",
"x",
"[",
"0",
"]",
")",
":",
"#print 'X:', x",
"x",
",",
"y",
"=",
"x",
"[",
"0",
"]",
",",
"lagged_seq",
"(",
"x",
"[",
"1",
"]",
",",
"lag",
"=",
"lag",
",",
"pad",
"=",
"pad",
",",
"truncate",
"=",
"truncate",
")",
"if",
"truncate",
":",
"print",
"truncate",
",",
"lag",
"if",
"lag",
">=",
"0",
":",
"x",
"=",
"x",
"[",
"lag",
":",
"]",
"else",
":",
"x",
"=",
"x",
"[",
":",
"lag",
"]",
"#print x, y",
"return",
"x",
",",
"y"
] |
Lag the y values by the specified number of samples.
FIXME: sort has no effect when sequences provided in x, y instead of field names
>>> lagged_in_date(x=[.1,.2,.3,.4], y=[1,2,3,4], limit=4, lag=1)
([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3])
>>> lagged_in_date(x=[.1,.2,.3,.4], y=[1,2,3,4], lag=1, truncate=True)
([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3])
|
[
"Lag",
"the",
"y",
"values",
"by",
"the",
"specified",
"number",
"of",
"samples",
"."
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L830-L861
|
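`lagged_seq` is defined elsewhere in pug; the following is only a plausible reconstruction inferred from the doctests above (for lag=1, pad=0 the output keeps the input length, prepends the pad, and drops the tail). Treat it as an assumption; the `truncate` flag's interplay with the caller is not reproduced here.

def lagged_seq(seq, lag=1, pad=0, truncate=True):
    # [1, 2, 3, 4] with lag=1, pad=0 -> [0, 1, 2, 3], matching the doctests
    seq = list(seq)
    if lag >= 0:
        return [pad] * lag + seq[:len(seq) - lag]
    return seq[-lag:] + [pad] * (-lag)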
241,263
|
hobson/pug-dj
|
pug/dj/db.py
|
django_object_from_row
|
def django_object_from_row(row, model, field_names=None, ignore_fields=('id', 'pk'), ignore_related=True, strip=True, ignore_errors=True, verbosity=0):
"""Construct Django model instance from values provided in a python dict or Mapping
Args:
row (list or dict): Data (values of any type) to be assigned to fields in the Django object.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
    ignore_fields (list or tuple of str): The field names to ignore when placing the row values.
Returns:
Model instance: Django model instance constructed with values from `row` in fields
from `field_names` or `model`'s fields
"""
field_dict, errors = field_dict_from_row(row, model, field_names=field_names, ignore_fields=ignore_fields, strip=strip,
ignore_errors=ignore_errors, ignore_related=ignore_related, verbosity=verbosity)
if verbosity >= 3:
print 'field_dict = %r' % field_dict
try:
obj = model(**field_dict)
return obj, errors
except:
print_exc()
raise ValueError('Unable to coerce the dict = %r into a %r object' % (field_dict, model))
|
python
|
def django_object_from_row(row, model, field_names=None, ignore_fields=('id', 'pk'), ignore_related=True, strip=True, ignore_errors=True, verbosity=0):
"""Construct Django model instance from values provided in a python dict or Mapping
Args:
row (list or dict): Data (values of any type) to be assigned to fields in the Django object.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
    ignore_fields (list or tuple of str): The field names to ignore when placing the row values.
Returns:
Model instance: Django model instance constructed with values from `row` in fields
from `field_names` or `model`'s fields
"""
field_dict, errors = field_dict_from_row(row, model, field_names=field_names, ignore_fields=ignore_fields, strip=strip,
ignore_errors=ignore_errors, ignore_related=ignore_related, verbosity=verbosity)
if verbosity >= 3:
print 'field_dict = %r' % field_dict
try:
obj = model(**field_dict)
return obj, errors
except:
print_exc()
raise ValueError('Unable to coerce the dict = %r into a %r object' % (field_dict, model))
|
[
"def",
"django_object_from_row",
"(",
"row",
",",
"model",
",",
"field_names",
"=",
"None",
",",
"ignore_fields",
"=",
"(",
"'id'",
",",
"'pk'",
")",
",",
"ignore_related",
"=",
"True",
",",
"strip",
"=",
"True",
",",
"ignore_errors",
"=",
"True",
",",
"verbosity",
"=",
"0",
")",
":",
"field_dict",
",",
"errors",
"=",
"field_dict_from_row",
"(",
"row",
",",
"model",
",",
"field_names",
"=",
"field_names",
",",
"ignore_fields",
"=",
"ignore_fields",
",",
"strip",
"=",
"strip",
",",
"ignore_errors",
"=",
"ignore_errors",
",",
"ignore_related",
"=",
"ignore_related",
",",
"verbosity",
"=",
"verbosity",
")",
"if",
"verbosity",
">=",
"3",
":",
"print",
"'field_dict = %r'",
"%",
"field_dict",
"try",
":",
"obj",
"=",
"model",
"(",
"*",
"*",
"field_dict",
")",
"return",
"obj",
",",
"errors",
"except",
":",
"print_exc",
"(",
")",
"raise",
"ValueError",
"(",
"'Unable to coerce the dict = %r into a %r object'",
"%",
"(",
"field_dict",
",",
"model",
")",
")"
] |
Construct Django model instance from values provided in a python dict or Mapping
Args:
row (list or dict): Data (values of any type) to be assigned to fields in the Django object.
If `row` is a list, then the column names (header row) can be provided in `field_names`.
If `row` is a list and no field_names are provided, then `field_names` will be taken from the
Django model class field names, in the order they appear within the class definition.
model (django.db.models.Model): The model class to be constructed with data from `row`
field_names (list or tuple of str): The field names to place the row values in.
Defaults to the keys of the dict of `row` (if `row` is a `dict`) or the names of the fields
in the Django model being constructed.
ignore_fields (list or tuple of str): The field names to ignore when placing the row values.
Returns:
Model instance: Django model instance constructed with values from `row` in fields
from `field_names` or `model`'s fields
|
[
"Construct",
"Django",
"model",
"instance",
"from",
"values",
"provided",
"in",
"a",
"python",
"dict",
"or",
"Mapping"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1343-L1370
|
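A minimal usage sketch for `django_object_from_row` above. The import path and the `Person` model are illustrative assumptions; the function only constructs an unsaved instance and returns it with any coercion errors, so persisting is left to the caller.

    from pug.dj.db import django_object_from_row  # assumed import path
    from myapp.models import Person               # assumed example model

    row = {'first_name': 'Ada', 'last_name': 'Lovelace', 'age': '36'}
    obj, errors = django_object_from_row(row, Person, verbosity=1)
    if not errors:
        obj.save()  # construction and persistence are separate steps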
241,264
|
hobson/pug-dj
|
pug/dj/db.py
|
count_lines
|
def count_lines(fname, mode='rU'):
'''Count the number of lines in a file
The only faster way would be to utilize multiple processor cores to perform parallel reads.
http://stackoverflow.com/q/845058/623735
'''
with open(fname, mode) as f:
for i, l in enumerate(f):
pass
return i + 1
|
python
|
def count_lines(fname, mode='rU'):
'''Count the number of lines in a file
The only faster way would be to utilize multiple processor cores to perform parallel reads.
http://stackoverflow.com/q/845058/623735
'''
with open(fname, mode) as f:
for i, l in enumerate(f):
pass
return i + 1
|
[
"def",
"count_lines",
"(",
"fname",
",",
"mode",
"=",
"'rU'",
")",
":",
"with",
"open",
"(",
"fname",
",",
"mode",
")",
"as",
"f",
":",
"for",
"i",
",",
"l",
"in",
"enumerate",
"(",
"f",
")",
":",
"pass",
"return",
"i",
"+",
"1"
] |
Count the number of lines in a file
The only faster way would be to utilize multiple processor cores to perform parallel reads.
http://stackoverflow.com/q/845058/623735
|
[
"Count",
"the",
"number",
"of",
"lines",
"in",
"a",
"file"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1487-L1497
|
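One edge case in `count_lines` above: on a zero-line file the loop body never runs, so `i` is unbound and `return i + 1` raises UnboundLocalError. A short sketch that returns 0 instead, with the same O(n) single pass:

    def count_lines_safe(fname, mode='r'):
        # sum() over a generator never leaves a loop variable unbound
        with open(fname, mode) as f:
            return sum(1 for _ in f)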
241,265
|
hobson/pug-dj
|
pug/dj/db.py
|
write_queryset_to_csv
|
def write_queryset_to_csv(qs, filename):
"""Write a QuerySet or ValuesListQuerySet to a CSV file
based on djangosnippets by zbyte64 and http://palewi.re
Arguments:
qs (QuerySet or ValuesListQuerySet): The records you want to write to a text file (UTF-8)
filename (str): full path and file name to write to
"""
model = qs.model
with open(filename, 'w') as fp:
writer = csv.writer(fp)
try:
headers = list(qs._fields)
except:
headers = [field.name for field in model._meta.fields]
writer.writerow(headers)
for obj in qs:
row = []
for colnum, field in enumerate(headers):
try:
value = getattr(obj, field, obj[colnum])
except:
value = ''
if callable(value):
value = value()
if isinstance(value, basestring):
value = value.encode("utf-8")
else:
value = str(value).encode("utf-8")
row += [value]
writer.writerow(row)
|
python
|
def write_queryset_to_csv(qs, filename):
"""Write a QuerySet or ValuesListQuerySet to a CSV file
based on djangosnippets by zbyte64 and http://palewi.re
Arguments:
qs (QuerySet or ValuesListQuerySet): The records you want to write to a text file (UTF-8)
filename (str): full path and file name to write to
"""
model = qs.model
with open(filename, 'w') as fp:
writer = csv.writer(fp)
try:
headers = list(qs._fields)
except:
headers = [field.name for field in model._meta.fields]
writer.writerow(headers)
for obj in qs:
row = []
for colnum, field in enumerate(headers):
try:
value = getattr(obj, field, obj[colnum])
except:
value = ''
if callable(value):
value = value()
if isinstance(value, basestring):
value = value.encode("utf-8")
else:
value = str(value).encode("utf-8")
row += [value]
writer.writerow(row)
|
[
"def",
"write_queryset_to_csv",
"(",
"qs",
",",
"filename",
")",
":",
"model",
"=",
"qs",
".",
"model",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"fp",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"fp",
")",
"try",
":",
"headers",
"=",
"list",
"(",
"qs",
".",
"_fields",
")",
"except",
":",
"headers",
"=",
"[",
"field",
".",
"name",
"for",
"field",
"in",
"model",
".",
"_meta",
".",
"fields",
"]",
"writer",
".",
"writerow",
"(",
"headers",
")",
"for",
"obj",
"in",
"qs",
":",
"row",
"=",
"[",
"]",
"for",
"colnum",
",",
"field",
"in",
"enumerate",
"(",
"headers",
")",
":",
"try",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"field",
",",
"obj",
"[",
"colnum",
"]",
")",
"except",
":",
"value",
"=",
"''",
"if",
"callable",
"(",
"value",
")",
":",
"value",
"=",
"value",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
":",
"value",
"=",
"value",
".",
"encode",
"(",
"\"utf-8\"",
")",
"else",
":",
"value",
"=",
"str",
"(",
"value",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
"row",
"+=",
"[",
"value",
"]",
"writer",
".",
"writerow",
"(",
"row",
")"
] |
Write a QuerySet or ValuesListQuerySet to a CSV file
based on djangosnippets by zbyte64 and http://palewi.re
Arguments:
qs (QuerySet or ValuesListQuerySet): The records you want to write to a text file (UTF-8)
filename (str): full path and file name to write to
|
[
"Write",
"a",
"QuerySet",
"or",
"ValuesListQuerySet",
"to",
"a",
"CSV",
"file"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1638-L1670
|
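A usage sketch for `write_queryset_to_csv` above, assuming the same import path and an example `Person` model. Headers come from `qs._fields` for values querysets and fall back to the model's field names otherwise.

    from pug.dj.db import write_queryset_to_csv  # assumed import path
    from myapp.models import Person              # assumed example model

    # Plain queryset: headers are the model's field names
    write_queryset_to_csv(Person.objects.all(), '/tmp/people.csv')
    # values_list queryset: headers are taken from qs._fields
    write_queryset_to_csv(Person.objects.values_list('first_name', 'age'), '/tmp/names.csv')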
241,266
|
hobson/pug-dj
|
pug/dj/db.py
|
make_date
|
def make_date(dt, date_parser=parse_date):
"""Coerce a datetime or string into datetime.date object
Arguments:
dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date
to be coerced into a `datetime.date` object
Returns:
datetime.date: Date portion of a `datetime` string or object
>>> make_date('')
datetime.date(1970, 1, 1)
>>> make_date(None)
datetime.date(1970, 1, 1)
>>> make_date("11:59 PM") == datetime.date.today()
True
>>> make_date(datetime.datetime(1999, 12, 31, 23, 59, 59))
datetime.date(1999, 12, 31)
"""
if not dt:
return datetime.date(1970, 1, 1)
if isinstance(dt, basestring):
dt = date_parser(dt)
try:
dt = dt.timetuple()[:3]
except:
dt = tuple(dt)[:3]
return datetime.date(*dt)
|
python
|
def make_date(dt, date_parser=parse_date):
"""Coerce a datetime or string into datetime.date object
Arguments:
dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date
to be coerced into a `datetime.date` object
Returns:
datetime.date: Date portion of a `datetime` string or object
>>> make_date('')
datetime.date(1970, 1, 1)
>>> make_date(None)
datetime.date(1970, 1, 1)
>>> make_date("11:59 PM") == datetime.date.today()
True
>>> make_date(datetime.datetime(1999, 12, 31, 23, 59, 59))
datetime.date(1999, 12, 31)
"""
if not dt:
return datetime.date(1970, 1, 1)
if isinstance(dt, basestring):
dt = date_parser(dt)
try:
dt = dt.timetuple()[:3]
except:
dt = tuple(dt)[:3]
return datetime.date(*dt)
|
[
"def",
"make_date",
"(",
"dt",
",",
"date_parser",
"=",
"parse_date",
")",
":",
"if",
"not",
"dt",
":",
"return",
"datetime",
".",
"date",
"(",
"1970",
",",
"1",
",",
"1",
")",
"if",
"isinstance",
"(",
"dt",
",",
"basestring",
")",
":",
"dt",
"=",
"date_parser",
"(",
"dt",
")",
"try",
":",
"dt",
"=",
"dt",
".",
"timetuple",
"(",
")",
"[",
":",
"3",
"]",
"except",
":",
"dt",
"=",
"tuple",
"(",
"dt",
")",
"[",
":",
"3",
"]",
"return",
"datetime",
".",
"date",
"(",
"*",
"dt",
")"
] |
Coerce a datetime or string into datetime.date object
Arguments:
dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date
to be coerced into a `datetime.date` object
Returns:
datetime.date: Date portion of a `datetime` string or object
>>> make_date('')
datetime.date(1970, 1, 1)
>>> make_date(None)
datetime.date(1970, 1, 1)
>>> make_date("11:59 PM") == datetime.date.today()
True
>>> make_date(datetime.datetime(1999, 12, 31, 23, 59, 59))
datetime.date(1999, 12, 31)
|
[
"Coerce",
"a",
"datetime",
"or",
"string",
"into",
"datetime",
".",
"date",
"object"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1764-L1791
|
241,267
|
hobson/pug-dj
|
pug/dj/db.py
|
make_time
|
def make_time(dt, date_parser=parse_date):
"""Ignore date information in a datetime string or object
Arguments:
dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date
to be coerced into a `datetime.time` object
Returns:
datetime.time: Time of day portion of a `datetime` string or object
>>> make_time(None)
datetime.time(0, 0)
>>> make_time("11:59 PM")
datetime.time(23, 59)
>>> make_time(datetime.datetime(1999, 12, 31, 23, 59, 59))
datetime.time(23, 59, 59)
"""
if not dt:
return datetime.time(0, 0)
if isinstance(dt, basestring):
try:
dt = date_parser(dt)
except:
print 'Unable to parse {0}'.format(repr(dt))
print_exc()
return datetime.time(0, 0)
try:
dt = dt.timetuple()[3:6]
except:
dt = tuple(dt)[3:6]
return datetime.time(*dt)
|
python
|
def make_time(dt, date_parser=parse_date):
"""Ignore date information in a datetime string or object
Arguments:
dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date
to be coerced into a `datetime.time` object
Returns:
datetime.time: Time of day portion of a `datetime` string or object
>>> make_time(None)
datetime.time(0, 0)
>>> make_time("11:59 PM")
datetime.time(23, 59)
>>> make_time(datetime.datetime(1999, 12, 31, 23, 59, 59))
datetime.time(23, 59, 59)
"""
if not dt:
return datetime.time(0, 0)
if isinstance(dt, basestring):
try:
dt = date_parser(dt)
except:
print 'Unable to parse {0}'.format(repr(dt))
print_exc()
return datetime.time(0, 0)
try:
dt = dt.timetuple()[3:6]
except:
dt = tuple(dt)[3:6]
return datetime.time(*dt)
|
[
"def",
"make_time",
"(",
"dt",
",",
"date_parser",
"=",
"parse_date",
")",
":",
"if",
"not",
"dt",
":",
"return",
"datetime",
".",
"time",
"(",
"0",
",",
"0",
")",
"if",
"isinstance",
"(",
"dt",
",",
"basestring",
")",
":",
"try",
":",
"dt",
"=",
"date_parser",
"(",
"dt",
")",
"except",
":",
"print",
"'Unable to parse {0}'",
".",
"format",
"(",
"repr",
"(",
"dt",
")",
")",
"print_exc",
"(",
")",
"return",
"datetime",
".",
"time",
"(",
"0",
",",
"0",
")",
"try",
":",
"dt",
"=",
"dt",
".",
"timetuple",
"(",
")",
"[",
"3",
":",
"6",
"]",
"except",
":",
"dt",
"=",
"tuple",
"(",
"dt",
")",
"[",
"3",
":",
"6",
"]",
"return",
"datetime",
".",
"time",
"(",
"*",
"dt",
")"
] |
Ignore date information in a datetime string or object
Arguments:
dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date
to be coerced into a `datetime.time` object
Returns:
datetime.time: Time of day portion of a `datetime` string or object
>>> make_time(None)
datetime.time(0, 0)
>>> make_time("11:59 PM")
datetime.time(23, 59)
>>> make_time(datetime.datetime(1999, 12, 31, 23, 59, 59))
datetime.time(23, 59, 59)
|
[
"Ignore",
"date",
"information",
"in",
"a",
"datetime",
"string",
"or",
"object"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1794-L1824
|
241,268
|
hobson/pug-dj
|
pug/dj/db.py
|
flatten_excel
|
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):
"""Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpreted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
"""
date_parser = date_parser or (lambda x: x)
dotted_ext, dotted_output_ext = None, None
if ext != None and output_ext != None:
dotted_ext = ('' if ext.startswith('.') else '.') + ext
dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext
table = {}
for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity):
file_path = file_properties['path']
if output_ext and (dotted_output_ext + '.') in file_path:
continue
df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)
df = flatten_dataframe(df, verbosity=verbosity)
if dotted_ext != None and dotted_output_ext != None:
df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)
return table
|
python
|
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):
"""Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpreted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
"""
date_parser = date_parser or (lambda x: x)
dotted_ext, dotted_output_ext = None, None
if ext != None and output_ext != None:
dotted_ext = ('' if ext.startswith('.') else '.') + ext
dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext
table = {}
for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity):
file_path = file_properties['path']
if output_ext and (dotted_output_ext + '.') in file_path:
continue
df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)
df = flatten_dataframe(df, verbosity=verbosity)
if dotted_ext != None and dotted_output_ext != None:
df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)
return table
|
[
"def",
"flatten_excel",
"(",
"path",
"=",
"'.'",
",",
"ext",
"=",
"'xlsx'",
",",
"sheetname",
"=",
"0",
",",
"skiprows",
"=",
"None",
",",
"header",
"=",
"0",
",",
"date_parser",
"=",
"parse_date",
",",
"verbosity",
"=",
"0",
",",
"output_ext",
"=",
"None",
")",
":",
"date_parser",
"=",
"date_parser",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
"dotted_ext",
",",
"dotted_output_ext",
"=",
"None",
",",
"None",
"if",
"ext",
"!=",
"None",
"and",
"output_ext",
"!=",
"None",
":",
"dotted_ext",
"=",
"(",
"''",
"if",
"ext",
".",
"startswith",
"(",
"'.'",
")",
"else",
"'.'",
")",
"+",
"ext",
"dotted_output_ext",
"=",
"(",
"''",
"if",
"output_ext",
".",
"startswith",
"(",
"'.'",
")",
"else",
"'.'",
")",
"+",
"output_ext",
"table",
"=",
"{",
"}",
"for",
"file_properties",
"in",
"util",
".",
"find_files",
"(",
"path",
",",
"ext",
"=",
"ext",
"or",
"''",
",",
"verbosity",
"=",
"verbosity",
")",
":",
"file_path",
"=",
"file_properties",
"[",
"'path'",
"]",
"if",
"output_ext",
"and",
"(",
"dotted_output_ext",
"+",
"'.'",
")",
"in",
"file_path",
":",
"continue",
"df",
"=",
"dataframe_from_excel",
"(",
"file_path",
",",
"sheetname",
"=",
"sheetname",
",",
"header",
"=",
"header",
",",
"skiprows",
"=",
"skiprows",
")",
"df",
"=",
"flatten_dataframe",
"(",
"df",
",",
"verbosity",
"=",
"verbosity",
")",
"if",
"dotted_ext",
"!=",
"None",
"and",
"dotted_output_ext",
"!=",
"None",
":",
"df",
".",
"to_csv",
"(",
"file_path",
"[",
":",
"-",
"len",
"(",
"dotted_ext",
")",
"]",
"+",
"dotted_output_ext",
"+",
"dotted_ext",
")",
"return",
"table"
] |
Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict
Arguments:
path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
ext (str): file name extension (to filter files by)
date_parser (function): if the MultiIndex can be interpreted as a datetime, this parser will be used
Returns:
dict of DataFrame: { file_path: flattened_data_frame }
|
[
"Load",
"all",
"Excel",
"files",
"in",
"the",
"given",
"path",
"write",
".",
"flat",
".",
"csv",
"files",
"return",
"DataFrame",
"dict"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1915-L1941
|
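A usage sketch for `flatten_excel` above (import path assumed). Note two quirks visible in the source: the returned `table` dict is never populated in this version, and the output file keeps the input extension (e.g. `report.flat.xlsx`) even though `df.to_csv` writes CSV content into it.

    from pug.dj.db import flatten_excel  # assumed import path

    # Flattens every .xlsx under ./reports, writing report.flat.xlsx-style CSV files
    flatten_excel(path='./reports', ext='xlsx', output_ext='flat', verbosity=1)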
241,269
|
hobson/pug-dj
|
pug/dj/db.py
|
hash_model_values
|
def hash_model_values(model, clear=True, hash_field='values_hash', hash_fun=hash, ignore_pk=True, ignore_fields=[]):
"""Hash values of DB table records to facilitate tracking changes to the DB table
Intended for comparing records in one table to those in another (with potentially differing id/pk values)
For example, changes to a table in a read-only MS SQL database can be quickly identified
and mirrored to a writeable PostgreSQL DB where these hash values are stored alongside the data.
"""
qs = getattr(model, 'objects', model)
model = qs.model
if ignore_pk:
ignore_fields += [model._meta.pk.name]
if not hasattr(model, hash_field):
warnings.warn("%r doesn't have a field named %s in which to store a hash value. Skipping." % (model, hash_field))
return
for obj in qs:
# ignore primary key (id field) when hashing values
h = hash_fun(tuple([getattr(obj, k) for k in obj._meta.get_all_field_names() if k not in ignore_fields]))
tracking_obj, created = ChangeLog.get_or_create(app=model._meta.app_label, model=model._meta.object_name, primary_key=obj.pk)
tracking_obj.update(hash_value=h)
|
python
|
def hash_model_values(model, clear=True, hash_field='values_hash', hash_fun=hash, ignore_pk=True, ignore_fields=[]):
"""Hash values of DB table records to facilitate tracking changes to the DB table
Intended for comparing records in one table to those in another (with potentially differing id/pk values)
For example, changes to a table in a read-only MS SQL database can be quickly identified
and mirrored to a writeable PostgreSQL DB where these hash values are stored alongside the data.
"""
qs = getattr(model, 'objects', model)
model = qs.model
if ignore_pk:
ignore_fields += [model._meta.pk.name]
if not hasattr(model, hash_field):
warnings.warn("%r doesn't have a field named %s in which to store a hash value. Skipping." % (model, hash_field))
return
for obj in qs:
# ignore primary key (id field) when hashing values
h = hash_fun(tuple([getattr(obj, k) for k in obj._meta.get_all_field_names() if k not in ignore_fields]))
tracking_obj, created = ChangeLog.get_or_create(app=model._meta.app_label, model=model._meta.object_name, primary_key=obj.pk)
tracking_obj.update(hash_value=h)
|
[
"def",
"hash_model_values",
"(",
"model",
",",
"clear",
"=",
"True",
",",
"hash_field",
"=",
"'values_hash'",
",",
"hash_fun",
"=",
"hash",
",",
"ignore_pk",
"=",
"True",
",",
"ignore_fields",
"=",
"[",
"]",
")",
":",
"qs",
"=",
"getattr",
"(",
"model",
",",
"'objects'",
",",
"model",
")",
"model",
"=",
"qs",
".",
"model",
"if",
"ignore_pk",
":",
"ignore_fields",
"+=",
"[",
"model",
".",
"_meta",
".",
"pk",
".",
"name",
"]",
"if",
"not",
"hasattr",
"(",
"model",
",",
"hash_field",
")",
":",
"warnings",
".",
"warn",
"(",
"\"%r doesn't have a field named %s in which to store a hash value. Skipping.\"",
"%",
"(",
"model",
",",
"hash_field",
")",
")",
"return",
"for",
"obj",
"in",
"qs",
":",
"# ignore primary key (id field) when hashing values",
"h",
"=",
"hash_fun",
"(",
"tuple",
"(",
"[",
"getattr",
"(",
"obj",
",",
"k",
")",
"for",
"k",
"in",
"obj",
".",
"_meta",
".",
"get_all_field_names",
"(",
")",
"if",
"k",
"not",
"in",
"ignore_fields",
"]",
")",
")",
"tracking_obj",
",",
"created",
"=",
"ChangeLog",
".",
"get_or_create",
"(",
"app",
"=",
"model",
".",
"_meta",
".",
"app_label",
",",
"model",
"=",
"model",
".",
"_meta",
".",
"object_name",
",",
"primary_key",
"=",
"obj",
".",
"pk",
")",
"tracking_obj",
".",
"update",
"(",
"hash_value",
"=",
"h",
")"
] |
Hash values of DB table records to facilitate tracking changes to the DB table
Intended for comparing records in one table to those in another (with potentially differing id/pk values)
For example, changes to a table in a read-only MS SQL database can be quickly identified
and mirrored to a writeable PostgreSQL DB where these hash values are stored alongside the data.
|
[
"Hash",
"values",
"of",
"DB",
"table",
"records",
"to",
"facilitate",
"tracking",
"changes",
"to",
"the",
"DB",
"table"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1982-L2000
|
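A usage sketch for `hash_model_values` above. The default `hash_fun=hash` is process-dependent on Python 3.3+ (string hashing is randomized per interpreter run), so a deterministic digest is safer when hashes are compared across runs; the model and import path are assumptions.

    import hashlib
    from pug.dj.db import hash_model_values  # assumed import path
    from myapp.models import Person          # assumed example model

    def stable_hash(values):
        # Deterministic across processes, unlike the built-in hash()
        return hashlib.md5(repr(values).encode('utf-8')).hexdigest()

    hash_model_values(Person, hash_fun=stable_hash)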
241,270
|
hobson/pug-dj
|
pug/dj/db.py
|
bulk_update
|
def bulk_update(object_list, ignore_errors=False, delete_first=False, verbosity=0):
'''Bulk_create objects in provided list of model instances, delete database rows for the original pks in the object list.
Returns any delta in the number of rows in the database table that resulted from the update.
If nonzero, an error has likely occurred and database integrity is suspect.
# delete_first = True is required if your model has unique constraints that would be violated by creating duplicate records
# FIXME: check for unique constraints and raise exception if any exist (won't work because new objects may violate!)
'''
if not object_list:
return 0
model = object_list[0].__class__
N_before = model.objects.count()
pks_to_delete = set()
for i, obj in enumerate(object_list):
pks_to_delete.add(obj.pk)
if delete_first:
object_list[i] = deepcopy(obj)
object_list[i].pk = None
if verbosity > 1:
print 'Creating %d %r objects.' % (len(object_list), model)
print 'BEFORE: %d' % model.objects.count()
if not delete_first:
model.objects.bulk_create(object_list)
if verbosity > 0:
print 'Deleting %d objects with pks: %r ........' % (len(pks_to_delete), pks_to_delete)
objs_to_delete = model.objects.filter(pk__in=pks_to_delete)
num_to_delete = objs_to_delete.count()
if num_to_delete != len(pks_to_delete):
msg = 'Attempt to delete redundant pks (len %d)! Queryset has count %d. Query was `filter(pk__in=%r). Queryset = %r' % (
len(pks_to_delete), num_to_delete, pks_to_delete, objs_to_delete)
if ignore_errors:
if verbosity > 0:
print msg
else:
raise RuntimeError(msg)
if verbosity > 1:
print 'Queryset to delete has %d objects' % objs_to_delete.count()
objs_to_delete.delete()
if delete_first:
model.objects.bulk_create(object_list)
if verbosity > 1:
print 'AFTER: %d' % model.objects.count()
N_after = model.objects.count()
if ignore_errors:
if verbosity > 1:
print 'AFTER: %d' % N_after
else:
if N_after != N_before:
print 'Number of records in %r changed by %d during bulk_create of %r.\n ' % (model, N_after - N_before, object_list)
msg = 'Records before and after bulk_create are not equal!!! Before=%d, After=%d' % (N_before, N_after)
raise RuntimeError(msg)
return N_before - N_after
|
python
|
def bulk_update(object_list, ignore_errors=False, delete_first=False, verbosity=0):
'''Bulk_create objects in provided list of model instances, delete database rows for the original pks in the object list.
Returns any delta in the number of rows in the database table that resulted from the update.
If nonzero, an error has likely occurred and database integrity is suspect.
# delete_first = True is required if your model has unique constraints that would be violated by creating duplicate records
# FIXME: check for unique constraints and raise exception if any exist (won't work because new objects may violate!)
'''
if not object_list:
return 0
model = object_list[0].__class__
N_before = model.objects.count()
pks_to_delete = set()
for i, obj in enumerate(object_list):
pks_to_delete.add(obj.pk)
if delete_first:
object_list[i] = deepcopy(obj)
object_list[i].pk = None
if verbosity > 1:
print 'Creating %d %r objects.' % (len(object_list), model)
print 'BEFORE: %d' % model.objects.count()
if not delete_first:
model.objects.bulk_create(object_list)
if verbosity > 0:
print 'Deleting %d objects with pks: %r ........' % (len(pks_to_delete), pks_to_delete)
objs_to_delete = model.objects.filter(pk__in=pks_to_delete)
num_to_delete = objs_to_delete.count()
if num_to_delete != len(pks_to_delete):
msg = 'Attempt to delete redundant pks (len %d)! Queryset has count %d. Query was `filter(pk__in=%r). Queryset = %r' % (
len(pks_to_delete), num_to_delete, pks_to_delete, objs_to_delete)
if ignore_errors:
if verbosity > 0:
print msg
else:
raise RuntimeError(msg)
if verbosity > 1:
print 'Queryset to delete has %d objects' % objs_to_delete.count()
objs_to_delete.delete()
if delete_first:
model.objects.bulk_create(object_list)
if verbosity > 1:
print 'AFTER: %d' % model.objects.count()
N_after = model.objects.count()
if ignore_errors:
if verbosity > 1:
print 'AFTER: %d' % N_after
else:
if N_after != N_before:
print 'Number of records in %r changed by %d during bulk_create of %r.\n ' % (model, N_after - N_before, object_list)
msg = 'Records before and after bulk_create are not equal!!! Before=%d, After=%d' % (N_before, N_after)
raise RuntimeError(msg)
return N_before - N_after
|
[
"def",
"bulk_update",
"(",
"object_list",
",",
"ignore_errors",
"=",
"False",
",",
"delete_first",
"=",
"False",
",",
"verbosity",
"=",
"0",
")",
":",
"if",
"not",
"object_list",
":",
"return",
"0",
"model",
"=",
"object_list",
"[",
"0",
"]",
".",
"__class__",
"N_before",
"=",
"model",
".",
"objects",
".",
"count",
"(",
")",
"pks_to_delete",
"=",
"set",
"(",
")",
"for",
"i",
",",
"obj",
"in",
"enumerate",
"(",
"object_list",
")",
":",
"pks_to_delete",
".",
"add",
"(",
"obj",
".",
"pk",
")",
"if",
"delete_first",
":",
"object_list",
"[",
"i",
"]",
"=",
"deepcopy",
"(",
"obj",
")",
"object_list",
"[",
"i",
"]",
".",
"pk",
"=",
"None",
"if",
"verbosity",
">",
"1",
":",
"print",
"'Creating %d %r objects.'",
"%",
"(",
"len",
"(",
"object_list",
")",
",",
"model",
")",
"print",
"'BEFORE: %d'",
"%",
"model",
".",
"objects",
".",
"count",
"(",
")",
"if",
"not",
"delete_first",
":",
"model",
".",
"objects",
".",
"bulk_create",
"(",
"object_list",
")",
"if",
"verbosity",
">",
"0",
":",
"print",
"'Deleting %d objects with pks: %r ........'",
"%",
"(",
"len",
"(",
"pks_to_delete",
")",
",",
"pks_to_delete",
")",
"objs_to_delete",
"=",
"model",
".",
"objects",
".",
"filter",
"(",
"pk__in",
"=",
"pks_to_delete",
")",
"num_to_delete",
"=",
"objs_to_delete",
".",
"count",
"(",
")",
"if",
"num_to_delete",
"!=",
"len",
"(",
"pks_to_delete",
")",
":",
"msg",
"=",
"'Attempt to delete redundant pks (len %d)! Queryset has count %d. Query was `filter(pk__in=%r). Queryset = %r'",
"%",
"(",
"len",
"(",
"pks_to_delete",
")",
",",
"num_to_delete",
",",
"pks_to_delete",
",",
"objs_to_delete",
")",
"if",
"ignore_errors",
":",
"if",
"verbosity",
">",
"0",
":",
"print",
"msg",
"else",
":",
"raise",
"RuntimeError",
"(",
"msg",
")",
"if",
"verbosity",
">",
"1",
":",
"print",
"'Queryset to delete has %d objects'",
"%",
"objs_to_delete",
".",
"count",
"(",
")",
"objs_to_delete",
".",
"delete",
"(",
")",
"if",
"delete_first",
":",
"model",
".",
"objects",
".",
"bulk_create",
"(",
"object_list",
")",
"if",
"verbosity",
">",
"1",
":",
"print",
"'AFTER: %d'",
"%",
"model",
".",
"objects",
".",
"count",
"(",
")",
"N_after",
"=",
"model",
".",
"objects",
".",
"count",
"(",
")",
"if",
"ignore_errors",
":",
"if",
"verbosity",
">",
"1",
":",
"print",
"'AFTER: %d'",
"%",
"N_after",
"else",
":",
"if",
"N_after",
"!=",
"N_before",
":",
"print",
"'Number of records in %r changed by %d during bulk_create of %r.\\n '",
"%",
"(",
"model",
",",
"N_after",
"-",
"N_before",
",",
"object_list",
")",
"msg",
"=",
"'Records before and after bulk_create are not equal!!! Before=%d, After=%d'",
"%",
"(",
"N_before",
",",
"N_after",
")",
"raise",
"RuntimeError",
"(",
"msg",
")",
"return",
"N_before",
"-",
"N_after"
] |
Bulk_create objects in provided list of model instances, delete database rows for the original pks in the object list.
Returns any delta in the number of rows in the database table that resulted from the update.
If nonzero, an error has likely occurred and database integrity is suspect.
# delete_first = True is required if your model has unique constraints that would be violated by creating duplicate records
# FIXME: check for unique constraints and raise exception if any exist (won't work because new objects may violate!)
|
[
"Bulk_create",
"objects",
"in",
"provided",
"list",
"of",
"model",
"instances",
"delete",
"database",
"rows",
"for",
"the",
"original",
"pks",
"in",
"the",
"object",
"list",
"."
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L2561-L2614
|
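A usage sketch for `bulk_update` above (import path and model assumed). Because the default path bulk-creates the new rows before deleting the old ones, pass `delete_first=True` whenever unique constraints could be violated by the temporary duplicates, as the function's own comment warns.

    from pug.dj.db import bulk_update  # assumed import path
    from myapp.models import Person    # assumed example model

    people = list(Person.objects.filter(last_name='Smith'))
    for p in people:
        p.age = (p.age or 0) + 1
    delta = bulk_update(people, delete_first=True, verbosity=1)
    assert delta == 0  # a nonzero delta means rows were lost or duplicated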
241,271
|
hobson/pug-dj
|
pug/dj/db.py
|
generate_queryset_batches
|
def generate_queryset_batches(queryset, batch_len=1000, verbosity=1):
"""Filter a queryset by the pk in such a way that no batch is larger than the requested batch_len
SEE ALSO: pug.nlp.util.generate_slices
>>> from miner.models import TestModel
>>> sum(len(list(batch)) for batch in generate_queryset_batches(TestModel, batch_len=7)) == TestModel.objects.count()
True
"""
if batch_len == 1:
for obj in queryset:
yield obj
N = queryset.count()
if not N:
raise StopIteration("Queryset is empty!")
if N == 1:
for obj in queryset:
yield obj
if verbosity > 0:
widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start()
pk_queryset = queryset.filter(pk__isnull=False).values_list('pk', flat=True).order_by('pk')
N_nonnull = pk_queryset.count()
N_batches = int(N_nonnull/float(batch_len)) + 1
if verbosity > 1:
print 'Splitting %d primary_key values (%d nonnull) from %r into %d querysets of size %d or smaller. First loading pks into RAM...' % (N, N_nonnull, queryset.model, N_batches, batch_len)
nonnull_pk_list = tuple(pk_queryset)
pk_list = []
if verbosity > 1:
print 'Extracting the %d dividing (fencepost) primary keys for use in splitting the querysets with filter queries...' % (N_batches + 1)
for j in range(N_batches - 1):
pk_list += [(nonnull_pk_list[j*batch_len], nonnull_pk_list[(j+1)*batch_len - 1])]
last_batch_len = N_nonnull - (N_batches-1) * batch_len
pk_list += [(nonnull_pk_list[(N_batches-1) * batch_len], nonnull_pk_list[N-1])]
if verbosity > 1:
del(nonnull_pk_list)
print 'Yielding the %d batches according to the %d dividing (fencepost) primary keys...' % (N_batches, len(pk_list))
for j in range(N_batches):
if verbosity > 0:
pbar.update(i)
if j < N_batches - 1:
i += batch_len
else:
i += last_batch_len
# inclusive inequality ensures that even if PKs are repeated they will all be included in the queryset returned
yield queryset.filter(pk__gte=pk_list[j][0], pk__lte=pk_list[j][1])
if verbosity > 0:
pbar.finish()
|
python
|
def generate_queryset_batches(queryset, batch_len=1000, verbosity=1):
"""Filter a queryset by the pk in such a way that no batch is larger than the requested batch_len
SEE ALSO: pug.nlp.util.generate_slices
>>> from miner.models import TestModel
>>> sum(len(list(batch)) for batch in generate_queryset_batches(TestModel, batch_len=7)) == TestModel.objects.count()
True
"""
if batch_len == 1:
for obj in queryset:
yield obj
N = queryset.count()
if not N:
raise StopIteration("Queryset is empty!")
if N == 1:
for obj in queryset:
yield obj
if verbosity > 0:
widgets = [pb.Counter(), '/%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start()
pk_queryset = queryset.filter(pk__isnull=False).values_list('pk', flat=True).order_by('pk')
N_nonnull = pk_queryset.count()
N_batches = int(N_nonnull/float(batch_len)) + 1
if verbosity > 1:
print 'Splitting %d primary_key values (%d nonnull) from %r into %d querysets of size %d or smaller. First loading pks into RAM...' % (N, N_nonnull, queryset.model, N_batches, batch_len)
nonnull_pk_list = tuple(pk_queryset)
pk_list = []
if verbosity > 1:
print 'Extracting the %d dividing (fencepost) primary keys for use in splitting the querysets with filter queries...' % (N_batches + 1)
for j in range(N_batches - 1):
pk_list += [(nonnull_pk_list[j*batch_len], nonnull_pk_list[(j+1)*batch_len - 1])]
last_batch_len = N_nonnull - (N_batches-1) * batch_len
pk_list += [(nonnull_pk_list[(N_batches-1) * batch_len], nonnull_pk_list[N-1])]
if verbosity > 1:
del(nonnull_pk_list)
print 'Yielding the %d batches according to the %d dividing (fencepost) primary keys...' % (N_batches, len(pk_list))
for j in range(N_batches):
if verbosity > 0:
pbar.update(i)
if j < N_batches - 1:
i += batch_len
else:
i += last_batch_len
# inclusive inequality ensures that even if PKs are repeated they will all be included in the queryset returned
yield queryset.filter(pk__gte=pk_list[j][0], pk__lte=pk_list[j][1])
if verbosity > 0:
pbar.finish()
|
[
"def",
"generate_queryset_batches",
"(",
"queryset",
",",
"batch_len",
"=",
"1000",
",",
"verbosity",
"=",
"1",
")",
":",
"if",
"batch_len",
"==",
"1",
":",
"for",
"obj",
"in",
"queryset",
":",
"yield",
"obj",
"N",
"=",
"queryset",
".",
"count",
"(",
")",
"if",
"not",
"N",
":",
"raise",
"StopIteration",
"(",
"\"Queryset is empty!\"",
")",
"if",
"N",
"==",
"1",
":",
"for",
"obj",
"in",
"queryset",
":",
"yield",
"obj",
"if",
"verbosity",
">",
"0",
":",
"widgets",
"=",
"[",
"pb",
".",
"Counter",
"(",
")",
",",
"'/%d rows: '",
"%",
"N",
",",
"pb",
".",
"Percentage",
"(",
")",
",",
"' '",
",",
"pb",
".",
"RotatingMarker",
"(",
")",
",",
"' '",
",",
"pb",
".",
"Bar",
"(",
")",
",",
"' '",
",",
"pb",
".",
"ETA",
"(",
")",
"]",
"i",
",",
"pbar",
"=",
"0",
",",
"pb",
".",
"ProgressBar",
"(",
"widgets",
"=",
"widgets",
",",
"maxval",
"=",
"N",
")",
".",
"start",
"(",
")",
"pk_queryset",
"=",
"queryset",
".",
"filter",
"(",
"pk__isnull",
"=",
"False",
")",
".",
"values_list",
"(",
"'pk'",
",",
"flat",
"=",
"True",
")",
".",
"order_by",
"(",
"'pk'",
")",
"N_nonnull",
"=",
"pk_queryset",
".",
"count",
"(",
")",
"N_batches",
"=",
"int",
"(",
"N_nonnull",
"/",
"float",
"(",
"batch_len",
")",
")",
"+",
"1",
"if",
"verbosity",
">",
"1",
":",
"print",
"'Splitting %d primary_key values (%d nonnull) from %r into %d querysets of size %d or smaller. First loading pks into RAM...'",
"%",
"(",
"N",
",",
"N_nonnull",
",",
"queryset",
".",
"model",
",",
"N_batches",
",",
"batch_len",
")",
"nonnull_pk_list",
"=",
"tuple",
"(",
"pk_queryset",
")",
"pk_list",
"=",
"[",
"]",
"if",
"verbosity",
">",
"1",
":",
"print",
"'Extracting the %d dividing (fencepost) primary keys for use in splitting the querysets with filter queries...'",
"%",
"(",
"N_batches",
"+",
"1",
")",
"for",
"j",
"in",
"range",
"(",
"N_batches",
"-",
"1",
")",
":",
"pk_list",
"+=",
"[",
"(",
"nonnull_pk_list",
"[",
"j",
"*",
"batch_len",
"]",
",",
"nonnull_pk_list",
"[",
"(",
"j",
"+",
"1",
")",
"*",
"batch_len",
"-",
"1",
"]",
")",
"]",
"last_batch_len",
"=",
"N_nonnull",
"-",
"(",
"N_batches",
"-",
"1",
")",
"*",
"batch_len",
"pk_list",
"+=",
"[",
"(",
"nonnull_pk_list",
"[",
"(",
"N_batches",
"-",
"1",
")",
"*",
"batch_len",
"]",
",",
"nonnull_pk_list",
"[",
"N",
"-",
"1",
"]",
")",
"]",
"if",
"verbosity",
">",
"1",
":",
"del",
"(",
"nonnull_pk_list",
")",
"print",
"'Yielding the %d batches according to the %d dividing (fencepost) primary keys...'",
"%",
"(",
"N_batches",
",",
"len",
"(",
"pk_list",
")",
")",
"for",
"j",
"in",
"range",
"(",
"N_batches",
")",
":",
"if",
"verbosity",
">",
"0",
":",
"pbar",
".",
"update",
"(",
"i",
")",
"if",
"j",
"<",
"N_batches",
"-",
"1",
":",
"i",
"+=",
"batch_len",
"else",
":",
"i",
"+=",
"last_batch_len",
"# inclusive inequality ensures that even if PKs are repeated they will all be included in the queryset returned",
"yield",
"queryset",
".",
"filter",
"(",
"pk__gte",
"=",
"pk_list",
"[",
"j",
"]",
"[",
"0",
"]",
",",
"pk__lte",
"=",
"pk_list",
"[",
"j",
"]",
"[",
"1",
"]",
")",
"if",
"verbosity",
">",
"0",
":",
"pbar",
".",
"finish",
"(",
")"
] |
Filter a queryset by the pk in such a way that no batch is larger than the requested batch_len
SEE ALSO: pug.nlp.util.generate_slices
>>> from miner.models import TestModel
>>> sum(len(list(batch)) for batch in generate_queryset_batches(TestModel, batch_len=7)) == TestModel.objects.count()
True
|
[
"Filter",
"a",
"queryset",
"by",
"the",
"pk",
"in",
"such",
"a",
"way",
"that",
"no",
"batch",
"is",
"larger",
"than",
"the",
"requested",
"batch_len"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L2617-L2673
|
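A usage sketch for `generate_queryset_batches` above. Each yielded item is itself a queryset restricted to a pk range, so per-batch work can stream without holding the whole table in memory; `process` is a hypothetical per-row callable.

    from pug.dj.db import generate_queryset_batches  # assumed import path
    from myapp.models import Person                  # assumed example model

    for batch_qs in generate_queryset_batches(Person.objects.all(), batch_len=500):
        for person in batch_qs.iterator():
            process(person)  # hypothetical per-row work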
241,272
|
hobson/pug-dj
|
pug/dj/db.py
|
optimize_filter_dict
|
def optimize_filter_dict(filter_dict, trgm=True):
"""Improve query speed for a Django queryset `filter` or `exclude` kwargs dict
WARNING: Without `trgm`, this only improves the speed of exclude filters by 0.4%
Arguments:
filter_dict (dict): kwargs for Django ORM queryset `filter` and `exclude` queries
trgm (bool): whether to assume the Django ORM trigram (djorm-trgm) extension is available
Examples:
>>> # This is nothing different than what Django already does:
>>> optimize_filter_dict({'name__in': ['Smith', 'Jones', 'Smith']}) == {'name__in': set(('Smith', 'Jones'))}
True
>>> # This is an optimization that Django doesn't do yet, probably because it actually slows `filter` queries down by 0.4%!:
>>> # However, it does speed up an `objects.exclude` query by about 1%:
>>> optimize_filter_dict({'name__in': ['Smith']}) == {'name': 'Smith'}
True
>>> # This is the only optimization that actually does significant good, but it requires `djorm-trgm`
>>> optimize_filter_dict({'name__contains': 'ith'}) == {'name__similar': 'ith', 'name__contains': 'ith'}
True
"""
optimized = {}
for k, v in filter_dict.iteritems():
if k.endswith('__in'):
v = set(v)
if len(v) == 1:
optimized[k[:-4]] = tuple(v)[0]
else:
optimized[k] = v
else:
optimized[k] = v
# This is the only optimization that actually does some good
if trgm:
optimized_copy = dict(optimized)
for k, v in optimized_copy.iteritems():
if k.endswith('__contains'):
optimized[k[:-10] + '__similar'] = v
elif k.endswith('__icontains'):
optimized[k[:-11] + '__similar'] = v
return optimized
|
python
|
def optimize_filter_dict(filter_dict, trgm=True):
"""Improve query speed for a Django queryset `filter` or `exclude` kwargs dict
WARNING: Without `trgm`, this only improves the speed of exclude filters by 0.4%
Arguments:
filter_dict (dict): kwargs for Django ORM queryset `filter` and `exclude` queries
trgm (bool): whether to assume the Django ORM trigram (djorm-trgm) extension is available
Examples:
>>> # This is nothing different than what Django already does:
>>> optimize_filter_dict({'name__in': ['Smith', 'Jones', 'Smith']}) == {'name__in': set(('Smith', 'Jones'))}
True
>>> # This is an optimization that Django doesn't do yet, probably because it actually slows `filter` queries down by 0.4%!:
>>> # However, it does speed up an `objects.exclude` query by about 1%:
>>> optimize_filter_dict({'name__in': ['Smith']}) == {'name': 'Smith'}
True
>>> # This is the only optimization that actually does significant good, but it requires `djorm-trgm`
>>> optimize_filter_dict({'name__contains': 'ith'}) == {'name__similar': 'ith', 'name__contains': 'ith'}
True
"""
optimized = {}
for k, v in filter_dict.iteritems():
if k.endswith('__in'):
v = set(v)
if len(v) == 1:
optimized[k[:-4]] = tuple(v)[0]
else:
optimized[k] = v
else:
optimized[k] = v
# This is the only optimization that actually does some good
if trgm:
optimized_copy = dict(optimized)
for k, v in optimized_copy.iteritems():
if k.endswith('__contains'):
optimized[k[:-10] + '__similar'] = v
elif k.endswith('__icontains'):
optimized[k[:-11] + '__similar'] = v
return optimized
|
[
"def",
"optimize_filter_dict",
"(",
"filter_dict",
",",
"trgm",
"=",
"True",
")",
":",
"optimized",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"filter_dict",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
".",
"endswith",
"(",
"'__in'",
")",
":",
"v",
"=",
"set",
"(",
"v",
")",
"if",
"len",
"(",
"v",
")",
"==",
"1",
":",
"optimized",
"[",
"k",
"[",
":",
"-",
"4",
"]",
"]",
"=",
"tuple",
"(",
"v",
")",
"[",
"0",
"]",
"else",
":",
"optimized",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"optimized",
"[",
"k",
"]",
"=",
"v",
"# This is the only optimization that actuall does some good",
"if",
"trgm",
":",
"optimized_copy",
"=",
"dict",
"(",
"optimized",
")",
"for",
"k",
",",
"v",
"in",
"optimized_copy",
".",
"iteritems",
"(",
")",
":",
"if",
"k",
".",
"endswith",
"(",
"'__contains'",
")",
":",
"optimized",
"[",
"k",
"[",
":",
"-",
"10",
"]",
"+",
"'__similar'",
"]",
"=",
"v",
"elif",
"k",
".",
"endswith",
"(",
"'__icontains'",
")",
":",
"optimized",
"[",
"k",
"[",
":",
"-",
"11",
"]",
"+",
"'__similar'",
"]",
"=",
"v",
"return",
"optimized"
] |
Improve query speed for a Django queryset `filter` or `exclude` kwargs dict
WARNING: Without `trgm`, this only improves the speed of exclude filters by 0.4%
Arguments:
filter_dict (dict): kwargs for Django ORM queryset `filter` and `exclude` queries
trgm (bool): whether to assume the Django ORM trigram (djorm-trgm) extension is available
Examples:
>>> # This is nothing different than what Django already does:
>>> optimize_filter_dict({'name__in': ['Smith', 'Jones', 'Smith']}) == {'name__in': set(('Smith', 'Jones'))}
True
>>> # This is an optimization that Django doesn't do yet, probably because it actually slows `filter` queries down by 0.4%!:
>>> # However, it does speed up an `objects.exclude` query by about 1%:
>>> optimize_filter_dict({'name__in': ['Smith']}) == {'name': 'Smith'}
True
>>> # This is the only optimization that actually does significant good, but it requires `djorm-trgm`
>>> optimize_filter_dict({'name__contains': 'ith'}) == {'name__similar': 'ith', 'name__contains': 'ith'}
True
|
[
"Improve",
"query",
"speed",
"for",
"a",
"Django",
"queryset",
"filter",
"or",
"exclude",
"kwargs",
"dict"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L2739-L2778
|
241,273
|
hobson/pug-dj
|
pug/dj/db.py
|
dump_json
|
def dump_json(model, batch_len=200000, use_natural_keys=True, verbosity=1):
"""Dump database records to .json Django fixture file, one file for each batch of `batch_len` records
Files are suitable for loading with "python manage.py loaddata folder_name_containing_files/*".
"""
model = get_model(model)
N = model.objects.count()
if verbosity > 0:
widgets = [pb.Counter(), '/%d rows: ' % (N,), pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start()
JSONSerializer = serializers.get_serializer("json")
jser = JSONSerializer()
if verbosity > 0:
pbar.update(0)
for i, partial_qs in enumerate(util.generate_slices(model.objects.all(), batch_len=batch_len)):
with open(model._meta.app_label.lower() + '--' + model._meta.object_name.lower() + '--%04d.json' % i, 'w') as fpout:
if verbosity > 0:
pbar.update(i*batch_len)
jser.serialize(partial_qs, indent=1, stream=fpout, use_natural_keys=use_natural_keys)
if verbosity > 0:
pbar.finish()
|
python
|
def dump_json(model, batch_len=200000, use_natural_keys=True, verbosity=1):
"""Dump database records to .json Django fixture file, one file for each batch of `batch_len` records
Files are suitable for loading with "python manage.py loaddata folder_name_containing_files/*".
"""
model = get_model(model)
N = model.objects.count()
if verbosity > 0:
widgets = [pb.Counter(), '/%d rows: ' % (N,), pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
i, pbar = 0, pb.ProgressBar(widgets=widgets, maxval=N).start()
JSONSerializer = serializers.get_serializer("json")
jser = JSONSerializer()
if verbosity > 0:
pbar.update(0)
for i, partial_qs in enumerate(util.generate_slices(model.objects.all(), batch_len=batch_len)):
with open(model._meta.app_label.lower() + '--' + model._meta.object_name.lower() + '--%04d.json' % i, 'w') as fpout:
if verbosity > 0:
pbar.update(i*batch_len)
jser.serialize(partial_qs, indent=1, stream=fpout, use_natural_keys=use_natural_keys)
if verbosity > 0:
pbar.finish()
|
[
"def",
"dump_json",
"(",
"model",
",",
"batch_len",
"=",
"200000",
",",
"use_natural_keys",
"=",
"True",
",",
"verbosity",
"=",
"1",
")",
":",
"model",
"=",
"get_model",
"(",
"model",
")",
"N",
"=",
"model",
".",
"objects",
".",
"count",
"(",
")",
"if",
"verbosity",
">",
"0",
":",
"widgets",
"=",
"[",
"pb",
".",
"Counter",
"(",
")",
",",
"'/%d rows: '",
"%",
"(",
"N",
",",
")",
",",
"pb",
".",
"Percentage",
"(",
")",
",",
"' '",
",",
"pb",
".",
"RotatingMarker",
"(",
")",
",",
"' '",
",",
"pb",
".",
"Bar",
"(",
")",
",",
"' '",
",",
"pb",
".",
"ETA",
"(",
")",
"]",
"i",
",",
"pbar",
"=",
"0",
",",
"pb",
".",
"ProgressBar",
"(",
"widgets",
"=",
"widgets",
",",
"maxval",
"=",
"N",
")",
".",
"start",
"(",
")",
"JSONSerializer",
"=",
"serializers",
".",
"get_serializer",
"(",
"\"json\"",
")",
"jser",
"=",
"JSONSerializer",
"(",
")",
"if",
"verbosity",
">",
"0",
":",
"pbar",
".",
"update",
"(",
"0",
")",
"for",
"i",
",",
"partial_qs",
"in",
"enumerate",
"(",
"util",
".",
"generate_slices",
"(",
"model",
".",
"objects",
".",
"all",
"(",
")",
",",
"batch_len",
"=",
"batch_len",
")",
")",
":",
"with",
"open",
"(",
"model",
".",
"_meta",
".",
"app_label",
".",
"lower",
"(",
")",
"+",
"'--'",
"+",
"model",
".",
"_meta",
".",
"object_name",
".",
"lower",
"(",
")",
"+",
"'--%04d.json'",
"%",
"i",
",",
"'w'",
")",
"as",
"fpout",
":",
"if",
"verbosity",
">",
"0",
":",
"pbar",
".",
"update",
"(",
"i",
"*",
"batch_len",
")",
"jser",
".",
"serialize",
"(",
"partial_qs",
",",
"indent",
"=",
"1",
",",
"stream",
"=",
"fpout",
",",
"use_natural_keys",
"=",
"use_natural_keys",
")",
"if",
"verbosity",
">",
"0",
":",
"pbar",
".",
"finish",
"(",
")"
] |
Dump database records to .json Django fixture files, one file for each batch of `batch_len` records
Files are suitable for loading with "python manage.py loaddata folder_name_containing_files/*".
|
[
"Dump",
"database",
"records",
"to",
".",
"json",
"Django",
"fixture",
"file",
"one",
"file",
"for",
"each",
"batch",
"of",
"batch_len",
"records"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L2822-L2846
|
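A usage sketch for `dump_json` above, assuming the import path and that the repo's `get_model` helper accepts a model class directly. Output lands in the current working directory as `<app>--<model>--0000.json`, `--0001.json`, ..., ready for `manage.py loaddata`.

    from pug.dj.db import dump_json   # assumed import path
    from myapp.models import Person   # assumed example model

    dump_json(Person, batch_len=50000)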
241,274
|
hobson/pug-dj
|
pug/dj/db.py
|
filter_exclude_dicts
|
def filter_exclude_dicts(filter_dict=None, exclude_dict=None, name='acctno', values=[], swap=False):
"""Produces kwargs dicts for Django Queryset `filter` and `exclude` from a list of values
The last, critical step in generating Django ORM kwargs dicts from a natural language query.
Properly parses "NOT" unary operators on each field value in the list.
Assumes the lists have been pre-processed to consolidate NOTs and normalize values and syntax.
Examples:
>>> filter_exclude_dicts(name='num', values=['NOT 1', '2', '3', 'NOT 4']
... ) == ({'num__in': ['2', '3']}, {'num__in': ['1', '4']})
True
"""
filter_dict = filter_dict or {}
exclude_dict = exclude_dict or {}
if not name.endswith('__in'):
name += '__in'
filter_dict[name], exclude_dict[name] = [], []
for v in values:
# "NOT " means switch from include (filter) to exclude for that one account number
if v.startswith('NOT '):
exclude_dict[name] += [v[4:]]
else:
filter_dict[name] += [v]
if swap:
return exclude_dict, filter_dict
return filter_dict, exclude_dict
|
python
|
def filter_exclude_dicts(filter_dict=None, exclude_dict=None, name='acctno', values=[], swap=False):
"""Produces kwargs dicts for Django Queryset `filter` and `exclude` from a list of values
The last, critical step in generating Django ORM kwargs dicts from a natural language query.
Properly parses "NOT" unary operators on each field value in the list.
Assumes the lists have been pre-processed to consolidate NOTs and normalize values and syntax.
Examples:
>>> filter_exclude_dicts(name='num', values=['NOT 1', '2', '3', 'NOT 4']
... ) == ({'num__in': ['2', '3']}, {'num__in': ['1', '4']})
True
"""
filter_dict = filter_dict or {}
exclude_dict = exclude_dict or {}
if not name.endswith('__in'):
name += '__in'
filter_dict[name], exclude_dict[name] = [], []
for v in values:
# "NOT " means switch from include (filter) to exclude for that one account number
if v.startswith('NOT '):
exclude_dict[name] += [v[4:]]
else:
filter_dict[name] += [v]
if swap:
return exclude_dict, filter_dict
return filter_dict, exclude_dict
|
[
"def",
"filter_exclude_dicts",
"(",
"filter_dict",
"=",
"None",
",",
"exclude_dict",
"=",
"None",
",",
"name",
"=",
"'acctno'",
",",
"values",
"=",
"[",
"]",
",",
"swap",
"=",
"False",
")",
":",
"filter_dict",
"=",
"filter_dict",
"or",
"{",
"}",
"exclude_dict",
"=",
"exclude_dict",
"or",
"{",
"}",
"if",
"not",
"name",
".",
"endswith",
"(",
"'__in'",
")",
":",
"name",
"+=",
"'__in'",
"filter_dict",
"[",
"name",
"]",
",",
"exclude_dict",
"[",
"name",
"]",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"v",
"in",
"values",
":",
"# \"NOT \" means switch from include (filter) to exclude for that one account number",
"if",
"v",
".",
"startswith",
"(",
"'NOT '",
")",
":",
"exclude_dict",
"[",
"name",
"]",
"+=",
"[",
"v",
"[",
"4",
":",
"]",
"]",
"else",
":",
"filter_dict",
"[",
"name",
"]",
"+=",
"[",
"v",
"]",
"if",
"swap",
":",
"return",
"exclude_dict",
",",
"filter_dict",
"return",
"filter_dict",
",",
"exclude_dict"
] |
Produces kwargs dicts for Django Queryset `filter` and `exclude` from a list of values
The last, critical step in generating Django ORM kwargs dicts from a natural language query.
Properly parses "NOT" unary operators on each field value in the list.
Assumes the lists have been pre-processed to consolidate NOTs and normalize values and syntax.
Examples:
>>> filter_exclude_dicts(name='num', values=['NOT 1', '2', '3', 'NOT 4']
... ) == ({'num__in': ['2', '3']}, {'num__in': ['1', '4']})
True
|
[
"Produces",
"kwargs",
"dicts",
"for",
"Django",
"Queryset",
"filter",
"and",
"exclude",
"from",
"a",
"list",
"of",
"values"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L2849-L2876
|
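The pair returned by `filter_exclude_dicts` above plugs straight into the ORM, which is the intended final step; `Account` and the import path are assumptions.

    from pug.dj.db import filter_exclude_dicts  # assumed import path
    from myapp.models import Account            # assumed example model

    f, e = filter_exclude_dicts(name='acctno', values=['NOT 1001', '1002', '1003'])
    qs = Account.objects.filter(**f).exclude(**e)
    # equivalent to: acctno in {'1002', '1003'} and acctno not in {'1001'}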
241,275
|
hobson/pug-dj
|
pug/dj/db.py
|
Columns.process_kwargs
|
def process_kwargs(self, kwargs, prefix='default_', delete=True):
"""
set self attributes based on kwargs, optionally deleting kwargs that are processed
"""
processed = []
for k in kwargs:
if hasattr(self, prefix + k):
processed += [k]
setattr(self, prefix + k, kwargs[k])
for k in processed:
del(kwargs[k])
return kwargs
|
python
|
def process_kwargs(self, kwargs, prefix='default_', delete=True):
"""
set self attributes based on kwargs, optionally deleting kwargs that are processed
"""
processed = []
for k in kwargs:
if hasattr(self, prefix + k):
processed += [k]
setattr(self, prefix + k, kwargs[k])
for k in processed:
del(kwargs[k])
return kwargs
|
[
"def",
"process_kwargs",
"(",
"self",
",",
"kwargs",
",",
"prefix",
"=",
"'default_'",
",",
"delete",
"=",
"True",
")",
":",
"processed",
"=",
"[",
"]",
"for",
"k",
"in",
"kwargs",
":",
"if",
"hasattr",
"(",
"self",
",",
"prefix",
"+",
"k",
")",
":",
"processed",
"+=",
"[",
"k",
"]",
"setattr",
"(",
"self",
",",
"prefix",
"+",
"k",
",",
"kwargs",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"processed",
":",
"del",
"(",
"kwargs",
"[",
"k",
"]",
")",
"return",
"kwargs"
] |
set self attributes based on kwargs, optionally deleting kwargs that are processed
|
[
"set",
"self",
"attributes",
"based",
"on",
"kwargs",
"optionally",
"deleting",
"kwargs",
"that",
"are",
"processed"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1105-L1116
|
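A self-contained sketch of the pattern `process_kwargs` implements: kwargs whose names match a `default_`-prefixed attribute are absorbed onto the instance and removed, and the remainder is returned for downstream handling. (Note the `delete` flag is accepted but never consulted in this version.) The stand-in class below is an assumption for illustration.

    class Columns:
        default_sep = ','  # minimal stand-in for the record's class

        def process_kwargs(self, kwargs, prefix='default_', delete=True):
            processed = []
            for k in kwargs:
                if hasattr(self, prefix + k):
                    processed += [k]
                    setattr(self, prefix + k, kwargs[k])
            for k in processed:
                del kwargs[k]
            return kwargs

    c = Columns()
    leftover = c.process_kwargs({'sep': ';', 'unknown': 1})
    assert c.default_sep == ';' and leftover == {'unknown': 1}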
241,276
|
hobson/pug-dj
|
pug/dj/db.py
|
Columns.as_column_wise_lists
|
def as_column_wise_lists(self, transpose=False):
"""Generator over the columns of lists"""
# make this a generator of generators?
if transpose:
ans = self.from_row_wise_lists(self.as_column_wise_lists(transpose=False))
return ans
#print self
return self.values()
|
python
|
def as_column_wise_lists(self, transpose=False):
"""Generator over the columns of lists"""
# make this a generator of generators?
if transpose:
ans = self.from_row_wise_lists(self.as_column_wise_lists(transpose=False))
return ans
#print self
return self.values()
|
[
"def",
"as_column_wise_lists",
"(",
"self",
",",
"transpose",
"=",
"False",
")",
":",
"# make this a generator of generators?",
"if",
"transpose",
":",
"ans",
"=",
"self",
".",
"from_row_wise_lists",
"(",
"self",
".",
"as_column_wise_lists",
"(",
"transpose",
"=",
"False",
")",
")",
"return",
"ans",
"#print self",
"return",
"self",
".",
"values",
"(",
")"
] |
Generator over the columns of lists
|
[
"Generator",
"over",
"the",
"columns",
"of",
"lists"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L1257-L1264
|
241,277
|
AliGhahraei/verarandom
|
verarandom/_random_generator.py
|
VeraRandom.randint
|
def randint(self, a: int, b: int, n: Optional[int] = None) -> Union[List[int], int]:
""" Generate n numbers as a list or a single one if no n is given.
n is used to minimize the number of requests made and return type changes to be compatible
with :py:mod:`random`'s interface
"""
max_n = self.config.MAX_NUMBER_OF_INTEGERS
return self._generate_randoms(self._request_randints, max_n=max_n, a=a, b=b, n=n)
|
python
|
def randint(self, a: int, b: int, n: Optional[int] = None) -> Union[List[int], int]:
""" Generate n numbers as a list or a single one if no n is given.
n is used to minimize the number of requests made and return type changes to be compatible
with :py:mod:`random`'s interface
"""
max_n = self.config.MAX_NUMBER_OF_INTEGERS
return self._generate_randoms(self._request_randints, max_n=max_n, a=a, b=b, n=n)
|
[
"def",
"randint",
"(",
"self",
",",
"a",
":",
"int",
",",
"b",
":",
"int",
",",
"n",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Union",
"[",
"List",
"[",
"int",
"]",
",",
"int",
"]",
":",
"max_n",
"=",
"self",
".",
"config",
".",
"MAX_NUMBER_OF_INTEGERS",
"return",
"self",
".",
"_generate_randoms",
"(",
"self",
".",
"_request_randints",
",",
"max_n",
"=",
"max_n",
",",
"a",
"=",
"a",
",",
"b",
"=",
"b",
",",
"n",
"=",
"n",
")"
] |
Generate n numbers as a list or a single one if no n is given.
n is used to minimize the number of requests made and return type changes to be compatible
with :py:mod:`random`'s interface
|
[
"Generate",
"n",
"numbers",
"as",
"a",
"list",
"or",
"a",
"single",
"one",
"if",
"no",
"n",
"is",
"given",
"."
] |
63d9a5bd2776e40368933f54e58c3f4b4f333f03
|
https://github.com/AliGhahraei/verarandom/blob/63d9a5bd2776e40368933f54e58c3f4b4f333f03/verarandom/_random_generator.py#L69-L76
|
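A usage sketch for `randint` above; treat the construction of `VeraRandom` as an assumption, since only this method's signature appears in the record, and note that calls hit a remote randomness service, so they consume quota.

    from verarandom import VeraRandom  # assumed public import

    rng = VeraRandom()                  # construction details assumed
    one = rng.randint(1, 6)             # single int, like random.randint
    many = rng.randint(1, 6, n=10)      # list of 10 ints in one request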
241,278
|
AliGhahraei/verarandom
|
verarandom/_random_generator.py
|
VeraRandomQuota._check_quota
|
def _check_quota(self):
""" If IP can't make requests, raise BitQuotaExceeded. Called before generating numbers. """
self._request_remaining_quota_if_unset()
if self.quota_estimate < self.quota_limit:
raise BitQuotaExceeded(self.quota_estimate)
|
python
|
def _check_quota(self):
""" If IP can't make requests, raise BitQuotaExceeded. Called before generating numbers. """
self._request_remaining_quota_if_unset()
if self.quota_estimate < self.quota_limit:
raise BitQuotaExceeded(self.quota_estimate)
|
[
"def",
"_check_quota",
"(",
"self",
")",
":",
"self",
".",
"_request_remaining_quota_if_unset",
"(",
")",
"if",
"self",
".",
"quota_estimate",
"<",
"self",
".",
"quota_limit",
":",
"raise",
"BitQuotaExceeded",
"(",
"self",
".",
"quota_estimate",
")"
] |
If IP can't make requests, raise BitQuotaExceeded. Called before generating numbers.
|
[
"If",
"IP",
"can",
"t",
"make",
"requests",
"raise",
"BitQuotaExceeded",
".",
"Called",
"before",
"generating",
"numbers",
"."
] |
63d9a5bd2776e40368933f54e58c3f4b4f333f03
|
https://github.com/AliGhahraei/verarandom/blob/63d9a5bd2776e40368933f54e58c3f4b4f333f03/verarandom/_random_generator.py#L171-L175
|
241,279
|
musicmetric/mmpy
|
src/entity.py
|
Entity._construct_timeseries
|
def _construct_timeseries(self, timeseries, constraints={}):
"""
wraps response_from for timeseries calls, returns the resulting dict
"""
self.response_from(timeseries, constraints)
if self.response == None:
return None
return {'data':self.response['data'],
'period':self.response['period'],
'start time':datetime.datetime.fromtimestamp(self.response['start_time']),
'end time':datetime.datetime.fromtimestamp(self.response['end_time'])}
|
python
|
def _construct_timeseries(self, timeseries, constraints={}):
"""
wraps response_from for timeseries calls, returns the resulting dict
"""
self.response_from(timeseries, constraints)
if self.response == None:
return None
return {'data':self.response['data'],
'period':self.response['period'],
'start time':datetime.datetime.fromtimestamp(self.response['start_time']),
'end time':datetime.datetime.fromtimestamp(self.response['end_time'])}
|
[
"def",
"_construct_timeseries",
"(",
"self",
",",
"timeseries",
",",
"constraints",
"=",
"{",
"}",
")",
":",
"self",
".",
"response_from",
"(",
"timeseries",
",",
"constraints",
")",
"if",
"self",
".",
"response",
"==",
"None",
":",
"return",
"None",
"return",
"{",
"'data'",
":",
"self",
".",
"response",
"[",
"'data'",
"]",
",",
"'period'",
":",
"self",
".",
"response",
"[",
"'period'",
"]",
",",
"'start time'",
":",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"self",
".",
"response",
"[",
"'start_time'",
"]",
")",
",",
"'end time'",
":",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"self",
".",
"response",
"[",
"'end_time'",
"]",
")",
"}"
] |
wraps response_from for timeseries calls, returns the resulting dict
|
[
"wraps",
"response_from",
"for",
"timeseries",
"calls",
"returns",
"the",
"resulting",
"dict"
] |
2b5d975c61f9ea8c7f19f76a90b59771833ef881
|
https://github.com/musicmetric/mmpy/blob/2b5d975c61f9ea8c7f19f76a90b59771833ef881/src/entity.py#L58-L68
|
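Two Python pitfalls sit in the _construct_timeseries record above: the mutable default constraints={} is created once and shared by every call, and self.response == None should be an identity test. A sketch of the same method with both corrected (behaviour is otherwise assumed unchanged):

import datetime

def _construct_timeseries(self, timeseries, constraints=None):
    # A None default avoids one dict object being shared across calls.
    self.response_from(timeseries, constraints if constraints is not None else {})
    if self.response is None:  # identity check, the idiomatic None test
        return None
    return {'data': self.response['data'],
            'period': self.response['period'],
            'start time': datetime.datetime.fromtimestamp(self.response['start_time']),
            'end time': datetime.datetime.fromtimestamp(self.response['end_time'])}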
241,280
|
shreyaspotnis/rampage
|
rampage/widgets/ChannelWidgets.py
|
QChannelInfoBox.edit_channel_info
|
def edit_channel_info(self, new_ch_name, ch_dct):
"""Parent widget calls this whenever the user edits channel info.
"""
self.ch_name = new_ch_name
self.dct = ch_dct
if ch_dct['type'] == 'analog':
fmter = fmt.green
else:
fmter = fmt.blue
self.ch_name_label.setText(fmt.b(fmter(self.ch_name)))
self.generateToolTip()
|
python
|
def edit_channel_info(self, new_ch_name, ch_dct):
"""Parent widget calls this whenever the user edits channel info.
"""
self.ch_name = new_ch_name
self.dct = ch_dct
if ch_dct['type'] == 'analog':
fmter = fmt.green
else:
fmter = fmt.blue
self.ch_name_label.setText(fmt.b(fmter(self.ch_name)))
self.generateToolTip()
|
[
"def",
"edit_channel_info",
"(",
"self",
",",
"new_ch_name",
",",
"ch_dct",
")",
":",
"self",
".",
"ch_name",
"=",
"new_ch_name",
"self",
".",
"dct",
"=",
"ch_dct",
"if",
"ch_dct",
"[",
"'type'",
"]",
"==",
"'analog'",
":",
"fmter",
"=",
"fmt",
".",
"green",
"else",
":",
"fmter",
"=",
"fmt",
".",
"blue",
"self",
".",
"ch_name_label",
".",
"setText",
"(",
"fmt",
".",
"b",
"(",
"fmter",
"(",
"self",
".",
"ch_name",
")",
")",
")",
"self",
".",
"generateToolTip",
"(",
")"
] |
Parent widget calls this whenever the user edits channel info.
|
[
"Parent",
"widget",
"calls",
"this",
"whenever",
"the",
"user",
"edits",
"channel",
"info",
"."
] |
e2565aef7ee16ee06523de975e8aa41aca14e3b2
|
https://github.com/shreyaspotnis/rampage/blob/e2565aef7ee16ee06523de975e8aa41aca14e3b2/rampage/widgets/ChannelWidgets.py#L135-L145
|
241,281
|
hobson/pug-dj
|
pug/dj/miner/views.py
|
connections
|
def connections(request, edges):
"""
Plot a force-directed graph based on the edges provided
"""
edge_list, node_list = parse.graph_definition(edges)
data = {'nodes': json.dumps(node_list), 'edges': json.dumps(edge_list)}
return render_to_response('miner/connections.html', data)
|
python
|
def connections(request, edges):
"""
Plot a force-directed graph based on the edges provided
"""
edge_list, node_list = parse.graph_definition(edges)
data = {'nodes': json.dumps(node_list), 'edges': json.dumps(edge_list)}
return render_to_response('miner/connections.html', data)
|
[
"def",
"connections",
"(",
"request",
",",
"edges",
")",
":",
"edge_list",
",",
"node_list",
"=",
"parse",
".",
"graph_definition",
"(",
"edges",
")",
"data",
"=",
"{",
"'nodes'",
":",
"json",
".",
"dumps",
"(",
"node_list",
")",
",",
"'edges'",
":",
"json",
".",
"dumps",
"(",
"edge_list",
")",
"}",
"return",
"render_to_response",
"(",
"'miner/connections.html'",
",",
"data",
")"
] |
Plot a force-directed graph based on the edges provided
|
[
"Plot",
"a",
"force",
"-",
"directed",
"graph",
"based",
"on",
"the",
"edges",
"provided"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/views.py#L56-L62
|
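render_to_response, used in the connections record above, was deprecated in Django 2.0 and removed in 3.0. Ported to a current Django the view would use render, which only adds the request argument (a sketch, keeping the module's parse helper as-is):

import json
from django.shortcuts import render

def connections(request, edges):
    """Plot a force-directed graph based on the edges provided."""
    edge_list, node_list = parse.graph_definition(edges)  # `parse` as imported by the module
    data = {'nodes': json.dumps(node_list), 'edges': json.dumps(edge_list)}
    return render(request, 'miner/connections.html', data)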
241,282
|
hobson/pug-dj
|
pug/dj/miner/views.py
|
csv_response_from_context
|
def csv_response_from_context(context=None, filename=None, field_names=None, null_string='', eval_python=True):
"""Generate the response for a Download CSV button from data within the context dict
The CSV data must be in one of these places/formats:
* context as a list of lists of python values (strings for headers in first list)
* context['data']['d3data'] as a string in json format (python) for a list of lists of repr(python_value)s
* context['data']['cases'] as a list of lists of python values (strings for headers in first list)
* context['data']['cases'] as a django queryset or iterable of model instances (list, tuple, generator)
    If the input data is a list of lists (table) that has more columns than rows it will be transposed before being processed
"""
filename = filename or context.get('filename') or 'table_download.csv'
field_names = field_names or context.get('field_names', [])
# FIXME: too slow!
if field_names and all(field_names) and all(all(c in (string.letters + string.digits + '_.') for c in s) for s in field_names):
eval_python=False
data = context
# find the data table within the context dict. should be named 'data.cases' or 'data.d3data'
if not (isinstance(data, (tuple, list)) and isinstance(data[0], (tuple, list))):
data = json.loads(data.get('data', {}).get('d3data', '[[]]'))
if not data or not any(data):
data = context.get('data', {}).get('cases', [[]])
if not isinstance(data, (list, tuple)) or not isinstance(data[0], (list, tuple)):
data = table_generator_from_list_of_instances(data, field_names=field_names, eval_python=eval_python)
try:
if len(data) < len(data[0]):
data = util.transposed_lists(data) # list(list(row) for row in data)
except TypeError:
# no need to transpose if a generator was provided instead of a list or tuple (anything with a len attribute)
pass
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
writer = csv.writer(response)
for row in data:
newrow = []
for s in row:
try:
newrow.append(s.encode('utf-8')) #handles strings, unicodes, utf-8s
except AttributeError: #will happen when we try to encode a class object or number
newrow.append(s)
except: #not sure it ever will be touched.
newrow.append(unicode(s))
writer.writerow(newrow)
return response
|
python
|
def csv_response_from_context(context=None, filename=None, field_names=None, null_string='', eval_python=True):
"""Generate the response for a Download CSV button from data within the context dict
The CSV data must be in one of these places/formats:
* context as a list of lists of python values (strings for headers in first list)
* context['data']['d3data'] as a string in json format (python) for a list of lists of repr(python_value)s
* context['data']['cases'] as a list of lists of python values (strings for headers in first list)
* context['data']['cases'] as a django queryset or iterable of model instances (list, tuple, generator)
    If the input data is a list of lists (table) that has more columns than rows it will be transposed before being processed
"""
filename = filename or context.get('filename') or 'table_download.csv'
field_names = field_names or context.get('field_names', [])
# FIXME: too slow!
if field_names and all(field_names) and all(all(c in (string.letters + string.digits + '_.') for c in s) for s in field_names):
eval_python=False
data = context
# find the data table within the context dict. should be named 'data.cases' or 'data.d3data'
if not (isinstance(data, (tuple, list)) and isinstance(data[0], (tuple, list))):
data = json.loads(data.get('data', {}).get('d3data', '[[]]'))
if not data or not any(data):
data = context.get('data', {}).get('cases', [[]])
if not isinstance(data, (list, tuple)) or not isinstance(data[0], (list, tuple)):
data = table_generator_from_list_of_instances(data, field_names=field_names, eval_python=eval_python)
try:
if len(data) < len(data[0]):
data = util.transposed_lists(data) # list(list(row) for row in data)
except TypeError:
# no need to transpose if a generator was provided instead of a list or tuple (anything with a len attribute)
pass
# Create the HttpResponse object with the appropriate CSV header.
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s"' % filename
writer = csv.writer(response)
for row in data:
newrow = []
for s in row:
try:
newrow.append(s.encode('utf-8')) #handles strings, unicodes, utf-8s
except AttributeError: #will happen when we try to encode a class object or number
newrow.append(s)
except: #not sure it ever will be touched.
newrow.append(unicode(s))
writer.writerow(newrow)
return response
|
[
"def",
"csv_response_from_context",
"(",
"context",
"=",
"None",
",",
"filename",
"=",
"None",
",",
"field_names",
"=",
"None",
",",
"null_string",
"=",
"''",
",",
"eval_python",
"=",
"True",
")",
":",
"filename",
"=",
"filename",
"or",
"context",
".",
"get",
"(",
"'filename'",
")",
"or",
"'table_download.csv'",
"field_names",
"=",
"field_names",
"or",
"context",
".",
"get",
"(",
"'field_names'",
",",
"[",
"]",
")",
"# FIXME: too slow!",
"if",
"field_names",
"and",
"all",
"(",
"field_names",
")",
"and",
"all",
"(",
"all",
"(",
"c",
"in",
"(",
"string",
".",
"letters",
"+",
"string",
".",
"digits",
"+",
"'_.'",
")",
"for",
"c",
"in",
"s",
")",
"for",
"s",
"in",
"field_names",
")",
":",
"eval_python",
"=",
"False",
"data",
"=",
"context",
"# find the data table within the context dict. should be named 'data.cases' or 'data.d3data'",
"if",
"not",
"(",
"isinstance",
"(",
"data",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"isinstance",
"(",
"data",
"[",
"0",
"]",
",",
"(",
"tuple",
",",
"list",
")",
")",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"get",
"(",
"'data'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'d3data'",
",",
"'[[]]'",
")",
")",
"if",
"not",
"data",
"or",
"not",
"any",
"(",
"data",
")",
":",
"data",
"=",
"context",
".",
"get",
"(",
"'data'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'cases'",
",",
"[",
"[",
"]",
"]",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"(",
"list",
",",
"tuple",
")",
")",
"or",
"not",
"isinstance",
"(",
"data",
"[",
"0",
"]",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"data",
"=",
"table_generator_from_list_of_instances",
"(",
"data",
",",
"field_names",
"=",
"field_names",
",",
"eval_python",
"=",
"eval_python",
")",
"try",
":",
"if",
"len",
"(",
"data",
")",
"<",
"len",
"(",
"data",
"[",
"0",
"]",
")",
":",
"data",
"=",
"util",
".",
"transposed_lists",
"(",
"data",
")",
"# list(list(row) for row in data)",
"except",
"TypeError",
":",
"# no need to transpose if a generator was provided instead of a list or tuple (anything with a len attribute)",
"pass",
"# Create the HttpResponse object with the appropriate CSV header.",
"response",
"=",
"HttpResponse",
"(",
"content_type",
"=",
"'text/csv'",
")",
"response",
"[",
"'Content-Disposition'",
"]",
"=",
"'attachment; filename=\"%s\"'",
"%",
"filename",
"writer",
"=",
"csv",
".",
"writer",
"(",
"response",
")",
"for",
"row",
"in",
"data",
":",
"newrow",
"=",
"[",
"]",
"for",
"s",
"in",
"row",
":",
"try",
":",
"newrow",
".",
"append",
"(",
"s",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"#handles strings, unicodes, utf-8s",
"except",
"AttributeError",
":",
"#will happen when we try to encode a class object or number",
"newrow",
".",
"append",
"(",
"s",
")",
"except",
":",
"#not sure it ever will be touched.",
"newrow",
".",
"append",
"(",
"unicode",
"(",
"s",
")",
")",
"writer",
".",
"writerow",
"(",
"newrow",
")",
"return",
"response"
] |
Generate the response for a Download CSV button from data within the context dict
The CSV data must be in one of these places/formats:
* context as a list of lists of python values (strings for headers in first list)
* context['data']['d3data'] as a string in json format (python) for a list of lists of repr(python_value)s
* context['data']['cases'] as a list of lists of python values (strings for headers in first list)
* context['data']['cases'] as a django queryset or iterable of model instances (list, tuple, generator)
If the input data is a list of lists (table) that has more columns than rows it will be transposed before being processed
|
[
"Generate",
"the",
"response",
"for",
"a",
"Download",
"CSV",
"button",
"from",
"data",
"within",
"the",
"context",
"dict"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/views.py#L541-L592
|
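The csv_response_from_context record above is Python 2 code: string.letters, unicode(), and the per-cell encode() fallback all disappear under Python 3, where csv.writer accepts str directly. A minimal Python 3 sketch of just the response-building tail (the table-extraction logic is deliberately omitted):

import csv
from django.http import HttpResponse

def csv_response(rows, filename='table_download.csv'):
    # HttpResponse is file-like, so csv.writer can write straight into it.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    writer = csv.writer(response)
    for row in rows:
        writer.writerow(row)  # str cells need no manual encoding on Python 3
    return response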
241,283
|
hobson/pug-dj
|
pug/dj/miner/views.py
|
JSONView.render_to_response
|
def render_to_response(self, context, indent=None):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context, indent=indent))
|
python
|
def render_to_response(self, context, indent=None):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context, indent=indent))
|
[
"def",
"render_to_response",
"(",
"self",
",",
"context",
",",
"indent",
"=",
"None",
")",
":",
"return",
"self",
".",
"get_json_response",
"(",
"self",
".",
"convert_context_to_json",
"(",
"context",
",",
"indent",
"=",
"indent",
")",
")"
] |
Returns a JSON response containing 'context' as payload
|
[
"Returns",
"a",
"JSON",
"response",
"containing",
"context",
"as",
"payload"
] |
55678b08755a55366ce18e7d3b8ea8fa4491ab04
|
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/miner/views.py#L126-L128
|
241,284
|
pavelsof/ipalint
|
ipalint/strnorm.py
|
Normaliser.report
|
def report(self, reporter, ignore_nfd=False, ignore_ws=False):
"""
Adds the problems that have been found so far to the given Reporter
instance. The two keyword args can be used to restrict the error types
to be reported.
"""
if self.strip_errors and not ignore_ws:
reporter.add(self.strip_errors, 'leading or trailing whitespace')
if self.norm_errors and not ignore_nfd:
reporter.add(self.norm_errors, 'not in Unicode NFD')
|
python
|
def report(self, reporter, ignore_nfd=False, ignore_ws=False):
"""
Adds the problems that have been found so far to the given Reporter
instance. The two keyword args can be used to restrict the error types
to be reported.
"""
if self.strip_errors and not ignore_ws:
reporter.add(self.strip_errors, 'leading or trailing whitespace')
if self.norm_errors and not ignore_nfd:
reporter.add(self.norm_errors, 'not in Unicode NFD')
|
[
"def",
"report",
"(",
"self",
",",
"reporter",
",",
"ignore_nfd",
"=",
"False",
",",
"ignore_ws",
"=",
"False",
")",
":",
"if",
"self",
".",
"strip_errors",
"and",
"not",
"ignore_ws",
":",
"reporter",
".",
"add",
"(",
"self",
".",
"strip_errors",
",",
"'leading or trailing whitespace'",
")",
"if",
"self",
".",
"norm_errors",
"and",
"not",
"ignore_nfd",
":",
"reporter",
".",
"add",
"(",
"self",
".",
"norm_errors",
",",
"'not in Unicode NFD'",
")"
] |
Adds the problems that have been found so far to the given Reporter
instance. The two keyword args can be used to restrict the error types
to be reported.
|
[
"Adds",
"the",
"problems",
"that",
"have",
"been",
"found",
"so",
"far",
"to",
"the",
"given",
"Reporter",
"instance",
".",
"The",
"two",
"keyword",
"args",
"can",
"be",
"used",
"to",
"restrict",
"the",
"error",
"types",
"to",
"be",
"reported",
"."
] |
763e5979ede6980cbfc746b06fd007b379762eeb
|
https://github.com/pavelsof/ipalint/blob/763e5979ede6980cbfc746b06fd007b379762eeb/ipalint/strnorm.py#L63-L73
|
241,285
|
peepall/FancyLogger
|
FancyLogger/processing/__init__.py
|
MultiprocessingLogger.run
|
def run(self):
"""
        The main loop for the logger process. Will receive remote processes' orders one by one and wait for the next one.
Then return from this method when the main application calls for exit, which is a regular command.
"""
# Initialize the file logger
self.log = getLogger()
# Deserialize configuration
self.set_config_command = dill.loads(self.set_config_command)
self.set_configuration(self.set_config_command)
for handler in self.file_handlers:
if isinstance(handler, StreamHandler)\
and (handler.stream == sys.stdout or handler.stream == sys.stderr):
self.critical(LogMessageCommand(text='Cannot use logging.StreamHandler with \'sys.stdout\' nor '
'\'sys.stderr\' because those are reserved by the logger process',
level=logging.CRITICAL))
continue
self.log.addHandler(hdlr=handler)
self.log.setLevel(self.console_level)
while True:
o = dill.loads(self.queue.get())
if isinstance(o, LogMessageCommand):
if o.level == logging.DEBUG:
self.debug(command=o)
elif o.level == logging.INFO:
self.info(command=o)
elif o.level == logging.WARNING:
self.warning(command=o)
elif o.level == logging.ERROR:
self.error(command=o)
elif o.level == logging.CRITICAL:
self.critical(command=o)
elif isinstance(o, UpdateProgressCommand):
self.update(command=o)
elif isinstance(o, NewTaskCommand):
self.set_task(command=o)
elif isinstance(o, FlushCommand):
self.flush()
elif isinstance(o, StacktraceCommand):
self.throw(command=o)
elif isinstance(o, SetConfigurationCommand):
self.set_configuration(command=o)
elif isinstance(o, ExitCommand):
return
elif isinstance(o, SetLevelCommand):
self.set_level(command=o)
|
python
|
def run(self):
"""
        The main loop for the logger process. Will receive remote processes' orders one by one and wait for the next one.
Then return from this method when the main application calls for exit, which is a regular command.
"""
# Initialize the file logger
self.log = getLogger()
# Deserialize configuration
self.set_config_command = dill.loads(self.set_config_command)
self.set_configuration(self.set_config_command)
for handler in self.file_handlers:
if isinstance(handler, StreamHandler)\
and (handler.stream == sys.stdout or handler.stream == sys.stderr):
self.critical(LogMessageCommand(text='Cannot use logging.StreamHandler with \'sys.stdout\' nor '
'\'sys.stderr\' because those are reserved by the logger process',
level=logging.CRITICAL))
continue
self.log.addHandler(hdlr=handler)
self.log.setLevel(self.console_level)
while True:
o = dill.loads(self.queue.get())
if isinstance(o, LogMessageCommand):
if o.level == logging.DEBUG:
self.debug(command=o)
elif o.level == logging.INFO:
self.info(command=o)
elif o.level == logging.WARNING:
self.warning(command=o)
elif o.level == logging.ERROR:
self.error(command=o)
elif o.level == logging.CRITICAL:
self.critical(command=o)
elif isinstance(o, UpdateProgressCommand):
self.update(command=o)
elif isinstance(o, NewTaskCommand):
self.set_task(command=o)
elif isinstance(o, FlushCommand):
self.flush()
elif isinstance(o, StacktraceCommand):
self.throw(command=o)
elif isinstance(o, SetConfigurationCommand):
self.set_configuration(command=o)
elif isinstance(o, ExitCommand):
return
elif isinstance(o, SetLevelCommand):
self.set_level(command=o)
|
[
"def",
"run",
"(",
"self",
")",
":",
"# Initialize the file logger",
"self",
".",
"log",
"=",
"getLogger",
"(",
")",
"# Deserialize configuration",
"self",
".",
"set_config_command",
"=",
"dill",
".",
"loads",
"(",
"self",
".",
"set_config_command",
")",
"self",
".",
"set_configuration",
"(",
"self",
".",
"set_config_command",
")",
"for",
"handler",
"in",
"self",
".",
"file_handlers",
":",
"if",
"isinstance",
"(",
"handler",
",",
"StreamHandler",
")",
"and",
"(",
"handler",
".",
"stream",
"==",
"sys",
".",
"stdout",
"or",
"handler",
".",
"stream",
"==",
"sys",
".",
"stderr",
")",
":",
"self",
".",
"critical",
"(",
"LogMessageCommand",
"(",
"text",
"=",
"'Cannot use logging.StreamHandler with \\'sys.stdout\\' nor '",
"'\\'sys.stderr\\' because those are reserved by the logger process'",
",",
"level",
"=",
"logging",
".",
"CRITICAL",
")",
")",
"continue",
"self",
".",
"log",
".",
"addHandler",
"(",
"hdlr",
"=",
"handler",
")",
"self",
".",
"log",
".",
"setLevel",
"(",
"self",
".",
"console_level",
")",
"while",
"True",
":",
"o",
"=",
"dill",
".",
"loads",
"(",
"self",
".",
"queue",
".",
"get",
"(",
")",
")",
"if",
"isinstance",
"(",
"o",
",",
"LogMessageCommand",
")",
":",
"if",
"o",
".",
"level",
"==",
"logging",
".",
"DEBUG",
":",
"self",
".",
"debug",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"INFO",
":",
"self",
".",
"info",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"WARNING",
":",
"self",
".",
"warning",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"ERROR",
":",
"self",
".",
"error",
"(",
"command",
"=",
"o",
")",
"elif",
"o",
".",
"level",
"==",
"logging",
".",
"CRITICAL",
":",
"self",
".",
"critical",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"UpdateProgressCommand",
")",
":",
"self",
".",
"update",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"NewTaskCommand",
")",
":",
"self",
".",
"set_task",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"FlushCommand",
")",
":",
"self",
".",
"flush",
"(",
")",
"elif",
"isinstance",
"(",
"o",
",",
"StacktraceCommand",
")",
":",
"self",
".",
"throw",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"SetConfigurationCommand",
")",
":",
"self",
".",
"set_configuration",
"(",
"command",
"=",
"o",
")",
"elif",
"isinstance",
"(",
"o",
",",
"ExitCommand",
")",
":",
"return",
"elif",
"isinstance",
"(",
"o",
",",
"SetLevelCommand",
")",
":",
"self",
".",
"set_level",
"(",
"command",
"=",
"o",
")"
] |
The main loop for the logger process. Will receive remote processes' orders one by one and wait for the next one.
Then return from this method when the main application calls for exit, which is a regular command.
|
[
"The",
"main",
"loop",
"for",
"the",
"logger",
"process",
".",
"Will",
"receive",
"remote",
"processes",
"orders",
"one",
"by",
"one",
"and",
"wait",
"for",
"the",
"next",
"one",
".",
"Then",
"return",
"from",
"this",
"method",
"when",
"the",
"main",
"application",
"calls",
"for",
"exit",
"which",
"is",
"a",
"regular",
"command",
"."
] |
7f13f1397e76ed768fb6b6358194118831fafc6d
|
https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/processing/__init__.py#L225-L283
|
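The level fan-out inside run() above is a five-way if/elif over logging levels. A table-driven alternative keeps the mapping in one place; this is a sketch of the idea, not the library's code:

import logging

def dispatch_log_command(self, command):
    # One dict lookup replaces the if/elif chain; a KeyError flags unknown levels.
    handlers = {logging.DEBUG: self.debug,
                logging.INFO: self.info,
                logging.WARNING: self.warning,
                logging.ERROR: self.error,
                logging.CRITICAL: self.critical}
    handlers[command.level](command=command)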
241,286
|
peepall/FancyLogger
|
FancyLogger/processing/__init__.py
|
MultiprocessingLogger.redraw
|
def redraw(self):
"""
        Clears the console and performs a complete redraw of all progress bars and the awaiting logger messages if the
minimum time elapsed since the last redraw is enough.
"""
# Check if the refresh time lapse has elapsed and if a change requires to redraw
lapse_since_last_refresh = millis() - self.refresh_timer
if not lapse_since_last_refresh > self.redraw_frequency_millis or not self.changes_made:
return
# If yes, then reset change indicator and chrono
self.changes_made = False
self.refresh_timer = millis()
# Clear the system console
os.system(self.os_flush_command)
        # For each task, check if it has completed. If so, start its chrono
# Once the chrono has reached the maximum timeout time, delete the task
# For the other tasks that have not completed yet, redraw them
# Delete tasks that have been marked for deletion
if len(self.to_delete) > 0:
for task_id in self.to_delete:
del self.tasks[task_id]
self.to_delete = []
# If a task has been deleted, recalculate the maximum prefix length to keep progress bars aligned
self.longest_bar_prefix_size = self.longest_bar_prefix_value()
for task_id, task in self.tasks.items():
# If a task has completed, force its value to its maximum to prevent progress bar overflow
# Then start its timeout chrono
if task.progress >= task.total and not task.keep_alive:
# Prevent bar overflow
task.progress = task.total
# Start task's timeout chrono
if not task.timeout_chrono:
task.timeout_chrono = millis()
# If task's chrono has reached the maximum timeout time, mark it for deletion
elif millis() - task.timeout_chrono >= self.task_millis_to_removal:
self.to_delete.append(task_id)
# Redraw the task's progress bar through standard output
self.print_progress_bar(task=task)
# Keep space for future tasks if needed
slots = self.permanent_progressbar_slots - len(self.tasks)
if slots > 0:
for i in range(slots):
sys.stdout.write('\n\t\t---\n')
# Draw some space between bars and messages
if len(self.messages) > 0:
if self.permanent_progressbar_slots > 0 or len(self.tasks) > 0:
sys.stdout.write('\n\n')
# Print all the last log messages through standard output
for m in self.messages:
sys.stdout.write(m)
# Draw some space between messages and exceptions
if len(self.exceptions) > 0:
if len(self.messages) > 0:
sys.stdout.write('\n\n')
# Print all the exceptions through error output
for ex in self.exceptions:
sys.stderr.write(ex)
|
python
|
def redraw(self):
"""
        Clears the console and performs a complete redraw of all progress bars and the awaiting logger messages if the
minimum time elapsed since the last redraw is enough.
"""
# Check if the refresh time lapse has elapsed and if a change requires to redraw
lapse_since_last_refresh = millis() - self.refresh_timer
if not lapse_since_last_refresh > self.redraw_frequency_millis or not self.changes_made:
return
# If yes, then reset change indicator and chrono
self.changes_made = False
self.refresh_timer = millis()
# Clear the system console
os.system(self.os_flush_command)
        # For each task, check if it has completed. If so, start its chrono
# Once the chrono has reached the maximum timeout time, delete the task
# For the other tasks that have not completed yet, redraw them
# Delete tasks that have been marked for deletion
if len(self.to_delete) > 0:
for task_id in self.to_delete:
del self.tasks[task_id]
self.to_delete = []
# If a task has been deleted, recalculate the maximum prefix length to keep progress bars aligned
self.longest_bar_prefix_size = self.longest_bar_prefix_value()
for task_id, task in self.tasks.items():
# If a task has completed, force its value to its maximum to prevent progress bar overflow
# Then start its timeout chrono
if task.progress >= task.total and not task.keep_alive:
# Prevent bar overflow
task.progress = task.total
# Start task's timeout chrono
if not task.timeout_chrono:
task.timeout_chrono = millis()
# If task's chrono has reached the maximum timeout time, mark it for deletion
elif millis() - task.timeout_chrono >= self.task_millis_to_removal:
self.to_delete.append(task_id)
# Redraw the task's progress bar through standard output
self.print_progress_bar(task=task)
# Keep space for future tasks if needed
slots = self.permanent_progressbar_slots - len(self.tasks)
if slots > 0:
for i in range(slots):
sys.stdout.write('\n\t\t---\n')
# Draw some space between bars and messages
if len(self.messages) > 0:
if self.permanent_progressbar_slots > 0 or len(self.tasks) > 0:
sys.stdout.write('\n\n')
# Print all the last log messages through standard output
for m in self.messages:
sys.stdout.write(m)
# Draw some space between messages and exceptions
if len(self.exceptions) > 0:
if len(self.messages) > 0:
sys.stdout.write('\n\n')
# Print all the exceptions through error output
for ex in self.exceptions:
sys.stderr.write(ex)
|
[
"def",
"redraw",
"(",
"self",
")",
":",
"# Check if the refresh time lapse has elapsed and if a change requires to redraw",
"lapse_since_last_refresh",
"=",
"millis",
"(",
")",
"-",
"self",
".",
"refresh_timer",
"if",
"not",
"lapse_since_last_refresh",
">",
"self",
".",
"redraw_frequency_millis",
"or",
"not",
"self",
".",
"changes_made",
":",
"return",
"# If yes, then reset change indicator and chrono",
"self",
".",
"changes_made",
"=",
"False",
"self",
".",
"refresh_timer",
"=",
"millis",
"(",
")",
"# Clear the system console",
"os",
".",
"system",
"(",
"self",
".",
"os_flush_command",
")",
"# For each task, check if it has complete. If so, start its chrono",
"# Once the chrono has reached the maximum timeout time, delete the task",
"# For the other tasks that have not completed yet, redraw them",
"# Delete tasks that have been marked for deletion",
"if",
"len",
"(",
"self",
".",
"to_delete",
")",
">",
"0",
":",
"for",
"task_id",
"in",
"self",
".",
"to_delete",
":",
"del",
"self",
".",
"tasks",
"[",
"task_id",
"]",
"self",
".",
"to_delete",
"=",
"[",
"]",
"# If a task has been deleted, recalculate the maximum prefix length to keep progress bars aligned",
"self",
".",
"longest_bar_prefix_size",
"=",
"self",
".",
"longest_bar_prefix_value",
"(",
")",
"for",
"task_id",
",",
"task",
"in",
"self",
".",
"tasks",
".",
"items",
"(",
")",
":",
"# If a task has completed, force its value to its maximum to prevent progress bar overflow",
"# Then start its timeout chrono",
"if",
"task",
".",
"progress",
">=",
"task",
".",
"total",
"and",
"not",
"task",
".",
"keep_alive",
":",
"# Prevent bar overflow",
"task",
".",
"progress",
"=",
"task",
".",
"total",
"# Start task's timeout chrono",
"if",
"not",
"task",
".",
"timeout_chrono",
":",
"task",
".",
"timeout_chrono",
"=",
"millis",
"(",
")",
"# If task's chrono has reached the maximum timeout time, mark it for deletion",
"elif",
"millis",
"(",
")",
"-",
"task",
".",
"timeout_chrono",
">=",
"self",
".",
"task_millis_to_removal",
":",
"self",
".",
"to_delete",
".",
"append",
"(",
"task_id",
")",
"# Redraw the task's progress bar through standard output",
"self",
".",
"print_progress_bar",
"(",
"task",
"=",
"task",
")",
"# Keep space for future tasks if needed",
"slots",
"=",
"self",
".",
"permanent_progressbar_slots",
"-",
"len",
"(",
"self",
".",
"tasks",
")",
"if",
"slots",
">",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"slots",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n\\t\\t---\\n'",
")",
"# Draw some space between bars and messages",
"if",
"len",
"(",
"self",
".",
"messages",
")",
">",
"0",
":",
"if",
"self",
".",
"permanent_progressbar_slots",
">",
"0",
"or",
"len",
"(",
"self",
".",
"tasks",
")",
">",
"0",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n\\n'",
")",
"# Print all the last log messages through standard output",
"for",
"m",
"in",
"self",
".",
"messages",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"m",
")",
"# Draw some space between messages and exceptions",
"if",
"len",
"(",
"self",
".",
"exceptions",
")",
">",
"0",
":",
"if",
"len",
"(",
"self",
".",
"messages",
")",
">",
"0",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n\\n'",
")",
"# Print all the exceptions through error output",
"for",
"ex",
"in",
"self",
".",
"exceptions",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"ex",
")"
] |
Clears the console and performs a complete redraw of all progress bars and the awaiting logger messages if the
minimum time elapsed since the last redraw is enough.
|
[
"Clears",
"the",
"console",
"and",
"performs",
"a",
"complete",
"redraw",
"of",
"all",
"progress",
"bars",
"and",
"then",
"awaiting",
"logger",
"messages",
"if",
"the",
"minimum",
"time",
"elapsed",
"since",
"the",
"last",
"redraw",
"is",
"enough",
"."
] |
7f13f1397e76ed768fb6b6358194118831fafc6d
|
https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/processing/__init__.py#L386-L455
|
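redraw() above gates on two conditions before doing any work: enough milliseconds since the last refresh, and at least one recorded change. That gate is a reusable pattern on its own; a standalone sketch with illustrative names:

import time

class RedrawGate:
    def __init__(self, min_interval_ms):
        self.min_interval_ms = min_interval_ms
        self.last_ms = 0.0
        self.changes_made = False

    def mark_changed(self):
        self.changes_made = True

    def should_redraw(self):
        # Fire only when both conditions hold, then reset both.
        now_ms = time.monotonic() * 1000
        if self.changes_made and now_ms - self.last_ms > self.min_interval_ms:
            self.last_ms = now_ms
            self.changes_made = False
            return True
        return False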
241,287
|
esterhui/pypu
|
pypu/servicemanager.py
|
servicemanager.GetServices
|
def GetServices(self,filename):
"""Returns a list of service objects handling this file type"""
objlist=[]
for sobj in self.services:
if sobj.KnowsFile(filename) :
objlist.append(sobj)
if len(objlist)==0:
return None
return objlist
|
python
|
def GetServices(self,filename):
"""Returns a list of service objects handling this file type"""
objlist=[]
for sobj in self.services:
if sobj.KnowsFile(filename) :
objlist.append(sobj)
if len(objlist)==0:
return None
return objlist
|
[
"def",
"GetServices",
"(",
"self",
",",
"filename",
")",
":",
"objlist",
"=",
"[",
"]",
"for",
"sobj",
"in",
"self",
".",
"services",
":",
"if",
"sobj",
".",
"KnowsFile",
"(",
"filename",
")",
":",
"objlist",
".",
"append",
"(",
"sobj",
")",
"if",
"len",
"(",
"objlist",
")",
"==",
"0",
":",
"return",
"None",
"return",
"objlist"
] |
Returns a list of service objects handling this file type
|
[
"Returns",
"a",
"list",
"of",
"service",
"objects",
"handling",
"this",
"file",
"type"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/servicemanager.py#L13-L23
|
241,288
|
esterhui/pypu
|
pypu/servicemanager.py
|
servicemanager.GetServiceObj
|
def GetServiceObj(self,servicename):
"""Given a service name string, returns
the object that corresponds to the service"""
for sobj in self.services:
if sobj.GetName().lower()==servicename.lower():
return sobj
return None
|
python
|
def GetServiceObj(self,servicename):
"""Given a service name string, returns
the object that corresponds to the service"""
for sobj in self.services:
if sobj.GetName().lower()==servicename.lower():
return sobj
return None
|
[
"def",
"GetServiceObj",
"(",
"self",
",",
"servicename",
")",
":",
"for",
"sobj",
"in",
"self",
".",
"services",
":",
"if",
"sobj",
".",
"GetName",
"(",
")",
".",
"lower",
"(",
")",
"==",
"servicename",
".",
"lower",
"(",
")",
":",
"return",
"sobj",
"return",
"None"
] |
Given a service name string, returns
the object that corresponds to the service
|
[
"Given",
"a",
"service",
"name",
"string",
"returns",
"the",
"object",
"that",
"corresponds",
"to",
"the",
"service"
] |
cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec
|
https://github.com/esterhui/pypu/blob/cc3e259d59f024c2c4c0fbb9c8a1547e51de75ec/pypu/servicemanager.py#L25-L32
|
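GetServices and GetServiceObj above are both linear scans; the second collapses to a single next() over a generator expression, the usual idiom for first-match-or-None. A sketch outside the class:

def get_service_obj(services, servicename):
    # First service whose name matches case-insensitively, else None.
    return next((s for s in services
                 if s.GetName().lower() == servicename.lower()), None)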
241,289
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
object_path
|
def object_path(collection, id):
"""Returns path to the backing file of the object
with the given ``id`` in the given ``collection``.
Note that the ``id`` is made filesystem-safe by
"normalizing" its string representation."""
_logger.debug(type(id))
_logger.debug(id)
if isinstance(id, dict) and 'id' in id:
id = id['id']
normalized_id = normalize_text(str(id), lcase=False)
return os.path.join(_basepath, collection,
'%s.%s' % (normalized_id, _ext))
|
python
|
def object_path(collection, id):
"""Returns path to the backing file of the object
with the given ``id`` in the given ``collection``.
Note that the ``id`` is made filesystem-safe by
"normalizing" its string representation."""
_logger.debug(type(id))
_logger.debug(id)
if isinstance(id, dict) and 'id' in id:
id = id['id']
normalized_id = normalize_text(str(id), lcase=False)
return os.path.join(_basepath, collection,
'%s.%s' % (normalized_id, _ext))
|
[
"def",
"object_path",
"(",
"collection",
",",
"id",
")",
":",
"_logger",
".",
"debug",
"(",
"type",
"(",
"id",
")",
")",
"_logger",
".",
"debug",
"(",
"id",
")",
"if",
"isinstance",
"(",
"id",
",",
"dict",
")",
"and",
"'id'",
"in",
"id",
":",
"id",
"=",
"id",
"[",
"'id'",
"]",
"normalized_id",
"=",
"normalize_text",
"(",
"str",
"(",
"id",
")",
",",
"lcase",
"=",
"False",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"_basepath",
",",
"collection",
",",
"'%s.%s'",
"%",
"(",
"normalized_id",
",",
"_ext",
")",
")"
] |
Returns path to the backing file of the object
with the given ``id`` in the given ``collection``.
Note that the ``id`` is made filesystem-safe by
"normalizing" its string representation.
|
[
"Returns",
"path",
"to",
"the",
"backing",
"file",
"of",
"the",
"object",
"with",
"the",
"given",
"id",
"in",
"the",
"given",
"collection",
".",
"Note",
"that",
"the",
"id",
"is",
"made",
"filesystem",
"-",
"safe",
"by",
"normalizing",
"its",
"string",
"representation",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L49-L60
|
241,290
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
load_object_at_path
|
def load_object_at_path(path):
"""Load an object from disk at explicit path"""
with open(path, 'r') as f:
data = _deserialize(f.read())
return aadict(data)
|
python
|
def load_object_at_path(path):
"""Load an object from disk at explicit path"""
with open(path, 'r') as f:
data = _deserialize(f.read())
return aadict(data)
|
[
"def",
"load_object_at_path",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"data",
"=",
"_deserialize",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"aadict",
"(",
"data",
")"
] |
Load an object from disk at explicit path
|
[
"Load",
"an",
"object",
"from",
"disk",
"at",
"explicit",
"path"
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L68-L72
|
241,291
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
add_collection
|
def add_collection(collection,
cache_size=1000,
cache_cls=LRUCache,
**cache_args):
"""Add a collection named ``collection``."""
assert collection not in _db
cache = cache_cls(maxsize=cache_size,
missing=lambda id: load_object(collection, id),
**cache_args)
_db[collection] = aadict(cache=cache, indexes={})
|
python
|
def add_collection(collection,
cache_size=1000,
cache_cls=LRUCache,
**cache_args):
"""Add a collection named ``collection``."""
assert collection not in _db
cache = cache_cls(maxsize=cache_size,
missing=lambda id: load_object(collection, id),
**cache_args)
_db[collection] = aadict(cache=cache, indexes={})
|
[
"def",
"add_collection",
"(",
"collection",
",",
"cache_size",
"=",
"1000",
",",
"cache_cls",
"=",
"LRUCache",
",",
"*",
"*",
"cache_args",
")",
":",
"assert",
"collection",
"not",
"in",
"_db",
"cache",
"=",
"cache_cls",
"(",
"maxsize",
"=",
"cache_size",
",",
"missing",
"=",
"lambda",
"id",
":",
"load_object",
"(",
"collection",
",",
"id",
")",
",",
"*",
"*",
"cache_args",
")",
"_db",
"[",
"collection",
"]",
"=",
"aadict",
"(",
"cache",
"=",
"cache",
",",
"indexes",
"=",
"{",
"}",
")"
] |
Add a collection named ``collection``.
|
[
"Add",
"a",
"collection",
"named",
"collection",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L87-L96
|
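add_collection above builds an LRUCache with a missing= callback so cache misses fall through to load_object. That constructor argument existed in early cachetools releases but was later removed, so against a current cachetools the read-through has to be spelled out; a hedged sketch:

from cachetools import LRUCache

def get_cached_object(cache, collection, id):
    # Explicit read-through: try the cache first, load and store on a miss.
    try:
        return cache[id]
    except KeyError:
        obj = load_object(collection, id)  # load_object as defined in this module
        cache[id] = obj
        return obj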
241,292
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
prepare
|
def prepare(base_path='data',
serialize=json.dumps,
deserialize=json.loads,
file_ext='json'):
"""After you have added your collections, prepare the database
for use."""
global _basepath, _deserialize, _serialize, _ext
_basepath = base_path
assert callable(serialize)
assert callable(deserialize)
_serialize = serialize
_deserialize = deserialize
_ext = file_ext
_logger.debug('preparing with base path %s and file ext %s',
_basepath, _ext)
assert len(_db)
for collection in _db.keys():
c_path = collection_path(collection)
os.makedirs(c_path, exist_ok=True)
_logger.info('collection "%s": %d objects',
collection, object_count(collection))
|
python
|
def prepare(base_path='data',
serialize=json.dumps,
deserialize=json.loads,
file_ext='json'):
"""After you have added your collections, prepare the database
for use."""
global _basepath, _deserialize, _serialize, _ext
_basepath = base_path
assert callable(serialize)
assert callable(deserialize)
_serialize = serialize
_deserialize = deserialize
_ext = file_ext
_logger.debug('preparing with base path %s and file ext %s',
_basepath, _ext)
assert len(_db)
for collection in _db.keys():
c_path = collection_path(collection)
os.makedirs(c_path, exist_ok=True)
_logger.info('collection "%s": %d objects',
collection, object_count(collection))
|
[
"def",
"prepare",
"(",
"base_path",
"=",
"'data'",
",",
"serialize",
"=",
"json",
".",
"dumps",
",",
"deserialize",
"=",
"json",
".",
"loads",
",",
"file_ext",
"=",
"'json'",
")",
":",
"global",
"_basepath",
",",
"_deserialize",
",",
"_serialize",
",",
"_ext",
"_basepath",
"=",
"base_path",
"assert",
"callable",
"(",
"serialize",
")",
"assert",
"callable",
"(",
"deserialize",
")",
"_serialize",
"=",
"serialize",
"_deserialize",
"=",
"deserialize",
"_ext",
"=",
"file_ext",
"_logger",
".",
"debug",
"(",
"'preparing with base path %s and file ext %s'",
",",
"_basepath",
",",
"_ext",
")",
"assert",
"len",
"(",
"_db",
")",
"for",
"collection",
"in",
"_db",
".",
"keys",
"(",
")",
":",
"c_path",
"=",
"collection_path",
"(",
"collection",
")",
"os",
".",
"makedirs",
"(",
"c_path",
",",
"exist_ok",
"=",
"True",
")",
"_logger",
".",
"info",
"(",
"'collection \"%s\": %d objects'",
",",
"collection",
",",
"object_count",
"(",
"collection",
")",
")"
] |
After you have added your collections, prepare the database
for use.
|
[
"After",
"you",
"have",
"added",
"your",
"collections",
"prepare",
"the",
"database",
"for",
"use",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L103-L123
|
241,293
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
each_object
|
def each_object(collection):
"""Yields each object in the given ``collection``.
The objects are loaded from cache and failing that,
from disk."""
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
yield load_object_at_path(path)
|
python
|
def each_object(collection):
"""Yields each object in the given ``collection``.
The objects are loaded from cache and failing that,
from disk."""
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
yield load_object_at_path(path)
|
[
"def",
"each_object",
"(",
"collection",
")",
":",
"c_path",
"=",
"collection_path",
"(",
"collection",
")",
"paths",
"=",
"glob",
"(",
"'%s/*.%s'",
"%",
"(",
"c_path",
",",
"_ext",
")",
")",
"for",
"path",
"in",
"paths",
":",
"yield",
"load_object_at_path",
"(",
"path",
")"
] |
Yields each object in the given ``collection``.
The objects are loaded from cache and failing that,
from disk.
|
[
"Yields",
"each",
"object",
"in",
"the",
"given",
"collection",
".",
"The",
"objects",
"are",
"loaded",
"from",
"cache",
"and",
"failing",
"that",
"from",
"disk",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L131-L138
|
241,294
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
each_object_id
|
def each_object_id(collection):
"""Yields each object ID in the given ``collection``.
The objects are not loaded."""
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
match = regex.match(r'.+/(.+)\.%s$' % _ext, path)
yield match.groups()[0]
|
python
|
def each_object_id(collection):
"""Yields each object ID in the given ``collection``.
The objects are not loaded."""
c_path = collection_path(collection)
paths = glob('%s/*.%s' % (c_path, _ext))
for path in paths:
match = regex.match(r'.+/(.+)\.%s$' % _ext, path)
yield match.groups()[0]
|
[
"def",
"each_object_id",
"(",
"collection",
")",
":",
"c_path",
"=",
"collection_path",
"(",
"collection",
")",
"paths",
"=",
"glob",
"(",
"'%s/*.%s'",
"%",
"(",
"c_path",
",",
"_ext",
")",
")",
"for",
"path",
"in",
"paths",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"r'.+/(.+)\\.%s$'",
"%",
"_ext",
",",
"path",
")",
"yield",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]"
] |
Yields each object ID in the given ``collection``.
The objects are not loaded.
|
[
"Yields",
"each",
"object",
"ID",
"in",
"the",
"given",
"collection",
".",
"The",
"objects",
"are",
"not",
"loaded",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L141-L148
|
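each_object_id above reaches for a regex to strip the directory and extension from every glob hit. os.path does the same without a pattern; a stdlib-only sketch of the identical loop:

import os
from glob import glob

def each_object_id(collection):
    c_path = collection_path(collection)  # collection_path and _ext as defined in this module
    for path in glob('%s/*.%s' % (c_path, _ext)):
        # basename drops the directory, splitext drops the trailing extension.
        yield os.path.splitext(os.path.basename(path))[0]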
241,295
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
save_object
|
def save_object(collection, obj):
"""Save an object ``obj`` to the given ``collection``.
``obj.id`` must be unique across all other existing objects in
the given collection. If ``id`` is not present in the object, a
*UUID* is assigned as the object's ``id``.
Indexes already defined on the ``collection`` are updated after
the object is saved.
Returns the object.
"""
if 'id' not in obj:
obj.id = uuid()
id = obj.id
path = object_path(collection, id)
temp_path = '%s.temp' % path
with open(temp_path, 'w') as f:
data = _serialize(obj)
f.write(data)
shutil.move(temp_path, path)
if id in _db[collection].cache:
_db[collection].cache[id] = obj
_update_indexes_for_mutated_object(collection, obj)
return obj
|
python
|
def save_object(collection, obj):
"""Save an object ``obj`` to the given ``collection``.
``obj.id`` must be unique across all other existing objects in
the given collection. If ``id`` is not present in the object, a
*UUID* is assigned as the object's ``id``.
Indexes already defined on the ``collection`` are updated after
the object is saved.
Returns the object.
"""
if 'id' not in obj:
obj.id = uuid()
id = obj.id
path = object_path(collection, id)
temp_path = '%s.temp' % path
with open(temp_path, 'w') as f:
data = _serialize(obj)
f.write(data)
shutil.move(temp_path, path)
if id in _db[collection].cache:
_db[collection].cache[id] = obj
_update_indexes_for_mutated_object(collection, obj)
return obj
|
[
"def",
"save_object",
"(",
"collection",
",",
"obj",
")",
":",
"if",
"'id'",
"not",
"in",
"obj",
":",
"obj",
".",
"id",
"=",
"uuid",
"(",
")",
"id",
"=",
"obj",
".",
"id",
"path",
"=",
"object_path",
"(",
"collection",
",",
"id",
")",
"temp_path",
"=",
"'%s.temp'",
"%",
"path",
"with",
"open",
"(",
"temp_path",
",",
"'w'",
")",
"as",
"f",
":",
"data",
"=",
"_serialize",
"(",
"obj",
")",
"f",
".",
"write",
"(",
"data",
")",
"shutil",
".",
"move",
"(",
"temp_path",
",",
"path",
")",
"if",
"id",
"in",
"_db",
"[",
"collection",
"]",
".",
"cache",
":",
"_db",
"[",
"collection",
"]",
".",
"cache",
"[",
"id",
"]",
"=",
"obj",
"_update_indexes_for_mutated_object",
"(",
"collection",
",",
"obj",
")",
"return",
"obj"
] |
Save an object ``obj`` to the given ``collection``.
``obj.id`` must be unique across all other existing objects in
the given collection. If ``id`` is not present in the object, a
*UUID* is assigned as the object's ``id``.
Indexes already defined on the ``collection`` are updated after
the object is saved.
Returns the object.
|
[
"Save",
"an",
"object",
"obj",
"to",
"the",
"given",
"collection",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L152-L176
|
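save_object above writes to a .temp sibling and then moves it over the target, so readers never observe a half-written file. The same pattern sketched standalone, using os.replace (an atomic rename whenever source and target live on one filesystem):

import os
import tempfile

def atomic_write_text(path, data):
    # Write into a temp file in the target directory, then rename into place.
    directory = os.path.dirname(path) or '.'
    fd, tmp_path = tempfile.mkstemp(dir=directory)
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(data)
        os.replace(tmp_path, path)
    except BaseException:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
        raise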
241,296
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
add_index
|
def add_index(collection,
name,
fields,
transformer=None,
unique=False,
case_insensitive=False):
"""
Add a secondary index for a collection ``collection`` on one or
more ``fields``.
The values at each of the ``fields`` are loaded from existing
objects and their object ids added to the index.
You can later iterate the objects of an index via
``each_indexed_object``.
If you update an object and call ``save_object``, the index will
be updated with the latest values from the updated object.
If you delete an object via ``delete_object``, the object will
be removed from any indexes on the object's collection.
If a function is provided for ``transformer``, the values
extracted from each object in the collection will be passed to
the ``transformer``. The ``transformer`` should return a list
of values that will go into the index.
If ``unique`` is true, then there may only be at most one object
in the collection with a unique set of values for each the
``fields`` provided.
If ``case_insensitive`` is true, then the value stored in the
index will be lower-cased and comparisons thereto will be
lower-cased as well.
"""
assert len(name) > 0
assert len(fields) > 0
indexes = _db[collection].indexes
index = indexes.setdefault(name, aadict())
index.transformer = transformer
index.value_map = {} # json([value]) => set(object_id)
index.unique = unique
index.case_insensitive = case_insensitive
index.fields = fields
for obj in each_object(collection):
_add_to_index(index, obj)
_logger.info('added %s, %s index to collection %s on fields: %s',
'unique' if unique else 'non-unique',
'case-insensitive' if case_insensitive else 'case-sensitive',
collection, ', '.join(fields))
|
python
|
def add_index(collection,
name,
fields,
transformer=None,
unique=False,
case_insensitive=False):
"""
Add a secondary index for a collection ``collection`` on one or
more ``fields``.
The values at each of the ``fields`` are loaded from existing
objects and their object ids added to the index.
You can later iterate the objects of an index via
``each_indexed_object``.
If you update an object and call ``save_object``, the index will
be updated with the latest values from the updated object.
If you delete an object via ``delete_object``, the object will
be removed from any indexes on the object's collection.
If a function is provided for ``transformer``, the values
extracted from each object in the collection will be passed to
the ``transformer``. The ``transformer`` should return a list
of values that will go into the index.
If ``unique`` is true, then there may only be at most one object
    in the collection with a unique set of values for each of the
``fields`` provided.
If ``case_insensitive`` is true, then the value stored in the
index will be lower-cased and comparisons thereto will be
lower-cased as well.
"""
assert len(name) > 0
assert len(fields) > 0
indexes = _db[collection].indexes
index = indexes.setdefault(name, aadict())
index.transformer = transformer
index.value_map = {} # json([value]) => set(object_id)
index.unique = unique
index.case_insensitive = case_insensitive
index.fields = fields
for obj in each_object(collection):
_add_to_index(index, obj)
_logger.info('added %s, %s index to collection %s on fields: %s',
'unique' if unique else 'non-unique',
'case-insensitive' if case_insensitive else 'case-sensitive',
collection, ', '.join(fields))
|
[
"def",
"add_index",
"(",
"collection",
",",
"name",
",",
"fields",
",",
"transformer",
"=",
"None",
",",
"unique",
"=",
"False",
",",
"case_insensitive",
"=",
"False",
")",
":",
"assert",
"len",
"(",
"name",
")",
">",
"0",
"assert",
"len",
"(",
"fields",
")",
">",
"0",
"indexes",
"=",
"_db",
"[",
"collection",
"]",
".",
"indexes",
"index",
"=",
"indexes",
".",
"setdefault",
"(",
"name",
",",
"aadict",
"(",
")",
")",
"index",
".",
"transformer",
"=",
"transformer",
"index",
".",
"value_map",
"=",
"{",
"}",
"# json([value]) => set(object_id)",
"index",
".",
"unique",
"=",
"unique",
"index",
".",
"case_insensitive",
"=",
"case_insensitive",
"index",
".",
"fields",
"=",
"fields",
"for",
"obj",
"in",
"each_object",
"(",
"collection",
")",
":",
"_add_to_index",
"(",
"index",
",",
"obj",
")",
"_logger",
".",
"info",
"(",
"'added %s, %s index to collection %s on fields: %s'",
",",
"'unique'",
"if",
"unique",
"else",
"'non-unique'",
",",
"'case-insensitive'",
"if",
"case_insensitive",
"else",
"'case-sensitive'",
",",
"collection",
",",
"', '",
".",
"join",
"(",
"fields",
")",
")"
] |
Add a secondary index for a collection ``collection`` on one or
more ``fields``.
The values at each of the ``fields`` are loaded from existing
objects and their object ids added to the index.
You can later iterate the objects of an index via
``each_indexed_object``.
If you update an object and call ``save_object``, the index will
be updated with the latest values from the updated object.
If you delete an object via ``delete_object``, the object will
be removed from any indexes on the object's collection.
If a function is provided for ``transformer``, the values
extracted from each object in the collection will be passed to
the ``transformer``. The ``transformer`` should return a list
of values that will go into the index.
If ``unique`` is true, then there may only be at most one object
in the collection with a unique set of values for each of the
``fields`` provided.
If ``case_insensitive`` is true, then the value stored in the
index will be lower-cased and comparisons thereto will be
lower-cased as well.
|
[
"Add",
"a",
"secondary",
"index",
"for",
"a",
"collection",
"collection",
"on",
"one",
"or",
"more",
"fields",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L198-L247
|
241,297
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
_add_to_index
|
def _add_to_index(index, obj):
"""Adds the given object ``obj`` to the given ``index``"""
id_set = index.value_map.setdefault(indexed_value(index, obj), set())
if index.unique:
if len(id_set) > 0:
raise UniqueConstraintError()
id_set.add(obj.id)
|
python
|
def _add_to_index(index, obj):
"""Adds the given object ``obj`` to the given ``index``"""
id_set = index.value_map.setdefault(indexed_value(index, obj), set())
if index.unique:
if len(id_set) > 0:
raise UniqueConstraintError()
id_set.add(obj.id)
|
[
"def",
"_add_to_index",
"(",
"index",
",",
"obj",
")",
":",
"id_set",
"=",
"index",
".",
"value_map",
".",
"setdefault",
"(",
"indexed_value",
"(",
"index",
",",
"obj",
")",
",",
"set",
"(",
")",
")",
"if",
"index",
".",
"unique",
":",
"if",
"len",
"(",
"id_set",
")",
">",
"0",
":",
"raise",
"UniqueConstraintError",
"(",
")",
"id_set",
".",
"add",
"(",
"obj",
".",
"id",
")"
] |
Adds the given object ``obj`` to the given ``index``
|
[
"Adds",
"the",
"given",
"object",
"obj",
"to",
"the",
"given",
"index"
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L250-L256
|
241,298
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
_remove_from_index
|
def _remove_from_index(index, obj):
"""Removes object ``obj`` from the ``index``."""
try:
index.value_map[indexed_value(index, obj)].remove(obj.id)
except KeyError:
pass
|
python
|
def _remove_from_index(index, obj):
"""Removes object ``obj`` from the ``index``."""
try:
index.value_map[indexed_value(index, obj)].remove(obj.id)
except KeyError:
pass
|
[
"def",
"_remove_from_index",
"(",
"index",
",",
"obj",
")",
":",
"try",
":",
"index",
".",
"value_map",
"[",
"indexed_value",
"(",
"index",
",",
"obj",
")",
"]",
".",
"remove",
"(",
"obj",
".",
"id",
")",
"except",
"KeyError",
":",
"pass"
] |
Removes object ``obj`` from the ``index``.
|
[
"Removes",
"object",
"obj",
"from",
"the",
"index",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L259-L264
|
241,299
|
fictorial/filesysdb
|
filesysdb/__init__.py
|
each_indexed_object
|
def each_indexed_object(collection, index_name, **where):
"""Yields each object indexed by the index with
name ``name`` with ``values`` matching on indexed
field values."""
index = _db[collection].indexes[index_name]
for id in index.value_map.get(indexed_value(index, where), []):
yield get_object(collection, id)
|
python
|
def each_indexed_object(collection, index_name, **where):
"""Yields each object indexed by the index with
name ``name`` with ``values`` matching on indexed
field values."""
index = _db[collection].indexes[index_name]
for id in index.value_map.get(indexed_value(index, where), []):
yield get_object(collection, id)
|
[
"def",
"each_indexed_object",
"(",
"collection",
",",
"index_name",
",",
"*",
"*",
"where",
")",
":",
"index",
"=",
"_db",
"[",
"collection",
"]",
".",
"indexes",
"[",
"index_name",
"]",
"for",
"id",
"in",
"index",
".",
"value_map",
".",
"get",
"(",
"indexed_value",
"(",
"index",
",",
"where",
")",
",",
"[",
"]",
")",
":",
"yield",
"get_object",
"(",
"collection",
",",
"id",
")"
] |
Yields each object indexed by the index with
name ``name`` with ``values`` matching on indexed
field values.
|
[
"Yields",
"each",
"object",
"indexed",
"by",
"the",
"index",
"with",
"name",
"name",
"with",
"values",
"matching",
"on",
"indexed",
"field",
"values",
"."
] |
bbf1e32218b71c7c15c33ada660433fffc6fa6ab
|
https://github.com/fictorial/filesysdb/blob/bbf1e32218b71c7c15c33ada660433fffc6fa6ab/filesysdb/__init__.py#L267-L273
|
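Taken together, the filesysdb records suggest a call order of add_collection, then prepare (which creates the collection directories), then indexes and objects. A hypothetical end-to-end session; the collection and field names are illustrative, and aadict is the mapping type the module itself returns:

import filesysdb as db
from aadict import aadict

db.add_collection('users')
db.prepare(base_path='data')  # per prepare()'s docstring: add collections first, then prepare
db.add_index('users', 'by_email', ['email'], unique=True, case_insensitive=True)

user = db.save_object('users', aadict(email='ada@example.com', name='Ada'))
for hit in db.each_indexed_object('users', 'by_email', email='ADA@EXAMPLE.COM'):
    print(hit.name)  # the case-insensitive index matches despite the upper-cased query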