| sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (1 class) |
|---|---|---|
def create_server(self, server):
"""
Create a server and its storages based on a (locally created) Server object.
Populates the given Server instance with the API response.
0.3.0: also supports giving the entire POST body as a dict that is directly
serialised into JSON. Refer to the REST API documentation for correct format.
Example:
server1 = Server( core_number = 1,
memory_amount = 1024,
hostname = "my.example.1",
zone = ZONE.London,
storage_devices = [
Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'),
Storage(size=10),
Storage()
],
title = "My Example Server")
manager.create_server(server1)
One storage should contain an OS. Otherwise storage fields are optional.
- size defaults to 10,
- title defaults to hostname + " OS disk" and hostname + " storage disk id"
(id is a running number starting from 1)
- tier defaults to maxiops
- valid operating systems are:
"CentOS 6.5", "CentOS 7.0"
"Debian 7.8"
"Ubuntu 12.04", "Ubuntu 14.04"
"Windows 2003","Windows 2008" ,"Windows 2012"
"""
if isinstance(server, Server):
body = server.prepare_post_body()
else:
server = Server._create_server_obj(server, cloud_manager=self)
body = server.prepare_post_body()
res = self.post_request('/server', body)
server_to_return = server
server_to_return._reset(
res['server'],
cloud_manager=self,
populated=True
)
return server_to_return
|
Create a server and its storages based on a (locally created) Server object.
Populates the given Server instance with the API response.
0.3.0: also supports giving the entire POST body as a dict that is directly
serialised into JSON. Refer to the REST API documentation for correct format.
Example:
server1 = Server( core_number = 1,
memory_amount = 1024,
hostname = "my.example.1",
zone = ZONE.London,
storage_devices = [
Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'),
Storage(size=10),
Storage()
],
title = "My Example Server")
manager.create_server(server1)
One storage should contain an OS. Otherwise storage fields are optional.
- size defaults to 10,
- title defaults to hostname + " OS disk" and hostname + " storage disk id"
(id is a running number starting from 1)
- tier defaults to maxiops
- valid operating systems are:
"CentOS 6.5", "CentOS 7.0"
"Debian 7.8"
"Ubuntu 12.04", "Ubuntu 14.04"
"Windows 2003","Windows 2008" ,"Windows 2012"
|
entailment
|
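A hedged sketch of the 0.3.0 dict form mentioned above; the exact body layout is defined by the UpCloud REST API docs, so the keys below are illustrative assumptions rather than a verified schema:
server2 = manager.create_server({
    'server': {                                   # entire POST body as a dict
        'core_number': 1,
        'memory_amount': 1024,
        'hostname': 'my.example.2',
        'zone': 'uk-lon1',                        # zone id: an assumption
        'storage_devices': {
            'storage_device': [
                {'size': 10, 'title': 'my.example.2 OS disk'},  # fields: assumptions
            ],
        },
    },
})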
def modify_server(self, UUID, **kwargs):
"""
modify_server allows updating the server's updateable_fields.
Note: Server's IP-addresses and Storages are managed by their own add/remove methods.
"""
body = dict()
body['server'] = {}
for arg in kwargs:
if arg not in Server.updateable_fields:
raise Exception('{0} is not an updateable field'.format(arg))
body['server'][arg] = kwargs[arg]
res = self.request('PUT', '/server/{0}'.format(UUID), body)
server = res['server']
# Populate subobjects
IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'),
cloud_manager=self)
storages = Storage._create_storage_objs(server.pop('storage_devices'),
cloud_manager=self)
return Server(
server,
ip_addresses=IPAddresses,
storage_devices=storages,
populated=True,
cloud_manager=self
)
|
modify_server allows updating the server's updateable_fields.
Note: Server's IP-addresses and Storages are managed by their own add/remove methods.
|
entailment
|
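A brief usage sketch for modify_server; treating 'hostname' as one of Server.updateable_fields is an assumption, and server_uuid is a placeholder:
server_uuid = '...'  # placeholder
server = manager.modify_server(server_uuid, hostname='renamed.example.com')
print(server.hostname)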
def get_server_data(self, UUID):
"""
Return '/server/uuid' data in Python dict.
Creates object representations of any IP-address and Storage.
"""
data = self.get_request('/server/{0}'.format(UUID))
server = data['server']
# Populate subobjects
IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'),
cloud_manager=self)
storages = Storage._create_storage_objs(server.pop('storage_devices'),
cloud_manager=self)
return server, IPAddresses, storages
|
Return '/server/uuid' data in Python dict.
Creates object representations of any IP-address and Storage.
|
entailment
|
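Usage sketch showing the three-part return value (server_uuid is again a placeholder):
server_dict, ip_addresses, storages = manager.get_server_data(server_uuid)
for ip in ip_addresses:
    print(ip.address)  # assumes IPAddress objects expose an address attribute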
def feed(f, limit=25):
"""
Pull a feed
:param f: feed name (eg: csirtgadgets/correlated)
:param limit: return value limit (default 25)
:return: Feed dict
"""
if '/' not in f:
raise ValueError('feed name must be formatted like: '
'csirtgadgets/scanners')
user, f = f.split('/')
return Feed().show(user, f, limit=limit)
|
Pull a feed
:param f: feed name (eg: csirtgadgets/correlated)
:param limit: return value limit (default 25)
:return: Feed dict
|
entailment
|
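A usage example following the docstring's naming format:
scanners = feed('csirtgadgets/correlated', limit=10)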
def indicator_create(f, i):
"""
Create an indicator in a feed
:param f: feed name (eg: wes/test)
:param i: indicator dict (eg: {'indicator': 'example.com', 'tags': ['ssh'],
'description': 'this is a test'})
:return: dict of indicator
"""
if '/' not in f:
raise ValueError('feed name must be formatted like: '
'csirtgadgets/scanners')
if not i:
raise ValueError('missing indicator dict')
u, f = f.split('/')
i['user'] = u
i['feed'] = f
ret = Indicator(i).submit()
return ret
|
Create an indicator in a feed
:param f: feed name (eg: wes/test)
:param i: indicator dict (eg: {'indicator': 'example.com', 'tags': ['ssh'],
'description': 'this is a test'})
:return: dict of indicator
|
entailment
|
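A usage example assembled from the docstring's own sample values:
ret = indicator_create('wes/test', {
    'indicator': 'example.com',
    'tags': ['ssh'],
    'description': 'this is a test',
})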
def convert_from_file(file):
"""
Reads the content of file in IDX format, converts it into numpy.ndarray and
returns it.
file is a file-like object (with read() method) or a file name.
"""
if isinstance(file, six_string_types):
with open(file, 'rb') as f:
return _internal_convert(f)
else:
return _internal_convert(file)
|
Reads the content of file in IDX format, converts it into numpy.ndarray and
returns it.
file is a file-like object (with read() method) or a file name.
|
entailment
|
def _internal_convert(inp):
"""
Converts file in IDX format provided by file-like input into numpy.ndarray
and returns it.
"""
# Read the "magic number" - 4 bytes.
try:
mn = struct.unpack('>BBBB', inp.read(4))
except struct.error as e:
raise FormatError(e)
# First two bytes are always zero, check it.
if mn[0] != 0 or mn[1] != 0:
msg = ("Incorrect first two bytes of the magic number: " +
"0x{0:02X} 0x{1:02X}".format(mn[0], mn[1]))
raise FormatError(msg)
# 3rd byte is the data type code.
dtype_code = mn[2]
if dtype_code not in _DATA_TYPES_IDX:
msg = "Incorrect data type code: 0x{0:02X}".format(dtype_code)
raise FormatError(msg)
# 4th byte is the number of dimensions.
dims = int(mn[3])
# See possible data types description.
dtype, dtype_s, el_size = _DATA_TYPES_IDX[dtype_code]
# 4-byte integer for length of each dimension.
try:
dims_sizes = struct.unpack('>' + 'I' * dims, inp.read(4 * dims))
except struct.error as e:
raise FormatError('Dims sizes: {0}'.format(e))
# Full length of data.
full_length = reduce(operator.mul, dims_sizes, 1)
# Create a numpy array from the data
try:
result_array = numpy.frombuffer(
inp.read(full_length * el_size),
dtype=numpy.dtype(dtype)
).reshape(dims_sizes)
except ValueError as e:
raise FormatError('Error creating numpy array: {0}'.format(e))
# Check for superfluous data.
if len(inp.read(1)) > 0:
raise FormatError('Superfluous data detected.')
return result_array
|
Converts file in IDX format provided by file-like input into numpy.ndarray
and returns it.
|
entailment
|
def convert_to_file(file, ndarr):
"""
Writes the contents of the numpy.ndarray ndarr to file in IDX format.
file is a file-like object (with write() method) or a file name.
"""
if isinstance(file, six_string_types):
with open(file, 'wb') as fp:
_internal_write(fp, ndarr)
else:
_internal_write(file, ndarr)
|
Writes the contents of the numpy.ndarray ndarr to file in IDX format.
file is a file-like object (with write() method) or a file name.
|
entailment
|
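A round-trip sketch tying the reader and writer together; it assumes the module's type tables map 'uint8' (the usual IDX unsigned-byte code, 0x08):
import numpy

arr = numpy.arange(12, dtype='uint8').reshape(3, 4)
convert_to_file('example.idx', arr)           # writes magic number, dims, payload
restored = convert_from_file('example.idx')   # parses it back into an ndarray
assert (restored == arr).all()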
def convert_to_string(ndarr):
"""
Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and
returns it.
"""
with contextlib.closing(BytesIO()) as bytesio:
_internal_write(bytesio, ndarr)
return bytesio.getvalue()
|
Writes the contents of the numpy.ndarray ndarr to bytes in IDX format and
returns it.
|
entailment
|
def _internal_write(out_stream, arr):
"""
Writes numpy.ndarray arr to a file-like object (with write() method) in
IDX format.
"""
if arr.size == 0:
raise FormatError('Cannot encode empty array.')
try:
type_byte, struct_lib_type = _DATA_TYPES_NUMPY[str(arr.dtype)]
except KeyError:
raise FormatError('numpy ndarray type not supported by IDX format.')
if arr.ndim > _MAX_IDX_DIMENSIONS:
raise FormatError(
'IDX format cannot encode array with dimensions > 255')
if max(arr.shape) > _MAX_AXIS_LENGTH:
raise FormatError('IDX format cannot encode array with more than ' +
str(_MAX_AXIS_LENGTH) + ' elements along any axis')
# Write magic number
out_stream.write(struct.pack('BBBB', 0, 0, type_byte, arr.ndim))
# Write array dimensions
out_stream.write(struct.pack('>' + 'I' * arr.ndim, *arr.shape))
# Horrible hack to deal with horrible bug when using struct.pack to encode
# unsigned ints in 2.7 and lower, see http://bugs.python.org/issue2263
if sys.version_info < (2, 7) and str(arr.dtype) == 'uint8':
arr_as_list = [int(i) for i in arr.reshape(-1)]
out_stream.write(struct.pack('>' + struct_lib_type * arr.size,
*arr_as_list))
else:
# Write array contents - note that the limit to number of arguments
# doesn't apply to unrolled arguments
out_stream.write(struct.pack('>' + struct_lib_type * arr.size,
*arr.reshape(-1)))
|
Writes numpy.ndarray arr to a file-like object (with write() method) in
IDX format.
|
entailment
|
def fetch_access_token_by_client_credentials(self):
'''
There are three ways to let you start using KKBOX's Open/Partner
API. The first way among them is to generate a client
credential to fetch an access token to let KKBOX identify
you. It allows you to access public data from KKBOX such as
public albums, playlists and so on.
However, you cannot use client credentials to access private
data of a user. You have to let users log in to KKBOX and
grant permissions for you to do so. You cannot use client
credentials to do media playback either, since it requires a
Premium Membership.
:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`
See `https://docs-en.kkbox.codes/docs/appendix-a`.
'''
client_credential_base = '%s:%s' % (self.client_id, self.client_secret)
try:
client_credentials = base64.b64encode(
bytes(client_credential_base, 'utf-8'))
except TypeError:  # Python 2: bytes() takes no encoding argument
client_credentials = base64.b64encode(client_credential_base)
client_credentials = client_credentials.decode('utf-8')
headers = {'Authorization': 'Basic ' + client_credentials,
'Content-type': 'application/x-www-form-urlencoded'}
post_parameters = {'grant_type': 'client_credentials',
'scope': 'user_profile user_territory'}
json_object = self.http._post_data(KKBOXOAuth.OAUTH_TOKEN_URL, post_parameters,
headers)
self.access_token = KKBOXAccessToken(**json_object)
return self.access_token
|
There are three ways to let you start using KKBOX's Open/Partner
API. The first way among them is to generate a client
credential to fetch an access token to let KKBOX identify
you. It allows you to access public data from KKBOX such as
public albums, playlists and so on.
However, you cannot use client credentials to access private
data of a user. You have to let users log in to KKBOX and
grant permissions for you to do so. You cannot use client
credentials to do media playback either, since it requires a
Premium Membership.
:return: an access token
:rtype: :class:`kkbox_sdk.KKBOXAccessToken`
See `https://docs-en.kkbox.codes/docs/appendix-a`.
|
entailment
|
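A hedged usage sketch; the KKBOXOAuth constructor arguments and the access_token attribute are assumptions about the surrounding SDK:
auth = KKBOXOAuth(client_id='YOUR_ID', client_secret='YOUR_SECRET')
token = auth.fetch_access_token_by_client_credentials()
print(token.access_token)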
def get_OS_UUID(cls, os):
"""
Validate Storage OS and its UUID.
If the OS is a custom OS UUID, don't validate against templates.
"""
if os in cls.templates:
return cls.templates[os]
uuid_regexp = '^[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}$'
if re.search(uuid_regexp, os):
return os
raise Exception((
"Invalid OS -- valid options are: 'CentOS 6.5', 'CentOS 7.0', "
"'Debian 7.8', 'Debian 8.0' ,'Ubuntu 12.04', 'Ubuntu 14.04', 'Ubuntu 16.04', "
"'Windows 2008', 'Windows 2012'"
))
|
Validate Storage OS and its UUID.
If the OS is a custom OS UUID, don't validate against templates.
|
entailment
|
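A usage sketch for get_OS_UUID, assuming this classmethod lives on the Storage class as the cls parameter suggests; the custom UUID is a placeholder that merely matches the regexp:
uuid = Storage.get_OS_UUID('Ubuntu 14.04')  # resolved via cls.templates
custom = Storage.get_OS_UUID('01234567-89ab-4def-8000-0123456789ab')  # returned as-is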
def execute(self, requests, resp_generator, *args, **kwargs):
'''
Calls the resp_generator for all the requests in parallel in an asynchronous way.
'''
result_futures = [self.executor_pool.submit(resp_generator, req, *args, **kwargs) for req in requests]
resp = [res_future.result() for res_future in result_futures]
return resp
|
Calls the resp_generator for all the requests in parallel in an asynchronous way.
|
entailment
|
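A self-contained sketch of how executor_pool might be wired up, assuming a concurrent.futures thread pool (the owning class is not shown in this row):
from concurrent.futures import ThreadPoolExecutor

class ParallelExecutor(object):
    def __init__(self, workers=4):
        # The pool that execute() submits resp_generator calls to
        self.executor_pool = ThreadPoolExecutor(max_workers=workers)

    def execute(self, requests, resp_generator, *args, **kwargs):
        futures = [self.executor_pool.submit(resp_generator, req, *args, **kwargs)
                   for req in requests]
        return [f.result() for f in futures]

# Apply a caller-supplied function to each request concurrently:
print(ParallelExecutor().execute(['a', 'b'], str.upper))  # ['A', 'B']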
def execute(self, requests, resp_generator, *args, **kwargs):
'''
Calls the resp_generator for all the requests in sequential order.
'''
return [resp_generator(request) for request in requests]
|
Calls the resp_generator for all the requests in sequential order.
|
entailment
|
def setup_logging(args):
"""
Sets up basic logging
:param args: ArgParse arguments
:return: nothing. sets logger up globally
"""
loglevel = logging.WARNING
if args.verbose:
loglevel = logging.INFO
if args.debug:
loglevel = logging.DEBUG
console = logging.StreamHandler()
logging.getLogger('').setLevel(loglevel)
console.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger('').addHandler(console)
|
Sets up basic logging
:param args: ArgParse arguments
:return: nothing. sets logger up globally
|
entailment
|
def get_ip(self, address):
"""
Get an IPAddress object with the IP address (string) from the API.
e.g manager.get_ip('80.69.175.210')
"""
res = self.get_request('/ip_address/' + address)
return IPAddress(cloud_manager=self, **res['ip_address'])
|
Get an IPAddress object with the IP address (string) from the API.
e.g manager.get_ip('80.69.175.210')
|
entailment
|
def get_ips(self):
"""
Get all IPAddress objects from the API.
"""
res = self.get_request('/ip_address')
IPs = IPAddress._create_ip_address_objs(res['ip_addresses'], cloud_manager=self)
return IPs
|
Get all IPAddress objects from the API.
|
entailment
|
def attach_ip(self, server, family='IPv4'):
"""
Attach a new (random) IPAddress to the given server (object or UUID).
"""
body = {
'ip_address': {
'server': str(server),
'family': family
}
}
res = self.request('POST', '/ip_address', body)
return IPAddress(cloud_manager=self, **res['ip_address'])
|
Attach a new (random) IPAddress to the given server (object or UUID).
|
entailment
|
def modify_ip(self, ip_addr, ptr_record):
"""
Modify an IP address' ptr-record (Reverse DNS).
Accepts an IPAddress instance (object) or its address (string).
"""
body = {
'ip_address': {
'ptr_record': ptr_record
}
}
res = self.request('PUT', '/ip_address/' + str(ip_addr), body)
return IPAddress(cloud_manager=self, **res['ip_address'])
|
Modify an IP address' ptr-record (Reverse DNS).
Accepts an IPAddress instance (object) or its address (string).
|
entailment
|
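A hedged end-to-end sketch combining the IP helpers above; server_uuid is a placeholder and the address attribute on IPAddress is an assumption:
ip = manager.attach_ip(server_uuid, family='IPv4')
ip = manager.modify_ip(ip, ptr_record='mail.example.com')
print(manager.get_ip(ip.address).ptr_record)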
def new(self, user, name, description=None):
"""
Creates a new Feed object
:param user: feed username
:param name: feed name
:param description: feed description
:return: dict
"""
uri = self.client.remote + '/users/{0}/feeds'.format(user)
data = {
'feed': {
'name': name,
'description': description
}
}
resp = self.client.post(uri, data)
return resp
|
Creates a new Feed object
:param user: feed username
:param name: feed name
:param description: feed description
:return: dict
|
entailment
|
def delete(self, user, name):
"""
Removes a feed
:param user: feed username
:param name: feed name
:return: true/false
"""
uri = self.client.remote + '/users/{}/feeds/{}'.format(user, name)
resp = self.client.session.delete(uri)
return resp.status_code
|
Removes a feed
:param user: feed username
:param name: feed name
:return: true/false
|
entailment
|
def index(self, user):
"""
Returns a list of Feeds from the API
:param user: feed username
:return: list
Example:
ret = feed.index('csirtgadgets')
"""
uri = self.client.remote + '/users/{0}/feeds'.format(user)
return self.client.get(uri)
|
Returns a list of Feeds from the API
:param user: feed username
:return: list
Example:
ret = feed.index('csirtgadgets')
|
entailment
|
def show(self, user, name, limit=None, lasttime=None):
"""
Returns a specific Feed from the API
:param user: feed username
:param name: feed name
:param limit: limit the results
:param lasttime: only show >= lasttime
:return: dict
Example:
ret = feed.show('csirtgadgets', 'port-scanners', limit=5)
"""
uri = self.client.remote + '/users/{0}/feeds/{1}'.format(user, name)
return self.client.get(uri, params={'limit': limit, 'lasttime': lasttime})
|
Returns a specific Feed from the API
:param user: feed username
:param name: feed name
:param limit: limit the results
:param lasttime: only show >= lasttime
:return: dict
Example:
ret = feed.show('csirtgadgets', 'port-scanners', limit=5)
|
entailment
|
def codenerix(request):
'''
Codenerix CONTEXT
'''
# Get values
DEBUG = getattr(settings, 'DEBUG', False)
VERSION = getattr(settings, 'VERSION', _('WARNING: No version set for this code, add a VERSION constant to your configuration'))
# Set environment
return {
'DEBUG': DEBUG,
'VERSION': VERSION,
'CODENERIX_VERSION': __version__,
}
|
Codenerix CONTEXT
|
entailment
|
def build_object(self, obj):
"""Override django-bakery to skip profiles that raise 404"""
try:
build_path = self.get_build_path(obj)
self.request = self.create_request(build_path)
self.request.user = AnonymousUser()
self.set_kwargs(obj)
self.build_file(build_path, self.get_content())
except Http404:
# cleanup directory
self.unbuild_object(obj)
|
Override django-bakery to skip profiles that raise 404
|
entailment
|
def make_schedule_row(schedule_day, slot, seen_items):
"""Create a row for the schedule table."""
row = ScheduleRow(schedule_day, slot)
skip = {}
expanding = {}
all_items = list(slot.scheduleitem_set
.select_related('talk', 'page', 'venue')
.all())
for item in all_items:
if item in seen_items:
# Inc rowspan
seen_items[item]['rowspan'] += 1
# Note that we need to skip this during colspan checks
skip[item.venue] = seen_items[item]
continue
scheditem = {'item': item, 'rowspan': 1, 'colspan': 1}
row.items[item.venue] = scheditem
seen_items[item] = scheditem
if item.expand:
expanding[item.venue] = []
empty = []
expanding_right = None
skipping = 0
skip_item = None
for venue in schedule_day.venues:
if venue in skip:
# We need to skip all the venues this item spans over
skipping = 1
skip_item = skip[venue]
continue
if venue in expanding:
item = row.items[venue]
for empty_venue in empty:
row.items.pop(empty_venue)
item['colspan'] += 1
empty = []
expanding_right = item
elif venue in row.items:
empty = []
expanding_right = None
elif expanding_right:
expanding_right['colspan'] += 1
elif skipping > 0 and skipping < skip_item['colspan']:
skipping += 1
else:
skipping = 0
empty.append(venue)
row.items[venue] = {'item': None, 'rowspan': 1, 'colspan': 1}
return row
|
Create a row for the schedule table.
|
entailment
|
def generate_schedule(today=None):
"""Helper function which creates an ordered list of schedule days"""
# We create a list of slots and schedule items
schedule_days = {}
seen_items = {}
for slot in Slot.objects.all().order_by('end_time', 'start_time', 'day'):
day = slot.get_day()
if today and day != today:
# Restrict ourselves to only today
continue
schedule_day = schedule_days.get(day)
if schedule_day is None:
schedule_day = schedule_days[day] = ScheduleDay(day)
row = make_schedule_row(schedule_day, slot, seen_items)
schedule_day.rows.append(row)
return sorted(schedule_days.values(), key=lambda x: x.day.date)
|
Helper function which creates an ordered list of schedule days
|
entailment
|
def get_context_data(self, **kwargs):
"""Allow adding a 'render_description' parameter"""
context = super(ScheduleXmlView, self).get_context_data(**kwargs)
if self.request.GET.get('render_description', None) == '1':
context['render_description'] = True
else:
context['render_description'] = False
return context
|
Allow adding a 'render_description' parameter
|
entailment
|
def get(self, request):
"""Create a iCal file from the schedule"""
# Heavily inspired by https://djangosnippets.org/snippets/2223/ and
# the icalendar documentation
calendar = Calendar()
site = get_current_site(request)
calendar.add('prodid', '-//%s Schedule//%s//' % (site.name, site.domain))
calendar.add('version', '2.0')
# Since we don't need to format anything here, we can just use a list
# of schedule items
for item in ScheduleItem.objects.all():
sched_event = Event()
sched_event.add('dtstamp', item.last_updated)
sched_event.add('summary', item.get_title())
sched_event.add('location', item.venue.name)
sched_event.add('dtstart', item.get_start_datetime())
sched_event.add('duration', datetime.timedelta(minutes=item.get_duration_minutes()))
sched_event.add('class', 'PUBLIC')
sched_event.add('uid', '%s@%s' % (item.pk, site.domain))
calendar.add_component(sched_event)
response = HttpResponse(calendar.to_ical(), content_type="text/calendar")
response['Content-Disposition'] = 'attachment; filename=schedule.ics'
return response
|
Create an iCal file from the schedule
|
entailment
|
def slug(request, url):
"""Look up a page by url (which is a tree of slugs)"""
page = None
if url:
for slug in url.split('/'):
if not slug:
continue
try:
page = Page.objects.get(slug=slug, parent=page)
except Page.DoesNotExist:
raise Http404
else:
try:
page = Page.objects.get(slug='index', parent=None)
except Page.DoesNotExist:
return TemplateView.as_view(
template_name='wafer/index.html')(request)
if 'edit' in request.GET:
if not request.user.has_perm('pages.change_page'):
raise PermissionDenied
return EditPage.as_view()(request, pk=page.id)
if 'compare' in request.GET:
if not request.user.has_perm('pages.change_page'):
raise PermissionDenied
return ComparePage.as_view()(request, pk=page.id)
return ShowPage.as_view()(request, pk=page.id)
|
Look up a page by url (which is a tree of slugs)
|
entailment
|
def build_object(self, obj):
"""Override django-bakery to skip pages marked exclude_from_static"""
if not obj.exclude_from_static:
super(ShowPage, self).build_object(obj)
|
Override django-bakery to skip pages marked exclude_from_static
|
entailment
|
def build_object(self, obj):
"""Override django-bakery to skip talks that raise 403"""
try:
super(TalkView, self).build_object(obj)
except PermissionDenied:
# We cleanup the directory created
self.unbuild_object(obj)
|
Override django-bakery to skip talks that raise 403
|
entailment
|
def get_object(self, *args, **kwargs):
'''Only talk owners can see talks, unless they've been accepted'''
object_ = super(TalkView, self).get_object(*args, **kwargs)
if not object_.can_view(self.request.user):
raise PermissionDenied
return object_
|
Only talk owners can see talks, unless they've been accepted
|
entailment
|
def render_to_response(self, *args, **kwargs):
'''Canonicalize the URL if the slug changed'''
if self.request.path != self.object.get_absolute_url():
return HttpResponseRedirect(self.object.get_absolute_url())
return super(TalkView, self).render_to_response(*args, **kwargs)
|
Canonicalize the URL if the slug changed
|
entailment
|
def delete(self, request, *args, **kwargs):
"""Override delete to only withdraw"""
talk = self.get_object()
talk.status = WITHDRAWN
talk.save()
revisions.set_user(self.request.user)
revisions.set_comment("Talk Withdrawn")
return HttpResponseRedirect(self.success_url)
|
Override delete to only withdraw
|
entailment
|
def order_results_by(*fields):
"""A decorator that applies an ordering to the QuerySet returned by a
function.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
result = f(*args, **kw)
return result.order_by(*fields)
return wrapper
return decorator
|
A decorator that applies an ordering to the QuerySet returned by a
function.
|
entailment
|
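A usage sketch for the decorator, borrowing the Slot model from the schedule code in this section for illustration:
@order_results_by('end_time', 'start_time')
def all_slots():
    return Slot.objects.all()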
def cache_result(cache_key, timeout):
"""A decorator for caching the result of a function."""
def decorator(f):
cache_name = settings.WAFER_CACHE
@functools.wraps(f)
def wrapper(*args, **kw):
cache = caches[cache_name]
result = cache.get(cache_key)
if result is None:
result = f(*args, **kw)
cache.set(cache_key, result, timeout)
return result
def invalidate():
cache = caches[cache_name]
cache.delete(cache_key)
wrapper.invalidate = invalidate
return wrapper
return decorator
|
A decorator for caching the result of a function.
|
entailment
|
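A usage sketch; the cache key and timeout are illustrative, and settings.WAFER_CACHE must name a configured Django cache:
@cache_result('schedule-days', 60 * 60)
def cached_schedule():
    return generate_schedule()

cached_schedule()             # first call computes and stores for an hour
cached_schedule.invalidate()  # drop the entry after schedule edits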
def build_queryset(self):
"""Override django-bakery's build logic to fake pagination."""
paths = [(os.path.join(self.build_prefix, 'index.html'), {})]
self.request = None
queryset = self.get_queryset()
paginator = self.get_paginator(queryset, self.get_paginate_by(queryset))
for page in paginator.page_range:
paths.append(
(os.path.join(self.build_prefix, 'page',
'%d' % page, 'index.html'), {'page': page}))
for build_path, kwargs in paths:
self.request = self.create_request(build_path)
# Add a user with no permissions
self.request.user = AnonymousUser()
# Fake context so views work as expected
self.kwargs = kwargs
self.prep_directory(build_path)
target_path = os.path.join(settings.BUILD_DIR, build_path)
self.build_file(target_path, self.get_content())
|
Override django-bakery's build logic to fake pagination.
|
entailment
|
def get_groups(self, gs=None, processed=[], initial=True):
'''
<--------------------------------------- 12 columns ------------------------------------>
<--- 6 columns ---> <--- 6 columns --->
------------------------------------------ ------------------------------------------
| Info | | Personal |
|==========================================| |==========================================|
| ----------------- ------------------ | | |
| | Passport | | Name | | | Phone Zipcode |
| |=================| | [.....] [.....] | | | [...........................] [.......] |
| | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |
| | [.....] [.....] | | | | | |
| | <- 6 -> <- 6 -> | ----------------- | | Address |
| ----------------- | | [.....................................] |
------------------------------------------ | <--- 12 columns ---> |
| [..] number |
| <--- 12 columns ---> |
| |
------------------------------------------
group = [
(_('Info'),(6,'#8a6d3b','#fcf8e3','center'),
(_('Identification'),6,
["cid",6],
["country",6],
),
(None,6,
["name",None,6],
["surname",None,6,False],
),
),
(_('Personal'),6,
["phone",None,8],
["zipcode",None,4],
["address",None,12],
["number",None,12, True],
),
]
Group: it is defined as a tuple with 3 or more elements:
Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)
If <Name> is None: no name will be given to the group and no panel decoration will be shown
If <Size in columns> is None: default of 6 will be used
<Attributes>:
it can be an integer that represent the size in columns
it can be a tuple with several attributes where each element represents:
(<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')
<Element>:
it can be a Group
it can be a Field
Examples:
('Info', 6, ["name",6], ["surname",6]) -> Info panel using 6 columns with 2 boxes 6 columns for each with name and surname inputs
('Info', (6,None,'#fcf8e3','center'), ["name",6], ["surname",6]) -> Info panel using 6 columns with a yellow background and centered title, 2 boxes, 6 columns for each with name and surname inputs
('Info', 12, ('Name', 6, ["name",12]), ('Surname',6, ["surname",12])) -> Info panel using 12 columns with 2 panels inside
of 6 columns each named "Name" and "Surname" and inside each of them an input "name" and "surname" where it belongs.
Field: must be a list with at least 1 element in it:
Grammar: [<Name of field>, <Size in columns>, <Label>]
<Name of field>:
This must be filled always
It is the input's name inside the form
Must exist as a form element or as a grouped form element
<Size in columns>:
Size of the input in columns
If it is not defined or if it is defined as None: default of 6 will be used
<Label>:
If it is defined as False: the label for this field will not be shown
If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)
If it is a string: this string will be shown as a label
Examples:
['age'] Input 'age' will be shown with 6 columns and its default label
['age',8] Input 'age' will be shown with 8 columns and its default label
['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL
['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL
['age',8,_("Age in days")] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language
['age',8,_("Age in days"), True] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language, and input inline with label
['age',6, None, None, None, None, None, ["ng-click=functionjs('param1')", "ng-change=functionjs2()"]] Input 'age' with extras functions
['age',None,None,None,None, 'filter'] Input 'age' with extras filter ONLY DETAILS
['age',6, {'color': 'red'}] Input 'age' will be shown with red title
'''
# Check if language is set
if not self.__language:
raise IOError("ERROR: No language suplied!")
# Initialize the list
if initial:
processed = []
# Where to look for fields
if 'list_fields' in dir(self):
list_fields = self.list_fields
check_system = "html_name"
else:
list_fields = self
check_system = "name"
# Default attributes for fields
attributes = [
('columns', 6),
('color', None),
('bgcolor', None),
('textalign', None),
('inline', False), # input in line with label
('label', True),
('extra', None),
('extra_div', None),
('foreign_info', {}),
]
labels = [x[0] for x in attributes]
# Get groups if none was given
if gs is None:
gs = self.__groups__()
# Prepare the answer
groups = []
# Prepare focus control
focus_first = None
focus_must = None
# html helper for groups and fields
html_helper = self.html_helper()
# Start processing
for g in gs:
token = {}
token['name'] = g[0]
if token['name'] in html_helper:
if 'pre' in html_helper[token['name']]:
token["html_helper_pre"] = html_helper[token['name']]['pre']
if 'post' in html_helper[token['name']]:
token["html_helper_post"] = html_helper[token['name']]['post']
styles = g[1]
if type(styles) is tuple:
if len(styles) >= 1:
token['columns'] = g[1][0]
if len(styles) >= 2:
token['color'] = g[1][1]
if len(styles) >= 3:
token['bgcolor'] = g[1][2]
if len(styles) >= 4:
token['textalign'] = g[1][3]
if len(styles) >= 5:
token['inline'] = g[1][4]
if len(styles) >= 6:
token['extra'] = g[1][5]
if len(styles) >= 7:
token['extra_div'] = g[1][6]
else:
token['columns'] = g[1]
fs = g[2:]
fields = []
for f in fs:
# Field
atr = {}
# Decide whether this is a Group or not
if type(f) == tuple:
# Recursive
fields += self.get_groups([list(f)], processed, False)
else:
try:
list_type = [str, unicode, ]
except NameError:
list_type = [str, ]
# Check if it is a list
if type(f) == list:
# This is a field with attributes, get the name
field = f[0]
if html_helper and token['name'] in html_helper and 'items' in html_helper[token['name']] and field in html_helper[token['name']]['items']:
if 'pre' in html_helper[token['name']]['items'][field]:
atr["html_helper_pre"] = html_helper[token['name']]['items'][field]['pre']
if 'post' in html_helper[token['name']]['items'][field]:
atr["html_helper_post"] = html_helper[token['name']]['items'][field]['post']
# Process each attribute (if any)
dictionary = False
for idx, element in enumerate(f[1:]):
if type(element) == dict:
dictionary = True
for key in element.keys():
if key in labels:
atr[key] = element[key]
else:
raise IOError("Unknown attribute '{0}' as field '{1}' in list of fields".format(key, field))
else:
if not dictionary:
if element is not None:
atr[attributes[idx][0]] = element
else:
raise IOError("We already processed a dicionary element in this list of fields, you can not add anoother type of elements to it, you must keep going with dictionaries")
elif type(f) in list_type:
field = f
else:
raise IOError("Uknown element type '{0}' inside group '{1}'".format(type(f), token['name']))
# Get the Django Field object
found = None
for infield in list_fields:
if infield.__dict__[check_system] == field:
found = infield
break
if found:
# Get attributes (required and original attributes)
wrequired = found.field.widget.is_required
wattrs = found.field.widget.attrs
# Fill base attributes
atr['name'] = found.html_name
atr['input'] = found
atr['focus'] = False
# Set focus
if focus_must is None:
if focus_first is None:
focus_first = atr
if wrequired:
focus_must = atr
# Autocomplete
if 'autofill' in dir(self.Meta):
autofill = self.Meta.autofill.get(found.html_name, None)
atr['autofill'] = autofill
if autofill:
# Check format of the request
autokind = autofill[0]
if type(autokind) == str:
# Using new format
if autokind == 'select':
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicSelect(wattrs)
elif autokind == 'multiselect':
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = MultiDynamicSelect(wattrs)
elif autokind == 'input':
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicInput(wattrs)
else:
raise IOError("Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'".format(autokind))
# Configure the field
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
found.field.widget.autofill_deepness = autofill[1]
found.field.widget.autofill_url = autofill[2]
found.field.widget.autofill = autofill[3:]
else:
# Get old information [COMPATIBILITY WITH OLD VERSION]
# If autofill is True for this field set the DynamicSelect widget
found.field.widget = DynamicSelect(wattrs)
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
found.field.widget.autofill_deepness = autofill[0]
found.field.widget.autofill_url = autofill[1]
found.field.widget.autofill = autofill[2:]
else:
# Set we don't have autofill for this field
atr['autofill'] = None
# Check if we have to replace the widget with a newer one
if isinstance(found.field.widget, Select) and not isinstance(found.field.widget, DynamicSelect):
if not isinstance(found.field.widget, MultiStaticSelect):
found.field.widget = StaticSelect(wattrs)
found.field.widget.choices = found.field.choices
found.field.widget.is_required = wrequired
found.field.widget.form_name = self.form_name
found.field.widget.field_name = infield.html_name
# Fill all attributes
for (attribute, default) in attributes:
if attribute not in atr.keys():
atr[attribute] = default
# Fill label
if atr['label'] is True:
atr['label'] = found.label
# Set language
flang = getattr(found.field, "set_language", None)
if flang:
flang(self.__language)
flang = getattr(found.field.widget, "set_language", None)
if flang:
flang(self.__language)
# Attach the element
fields.append(atr)
# Remember we have processed it
processed.append(found.__dict__[check_system])
else:
raise IOError("Unknown field '{0}' specified in group '{1}'".format(f, token['name']))
token['fields'] = fields
groups.append(token)
# Add the rest of attributes we didn't use yet
if initial:
fields = []
for infield in list_fields:
if infield.__dict__[check_system] not in processed:
# Get attributes (required and original attributes)
wattrs = infield.field.widget.attrs
wrequired = infield.field.widget.is_required
# Prepare attr
atr = {}
# Fill base attributes
atr['name'] = infield.html_name
atr['input'] = infield
atr['focus'] = False
# Set focus
if focus_must is None:
if focus_first is None:
focus_first = atr
if wrequired:
focus_must = atr
# Autocomplete
if 'autofill' in dir(self.Meta):
autofill = self.Meta.autofill.get(infield.html_name, None)
atr['autofill'] = autofill
if autofill:
# Check format of the request
autokind = autofill[0]
if type(autokind) == str:
# Get old information
# Using new format
if autokind == 'select':
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicSelect(wattrs)
elif autokind == 'multiselect':
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = MultiDynamicSelect(wattrs)
elif autokind == 'input':
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicInput(wattrs)
else:
raise IOError("Autofill filled using new format but autokind is '{}' and I only know 'input' or 'select'".format(autokind))
# Configure the field
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
infield.field.widget.autofill_deepness = autofill[1]
infield.field.widget.autofill_url = autofill[2]
infield.field.widget.autofill = autofill[3:]
else:
# Get old information [COMPATIBILITY WITH OLD VERSION]
# If autofill is True for this field set the DynamicSelect widget
infield.field.widget = DynamicSelect(wattrs)
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
infield.field.widget.autofill_deepness = autofill[0]
infield.field.widget.autofill_url = autofill[1]
infield.field.widget.autofill = autofill[2:]
else:
# Set we don't have autofill for this field
atr['autofill'] = None
# Check if we have to replace the widget with a newer one
if isinstance(infield.field.widget, Select) and not isinstance(infield.field.widget, DynamicSelect):
if isinstance(infield.field, NullBooleanField):
infield.field.widget = CheckboxInput(wattrs)
elif not isinstance(infield.field.widget, MultiStaticSelect):
infield.field.widget = StaticSelect(wattrs)
if hasattr(infield.field.widget, 'choices') and hasattr(infield.field, 'choices'):
infield.field.widget.choices = infield.field.choices
infield.field.widget.is_required = wrequired
infield.field.widget.form_name = self.form_name
infield.field.widget.field_name = infield.html_name
# Fill all attributes
for (attribute, default) in attributes:
if attribute not in atr.keys():
atr[attribute] = default
# Fill label
if atr['label'] is True:
atr['label'] = infield.label
# Set language
flang = getattr(infield.field, "set_language", None)
if flang:
flang(self.__language)
flang = getattr(infield.field.widget, "set_language", None)
if flang:
flang(self.__language)
# Attach the attribute
fields.append(atr)
# Save the new elements
if fields:
groups.append({'name': None, 'columns': 12, 'fields': fields})
# Set focus
if focus_must:
focus_must['focus'] = True
elif focus_first is not None:
focus_first['focus'] = True
# Return the resulting groups
return groups
|
<--------------------------------------- 12 columns ------------------------------------>
<--- 6 columns ---> <--- 6 columns --->
------------------------------------------ ------------------------------------------
| Info | | Personal |
|==========================================| |==========================================|
| ----------------- ------------------ | | |
| | Passport | | Name | | | Phone Zipcode |
| |=================| | [.....] [.....] | | | [...........................] [.......] |
| | CID Country | | <- 6 -> <- 6 -> | | | <--- 8 columns ---> <-4 col-> |
| | [.....] [.....] | | | | | |
| | <- 6 -> <- 6 -> | ----------------- | | Address |
| ----------------- | | [.....................................] |
------------------------------------------ | <--- 12 columns ---> |
| [..] number |
| <--- 12 columns ---> |
| |
------------------------------------------
group = [
(_('Info'),(6,'#8a6d3b','#fcf8e3','center'),
(_('Identification'),6,
["cid",6],
["country",6],
),
(None,6,
["name",None,6],
["surname",None,6,False],
),
),
(_('Personal'),6,
["phone",None,8],
["zipcode",None,4],
["address",None,12],
["number",None,12, True],
),
]
Group: it is defined as a tuple with 3 or more elements:
Grammar: (<Name>, <Attributes>, <Element1>, <Element2>, ..., <ElementN>)
If <Name> is None: no name will be given to the group and no panel decoration will be shown
If <Size in columns> is None: default of 6 will be used
<Attributes>:
it can be an integer that represent the size in columns
it can be a tuple with several attributes where each element represents:
(<Size in columns>,'#<Font color>','#<Background color>','<Alignment>')
<Element>:
it can be a Group
it can be a Field
Examples:
('Info', 6, ["name",6], ["surname",6]) -> Info panel using 6 columns with 2 boxes 6 columns for each with name and surname inputs
('Info', (6,None,'#fcf8e3','center'), ["name",6], ["surname",6]) -> Info panel using 6 columns with a yellow background and centered title, 2 boxes, 6 columns for each with name and surname inputs
('Info', 12, ('Name', 6, ["name",12]), ('Surname',6, ["surname",12])) -> Info panel using 12 columns with 2 panels inside
of 6 columns each named "Name" and "Surname" and inside each of them an input "name" and "surname" where it belongs.
Field: must be a list with at least 1 element in it:
Grammar: [<Name of field>, <Size in columns>, <Label>]
<Name of field>:
This must be filled always
It is the input's name inside the form
Must exist as a form element or as a grouped form element
<Size in columns>:
Size of the input in columns
If it is not defined or if it is defined as None: default of 6 will be used
<Label>:
If it is defined as False: the label for this field will not be shown
If it is not defined or if it is defined as None: default of True will be used (default input's label will be shown)
If it is a string: this string will be shown as a label
Examples:
['age'] Input 'age' will be shown with 6 columns and its default label
['age',8] Input 'age' will be shown with 8 columns and its default label
['age', None, False] Input 'age' will be shown with 6 columns and NO LABEL
['age',8,False] Input 'age' will be shown with 8 columns and NO LABEL
['age',8,_("Age in days")] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language
['age',8,_("Age in days"), True] Input 'age' will be shown with 8 columns and translated label text "Age in days" to user's language, and input inline with label
['age',6, None, None, None, None, None, ["ng-click=functionjs('param1')", "ng-change=functionjs2()"]] Input 'age' with extras functions
['age',None,None,None,None, 'filter'] Input 'age' with extras filter ONLY DETAILS
['age',6, {'color': 'red'}] Input 'age' will be shown with red title
|
entailment
|
def site_info(request):
'''Expose the site's info to templates'''
site = get_current_site(request)
context = {
'WAFER_CONFERENCE_NAME': site.name,
'WAFER_CONFERENCE_DOMAIN': site.domain,
}
return context
|
Expose the site's info to templates
|
entailment
|
def navigation_info(request):
'''Expose whether to display the navigation header and footer'''
if request.GET.get('wafer_hide_navigation') == "1":
nav_class = "wafer-invisible"
else:
nav_class = "wafer-visible"
context = {
'WAFER_NAVIGATION_VISIBILITY': nav_class,
}
return context
|
Expose whether to display the navigation header and footer
|
entailment
|
def registration_settings(request):
'''Expose selected settings to templates'''
context = {}
for setting in (
'WAFER_SSO',
'WAFER_HIDE_LOGIN',
'WAFER_REGISTRATION_OPEN',
'WAFER_REGISTRATION_MODE',
'WAFER_TALKS_OPEN',
'WAFER_VIDEO_LICENSE',
):
context[setting] = getattr(settings, setting, None)
return context
|
Expose selected settings to templates
|
entailment
|
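A hedged settings sketch showing how context processors such as the three above are typically enabled; the dotted paths are assumptions about the module layout:
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'wafer.context_processors.site_info',
            'wafer.context_processors.navigation_info',
            'wafer.context_processors.registration_settings',
        ],
    },
}]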
def profiles(self):
'''
return the rolls this people is related with
'''
limit = []
if self.is_admin():
limit.append(_("Administrator"))
limit.sort()
return limit
|
return the rolls this people is related with
|
entailment
|
def matern_function(Xi, Xj, *args):
r"""Matern covariance function of arbitrary dimension, for use with :py:class:`ArbitraryKernel`.
The Matern kernel has the following hyperparameters, always referenced in
the order listed:
= ===== ====================================
0 sigma prefactor
1 nu order of kernel
2 l1 length scale for the first dimension
3 l2 ...and so on for all dimensions
= ===== ====================================
The kernel is defined as:
.. math::
k_M = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)}
\left (\sqrt{2\nu \sum_i\left (\frac{\tau_i^2}{l_i^2}\right )}\right )^\nu
K_\nu\left(\sqrt{2\nu \sum_i\left(\frac{\tau_i^2}{l_i^2}\right)}\right)
Parameters
----------
Xi, Xj : :py:class:`Array`, :py:class:`mpf`, tuple or scalar float
Points to evaluate the covariance between. If they are :py:class:`Array`,
:py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
functions are used.
*args
Remaining arguments are the 2+num_dim hyperparameters as defined above.
"""
num_dim = len(args) - 2
nu = args[1]
if isinstance(Xi, scipy.ndarray):
if isinstance(Xi, scipy.matrix):
Xi = scipy.asarray(Xi, dtype=float)
Xj = scipy.asarray(Xj, dtype=float)
tau = scipy.asarray(Xi - Xj, dtype=float)
l_mat = scipy.tile(args[-num_dim:], (tau.shape[0], 1))
r2l2 = scipy.sum((tau / l_mat)**2, axis=1)
y = scipy.sqrt(2.0 * nu * r2l2)
k = 2.0**(1 - nu) / scipy.special.gamma(nu) * y**nu * scipy.special.kv(nu, y)
k[r2l2 == 0] = 1
else:
try:
tau = [xi - xj for xi, xj in zip(Xi, Xj)]
except TypeError:
tau = Xi - Xj
try:
r2l2 = sum([(t / l)**2 for t, l in zip(tau, args[2:])])
except TypeError:
r2l2 = (tau / args[2])**2
y = mpmath.sqrt(2.0 * nu * r2l2)
k = 2.0**(1 - nu) / mpmath.gamma(nu) * y**nu * mpmath.besselk(nu, y)
k *= args[0]**2.0
return k
|
r"""Matern covariance function of arbitrary dimension, for use with :py:class:`ArbitraryKernel`.
The Matern kernel has the following hyperparameters, always referenced in
the order listed:
= ===== ====================================
0 sigma prefactor
1 nu order of kernel
2 l1 length scale for the first dimension
3 l2 ...and so on for all dimensions
= ===== ====================================
The kernel is defined as:
.. math::
k_M = \sigma^2 \frac{2^{1-\nu}}{\Gamma(\nu)}
\left (\sqrt{2\nu \sum_i\left (\frac{\tau_i^2}{l_i^2}\right )}\right )^\nu
K_\nu\left(\sqrt{2\nu \sum_i\left(\frac{\tau_i^2}{l_i^2}\right)}\right)
Parameters
----------
Xi, Xj : :py:class:`Array`, :py:class:`mpf`, tuple or scalar float
Points to evaluate the covariance between. If they are :py:class:`Array`,
:py:mod:`scipy` functions are used, otherwise :py:mod:`mpmath`
functions are used.
*args
Remaining arguments are the 2+num_dim hyperparameters as defined above.
|
entailment
|
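A small numeric sketch of calling matern_function with one input dimension, so args is (sigma, nu, l1); it assumes the older scipy namespace that re-exports numpy functions, just as the implementation above does:
import scipy

Xi = scipy.array([[0.0], [0.5], [1.0]])
Xj = scipy.zeros((3, 1))
k = matern_function(Xi, Xj, 1.0, 1.5, 0.7)  # sigma=1.0, nu=1.5, l1=0.7
print(k)  # k[0] == sigma**2 exactly, since tau = 0 there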
def _compute_k(self, tau):
r"""Evaluate the kernel directly at the given values of `tau`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
Returns
-------
k : :py:class:`Array`, (`M`,)
:math:`k(\tau)` (less the :math:`\sigma^2` prefactor).
"""
y, r2l2 = self._compute_y(tau, return_r2l2=True)
k = 2.0**(1.0 - self.nu) / scipy.special.gamma(self.nu) * y**(self.nu / 2.0) * scipy.special.kv(self.nu, scipy.sqrt(y))
k[r2l2 == 0] = 1.0
return k
|
r"""Evaluate the kernel directly at the given values of `tau`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
Returns
-------
k : :py:class:`Array`, (`M`,)
:math:`k(\tau)` (less the :math:`\sigma^2` prefactor).
|
entailment
|
def _compute_y(self, tau, return_r2l2=False):
r"""Covert tau to :math:`y=2\nu\sum_i(\tau_i^2/l_i^2)`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
return_r2l2 : bool, optional
Set to True to return a tuple of (`y`, `r2l2`). Default is False
(only return `y`).
Returns
-------
y : :py:class:`Array`, (`M`,)
Inner argument of function.
r2l2 : :py:class:`Array`, (`M`,)
Anisotropically scaled distances. Only returned if `return_r2l2` is True.
"""
r2l2 = self._compute_r2l2(tau)
y = 2.0 * self.nu * r2l2
if return_r2l2:
return (y, r2l2)
else:
return y
|
r"""Covert tau to :math:`y=2\nu\sum_i(\tau_i^2/l_i^2)`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
return_r2l2 : bool, optional
Set to True to return a tuple of (`y`, `r2l2`). Default is False
(only return `y`).
Returns
-------
y : :py:class:`Array`, (`M`,)
Inner argument of function.
r2l2 : :py:class:`Array`, (`M`,)
Anisotropically scaled distances. Only returned if `return_r2l2` is True.
|
entailment
|
def _compute_y_wrapper(self, *args):
r"""Convert tau to :math:`y=\sqrt{2\nu\sum_i(\tau_i^2/l_i^2)}`.
Takes `tau` as an argument list for compatibility with :py:func:`mpmath.diff`.
Parameters
----------
tau[0] : scalar float
First element of `tau`.
tau[1] : And so on...
Returns
-------
y : scalar float
Inner part of Matern kernel at the given `tau`.
"""
return self._compute_y(scipy.atleast_2d(scipy.asarray(args, dtype=float)))
|
r"""Convert tau to :math:`y=\sqrt{2\nu\sum_i(\tau_i^2/l_i^2)}`.
Takes `tau` as an argument list for compatibility with :py:func:`mpmath.diff`.
Parameters
----------
tau[0] : scalar float
First element of `tau`.
tau[1] : And so on...
Returns
-------
y : scalar float
Inner part of Matern kernel at the given `tau`.
|
entailment
|
def _compute_dk_dy(self, y, n):
r"""Evaluate the derivative of the outer form of the Matern kernel.
Uses the general Leibniz rule to compute the n-th derivative of:
.. math::
f(y) = \frac{2^{1-\nu}}{\Gamma(\nu)} y^{\nu/2} K_\nu(y^{1/2})
Parameters
----------
y : :py:class:`Array`, (`M`,)
`M` inputs to evaluate at.
n : non-negative scalar int.
Order of derivative to compute.
Returns
-------
dk_dy : :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
"""
return 2.0**(1 - self.nu) / (scipy.special.gamma(self.nu)) * yn2Kn2Der(self.nu, y, n=n)
|
r"""Evaluate the derivative of the outer form of the Matern kernel.
Uses the general Leibniz rule to compute the n-th derivative of:
.. math::
f(y) = \frac{2^{1-\nu}}{\Gamma(\nu)} y^{\nu/2} K_\nu(y^{1/2})
Parameters
----------
y : :py:class:`Array`, (`M`,)
`M` inputs to evaluate at.
n : non-negative scalar int.
Order of derivative to compute.
Returns
-------
dk_dy : :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
|
entailment
|
def _compute_dy_dtau(self, tau, b, r2l2):
r"""Evaluate the derivative of the inner argument of the Matern kernel.
Take the derivative of
.. math::
y = 2 \nu \sum_i(\tau_i^2 / l_i^2)
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
b : :py:class:`Array`, (`P`,)
Block specifying derivatives to be evaluated.
r2l2 : :py:class:`Array`, (`M`,)
Precomputed anisotropically scaled distance.
Returns
-------
dy_dtau: :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
"""
if len(b) == 0:
return self._compute_y(tau)
elif len(b) == 1:
return 4.0 * self.nu * tau[:, b[0]] / (self.params[2 + b[0]])**2.0
elif (len(b) == 2) and (b[0] == b[1]):
return 4.0 * self.nu / (self.params[2 + b[0]])**2.0
else:
return scipy.zeros_like(r2l2)
|
r"""Evaluate the derivative of the inner argument of the Matern kernel.
Take the derivative of
.. math::
y = 2 \nu \sum_i(\tau_i^2 / l_i^2)
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
b : :py:class:`Array`, (`P`,)
Block specifying derivatives to be evaluated.
r2l2 : :py:class:`Array`, (`M`,)
Precomputed anisotropically scaled distance.
Returns
-------
dy_dtau: :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
|
entailment
|
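The branches of _compute_dy_dtau follow directly from differentiating the inner argument once and twice:
.. math::
    \frac{\partial y}{\partial \tau_i} = \frac{4\nu\tau_i}{l_i^2}, \qquad
    \frac{\partial^2 y}{\partial \tau_i^2} = \frac{4\nu}{l_i^2}, \qquad
    \frac{\partial^2 y}{\partial \tau_i \partial \tau_j} = 0 \;\; (i \neq j)
Every mixed or third-and-higher derivative vanishes, which is why the final branch returns zeros.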
def _compute_dk_dtau_on_partition(self, tau, p):
"""Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Overrides the version from :py:class:`gptools.kernel.core.ChainRuleKernel`
in order to get the correct behavior at the origin.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
"""
# Find the derivative order:
n = len(p)
y, r2l2 = self._compute_y(tau, return_r2l2=True)
# Keep track of how many times a given variable has a block of length 1:
n1 = 0
# Build the dy/dtau factor up iteratively:
dy_dtau_factor = scipy.ones_like(y)
for b in p:
# If the partial derivative is exactly zero there is no sense in
# continuing the computation:
if (len(b) > 2) or ((len(b) == 2) and (b[0] != b[1])):
return scipy.zeros_like(y)
dy_dtau_factor *= self._compute_dy_dtau(tau, b, r2l2)
# Count the number of blocks of length 1:
if len(b) == 1:
n1 += 1.0
# Compute d^(|pi|)f/dy^(|pi|) term:
dk_dy = self._compute_dk_dy(y, n)
if n1 > 0:
mask = (y == 0.0)
tau_pow = 2 * (self.nu - n) + n1
if tau_pow == 0:
# In this case the limit does not exist, so it is set to NaN:
dk_dy[mask] = scipy.nan
elif tau_pow > 0:
dk_dy[mask] = 0.0
return dk_dy * dy_dtau_factor
|
Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Overrides the version from :py:class:`gptools.kernel.core.ChainRuleKernel`
in order to get the correct behavior at the origin.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
|
entailment
|
def add_logging_parser(main_parser):
"Build an argparse argument parser to parse the command line."
main_parser.set_defaults(setup_logging=set_logging_level)
verbosity_group = main_parser.add_mutually_exclusive_group(required=False)
verbosity_group.add_argument(
'--verbose',
'-v',
action='count',
help='Output more verbose logging. Can be specified multiple times.')
verbosity_group.add_argument(
'--quiet',
'-q',
action='count',
help='Output less information to the console during operation. Can be \
specified multiple times.')
main_parser.add_argument(
'--silence-urllib3',
action='store_true',
help='Silence urllib3 warnings. See '
'https://urllib3.readthedocs.org/en/latest/security.html for details.')
return verbosity_group
|
Build an argparse argument parser to parse the command line.
|
entailment
|
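A wiring sketch for the two logging helpers in this section:
import argparse

parser = argparse.ArgumentParser(description='example CLI')
add_logging_parser(parser)
args = parser.parse_args(['-vv'])
args.setup_logging(args)  # bound to set_logging_level via set_defaults above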
def set_logging_level(args):
"Computes and sets the logging level from the parsed arguments."
root_logger = logging.getLogger()
level = logging.INFO
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
if "verbose" in args and args.verbose is not None:
logging.getLogger('requests.packages.urllib3').setLevel(0) # Unset
if args.verbose > 1:
level = 5 # "Trace" level
elif args.verbose > 0:
level = logging.DEBUG
else:
logging.critical("verbose is an unexpected value. (%s) exiting.",
args.verbose)
sys.exit(2)
elif "quiet" in args and args.quiet is not None:
if args.quiet > 1:
level = logging.ERROR
elif args.quiet > 0:
level = logging.WARNING
else:
logging.critical("quiet is an unexpected value. (%s) exiting.",
args.quiet)
if level is not None:
root_logger.setLevel(level)
if args.silence_urllib3:
# See: https://urllib3.readthedocs.org/en/latest/security.html
requests.packages.urllib3.disable_warnings()
|
Computes and sets the logging level from the parsed arguments.
|
entailment
|
def check_auth(user):
'''
Check if the user should or shouldn't be inside the system:
- If the user is staff or superuser: LOGIN GRANTED
- If the user has a Person and it is not "disabled": LOGIN GRANTED
- Otherwise: LOGIN DENIED
'''
# Initialize authentication
auth = None
person = None
# Check if there is an user
if user:
# It means that Django accepted the user and it is active
if user.is_staff or user.is_superuser:
# This is an administrator, let it in
auth = user
else:
# It is a normal user, check if there is a person behind
person = getattr(user, "person", None)
if not person:
# Check if there is related one
person_related = getattr(user, "people", None)
if person_related:
# Must be only one
if person_related.count() == 1:
person = person_related.get()
if person and ((person.disabled is None) or (person.disabled > timezone.now())):
# There is a person, no disabled found or the found one is fine to log in
auth = user
# Return back the final decision
return auth
|
Check if the user should or shouldn't be inside the system:
- If the user is staff or superuser: LOGIN GRANTED
- If the user has a Person and it is not "disabled": LOGIN GRANTED
- Otherwise: LOGIN DENIED
|
entailment
|
def debug(self, msg):
'''
Handle the debugging to a file
'''
# If debug is not disabled
if self.__debug is not False:
# If never was set, try to set it up
if self.__debug is None:
# Check what do we have inside settings
debug_filename = getattr(settings, "AD_DEBUG_FILE", None)
if debug_filename:
# Open the debug file pointer
self.__debug = open(debug_filename, 'a')
else:
# Disable debuging forever
self.__debug = False
if self.__debug:
# Debug the given message
self.__debug.write("{}\n".format(msg))
self.__debug.flush()
|
Handle the debugging to a file
|
entailment
|
def authenticate(self, *args, **kwargs):
'''
Authenticate the user against LDAP
'''
# Get config
username = kwargs.get("username", None)
password = kwargs.get("password", None)
        # Check user in Active Directory (authorization == None if we cannot connect to the Active Directory server)
authorization = self.ldap_link(username, password, mode='LOGIN')
if authorization:
# The user was validated in Active Directory
user = self.get_or_create_user(username, password)
            # get_or_create_user has already revalidated the new user
if user:
# If the user has been properly validated
user.is_active = True
user.save()
else:
# Locate user in our system
user = User.objects.filter(username=username).first()
if user and not user.is_staff:
# If access was denied
if authorization is False or getattr(settings, "AD_LOCK_UNAUTHORIZED", False):
# Deactivate the user
user.is_active = False
user.save()
# No access and no user here
user = None
# Return the final decision
return user
|
Authenticate the user against LDAP
|
entailment
|
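For Django to consult a backend like the one above, it must be listed in AUTHENTICATION_BACKENDS; the dotted path below is hypothetical and depends on where the class actually lives:

# settings.py
AUTHENTICATION_BACKENDS = [
    'myproject.backends.ActiveDirectoryBackend',  # hypothetical path
    'django.contrib.auth.backends.ModelBackend',
]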
def get_or_create_user(self, username, password):
'''
Get or create the given user
'''
# Get the groups for this user
info = self.get_ad_info(username, password)
self.debug("INFO found: {}".format(info))
# Find the user
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User(username=username)
# Update user
user.first_name = info.get('first_name', '')
user.last_name = info.get('last_name', '')
user.email = info.get('email', '')
    # Check if the user is in one of the Domain Admins groups
is_admin = False
for domain in info['groups']:
if 'Domain Admins' in info['groups'][domain]:
is_admin = True
break
# Set the user permissions
user.is_staff = is_admin
user.is_superuser = is_admin
# Refresh the password
user.set_password(password)
# Validate the selected user and gotten information
user = self.validate(user, info)
if user:
self.debug("User got validated!")
# Autosave the user until this point
user.save()
# Synchronize user
self.synchronize(user, info)
else:
self.debug("User didn't pass validation!")
# Finally return user
return user
|
Get or create the given user
|
entailment
|
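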
def synchronize(self, user, info):
'''
It tries to do a group synchronization if possible
        This method should be overridden by the developer
'''
self.debug("Synchronize!")
# Remove all groups from this user
user.groups.clear()
# For all domains found for this user
for domain in info['groups']:
# For all groups he is
for groupname in info['groups'][domain]:
# Lookup for that group
group = Group.objects.filter(name=groupname).first()
if group:
# If found, add the user to that group
user.groups.add(group)
|
It tries to do a group synchronization if possible
This method should be overridden by the developer
|
entailment
|
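Since synchronize() is explicitly meant to be overridden, here is a sketch of a subclass that mirrors only AD groups carrying a chosen prefix; the base class name and the 'DJ_' prefix are assumptions for illustration:

from django.contrib.auth.models import Group

class PrefixSyncBackend(ActiveDirectoryBackend):  # hypothetical base class name
    def synchronize(self, user, info):
        user.groups.clear()
        for domain in info['groups']:
            for groupname in info['groups'][domain]:
                # Only mirror AD groups that start with the chosen prefix.
                if groupname.startswith('DJ_'):
                    group, created = Group.objects.get_or_create(name=groupname)
                    user.groups.add(group)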
def set_hyperparams(self, new_params):
"""Sets the free hyperparameters to the new parameter values in new_params.
Parameters
----------
new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),)
New parameter values, ordered as dictated by the docstring for the
class.
"""
new_params = scipy.asarray(new_params, dtype=float)
if len(new_params) == len(self.free_params):
if self.enforce_bounds:
for idx, new_param, bound in zip(range(0, len(new_params)), new_params, self.free_param_bounds):
if bound[0] is not None and new_param < bound[0]:
new_params[idx] = bound[0]
elif bound[1] is not None and new_param > bound[1]:
new_params[idx] = bound[1]
self.params[~self.fixed_params] = new_params
else:
raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
|
Sets the free hyperparameters to the new parameter values in new_params.
Parameters
----------
new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),)
New parameter values, ordered as dictated by the docstring for the
class.
|
entailment
|
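The clipping loop in set_hyperparams above can be expressed with numpy.clip once None bounds are mapped to infinities; a small sketch under that assumption:

import numpy as np

params = np.array([0.5, 3.0, -1.0])
bounds = [(0.0, 1.0), (0.0, 2.0), (0.0, None)]
low = np.array([b[0] if b[0] is not None else -np.inf for b in bounds])
high = np.array([b[1] if b[1] is not None else np.inf for b in bounds])
print(np.clip(params, low, high))  # [0.5 2.  0. ]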
def _compute_r2l2(self, tau, return_l=False):
r"""Compute the anisotropic :math:`r^2/l^2` term for the given `tau`.
Here, :math:`\tau=X_i-X_j` is the difference vector. Computes
.. math::
\frac{r^2}{l^2} = \sum_i\frac{\tau_i^2}{l_{i}^{2}}
Assumes that the length parameters are the last `num_dim` elements of
:py:attr:`self.params`.
Where `l` and `tau` are both zero, that term is set to zero.
Parameters
----------
tau : :py:class:`Array`, (`M`, `D`)
`M` inputs with dimension `D`.
return_l : bool, optional
Set to True to return a tuple of (`tau`, `l_mat`), where `l_mat`
is the matrix of length scales to match the shape of `tau`. Default
is False (only return `tau`).
Returns
-------
r2l2 : :py:class:`Array`, (`M`,)
Anisotropically scaled distances squared.
l_mat : :py:class:`Array`, (`M`, `D`)
The (`D`,) array of length scales repeated for each of the `M`
inputs. Only returned if `return_l` is True.
"""
l_mat = scipy.tile(self.params[-self.num_dim:], (tau.shape[0], 1))
tau_over_l = tau / l_mat
tau_over_l[(tau == 0) & (l_mat == 0)] = 0.0
r2l2 = scipy.sum((tau_over_l)**2, axis=1)
if return_l:
return (r2l2, l_mat)
else:
return r2l2
|
r"""Compute the anisotropic :math:`r^2/l^2` term for the given `tau`.
Here, :math:`\tau=X_i-X_j` is the difference vector. Computes
.. math::
\frac{r^2}{l^2} = \sum_i\frac{\tau_i^2}{l_{i}^{2}}
Assumes that the length parameters are the last `num_dim` elements of
:py:attr:`self.params`.
Where `l` and `tau` are both zero, that term is set to zero.
Parameters
----------
tau : :py:class:`Array`, (`M`, `D`)
`M` inputs with dimension `D`.
return_l : bool, optional
Set to True to return a tuple of (`tau`, `l_mat`), where `l_mat`
is the matrix of length scales to match the shape of `tau`. Default
is False (only return `tau`).
Returns
-------
r2l2 : :py:class:`Array`, (`M`,)
Anisotropically scaled distances squared.
l_mat : :py:class:`Array`, (`M`, `D`)
The (`D`,) array of length scales repeated for each of the `M`
inputs. Only returned if `return_l` is True.
|
entailment
|
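A worked example of the r^2/l^2 computation above, with numpy standing in for the scipy calls (scipy.tile and scipy.sum are the numpy functions re-exported):

import numpy as np

tau = np.array([[1.0, 2.0], [0.0, 3.0]])  # M=2 difference vectors, D=2
l = np.array([0.5, 1.0])                  # length scales, one per dimension
l_mat = np.tile(l, (tau.shape[0], 1))
r2l2 = np.sum((tau / l_mat) ** 2, axis=1)
print(r2l2)  # [8. 9.] -> (1/0.5)^2 + (2/1)^2 = 8 and 0 + (3/1)^2 = 9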
def enforce_bounds(self, v):
"""Set `enforce_bounds` for both of the kernels to a new value.
"""
self._enforce_bounds = v
self.k1.enforce_bounds = v
self.k2.enforce_bounds = v
|
Set `enforce_bounds` for both of the kernels to a new value.
|
entailment
|
def free_param_bounds(self):
"""Returns the bounds of the free hyperparameters.
Returns
-------
free_param_bounds : :py:class:`Array`
Array of the bounds of the free parameters, in order.
"""
return scipy.concatenate((self.k1.free_param_bounds, self.k2.free_param_bounds))
|
Returns the bounds of the free hyperparameters.
Returns
-------
free_param_bounds : :py:class:`Array`
Array of the bounds of the free parameters, in order.
|
entailment
|
def free_param_names(self):
"""Returns the names of the free hyperparameters.
Returns
-------
free_param_names : :py:class:`Array`
Array of the names of the free parameters, in order.
"""
return scipy.concatenate((self.k1.free_param_names, self.k2.free_param_names))
|
Returns the names of the free hyperparameters.
Returns
-------
free_param_names : :py:class:`Array`
Array of the names of the free parameters, in order.
|
entailment
|
def set_hyperparams(self, new_params):
"""Set the (free) hyperparameters.
Parameters
----------
new_params : :py:class:`Array` or other Array-like
New values of the free parameters.
Raises
------
ValueError
If the length of `new_params` is not consistent with :py:attr:`self.params`.
"""
new_params = scipy.asarray(new_params, dtype=float)
if len(new_params) == len(self.free_params):
num_free_k1 = sum(~self.k1.fixed_params)
self.k1.set_hyperparams(new_params[:num_free_k1])
self.k2.set_hyperparams(new_params[num_free_k1:])
else:
raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
|
Set the (free) hyperparameters.
Parameters
----------
new_params : :py:class:`Array` or other Array-like
New values of the free parameters.
Raises
------
ValueError
If the length of `new_params` is not consistent with :py:attr:`self.params`.
|
entailment
|
def _compute_dk_dtau(self, tau, n):
r"""Evaluate :math:`dk/d\tau` at the specified locations with the specified derivatives.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
n : :py:class:`Array`, (`D`,)
Degree of derivative with respect to each dimension.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
"""
# Construct the derivative pattern:
# For each dimension, this will contain the index of the dimension
# repeated a number of times equal to the order of derivative with
# respect to that dimension.
# Example: For d^3 k(x, y, z) / dx^2 dy, n would be [2, 1, 0] and
# deriv_pattern should be [0, 0, 1]. For k(x, y, z) deriv_pattern is [].
deriv_pattern = []
for idx in xrange(0, len(n)):
deriv_pattern.extend(n[idx] * [idx])
deriv_pattern = scipy.asarray(deriv_pattern, dtype=int)
# Handle non-derivative case separately for efficiency:
if len(deriv_pattern) == 0:
return self._compute_k(tau)
else:
# Compute all partitions of the deriv_pattern:
deriv_partitions = generate_set_partitions(deriv_pattern)
# Compute the requested derivative using the multivariate Faa di Bruno's equation:
dk_dtau = scipy.zeros(tau.shape[0])
# Loop over the partitions:
for partition in deriv_partitions:
dk_dtau += self._compute_dk_dtau_on_partition(tau, partition)
return dk_dtau
|
r"""Evaluate :math:`dk/d\tau` at the specified locations with the specified derivatives.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
n : :py:class:`Array`, (`D`,)
Degree of derivative with respect to each dimension.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
|
entailment
|
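Faa di Bruno's formula above sums over all set partitions of the derivative slots. generate_set_partitions itself is not shown in this excerpt, so the following is an illustrative stand-in that generates partitions recursively:

def set_partitions(items):
    # Yield every partition of `items` into non-empty blocks.
    if len(items) == 1:
        yield [items]
        return
    first, rest = items[0], items[1:]
    for smaller in set_partitions(rest):
        # Insert `first` into each existing block in turn...
        for i, block in enumerate(smaller):
            yield smaller[:i] + [[first] + block] + smaller[i + 1:]
        # ...or give it a block of its own.
        yield [[first]] + smaller

# deriv_pattern for d^3/dx^2 dy is [0, 0, 1]; a 3-element set has B(3) = 5 partitions.
print(len(list(set_partitions([0, 0, 1]))))  # 5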
def _compute_dk_dtau_on_partition(self, tau, p):
"""Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
"""
y, r2l2 = self._compute_y(tau, return_r2l2=True)
# Compute the d^(|pi|)f/dy term:
dk_dtau = self._compute_dk_dy(y, len(p))
# Multiply in each of the block terms:
for b in p:
dk_dtau *= self._compute_dy_dtau(tau, b, r2l2)
return dk_dtau
|
Evaluate the term inside the sum of Faa di Bruno's formula for the given partition.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
p : list of :py:class:`Array`
Each element is a block of the partition representing the
derivative orders to use.
Returns
-------
dk_dtau : :py:class:`Array`, (`M`,)
The specified derivatives over the given partition at the specified
locations.
|
entailment
|
def _mask_cov_func(self, *args):
"""Masks the covariance function into a form usable by :py:func:`mpmath.diff`.
Parameters
----------
*args : `num_dim` * 2 floats
The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`.
"""
# Have to do it in two cases to get the 1d unwrapped properly:
if self.num_dim == 1:
return self.cov_func(args[0], args[1], *self.params)
else:
return self.cov_func(args[:self.num_dim], args[self.num_dim:], *self.params)
|
Masks the covariance function into a form usable by :py:func:`mpmath.diff`.
Parameters
----------
*args : `num_dim` * 2 floats
The individual elements of Xi and Xj to be passed to :py:attr:`cov_func`.
|
entailment
|
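For reference, mpmath.diff accepts the evaluation point and the per-argument derivative orders as tuples, which is exactly why the covariance function has to be unwrapped into scalar arguments above:

import mpmath

f = lambda x, y: x * y ** 2
# d^2 f / (dx dy) at (1, 2) is 2*y = 4:
print(mpmath.diff(f, (1, 2), (1, 1)))  # 4.0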
def constant(X, n, mu, hyper_deriv=None):
"""Function implementing a constant mean suitable for use with :py:class:`MeanFunction`.
"""
if (n == 0).all():
if hyper_deriv is not None:
return scipy.ones(X.shape[0])
else:
return mu * scipy.ones(X.shape[0])
else:
return scipy.zeros(X.shape[0])
|
Function implementing a constant mean suitable for use with :py:class:`MeanFunction`.
|
entailment
|
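A quick check of the constant mean above, assuming the function is in scope (numpy arrays work since scipy's array helpers are the numpy ones):

import numpy as np

X = np.zeros((3, 1))
print(constant(X, np.array([0]), 5.0))  # [5. 5. 5.] -- the mean itself
print(constant(X, np.array([1]), 5.0))  # [0. 0. 0.] -- any derivative of a constant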
def mtanh(alpha, z):
"""Modified hyperbolic tangent function mtanh(z; alpha).
Parameters
----------
alpha : float
The core slope of the mtanh.
z : float or array
The coordinate of the mtanh.
"""
z = scipy.asarray(z)
ez = scipy.exp(z)
enz = 1.0 / ez
return ((1 + alpha * z) * ez - enz) / (ez + enz)
|
Modified hyperbolic tangent function mtanh(z; alpha).
Parameters
----------
alpha : float
The core slope of the mtanh.
z : float or array
The coordinate of the mtanh.
|
entailment
|
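With alpha = 0 the modified tanh above reduces to the ordinary tanh, which makes a handy sanity check (numpy used in place of scipy, inlining the same formula):

import numpy as np

z = np.linspace(-2.0, 2.0, 5)
ez, enz = np.exp(z), np.exp(-z)
mtanh0 = ((1 + 0.0 * z) * ez - enz) / (ez + enz)
assert np.allclose(mtanh0, np.tanh(z))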
def mtanh_profile(X, n, x0, delta, alpha, h, b, hyper_deriv=None):
"""Profile used with the mtanh function to fit profiles, suitable for use with :py:class:`MeanFunction`.
Only supports univariate data!
Parameters
----------
X : array, (`M`, 1)
The points to evaluate at.
n : array, (1,)
The order of derivative to compute. Only up to first derivatives are
supported.
x0 : float
Pedestal center
delta : float
Pedestal halfwidth
alpha : float
Core slope
h : float
Pedestal height
b : float
Pedestal foot
hyper_deriv : int or None, optional
The index of the parameter to take a derivative with respect to.
"""
X = X[:, 0]
z = (x0 - X) / delta
if n[0] == 0:
if hyper_deriv is not None:
if hyper_deriv == 0:
return (h - b) / (2.0 * delta * (scipy.cosh(z))**2) * (
1.0 + alpha / 4.0 * (1.0 + 2.0 * z + scipy.exp(2.0 * z))
)
elif hyper_deriv == 1:
return -(h - b) * z / (2.0 * delta * (scipy.cosh(z))**2) * (
1.0 + alpha / 4.0 * (1.0 + 2.0 * z + scipy.exp(2.0 * z))
)
elif hyper_deriv == 2:
ez = scipy.exp(z)
enz = 1.0 / ez
return (h - b) / 2.0 * z * ez / (ez + enz)
elif hyper_deriv == 3:
ez = scipy.exp(z)
enz = 1.0 / ez
return 0.5 * (1.0 + ((1.0 + alpha * z) * ez - enz) / (ez + enz))
elif hyper_deriv == 4:
ez = scipy.exp(z)
enz = 1.0 / ez
return 0.5 * (1.0 - ((1.0 + alpha * z) * ez - enz) / (ez + enz))
else:
raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
else:
return (h + b) / 2.0 + (h - b) * mtanh(alpha, z) / 2.0
elif n[0] == 1:
if hyper_deriv is not None:
if hyper_deriv == 0:
return -(h - b) / (2.0 * delta**2.0 * (scipy.cosh(z))**2.0) * (
alpha - (alpha * z + 2) * scipy.tanh(z)
)
elif hyper_deriv == 1:
return (h - b) / (2.0 * delta**2.0 * (scipy.cosh(z))**2.0) * (
1.0 + alpha / 4.0 * (1.0 + 2.0 * z + scipy.exp(2.0 * z)) +
z * (alpha - (alpha * z + 2) * scipy.tanh(z))
)
elif hyper_deriv == 2:
return -(h - b) / (8.0 * delta * (scipy.cosh(z))**2.0) * (
1.0 + 2.0 * z + scipy.exp(2.0 * z)
)
elif hyper_deriv == 3:
return -1.0 / (2.0 * delta * (scipy.cosh(z))**2.0) * (
1.0 + alpha / 4.0 * (1.0 + 2.0 * z + scipy.exp(2.0 * z))
)
elif hyper_deriv == 4:
return 1.0 / (2.0 * delta * (scipy.cosh(z))**2.0) * (
1.0 + alpha / 4.0 * (1.0 + 2.0 * z + scipy.exp(2.0 * z))
)
else:
raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
else:
return -(h - b) / (2.0 * delta * (scipy.cosh(z))**2) * (
1 + alpha / 4.0 * (1 + 2 * z + scipy.exp(2 * z))
)
else:
raise NotImplementedError("Derivatives of order greater than 1 are not supported!")
|
Profile used with the mtanh function to fit profiles, suitable for use with :py:class:`MeanFunction`.
Only supports univariate data!
Parameters
----------
X : array, (`M`, 1)
The points to evaluate at.
n : array, (1,)
The order of derivative to compute. Only up to first derivatives are
supported.
x0 : float
Pedestal center
delta : float
Pedestal halfwidth
alpha : float
Core slope
h : float
Pedestal height
b : float
Pedestal foot
hyper_deriv : int or None, optional
The index of the parameter to take a derivative with respect to.
|
entailment
|
def linear(X, n, *args, **kwargs):
"""Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.
The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.
Parameters
----------
X : array, (`M`, `D`)
The points to evaluate the model at.
n : array of non-negative int, (`D`)
The derivative order to take, specified as an integer order for each
dimension in `X`.
*args : num_dim+1 floats
The slopes for each dimension, plus the constant term. Must be of the
form `m0, m1, ..., b`.
"""
hyper_deriv = kwargs.pop('hyper_deriv', None)
m = scipy.asarray(args[:-1])
b = args[-1]
if sum(n) > 1:
return scipy.zeros(X.shape[0])
elif sum(n) == 0:
if hyper_deriv is not None:
if hyper_deriv < len(m):
return X[:, hyper_deriv]
elif hyper_deriv == len(m):
return scipy.ones(X.shape[0])
else:
raise ValueError("Invalid value for hyper_deriv, " + str(hyper_deriv))
else:
return (m * X).sum(axis=1) + b
else:
# sum(n) == 1:
if hyper_deriv is not None:
if n[hyper_deriv] == 1:
return scipy.ones(X.shape[0])
else:
return scipy.zeros(X.shape[0])
return m[n == 1] * scipy.ones(X.shape[0])
|
Linear mean function of arbitrary dimension, suitable for use with :py:class:`MeanFunction`.
The form is :math:`m_0 * X[:, 0] + m_1 * X[:, 1] + \dots + b`.
Parameters
----------
X : array, (`M`, `D`)
The points to evaluate the model at.
n : array of non-negative int, (`D`)
The derivative order to take, specified as an integer order for each
dimension in `X`.
*args : num_dim+1 floats
The slopes for each dimension, plus the constant term. Must be of the
form `m0, m1, ..., b`.
|
entailment
|
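A usage sketch of the linear mean above for f(x, y) = 2x + 3y + 1, assuming the function is in scope:

import numpy as np

X = np.array([[1.0, 2.0], [0.0, 1.0]])
print(linear(X, np.array([0, 0]), 2.0, 3.0, 1.0))  # [9. 4.] -- the values
print(linear(X, np.array([1, 0]), 2.0, 3.0, 1.0))  # [2. 2.] -- d/dx is the slope m0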
def update_schedule_items(*args, **kw):
"""We save all the schedule items associated with this slot, so
    the last_updated time is updated to reflect any changes to the
timing of the slots"""
slot = kw.pop('instance', None)
if not slot:
return
for item in slot.scheduleitem_set.all():
item.save(update_fields=['last_updated'])
    # We also need to update the next slot, in case we changed its
    # times as well
next_slot = slot.slot_set.all()
if next_slot.count():
# From the way we structure the slot tree, we know that
# there's only 1 next slot that could have changed.
for item in next_slot[0].scheduleitem_set.all():
item.save(update_fields=['last_updated'])
|
We save all the schedule items associated with this slot, so
the last_updated time is updated to reflect any changes to the
timing of the slots
|
entailment
|
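The handler above is written as a Django signal receiver; a sketch of the wiring, where Slot is the model the handler's code implies (the registration point and import path are assumptions):

from django.db.models.signals import post_save

post_save.connect(update_schedule_items, sender=Slot)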
def make_diff(current, revision):
"""Create the difference between the current revision and a previous version"""
the_diff = []
dmp = diff_match_patch()
for field in (set(current.field_dict.keys()) | set(revision.field_dict.keys())):
# These exclusions really should be configurable
if field == 'id' or field.endswith('_rendered'):
continue
# KeyError's may happen if the database structure changes
# between the creation of revisions. This isn't ideal,
# but should not be a fatal error.
# Log this?
missing_field = False
try:
cur_val = current.field_dict[field] or ""
except KeyError:
cur_val = "No such field in latest version\n"
missing_field = True
try:
old_val = revision.field_dict[field] or ""
except KeyError:
old_val = "No such field in old version\n"
missing_field = True
if missing_field:
# Ensure that the complete texts are marked as changed
# so new entries containing any of the marker words
# don't show up as differences
diffs = [(dmp.DIFF_DELETE, old_val), (dmp.DIFF_INSERT, cur_val)]
patch = dmp.diff_prettyHtml(diffs)
elif isinstance(cur_val, Markup):
            # We roll our own diff here so we can compare the raw
            # markdown rather than the rendered result.
if cur_val.raw == old_val.raw:
continue
diffs = dmp.diff_main(old_val.raw, cur_val.raw)
patch = dmp.diff_prettyHtml(diffs)
elif cur_val == old_val:
continue
else:
# Compare the actual field values
diffs = dmp.diff_main(force_text(old_val), force_text(cur_val))
patch = dmp.diff_prettyHtml(diffs)
the_diff.append((field, patch))
the_diff.sort()
return the_diff
|
Create the difference between the current revision and a previous version
|
entailment
|
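The core diff calls used above come straight from the diff_match_patch library; a minimal standalone sketch:

from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = dmp.diff_main("old text", "new text")
html = dmp.diff_prettyHtml(diffs)  # HTML with <ins>/<del> markup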
def compare_view(self, request, object_id, version_id, extra_context=None):
"""Actually compare two versions."""
opts = self.model._meta
object_id = unquote(object_id)
# get_for_object's ordering means this is always the latest revision.
# The reversion we want to compare to
current = Version.objects.get_for_object_reference(self.model, object_id)[0]
revision = Version.objects.get_for_object_reference(self.model, object_id).filter(id=version_id)[0]
the_diff = make_diff(current, revision)
context = {
"title": _("Comparing current %(model)s with revision created %(date)s") % {
'model': current,
        'date': get_date(revision),
},
"opts": opts,
"compare_list_url": reverse("%s:%s_%s_comparelist" % (self.admin_site.name, opts.app_label, opts.model_name),
args=(quote(object_id),)),
"diff_list": the_diff,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, self.compare_template or self._get_template_list("compare.html"),
context)
|
Actually compare two versions.
|
entailment
|
def comparelist_view(self, request, object_id, extra_context=None):
"""Allow selecting versions to compare."""
opts = self.model._meta
object_id = unquote(object_id)
current = get_object_or_404(self.model, pk=object_id)
# As done by reversion's history_view
action_list = [
{
"revision": version.revision,
"url": reverse("%s:%s_%s_compare" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(version.object_id), version.id)),
} for version in self._reversion_order_version_queryset(Version.objects.get_for_object_reference(
self.model,
object_id).select_related("revision__user"))]
context = {"action_list": action_list,
"opts": opts,
"object_id": quote(object_id),
"original": current,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, self.compare_list_template or self._get_template_list("compare_list.html"),
context)
|
Allow selecting versions to compare.
|
entailment
|
def grv(struct, position):
'''
    This function converts one date component into the display form used by the date filter
'''
if position == 'year':
size = 4
else:
size = 2
if (struct[position][2]):
rightnow = str(struct[position][0]).zfill(size)
else:
if position == 'year':
rightnow = '____'
else:
rightnow = '__'
return rightnow
|
This function converts one date component into the display form used by the date filter
|
entailment
|
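A worked example of grv with the (value, max, enabled) tuples the date-filter code builds (when a component is enabled its min and max are equal, so element [0] is the value to show):

f = {
    'year': (2024, 2024, True),  # enabled -> zero-padded to 4 digits
    'month': (3, 12, False),     # disabled -> placeholder
}
print(grv(f, 'year'))   # '2024'
print(grv(f, 'month'))  # '__'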
def _setup(self, request):
'''
        Entry point for this class; basic setup is decided here
'''
# Get details from self
info = model_inspect(self)
self._appname = getattr(self, 'appname', info['appname'])
self._modelname = getattr(self, 'modelname', info['modelname'])
# Get user information
if not hasattr(self, 'user'):
self.user = self.request.user
# Get profile
self.profile = get_profile(self.user)
# Get language
self.language = get_language()
# Default value for no foreign key attribute
if 'no_render_as_foreign' not in self.extra_context:
self.extra_context['no_render_as_foreign'] = []
|
Entry point for this class; basic setup is decided here
|
entailment
|
def get_template_names(self):
'''
Build the list of templates related to this user
'''
# Get user template
template_model = getattr(self, 'template_model', "{0}/{1}_{2}".format(self._appname.lower(), self._modelname.lower(), self.get_template_names_key))
template_model_ext = getattr(self, 'template_model_ext', 'html')
templates = get_template(template_model, self.user, self.language, template_model_ext, raise_error=False)
if type(templates) == list:
templates.append("codenerix/{0}.html".format(self.get_template_names_key))
        # Return the list of templates
return templates
|
Build the list of templates related to this user
|
entailment
|
def get_context_data(self, **kwargs):
'''
Set a base context
'''
# Call the base implementation first to get a context
context = super(GenBase, self).get_context_data(**kwargs)
# Update general context with the stuff we already calculated
if hasattr(self, 'html_head'):
context['html_head'] = self.html_head(self.object)
# Add translation system
if hasattr(self, 'gentrans'):
context['gentranslate'] = self.gentrans.copy()
context['gentranslate'].update(self.gentranslate)
else:
context['gentranslate'] = self.gentranslate
# Return context
return context
|
Set a base context
|
entailment
|
def dispatch(self, *args, **kwargs):
'''
        Entry point for this class; basic setup is decided here
'''
        # Check whether this class is working only as a base render, with the List functionality disabled
onlybase = getattr(self, "onlybase", False)
# REST not available when onlybase is enabled
if not onlybase:
            # Check if this is a REST query; if so, push the answer to respond in JSON
if bool(self.request.META.get('HTTP_X_REST', False)):
self.json = True
if self.request.GET.get('json', self.request.POST.get('json', None)) is None:
newget = {}
newget['json'] = "{}"
for key in self.request.GET:
newget[key] = self.request.GET[key]
self.request.GET = QueryDict('').copy()
self.request.GET.update(newget)
# return HttpResponseBadRequest(_("The service requires you to set a GET argument named json={} which will contains all the filters you can apply to a list"))
# Check if this is a REST query to add an element
if self.request.method == 'POST':
target = get_class(resolve("{}/add".format(self.request.META.get("REQUEST_URI"))).func)
target.json = True
return target.as_view()(self.request)
# Set class internal variables
self._setup(self.request)
# Deprecations
deprecated = [('retrictions', '2016061000')]
for (depre, version) in deprecated:
if hasattr(self, depre):
raise IOError("The attribute '{}' has been deprecated in version '{}' and it is not available anymore".format(version))
# Build extracontext
if not hasattr(self, 'extra_context'):
self.extra_context = {}
if not hasattr(self, 'client_context'):
self.client_context = {}
# Attach user to the extra_context
self.extra_context['user'] = self.user
# Attach WS entry point and STATIC entry point
self.extra_context['ws_entry_point'] = self.BASE_URL + getattr(self, "ws_entry_point", "{0}/{1}".format(self._appname, "{0}s".format(self._modelname.lower())))
static_partial_row_path = getattr(self, 'static_partial_row', "{0}/{1}_rows.html".format(self._appname, "{0}s".format(self._modelname.lower())))
self.extra_context['static_partial_row'] = get_static(static_partial_row_path, self.user, self.language, self.DEFAULT_STATIC_PARTIAL_ROWS, 'html', relative=True)
static_partial_header_path = getattr(self, 'static_partial_header', "{0}/{1}_header.html".format(self._appname, "{0}s".format(self._modelname.lower())))
self.extra_context['static_partial_header'] = get_static(static_partial_header_path, self.user, self.language, None, 'html', relative=True)
static_partial_summary_path = getattr(self, 'static_partial_summary', "{0}/{1}_summary.html".format(self._appname, "{0}s".format(self._modelname.lower())))
self.extra_context['static_partial_summary'] = get_static(static_partial_summary_path, self.user, self.language, self.DEFAULT_STATIC_PARTIAL_SUMMARY, 'html', relative=True)
static_app_row_path = getattr(self, 'static_app_row', "{0}/{1}_app.js".format(self._appname, "{0}s".format(self._modelname.lower())))
self.extra_context['static_app_row'] = get_static(static_app_row_path, self.user, self.language, os.path.join(settings.STATIC_URL, 'codenerix/js/app.js'), 'js', relative=True)
static_controllers_row_path = getattr(self, 'static_controllers_row', "{0}/{1}_controllers.js".format(self._appname, "{0}s".format(self._modelname.lower())))
self.extra_context['static_controllers_row'] = get_static(static_controllers_row_path, self.user, self.language, None, 'js', relative=True)
static_filters_row_path = getattr(self, 'static_filters_row', "{0}/{1}_filters.js".format(self._appname, "{0}s".format(self._modelname.lower())))
self.extra_context['static_filters_row'] = get_static(static_filters_row_path, self.user, self.language, os.path.join(settings.STATIC_URL, 'codenerix/js/rows.js'), 'js', relative=True)
self.extra_context['field_delete'] = getattr(self, 'field_delete', False)
self.extra_context['field_check'] = getattr(self, 'field_check', None)
# Default value for extends_base
        if hasattr(self, 'extends_base'):
            self.extra_context['extends_base'] = self.extends_base
# Get if this is a template only answer
self.__authtoken = (bool(getattr(self.request, "authtoken", False)))
self.json_worker = (hasattr(self, 'json_builder')) or self.__authtoken or (self.json is True)
if self.json_worker:
# Check if the request has some json query, if not, just render the template
if self.request.GET.get('json', self.request.POST.get('json', None)) is None:
# Calculate tabs
if getattr(self, 'show_details', False):
self.extra_context['tabs_js'] = json.dumps(self.get_tabs_js())
# Silence the normal execution from this class
self.get_queryset = lambda: None
self.get_context_data = lambda **kwargs: self.extra_context
self.render_to_response = lambda context, **response_kwargs: super(GenList, self).render_to_response(context, **response_kwargs)
# Call the base implementation and finish execution here
return super(GenList, self).dispatch(*args, **kwargs)
        # The system is requesting a list and we are not allowed
if onlybase:
json_answer = {"error": True, "errortxt": _("Not allowed, this kind of requests has been prohibited for this view!")}
return HttpResponse(json.dumps(json_answer), content_type='application/json')
# Initialize a default context
self.__kwargs = kwargs
self.__context = {}
# Force export list
self.export = getattr(self, 'export', self.request.GET.get('export', self.request.POST.get('export', None)))
# Call the base implementation
return super(GenList, self).dispatch(*args, **kwargs)
|
Entry point for this class; basic setup is decided here
|
entailment
|
def get_queryset(self, raw_query=False):
# Call the base implementation
if not self.haystack:
queryset = super(GenList, self).get_queryset()
else:
queryset = SearchQuerySet().models(self.model)
# Optional tweak methods
Mfields = None
MlimitQ = None
MsearchF = None
MsearchQ = None
if hasattr(self, '__fields__'):
Mfields = self.__fields__
if hasattr(self, '__limitQ__'):
MlimitQ = self.__limitQ__
if hasattr(self, '__searchF__'):
MsearchF = self.__searchF__
if hasattr(self, '__searchQ__'):
MsearchQ = self.__searchQ__
self._viewname = self.__module__
# Link to our context and kwargs
context = self.__context
# Update kwargs if json key is present
jsonquerytxt = self.request.GET.get('json', self.request.POST.get('json', None))
if jsonquerytxt is not None:
# Decode json
try:
jsonquery = json.loads(jsonquerytxt)
            except ValueError:
                raise IOError("json argument in your GET/POST parameters is not a valid JSON string")
# Set json context
jsondata = self.set_context_json(jsonquery)
# Get listid
listid = jsondata.pop('listid')
# Get elementid
elementid = jsondata.pop('elementid')
else:
listid = None
elementid = None
jsondata = {}
jsonquery = {}
# Build info for GenModel methods
MODELINF = MODELINFO(self.model, self._appname, self._modelname, self._viewname, self.request, self.user, self.profile, jsonquery, Mfields, MlimitQ, MsearchF, MsearchQ, listid, elementid, self.__kwargs)
# Process the filter
context['filters'] = []
context['filters_obj'] = {}
# Get field list
fields = getattr(self, 'fields', MODELINF.fields())
# Save GET values
context['get'] = []
context['getval'] = {}
for name in jsondata:
struct = {}
struct['name'] = name
if name == 'rowsperpage':
struct['value'] = self.default_rows_per_page
elif name == 'page':
struct['value'] = 1
elif name == 'pages_to_bring':
struct['value'] = 1
else:
struct['value'] = jsondata[name]
context['get'].append(struct)
context['getval'][name] = struct['value']
# Filter on limits
limits = MODELINF.limitQ()
qobjects = None
distinct = False
for name in limits:
if name == 'i_distinct' or name == 'e_distinct':
distinct = True
else:
if qobjects:
qobjects &= limits[name]
else:
qobjects = limits[name]
if qobjects:
queryset = queryset.filter(qobjects)
if hasattr(self, 'annotations'):
if not self.haystack:
# Prepare annotations
if callable(self.annotations):
anot = self.annotations(MODELINF)
else:
anot = self.annotations
# Set annotations
queryset = queryset.annotate(**anot)
else:
raise IOError("Haystack doesn't support annotate")
if distinct:
queryset = queryset.distinct()
# Filters on fields requested by the user request
try:
filters_get = jsondata.get('filters', '{}')
if type(filters_get) == dict:
filters_by_struct = filters_get
else:
filters_by_struct = json.loads(str(filters_get))
except Exception:
filters_by_struct = []
listfilters = {}
# Autofilter system
if self.autofiltering:
listfilters.update(self.autoSearchF(MODELINF))
# List of filters from the MODELINF
listfilters.update(MODELINF.searchF())
# Process the search
filters_struct = {}
for key in filters_by_struct:
# Get the value of the original filter
value = filters_by_struct[key]
# If there is something to filter, filter is not being changed and filter is known by the class
try:
value = int(value)
except ValueError:
pass
except TypeError:
pass
# ORIG if (key in listfilters) and ((value>0) or (type(value) == list)):
# V1 if (value and type(value) == int and key in listfilters) and ((value > 0) or (type(value) == list)):
# V2 if (value and type(value) == int and key in listfilters) or ((value > 0) or (type(value) == list)):
if value and key in listfilters:
# Add the filter to the queryset
rule = listfilters[key]
# Get type
typekind = rule[2]
if type(typekind) == list:
                # Compatibility: set typekind and fv in the old fashion
if type(value) == int:
fv = typekind[value - 1][0]
queryset = queryset.filter(rule[1](fv))
typekind = 'select'
elif typekind == 'select':
# Get selected value from rule
if type(value) == int:
fv = rule[3][value - 1][0]
queryset = queryset.filter(rule[1](fv))
elif typekind in ['multiselect', 'multidynamicselect']:
# Get selected values from rule
if type(value) in (list, tuple) and len(value):
qobjects = Q(rule[1](value[0]))
for fvt in value[1:]:
qobjects |= Q(rule[1](fvt))
queryset = queryset.filter(qobjects)
elif typekind in ['daterange', 'input']:
# No arguments
fv = value
queryset = queryset.filter(rule[1](fv))
elif typekind in ['checkbox', ]:
fv = value
queryset = queryset.filter(rule[1](fv))
else:
raise IOError("Wrong typekind '{0}' for filter '{1}'".format(typekind, key))
# Save it in the struct as a valid filter
filters_struct[key] = value
# Rewrite filters_json updated
filters_json = json.dumps(filters_struct)
# Build the clean get for filters
get = context['get']
filters_get = []
for element in get:
if element['name'] not in ['filters']:
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
filters_get.append(struct)
# Add filter_json
struct = {}
struct['name'] = 'filters'
struct['value'] = filters_json
filters_get.append(struct)
context['filters_get'] = filters_get
# Get the list of filters allowed by this class
filters = []
for key in listfilters:
typekind = listfilters[key][2]
if type(typekind) == list:
            # Compatibility: set typekind and fv in the old fashion
choice = [_('All')]
for value in typekind:
choice.append(value[1])
            # Decide the chosen field
if key in filters_struct.keys():
value = int(filters_struct[key])
else:
value = 0
typekind = 'select'
argument = choice
elif typekind == 'select':
typevalue = listfilters[key][3]
choice = [_('All')]
for value in typevalue:
choice.append(value[1])
            # Decide the chosen field
if key in filters_struct.keys():
value = int(filters_struct[key])
else:
value = 0
# Set choice as the command's argument
argument = choice
elif typekind in ['multiselect', 'multidynamicselect']:
if typekind == 'multiselect':
typevalue = listfilters[key][3]
choice = []
for value in typevalue:
choice.append({'id': value[0], 'label': value[1]})
else:
choice = list(listfilters[key][3:])
choice[1] = reverse_lazy(choice[1], kwargs={'search': 'a'})[:-1]
            # Decide the chosen field
if key in filters_struct.keys():
value = filters_struct[key]
else:
value = []
# Set choice as the command's argument
argument = choice
elif typekind in ['daterange', 'input']:
            # Commands without arguments
argument = None
# Get the selected value
if key in filters_struct.keys():
value = filters_struct[key]
else:
value = None
elif typekind in ['checkbox']:
            # Commands without arguments
argument = None
# Get the selected value
if key in filters_struct.keys():
value = filters_struct[key]
else:
value = None
else:
raise IOError("Wrong typekind '{0}' for filter '{1}'".format(typekind, key))
# Build filtertuple
filtertuple = (key, listfilters[key][0], typekind, argument, value)
# Save this filter in the corresponding list
filters.append(filtertuple)
# Save all filters
context['filters'] = filters
# Search filter button
search_filter_button = jsondata.get('search_filter_button', None)
if search_filter_button is not None:
self.search_filter_button = search_filter_button
# Search text in all fields
search = jsondata.get('search', '').lower()
# Remove extra spaces
newlen = len(search)
oldlen = 0
while newlen != oldlen:
oldlen = newlen
            search = search.replace("  ", " ")
newlen = len(search)
if len(search) > 0 and search[0] == ' ':
search = search[1:]
if len(search) > 0 and search[-1] == ' ':
search = search[:-1]
# Save in context
context['search'] = search
datetimeQ = None
if len(search) > 0:
# Get ID
tid = None
if 'id:' in search:
tid = search.split(":")[1].split(" ")[0]
# Decide if it is what we expect
try:
tid = int(tid)
except Exception:
tid = None
# Remove the token
if tid:
search = search.replace("id:%s" % (tid), '')
                search = search.replace("  ", " ")
# Get PK
tpk = None
if 'pk:' in search:
tpk = search.split(":")[1].split(" ")[0]
# Decide if it is what we expect
try:
tpk = int(tpk)
except Exception:
tpk = None
# Remove the token
if tpk:
search = search.replace("pk:%s" % (tpk), '')
                search = search.replace("  ", " ")
# Spaces on front and behind
if len(search) > 0 and search[0] == ' ':
search = search[1:]
if len(search) > 0 and search[-1] == ' ':
search = search[:-1]
searchs = {}
# Autofilter system
if self.autofiltering:
searchs.update(self.autoSearchQ(MODELINF, search))
# Fields to search in from the MODELINF
tmp_search = MODELINF.searchQ(search)
if type(tmp_search) == dict:
searchs.update(tmp_search)
else:
searchs['autoSearchQ'] &= tmp_search
qobjects = {}
qobjectsCustom = {}
for name in searchs:
# Extract the token
qtoken = searchs[name]
if qtoken == 'datetime':
# If it is a datetime
datetimeQ = name
continue
            elif (type(qtoken) == str) or (type(qtoken) == tuple):
# Prepare query
if type(qtoken) == tuple:
(query, func) = qtoken
else:
def lambdax(x):
return x
func = lambdax
query = qtoken
                # If there is a search string
if search:
for word in search.split(" "):
# If there is a word to process
if len(word) > 0:
# Build the key for the arguments and set the word as a value for the Q search
if word[0] == '-':
# If negated request
# key="-{}".format(hashlib.md5(word[1:].encode()).hexdigest())
qdict = {'{}'.format(query): func(word[1:])}
qtokens_element = ~Q(**qdict)
else:
# If positive request
# key="-{}".format(hashlib.md5(word[1:].encode()).hexdigest())
qdict = {'{}'.format(query): func(word)}
qtokens_element = Q(**qdict)
                            # Save the token
if word in qobjects:
qobjects[word].append(qtokens_element)
else:
qobjects[word] = [qtokens_element]
else:
if qobjectsCustom:
qobjectsCustom |= searchs[name]
else:
qobjectsCustom = searchs[name]
# Build positive/negative
qdata = None
if search and qobjects:
for word in search.split(" "):
if word.split(":")[0] not in ['id', 'pk']:
if word[0] == '-':
negative = True
else:
negative = False
qword = None
for token in qobjects[word]:
if qword:
if negative:
qword &= token
else:
qword |= token
else:
qword = token
if qword:
if qdata:
qdata &= qword
else:
qdata = qword
# Process ID/PK specific searches
if tid:
queryset = queryset.filter(id=tid)
if tpk:
queryset = queryset.filter(pk=tpk)
# Add custom Q-objects
if qobjectsCustom:
queryset = queryset.filter(qobjectsCustom)
# Add word by word search Q-objects
if qdata:
queryset = queryset.filter(qdata)
else:
# Look for datetimeQ field
searchs = MODELINF.searchQ(search)
for name in searchs:
if (searchs[name] == 'datetime'):
datetimeQ = name
continue
# Datetime Q
context['datetimeQ'] = datetimeQ
if datetimeQ:
            # Initialization
f = {}
f['year'] = (1900, 2100, False)
f['month'] = (1, 12, False)
f['day'] = (1, 31, False)
f['hour'] = (0, 23, False)
f['minute'] = (0, 59, False)
f['second'] = (0, 59, False)
date_elements = [None, 'year', 'month', 'day', 'hour', 'minute', 'second']
# Get configuration of dates and set limits to the queryset
for element in date_elements[1:]:
value = jsondata.get(element, None)
if value:
f[element] = (int(value), int(value), True)
if f['year'][2] and f['month'][2] and not f['day'][2]:
(g, lastday) = calendar.monthrange(f['year'][1], f['month'][1])
f['day'] = (f['day'][0], lastday, f['day'][2])
# Limits
date_min = datetime.datetime(f['year'][0], f['month'][0], f['day'][0], f['hour'][0], f['minute'][0], f['second'][0])
date_max = datetime.datetime(f['year'][1], f['month'][1], f['day'][1], f['hour'][1], f['minute'][1], f['second'][1])
qarg1 = {"{}__gte".format(datetimeQ): date_min}
qarg2 = {"{}__lte".format(datetimeQ): date_max}
qarg3 = {datetimeQ: None}
queryset = queryset.filter((Q(**qarg1) & Q(**qarg2)) | Q(**qarg3))
# Find actual deepness
deepness_index = 0
for element in date_elements[1:]:
if f[element][2]:
deepness_index += 1
else:
break
# Get results from dates to set the new order
exclusion = {}
exclusion[datetimeQ] = None
date_results = queryset.exclude(**exclusion).values_list(datetimeQ, flat=True)
            # Remove empty results (useful when the date is allowed to be empty)
if f['day'][0] != f['day'][1]:
if f['month'][0] == f['month'][1]:
date_results = date_results.datetimes(datetimeQ, 'day')
elif f['year'][0] == f['year'][1]:
date_results = date_results.datetimes(datetimeQ, 'month')
else:
date_results = date_results.datetimes(datetimeQ, 'year')
get = context['get']
context['datefilter'] = {}
# Save the deepness
if deepness_index + 1 == len(date_elements):
context['datefilter']['deepness'] = None
else:
context['datefilter']['deepness'] = date_elements[deepness_index + 1]
context['datefilter']['deepnessback'] = []
context['datefilter']['deepnessinit'] = []
for element in get:
if (not element['name'] in date_elements):
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
context['datefilter']['deepnessinit'].append(struct)
context['datefilter']['deepnessback'].append(struct)
elif (element['name'] != date_elements[deepness_index] and f[element['name']][2]):
struct = {}
struct['name'] = element['name']
struct['value'] = element['value']
context['datefilter']['deepnessback'].append(struct)
# Build the list of elements
context['datefilter']['data'] = []
for element in date_results:
# Save the data
context['datefilter']['data'].append(element.timetuple()[deepness_index])
context['datefilter']['data'] = list(set(context['datefilter']['data']))
context['datefilter']['data'].sort()
# Prepare the rightnow result
if self.json_worker:
rightnow = {}
for key in ['year', 'month', 'day', 'hour', 'minute', 'second']:
rightnow[key] = (f[key][2] and f[key][0]) or None
else:
if f['month'][2]:
month = monthname(f['month'][0])
else:
month = '__'
if f['hour'][2]:
rightnow = string_concat(grv(f, 'day'), "/", month, "/", grv(f, 'year'), " ", grv(f, 'hour'), ":", grv(f, 'minute'), ":", grv(f, 'second'))
else:
rightnow = string_concat(grv(f, 'day'), "/", month, "/", grv(f, 'year'))
context['datefilter']['rightnow'] = rightnow
else:
context['datefilter'] = None
# Distinct
# queryset=queryset.distinct()
# Ordering field autofill
try:
order_get = jsondata.get('ordering', [])
if type(order_get) == list:
order_by_struct = order_get
else:
order_by_struct = json.loads(str(order_get))
except Exception:
order_by_struct = []
order_by = []
position = {}
counter = 1
# Build the columns structure and the fields list
context['columns'] = []
self.__fields = []
for value in fields:
self.__fields.append(value[0])
# Auto build rules
self.__autorules = self.autorules()
for order in order_by_struct:
name = list(order.keys())[0]
lbl = None
# use __autofields for ordering by alias
for field in self.__autorules:
if "{}:".format(name) in field:
name = field.split(":")[0]
lbl = field.split(":")[1]
break
direction = order[name]
if lbl and not lbl.startswith('get_') and not lbl.endswith('_display'):
name = lbl
if direction == 'asc':
order_by.append("%s" % (remove_getdisplay(name)))
elif direction == 'desc':
order_by.append("-%s" % (remove_getdisplay(name)))
position[name] = counter
counter += 1
if order_by:
queryset = queryset.order_by(*order_by)
else:
if hasattr(self, 'default_ordering'):
if type(self.default_ordering) == list:
queryset = queryset.order_by(*self.default_ordering)
else:
queryset = queryset.order_by(self.default_ordering)
else:
queryset = queryset.order_by("pk")
# Ordering field autofill
sort = {}
for value in fields:
# Get values
if value[0]:
name = value[0].split(":")[0]
order_key = name
type_field = self.get_type_field(value[0].split(":")[-1])
else:
name = value[0]
# not usable fields, example: fields.append((None, _('Selector'))) in airportslist
hash_key = hashlib.md5(value[1].encode()).hexdigest()
order_key = "#{}".format(hash_key)
type_field = None
publicname = value[1]
if len(value) > 2:
size = value[2]
else:
size = None
if len(value) > 3:
align = value[3]
else:
align = None
# filter column
if len(value) > 4:
filter_column = value[4]
else:
filter_column = None
# Process ordering
ordering = []
found = False
for order in order_by_struct:
subname = list(order.keys())[0]
direction = order[subname]
if order_key == subname:
if direction == 'desc':
direction = ''
sort_class = 'headerSortUp'
elif direction == 'asc':
direction = 'desc'
sort_class = 'headerSortDown'
else:
sort_class = ''
direction = 'asc'
found = True
if direction == 'asc' or direction == 'desc':
ordering.append({subname: direction})
if not found:
ordering.append({order_key: 'asc'})
sort_class = ''
# Save the ordering method
sort[order_key] = {}
sort[order_key]['id'] = name
sort[order_key]['name'] = publicname
sort[order_key]['align'] = align
sort[order_key]['type'] = type_field
if filter_column:
sort[order_key]['filter'] = filter_column
if jsonquery is None:
sort[order_key]['size'] = size
sort[order_key]['class'] = sort_class
if order_key and order_key[0] != '*':
sort[order_key]['ordering'] = json.dumps(ordering).replace('"', '\\"')
if order_key in position:
sort[order_key]['position'] = position[order_key]
# Save ordering in the context
if jsonquery is not None:
context['ordering'] = order_by_struct
# Build the columns structure and the fields list
context['columns'] = []
for value in fields:
field = value[0]
if field:
context['columns'].append(sort[field.split(":")[0]])
else:
hash_key = hashlib.md5(value[1].encode()).hexdigest()
field = "#{}".format(hash_key)
# selector
context['columns'].append(sort[field])
# Auto build rules
# self.__autorules = self.autorules()
# Columns
self.__columns = ['pk']
# self.__columns = ['id']
self.__foreignkeys = []
for column in self.model._meta.fields:
self.__columns.append(column.name)
if column.is_relation:
self.__foreignkeys.append(column.name)
# Localfields
self.__related_objects = []
for f in self.model._meta.related_objects:
self.__related_objects.append(f.name)
# Model properties
model_properties = self.__columns + self.__related_objects
# === Queryset optimization ===
# Get autorules ordered
autorules_keys = sorted(self.__autorules.keys())
#
query_renamed = {}
query_optimizer = []
query_verifier = []
query_select_related = []
fields_related_model = []
for rule in autorules_keys:
found = False
# name rule origin
rule_org = rule
# If rule is an alias
rulesp = rule.split(":")
if len(rulesp) == 2:
(alias, rule) = rulesp
else:
alias = rule
# If rule has a foreign key path (check first level attributes only, nfrule = no foreign rule)
nfrule = rule.split("__")
do_select_related = False
model = self.model
if len(nfrule) > 1:
ruletmp = []
field_related_model = []
for n in nfrule:
if model:
for fi in model._meta.fields:
if fi.name == n:
found = True
ruletmp.append(n)
if fi.is_relation:
model = fi.related_model
field_related_model.append(fi.name)
else:
do_select_related = True
model = None
break
if not found or model is None:
break
if field_related_model:
fields_related_model.append("__".join(field_related_model))
if ruletmp != nfrule:
do_select_related = False
elif nfrule[0] in [x.name for x in self.model._meta.fields] or nfrule[0] == 'pk':
found = True
for fi in model._meta.fields:
if fi.name == nfrule[0] and fi.is_relation:
fields_related_model.append(nfrule[0])
if not self.haystack and (do_select_related or rule in self.__foreignkeys):
# Compatibility with Django 1.10
if "__" in rule:
query_select_related.append("__".join(rule.split('__')[0:-1]))
else:
query_select_related.append(rule)
nfrule = nfrule[0]
if nfrule in self.__columns:
############################
            # The restriction is left commented out: if it stays and there is a FK,
            # .extra and .values would "never" be used. It is not removed entirely
            # in case some failure shows up later and it has to be parametrised differently.
############################
# if nfrule not in self.__foreignkeys:
if rule not in fields_related_model:
# Save verifier name
query_verifier.append(rule_org)
# Save renamed field
if alias != rule:
query_renamed[alias] = F(rule)
query_optimizer.append(alias)
else:
# Save final name
query_optimizer.append(rule)
if hasattr(self, 'annotations'):
# Prepare annotations
if callable(self.annotations):
anot = self.annotations(MODELINF)
else:
anot = self.annotations
# Process annotations
for xnfrule in anot.keys():
found = True
if xnfrule not in query_verifier:
query_verifier.append(xnfrule)
query_optimizer.append(xnfrule)
if not found:
query_renamed = {}
query_optimizer = []
query_verifier = []
query_select_related = []
break
for rename in query_renamed.keys():
if rename in model_properties:
if rename in self.__foreignkeys:
msg = "Invalid alias. The alias '{}' is a foreign key from model '{}' inside app '{}'"
elif rename in self.__columns:
msg = "Invalid alias. The alias '{}' is a columns from model '{}' inside app '{}'"
elif rename in self.__related_objects:
msg = "Invalid alias. The alias '{}' is a related object from model '{}' inside app '{}'"
raise Exception(msg.format(rename, self._modelname, self._appname))
if found and query_select_related:
queryset = queryset.select_related(*query_select_related)
# If we got the query_optimizer to optimize everything, use it
# use_extra = False
query_verifier.sort()
autorules_keys.sort()
if found and query_verifier == autorules_keys:
# use_extra = True
if query_renamed:
# queryset=queryset.extra(select=query_renamed).values(*query_optimizer)
queryset = queryset.annotate(**query_renamed).values(*query_optimizer)
else:
queryset = queryset.values(*query_optimizer)
# Custom queryset
if hasattr(self, 'custom_queryset'):
queryset = self.custom_queryset(queryset, MODELINF)
# Internal Codenerix DEBUG for Querysets
"""
raise Exception("FOUND: {} -- __foreignkeys: {} -- __columns: {} -- autorules_keys: {} -- \
query_select_related: {} -- query_renamed: {} -- query_optimizer: {} | use_extra: {}| -- \
query: {} -- meta.fields: {} -- fields_related_model: {} -- query_verifier: {}\
-- ??? {} == {}".format(
found,
self.__foreignkeys, self.__columns, autorules_keys,
query_select_related, query_renamed, query_optimizer,use_extra,
queryset.query,
[x.name for x in self.model._meta.fields],
fields_related_model, query_verifier,
query_verifier.sort(),autorules_keys.sort()
))
#"""
# Check if the user requested to return a raw queryset
if raw_query:
return queryset
else:
# Check the total count of registers + rows per page
total_rows_per_page = jsondata.get('rowsperpage', self.default_rows_per_page)
pages_to_bring = jsondata.get('pages_to_bring', 1)
if total_rows_per_page == 'All' or self.export:
total_rows_per_page = queryset.count()
paginator = Paginator(queryset, total_rows_per_page)
total_registers = paginator.count
# Rows per page
if total_rows_per_page:
try:
total_rows_per_page = int(total_rows_per_page)
except Exception:
total_rows_per_page = 'All'
else:
total_rows_per_page = self.default_rows_per_page
if total_rows_per_page == 'All':
page_number = 1
total_rows_per_page = total_registers
total_rows_per_page_out = _('All')
total_pages = 1
else:
total_rows_per_page = int(total_rows_per_page) # By default 10 rows per page
total_rows_per_page_out = total_rows_per_page
total_pages = int(total_registers / total_rows_per_page)
if total_registers % total_rows_per_page:
total_pages += 1
page_number = jsondata.get('page', 1) # If no page specified use first page
if page_number == 'last':
page_number = total_pages
else:
try:
page_number = int(page_number)
except Exception:
page_number = 1
if page_number < 1:
page_number = 1
if page_number > total_pages:
page_number = total_pages
# Build the list of page counters allowed
choice = {}
c = self.default_rows_per_page
chk = 1
while total_registers >= c:
choice[c] = c
if chk == 1:
# From 5 to 10
c = c * 2
# Next level
chk = 2
elif chk == 2:
# From 10 to 25 (10*2+10/2)
c = c * 2 + int(c / 2)
# Next level
chk = 3
elif chk == 3:
# From 25 to 50
c *= 2
chk = 1
                # Don't offer too long a list of choices
if c > 2000:
break
# Add all choice in any case
if settings.ALL_PAGESALLOWED:
choice['All'] = _('All')
# Save the pagination in the structure
context['rowsperpageallowed'] = choice
context['rowsperpage'] = total_rows_per_page_out
context['pages_to_bring'] = pages_to_bring
context['pagenumber'] = page_number
# Get the full number of registers and save it to context
context['total_registers'] = total_registers
if total_rows_per_page == 'All':
                # Remove total_rows_per_page if it is 'All'
total_rows_per_page = None
context['page_before'] = None
context['page_after'] = None
context['start_register'] = 1
context['showing_registers'] = total_registers
else:
# Page before
if page_number <= 1:
context['page_before'] = None
else:
context['page_before'] = page_number-1
# Page after
if page_number >= total_pages:
context['page_after'] = None
else:
context['page_after'] = page_number+1
# Starting on register number
context['start_register'] = (page_number-1)*total_rows_per_page+1
context['showing_registers'] = total_rows_per_page
# Calculate end
context['end_register'] = min(context['start_register']+context['showing_registers']-1, total_registers)
# Add pagination
regs = []
if paginator.count:
desired_page_number = page_number
try:
range_pages_to_bring = xrange(pages_to_bring)
except NameError:
range_pages_to_bring = range(pages_to_bring)
for p in range_pages_to_bring:
try:
regs += paginator.page(desired_page_number)
desired_page_number += 1
except PageNotAnInteger:
# If page is not an integer, deliver first page.
regs += paginator.page(1)
desired_page_number = 2
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
if pages_to_bring == 1:
regs += paginator.page(paginator.num_pages)
                        # Leave the loop
break
# Fill pages
if total_registers:
context['pages'] = pages(paginator, page_number)
try:
range_fill = xrange(pages_to_bring-1)
except NameError:
range_fill = range(pages_to_bring-1)
for p in range_fill:
page_number += 1
context['pages'] += pages(paginator, page_number)
else:
context['pages'] = []
# Return queryset
return regs
|
raise Exception("FOUND: {} -- __foreignkeys: {} -- __columns: {} -- autorules_keys: {} -- \
query_select_related: {} -- query_renamed: {} -- query_optimizer: {} | use_extra: {}| -- \
query: {} -- meta.fields: {} -- fields_related_model: {} -- query_verifier: {}\
-- ??? {} == {}".format(
found,
self.__foreignkeys, self.__columns, autorules_keys,
query_select_related, query_renamed, query_optimizer,use_extra,
queryset.query,
[x.name for x in self.model._meta.fields],
fields_related_model, query_verifier,
query_verifier.sort(),autorules_keys.sort()
))
#
|
entailment
|
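From the client side, the list endpoint driven by get_queryset above reads a single GET/POST parameter named json; a hypothetical query it would accept (the keys follow those read in the code, the URL is a placeholder):

import json
try:
    from urllib.parse import quote
except ImportError:  # Python 2
    from urllib import quote

query = {
    "search": "smith",
    "page": 1,
    "rowsperpage": 25,
    "filters": {},
    "ordering": {"name": 1},  # positive = asc, negative = desc; abs() is the priority
}
url = "/myapp/mymodels?json=" + quote(json.dumps(query))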
def get_context_data(self, **kwargs):
'''
        Generic list view with validation included and object transferring support
'''
# Call the base implementation first to get a context
context = super(GenList, self).get_context_data(**kwargs)
# Update general context with the stuff we already calculated
context.update(self.__context)
# Initialize with our timestamp
context['now'] = epochdate(time.time())
context['profile'] = self.profile
# Check vtable
context['vtable'] = getattr(self, 'vtable', False)
# Export to excel
context['export_excel'] = getattr(self, 'export_excel', True)
context['export_name'] = getattr(self, 'export_name', 'list')
# Check ngincludes
context['ngincludes'] = getattr(self, 'ngincludes', {})
if 'table' not in context['ngincludes'].keys():
context['ngincludes']['table'] = "{}codenerix/partials/table.html".format(settings.STATIC_URL)
# Check linkadd
context['linkadd'] = getattr(self, 'linkadd', self.auth_permission('add') or getattr(self, 'public', False))
# Check linkedit
context['linkedit'] = getattr(self, 'linkedit', self.auth_permission('change') or getattr(self, 'public', False))
# Check showdetails
context['show_details'] = getattr(self, 'show_details', False)
# Check showmodal
context['show_modal'] = getattr(self, 'show_modal', False)
# Set search filter button
context['search_filter_button'] = getattr(self, 'search_filter_button', False)
# Get base template
if not self.json_worker:
template_base = getattr(self, 'template_base', 'base/base')
template_base_ext = getattr(self, 'template_base_ext', 'html')
context['template_base'] = get_template(template_base, self.user, self.language, extension=template_base_ext)
# Try to convert object_id to a numeric id
object_id = kwargs.get('object_id', None)
try:
object_id = int(object_id)
except Exception:
pass
# Python 2 VS Python 3 compatibility
try:
unicode('codenerix')
unicodetest = unicode
except NameError:
unicodetest = str
if isinstance(object_id, str) or isinstance(object_id, unicodetest):
# If object_id is a string, we have a name not an object
context['object_name'] = object_id
object_obj = None
else:
# If is not an string
if object_id:
# If we got one, load the object
obj = context['obj']
object_obj = get_object_or_404(obj, pk=object_id)
else:
# There is no object
object_obj = None
context['object_obj'] = object_obj
# Attach extra_context
context.update(self.extra_context)
# Return new context
return context
|
Generic list view with validation included and object transferring support
|
entailment
|
def get_context_json(self, context):
'''
Return a base answer for a json answer
'''
# Initialize answer
answer = {}
# Metadata builder
answer['meta'] = self.__jcontext_metadata(context)
# Filter builder
answer['filter'] = self.__jcontext_filter(context)
# Head builder
answer['table'] = {}
answer['table']['head'] = self.__jcontext_tablehead(context)
answer['table']['body'] = None
answer['table']['header'] = None
answer['table']['summary'] = None
# Return answer
return answer
|
Return a base answer for a json answer
|
entailment
|
def set_context_json(self, jsonquery):
'''
    Get a JSON parameter and rebuild the context back into a dictionary (probably kwargs)
'''
# Make sure we are getting dicts
    if not isinstance(jsonquery, dict):
        raise IOError("set_context_json() method can be called only with dictionaries, you gave me a '{}'".format(type(jsonquery)))
# Set we will answer json to this request
self.json = True
# Transfer keys
newget = {}
for key in ['search', 'search_filter_button', 'page', 'pages_to_bring', 'rowsperpage', 'filters', 'year', 'month', 'day', 'hour', 'minute', 'second']:
if key in jsonquery:
newget[key] = jsonquery[key]
# Add transformed ordering
json_ordering = jsonquery.get('ordering', None)
if json_ordering:
# Convert to list
ordering = []
for key in json_ordering:
ordering.append({key: jsonquery['ordering'][key]})
# Order the result from ordering
# ordering = sorted(ordering, key=lambda x: abs(x.values()[0]))
ordering = sorted(ordering, key=lambda x: abs(list(x.values())[0]))
# Save ordering
newget['ordering'] = []
for orderer in ordering:
key = list(orderer.keys())[0]
value = orderer[key]
if value > 0:
value = 'asc'
elif value < 0:
value = 'desc'
else:
value = None
if value:
newget['ordering'].append({key: value})
# Get listid
newget['listid'] = jsonquery.get("listid", None)
# Get elementid
newget['elementid'] = jsonquery.get("elementid", None)
# Return new get
return newget
|
Get a JSON parameter and rebuild the context back into a dictionary (probably kwargs)
|
entailment
|
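The ordering transformation in set_context_json() (a dict of signed priorities turned into a list of {field: 'asc'/'desc'} entries, sorted by absolute priority) can be exercised on its own; a sketch of the same steps outside the class:

def ordering_to_list(json_ordering):
    """Sort fields by absolute priority; the sign picks the direction (illustrative)."""
    ordering = [{key: value} for key, value in json_ordering.items()]
    ordering = sorted(ordering, key=lambda x: abs(list(x.values())[0]))
    result = []
    for orderer in ordering:
        key = list(orderer.keys())[0]
        value = orderer[key]
        if value > 0:
            result.append({key: 'asc'})
        elif value < 0:
            result.append({key: 'desc'})
        # a priority of 0 means "not ordered" and is dropped
    return result

assert ordering_to_list({'name': 2, 'date': -1, 'id': 0}) == [{'date': 'desc'}, {'name': 'asc'}]
 |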
def dispatch(self, request, **kwargs):
'''
Entry point for this class, here we decide basic stuff
'''
# Check if this is a webservice request
self.json_worker = (bool(getattr(self.request, "authtoken", False))) or (self.json is True)
self.__authtoken = (bool(getattr(self.request, "authtoken", False)))
# Check if this is an AJAX request
if (request.is_ajax() or self.json_worker) and request.body:
request.POST = QueryDict('').copy()
body = request.body
if type(request.body) == bytes:
body = body.decode("utf-8")
post = json.loads(body)
for key in post:
if type(post[key]) == dict and '__JSON_DATA__' in post[key]:
post[key] = json.dumps(post[key]['__JSON_DATA__'])
request.POST.update(post)
# Set class internal variables
self._setup(request)
# Call the base implementation
return super(GenModify, self).dispatch(request, **kwargs)
|
Entry point for this class, here we decide basic stuff
|
entailment
|
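The body-decoding step in dispatch() above can be sketched without Django: decode bytes, parse JSON, and re-serialize any value wrapped in '__JSON_DATA__' so it survives as a single string in the flat POST dict. The helper below is illustrative:

import json

def decode_ajax_body(body):
    """Turn a raw request body into a flat POST-style dict (illustrative)."""
    if isinstance(body, bytes):
        body = body.decode("utf-8")
    post = json.loads(body)
    for key in post:
        if isinstance(post[key], dict) and '__JSON_DATA__' in post[key]:
            # Re-serialize nested structures so each POST value stays a string
            post[key] = json.dumps(post[key]['__JSON_DATA__'])
    return post

raw = b'{"name": "test", "rows": {"__JSON_DATA__": [1, 2, 3]}}'
assert decode_ajax_body(raw) == {'name': 'test', 'rows': '[1, 2, 3]'}
 |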
def get_form(self, form_class=None):
'''
Set form groups to the groups specified in the view if defined
'''
formobj = super(GenModify, self).get_form(form_class)
# Set requested group to this form
selfgroups = getattr(self, "form_groups", None)
if selfgroups:
if type(selfgroups) == list:
formobj.__groups__ = lambda: selfgroups
else:
formobj.__groups__ = selfgroups
else:
selfgroups = getattr(self, "__groups__", None)
if selfgroups:
formobj.__groups__ = selfgroups
# Return the new updated form
return formobj
|
Set form groups to the groups specified in the view if defined
|
entailment
|
def dispatch(self, request, **kwargs):
'''
Entry point for this class, here we decide basic stuff
'''
# Delete method must happen with POST not with GET
if request.method == 'POST':
# Check if this is a webservice request
self.__authtoken = (bool(getattr(self.request, "authtoken", False)))
self.json_worker = self.__authtoken or (self.json is True)
# Call the base implementation
return super(GenDelete, self).dispatch(request, **kwargs)
else:
json_answer = json.dumps({
'error': True,
'errortxt': _('Method not allowed, use POST to delete or DELETE on the detail url'),
})
return HttpResponse(json_answer, content_type='application/json')
|
Entry point for this class, here we decide basic stuff
|
entailment
|
def dispatch(self, request, **kwargs):
'''
Entry point for this class, here we decide basic stuff
'''
    # Check if this is a REST query that requires the answer to be in JSON
if bool(self.request.META.get('HTTP_X_REST', False)):
self.json = True
    # Check if this is a REST query to edit or delete an element
if self.request.method in ['PUT', 'DELETE']:
if self.request.method == 'PUT':
action = 'edit'
else:
action = 'delete'
# Set new method
        self.request.method = 'POST'
# Find the URL
target = get_class(resolve("{}/{}".format(self.request.META.get("REQUEST_URI"), action)).func)
# Make sure we will answer as an API
target.json = True
# Lets go for it
return target.as_view()(self.request, pk=kwargs.get('pk'))
# Detect if we have to answer in json
self.__authtoken = (bool(getattr(self.request, "authtoken", False)))
self.json_worker = self.__authtoken or (self.json is True)
# Check if this is an AJAX request
if (request.is_ajax() or self.json_worker) and request.body:
request.POST = json.loads(request.body)
# Set class internal variables
self._setup(request)
# Call the base implementation
return super(GenDetail, self).dispatch(request, **kwargs)
|
Entry point for this class, here we decide basic stuff
|
entailment
|
def get_filled_structure(self, subgroup=None):
'''
    method in charge of filling a structure containing the object field
    values, taking into account the 'group' attribute from the corresponding
    form object, which is necessary to fill the details form as it is configured
    in the 'group' attribute
'''
    # initialize the result structure
result = []
# the object corresponding model content is taken into a dictionary
object_content = model_to_dict(self.object)
    # generally some common or specific fields are not interesting
if 'exclude_fields' not in dir(self):
self.exclude_fields = []
self.exclude_fields.append("id")
for field in (self.exclude_fields):
if field in object_content.keys():
object_content.pop(field)
    # next, a structure is created with the appropriate caption
    # for every existing field in the current model
verbose_names = {}
for field in object_content.keys():
verbose_names[field] = self.model._meta.get_field(field).verbose_name
    # the fields found in the groups structure are taken into account
gr_object_content = []
if subgroup:
group_array = subgroup
else:
group_array = self.groups
for group in group_array:
# raise Exception(group)
item = {}
item["name"] = smart_text(group[0])
item["col"] = group[1]
item_elements = group[2:]
sublist = []
idx = 0
for item_element in item_elements:
            # the element can contain other groups
if (idx > 1) and (type(item_element) == tuple):
# Recursive
                sublist.append(self.get_filled_structure([item_element]))
else:
filter_field = None
# Check if it is a list
if type(item_element) == list:
                    # if it is a list, the corresponding values for columns
                    # and any other settings can be found in it
field = item_element[0]
                    # note that the field caption can be passed as the
                    # third list element
if len(item_element) >= 3 and item_element[2]:
verbose_names[field] = _(item_element[2])
if len(item_element) >= 9:
filter_field = item_element[8]
else:
field = item_element
if field not in verbose_names:
if field.startswith('get_') and field.endswith('_display'):
label_field = remove_getdisplay(field)
if self.model:
try:
verbose_names[field] = self.model._meta.get_field(label_field).verbose_name
except FieldDoesNotExist:
verbose_names[field] = _(label_field)
else:
verbose_names[field] = _(label_field)
else:
label_field = field
verbose_names[field] = _(label_field)
args = {}
value = None
for field_split in field.split('__'):
if value is None:
try:
verbose_names[field] = self.object._meta.get_field(field_split).verbose_name
except AttributeError:
pass
except FieldDoesNotExist:
pass
value = getattr(self.object, field_split, None)
else:
try:
verbose_names[field] = value._meta.get_field(field_split).verbose_name
except AttributeError:
pass
except FieldDoesNotExist:
pass
value = getattr(value, field_split, None)
if callable(value):
# if 'request' in value.func_code.co_varnames:
related = (getattr(value, 'all', None) is not None)
if related:
value = ", ".join([str(x) for x in value.all()])
else:
if 'request' in value.__code__.co_varnames:
args['request'] = self.request
# Call the method
value = value(**args)
sublist.append({
"name": _(verbose_names[field]),
"value": value,
"filter": filter_field,
})
gr_object_content.append(field)
# Increment index
idx += 1
item["value"] = sublist
result.append(item)
for field in object_content.keys():
item = {}
if field not in gr_object_content:
item["name"] = _(verbose_names[field])
item["value"] = getattr(self.object, field)
result.append(item)
return result
|
method in charge of filling a structure containing the object field
values, taking into account the 'group' attribute from the corresponding
form object, which is necessary to fill the details form as it is configured
in the 'group' attribute
|
entailment
|
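The field resolution above follows Django-style 'a__b__c' paths one attribute at a time, starting from self.object. A dependency-free sketch of that traversal, with plain objects standing in for model instances:

class Obj(object):
    """Stand-in for a model instance (illustrative)."""
    def __init__(self, **attrs):
        self.__dict__.update(attrs)

def resolve_path(obj, field):
    """Follow 'a__b__c' through attributes, as get_filled_structure() does."""
    value = None
    for part in field.split('__'):
        source = obj if value is None else value
        value = getattr(source, part, None)
    return value

record = Obj(owner=Obj(address=Obj(city="Bilbao")))
assert resolve_path(record, 'owner__address__city') == "Bilbao"
assert resolve_path(record, 'owner__missing') is None
 |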
def flatatt(attrs):
"""
Pilfered from `django.forms.utils`:
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. In the case of a boolean value, the key will appear
without a value. Otherwise, the value is formatted through its own dict of `attrs`,
which can be useful to parametrize Angular directives.
It is assumed that the keys do not need to be
XML-escaped. If the passed dictionary is empty, then return an empty
string.
The result is passed through 'mark_safe' (by way of 'format_html_join').
"""
key_value_attrs = []
boolean_attrs = []
for attr, value in attrs.items():
if isinstance(value, bool):
if value:
boolean_attrs.append((attr,))
else:
try:
value = value.format(**attrs)
except KeyError:
pass
key_value_attrs.append((attr, value))
return (
format_html_join('', ' {}="{}"', sorted(key_value_attrs)) +
format_html_join('', ' {}', sorted(boolean_attrs))
)
|
Pilfered from `django.forms.utils`:
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. In the case of a boolean value, the key will appear
without a value. Otherwise, the value is formatted through its own dict of `attrs`,
which can be useful to parametrize Angular directives.
It is assumed that the keys do not need to be
XML-escaped. If the passed dictionary is empty, then return an empty
string.
The result is passed through 'mark_safe' (by way of 'format_html_join').
|
entailment
|
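For reference, the same behavior without Django's format_html_join, as a plain-Python sketch (no HTML escaping, matching the docstring's assumption that keys need none; catching AttributeError as well as KeyError is a small liberty so non-string values pass through unchanged):

def flatatt_plain(attrs):
    """Render {'id': 'x', 'disabled': True} as ' id="x" disabled' (illustrative)."""
    key_value_attrs = []
    boolean_attrs = []
    for attr, value in attrs.items():
        if isinstance(value, bool):
            if value:
                boolean_attrs.append(attr)
        else:
            try:
                value = value.format(**attrs)  # values may be parametrized by other attrs
            except (KeyError, AttributeError):
                pass
            key_value_attrs.append((attr, value))
    return (''.join(' {0}="{1}"'.format(k, v) for k, v in sorted(key_value_attrs)) +
            ''.join(' {0}'.format(k) for k in sorted(boolean_attrs)))

assert flatatt_plain({'id': 'x', 'disabled': True, 'hidden': False}) == ' id="x" disabled'
 |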
def _compute_k(self, tau):
r"""Evaluate the kernel directly at the given values of `tau`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
Returns
-------
k : :py:class:`Array`, (`M`,)
:math:`k(\tau)` (less the :math:`\sigma^2` prefactor).
"""
y = self._compute_y(tau)
return y**(-self.params[1])
|
r"""Evaluate the kernel directly at the given values of `tau`.
Parameters
----------
tau : :py:class:`Matrix`, (`M`, `D`)
`M` inputs with dimension `D`.
Returns
-------
k : :py:class:`Array`, (`M`,)
:math:`k(\tau)` (less the :math:`\sigma^2` prefactor).
|
entailment
|
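_compute_y() is defined elsewhere in the class and is not shown here; for the rational quadratic (RQ) kernel it is conventionally y = 1 + r^2/(2*alpha*l^2), giving k = y**(-alpha). A numpy sketch under that assumption (alpha and the length scale l are illustrative parameter names standing in for self.params):

import numpy as np

def rq_kernel(tau, alpha, l):
    """Rational quadratic kernel, less the sigma^2 prefactor (sketch)."""
    r2 = np.sum(np.atleast_2d(tau)**2, axis=1)  # squared distance over the D dimensions
    y = 1.0 + r2 / (2.0 * alpha * l**2)
    return y**(-alpha)

tau = np.array([[0.0], [1.0], [2.0]])  # M=3 inputs with D=1
k = rq_kernel(tau, alpha=2.0, l=1.0)
assert k[0] == 1.0 and k[1] > k[2]  # k(0) = 1 and k decays with distance
 |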
def _compute_dk_dy(self, y, n):
"""Evaluate the derivative of the outer form of the RQ kernel.
Parameters
----------
y : :py:class:`Array`, (`M`,)
`M` inputs to evaluate at.
n : non-negative scalar int
Order of derivative to compute.
Returns
-------
dk_dy : :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
"""
p = fixed_poch(1.0 - self.params[1] - n, n)
return p * y**(-self.params[1] - n)
|
Evaluate the derivative of the outer form of the RQ kernel.
Parameters
----------
y : :py:class:`Array`, (`M`,)
`M` inputs to evaluate at.
n : non-negative scalar int
Order of derivative to compute.
Returns
-------
dk_dy : :py:class:`Array`, (`M`,)
Specified derivative at specified locations.
|
entailment
|
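The prefactor fixed_poch(1 - alpha - n, n) is exactly the coefficient produced by differentiating y**(-alpha) n times; assuming fixed_poch is the Pochhammer (rising factorial) symbol, the identity can be checked numerically against scipy.special.poch:

import numpy as np
from scipy.special import poch

alpha = 1.5
y = np.linspace(1.0, 3.0, 5)
# d/dy y**(-alpha) = -alpha * y**(-alpha - 1) = poch(1 - alpha - 1, 1) * y**(-alpha - 1)
analytic = -alpha * y**(-alpha - 1.0)
via_poch = poch(1.0 - alpha - 1.0, 1) * y**(-alpha - 1.0)
assert np.allclose(analytic, via_poch)
 |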
def get_parent(self, directory):
"""
Given a directory name, return the Page representing it in the menu
    hierarchy.
"""
assert settings.PAGE_DIR.startswith('/')
assert settings.PAGE_DIR.endswith('/')
parents = directory[len(settings.PAGE_DIR):]
page = None
if parents:
for slug in parents.split('/'):
page = Page.objects.get(parent=page, slug=slug)
return page
|
Given a directory name, return the Page representing it in the menu
hierarchy.
|
entailment
|
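The walk in get_parent() strips the PAGE_DIR prefix and then resolves one slug at a time, each lookup scoped to the parent found so far. A sketch with a dict standing in for Page.objects.get (the paths and page names below are made up):

PAGE_DIR = '/pages/'  # illustrative; the real value comes from settings

# (parent, slug) -> page, standing in for Page.objects.get(parent=page, slug=slug)
PAGES = {
    (None, 'docs'): 'docs-page',
    ('docs-page', 'api'): 'api-page',
}

def get_parent_sketch(directory):
    parents = directory[len(PAGE_DIR):]
    page = None
    if parents:
        for slug in parents.split('/'):
            page = PAGES[(page, slug)]
    return page

assert get_parent_sketch('/pages/docs/api') == 'api-page'
assert get_parent_sketch('/pages/') is None  # the root has no parent page
 |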
def wafer_sso_url(context, sso_method):
'''
Return the correct URL to SSO with the given method.
'''
request = context.request
url = reverse(getattr(views, '%s_login' % sso_method))
if 'next' in request.GET:
url += '?' + urlencode({'next': request.GET['next']})
return url
|
Return the correct URL to SSO with the given method.
|
entailment
|
def authorize(args):
"""
Authorizes Coursera's OAuth2 client for using coursera.org API servers for
a specific application
"""
oauth2_instance = oauth2.build_oauth2(args.app, args)
oauth2_instance.build_authorizer()
logging.info('Application "%s" authorized!', args.app)
|
Authorizes Coursera's OAuth2 client for using coursera.org API servers for
a specific application
|
entailment
|
def check_auth(args):
"""
Checks courseraoauth2client's connectivity to the coursera.org API servers
for a specific application
"""
oauth2_instance = oauth2.build_oauth2(args.app, args)
auth = oauth2_instance.build_authorizer()
my_profile_url = (
'https://api.coursera.org/api/externalBasicProfiles.v1?'
'q=me&fields=name'
)
r = requests.get(my_profile_url, auth=auth)
if r.status_code != 200:
logging.error('Received response code %s from the basic profile API.',
r.status_code)
logging.debug('Response body:\n%s', r.text)
sys.exit(1)
try:
external_id = r.json()['elements'][0]['id']
    except Exception:
logging.error(
'Could not parse the external id out of the response body %s',
r.text)
external_id = None
try:
name = r.json()['elements'][0]['name']
    except Exception:
logging.error(
'Could not parse the name out of the response body %s',
r.text)
name = None
if not args.quiet > 0:
        print('Name: %s' % name)
        print('External ID: %s' % external_id)
if name is None or external_id is None:
sys.exit(1)
|
Checks courseraoauth2client's connectivity to the coursera.org API servers
for a specific application
|
entailment
|
def display_auth_cache(args):
'''
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
'''
oauth2_instance = oauth2.build_oauth2(args.app, args)
if not args.quiet > 0:
token = oauth2_instance.token_cache['token']
if not args.no_truncate and token is not None:
token = token[:10] + '...'
print "Auth token: %s" % token
expires_time = oauth2_instance.token_cache['expires']
expires_in = int((expires_time - time.time()) * 10) / 10.0
print "Auth token expires in: %s seconds." % expires_in
if 'refresh' in oauth2_instance.token_cache:
refresh = oauth2_instance.token_cache['refresh']
if not args.no_truncate and refresh is not None:
refresh = refresh[:10] + '...'
print "Refresh token: %s" % refresh
else:
print "No refresh token found."
|
Writes to the screen the state of the authentication cache. (For debugging
authentication issues.) BEWARE: DO NOT email the output of this command!!!
You must keep the tokens secure. Treat them as passwords.
|
entailment
|
def tanh_warp_arb(X, l1, l2, lw, x0):
r"""Warps the `X` coordinate with the tanh model
.. math::
l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}
Parameters
----------
X : :py:class:`Array`, (`M`,) or scalar float
`M` locations to evaluate length scale at.
l1 : positive float
Small-`X` saturation value of the length scale.
l2 : positive float
Large-`X` saturation value of the length scale.
lw : positive float
Length scale of the transition between the two length scales.
x0 : float
Location of the center of the transition between the two length scales.
Returns
-------
l : :py:class:`Array`, (`M`,) or scalar float
The value of the length scale at the specified point.
"""
if isinstance(X, scipy.ndarray):
if isinstance(X, scipy.matrix):
X = scipy.asarray(X, dtype=float)
return 0.5 * ((l1 + l2) - (l1 - l2) * scipy.tanh((X - x0) / lw))
else:
return 0.5 * ((l1 + l2) - (l1 - l2) * mpmath.tanh((X - x0) / lw))
|
r"""Warps the `X` coordinate with the tanh model
.. math::
l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}
Parameters
----------
X : :py:class:`Array`, (`M`,) or scalar float
`M` locations to evaluate length scale at.
l1 : positive float
Small-`X` saturation value of the length scale.
l2 : positive float
Large-`X` saturation value of the length scale.
lw : positive float
Length scale of the transition between the two length scales.
x0 : float
Location of the center of the transition between the two length scales.
Returns
-------
l : :py:class:`Array`, (`M`,) or scalar float
The value of the length scale at the specified point.
|
entailment
|
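The saturation behavior is easy to verify numerically: far below x0 the warp returns l1, far above it returns l2, and exactly at x0 it returns their mean. A numpy restatement of the formula above (names are illustrative):

import numpy as np

def tanh_warp_np(X, l1, l2, lw, x0):
    """numpy-only restatement of the tanh warp (sketch)."""
    return 0.5 * ((l1 + l2) - (l1 - l2) * np.tanh((X - x0) / lw))

l1, l2, lw, x0 = 2.0, 0.5, 0.1, 1.0
assert np.isclose(tanh_warp_np(-10.0, l1, l2, lw, x0), l1)  # small-X saturation
assert np.isclose(tanh_warp_np(10.0, l1, l2, lw, x0), l2)   # large-X saturation
assert np.isclose(tanh_warp_np(x0, l1, l2, lw, x0), (l1 + l2) / 2.0)  # mean at x0
 |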
def gauss_warp_arb(X, l1, l2, lw, x0):
r"""Warps the `X` coordinate with a Gaussian-shaped divot.
.. math::
l = l_1 - (l_1 - l_2) \exp\left ( -4\ln 2\frac{(X-x_0)^2}{l_{w}^{2}} \right )
Parameters
----------
X : :py:class:`Array`, (`M`,) or scalar float
`M` locations to evaluate length scale at.
l1 : positive float
Global value of the length scale.
l2 : positive float
Pedestal value of the length scale.
lw : positive float
Width of the dip.
x0 : float
Location of the center of the dip in length scale.
Returns
-------
l : :py:class:`Array`, (`M`,) or scalar float
The value of the length scale at the specified point.
"""
if isinstance(X, scipy.ndarray):
if isinstance(X, scipy.matrix):
X = scipy.asarray(X, dtype=float)
return l1 - (l1 - l2) * scipy.exp(-4.0 * scipy.log(2.0) * (X - x0)**2.0 / (lw**2.0))
else:
return l1 - (l1 - l2) * mpmath.exp(-4.0 * mpmath.log(2.0) * (X - x0)**2.0 / (lw**2.0))
|
r"""Warps the `X` coordinate with a Gaussian-shaped divot.
.. math::
l = l_1 - (l_1 - l_2) \exp\left ( -4\ln 2\frac{(X-x_0)^2}{l_{w}^{2}} \right )
Parameters
----------
X : :py:class:`Array`, (`M`,) or scalar float
`M` locations to evaluate length scale at.
l1 : positive float
Global value of the length scale.
l2 : positive float
Pedestal value of the length scale.
lw : positive float
Width of the dip.
x0 : float
Location of the center of the dip in length scale.
Returns
-------
l : :py:class:`Array`, (`M`,) or scalar float
The value of the length scale at the specified point.
|
entailment
|
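The -4 ln 2 factor makes lw the full width at half depth of the divot: the warp equals l2 exactly at x0 and sits halfway between l1 and l2 at x0 +- lw/2. A numpy check of those properties:

import numpy as np

def gauss_warp_np(X, l1, l2, lw, x0):
    """numpy-only restatement of the Gaussian divot warp (sketch)."""
    return l1 - (l1 - l2) * np.exp(-4.0 * np.log(2.0) * (X - x0)**2 / lw**2)

l1, l2, lw, x0 = 2.0, 0.5, 0.2, 1.0
assert np.isclose(gauss_warp_np(x0, l1, l2, lw, x0), l2)  # bottom of the dip
assert np.isclose(gauss_warp_np(x0 + lw / 2.0, l1, l2, lw, x0), (l1 + l2) / 2.0)  # half depth
assert np.isclose(gauss_warp_np(x0 - 10.0 * lw, l1, l2, lw, x0), l1)  # far away: global value
 |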
def tanh_warp(x, n, l1, l2, lw, x0):
r"""Implements a tanh warping function and its derivative.
.. math::
l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}
Parameters
----------
x : float or array of float
Locations to evaluate the function at.
n : int
Derivative order to take. Used for ALL of the points.
l1 : positive float
Left saturation value.
l2 : positive float
Right saturation value.
lw : positive float
Transition width.
x0 : float
Transition location.
Returns
-------
l : float or array
Warped length scale at the given locations.
Raises
------
NotImplementedError
If `n` > 1.
"""
if n == 0:
return (l1 + l2) / 2.0 - (l1 - l2) / 2.0 * scipy.tanh((x - x0) / lw)
elif n == 1:
return -(l1 - l2) / (2.0 * lw) * (scipy.cosh((x - x0) / lw))**(-2.0)
else:
raise NotImplementedError("Only derivatives up to order 1 are supported!")
|
r"""Implements a tanh warping function and its derivative.
.. math::
l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}
Parameters
----------
x : float or array of float
Locations to evaluate the function at.
n : int
Derivative order to take. Used for ALL of the points.
l1 : positive float
Left saturation value.
l2 : positive float
Right saturation value.
lw : positive float
Transition width.
x0 : float
Transition location.
Returns
-------
l : float or array
Warped length scale at the given locations.
Raises
------
NotImplementedError
If `n` > 1.
|
entailment
|
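The n=1 branch can be checked against a centered finite difference of the n=0 branch; a short numpy sketch:

import numpy as np

def tanh_warp_n(x, n, l1, l2, lw, x0):
    """numpy-only restatement of tanh_warp (sketch)."""
    if n == 0:
        return (l1 + l2) / 2.0 - (l1 - l2) / 2.0 * np.tanh((x - x0) / lw)
    elif n == 1:
        return -(l1 - l2) / (2.0 * lw) * np.cosh((x - x0) / lw)**(-2.0)
    raise NotImplementedError("Only derivatives up to order 1 are supported!")

x = np.linspace(-2.0, 2.0, 9)
args = (1.0, 0.2, 0.5, 0.3)  # l1, l2, lw, x0
h = 1e-6
fd = (tanh_warp_n(x + h, 0, *args) - tanh_warp_n(x - h, 0, *args)) / (2.0 * h)
assert np.allclose(fd, tanh_warp_n(x, 1, *args), atol=1e-6)
 |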
def double_tanh_warp(x, n, lcore, lmid, ledge, la, lb, xa, xb):
r"""Implements a sum-of-tanh warping function and its derivative.
.. math::
l = a\tanh\frac{x-x_a}{l_a} + b\tanh\frac{x-x_b}{l_b}
Parameters
----------
x : float or array of float
Locations to evaluate the function at.
n : int
Derivative order to take. Used for ALL of the points.
lcore : float
Core length scale.
lmid : float
Intermediate length scale.
ledge : float
Edge length scale.
la : positive float
Transition of first tanh.
lb : positive float
Transition of second tanh.
xa : float
Transition of first tanh.
xb : float
Transition of second tanh.
Returns
-------
l : float or array
Warped length scale at the given locations.
Raises
------
NotImplementedError
If `n` > 1.
"""
a, b, c = scipy.dot([[-0.5, 0, 0.5], [0, 0.5, -0.5], [0.5, 0.5, 0]],
[[lcore], [ledge], [lmid]])
a = a[0]
b = b[0]
c = c[0]
if n == 0:
return a * scipy.tanh((x - xa) / la) + b * scipy.tanh((x - xb) / lb) + c
elif n == 1:
return (a / la * (scipy.cosh((x - xa) / la))**(-2.0) +
b / lb * (scipy.cosh((x - xb) / lb))**(-2.0))
else:
raise NotImplementedError("Only derivatives up to order 1 are supported!")
|
r"""Implements a sum-of-tanh warping function and its derivative.
.. math::
l = a\tanh\frac{x-x_a}{l_a} + b\tanh\frac{x-x_b}{l_b}
Parameters
----------
x : float or array of float
Locations to evaluate the function at.
n : int
Derivative order to take. Used for ALL of the points.
lcore : float
Core length scale.
lmid : float
Intermediate length scale.
ledge : float
Edge length scale.
la : positive float
Transition of first tanh.
lb : positive float
Transition of second tanh.
xa : float
Transition of first tanh.
xb : float
Transition of second tanh.
Returns
-------
l : float or array
Warped length scale at the given locations.
Raises
------
NotImplementedError
If `n` > 1.
|
entailment
|
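The linear solve for a, b and c is what pins the three plateaus: the warp tends to lcore far to the left, to lmid between the two transitions (xa << x << xb), and to ledge far to the right. A numpy check of those limits:

import numpy as np

def double_tanh_np(x, lcore, lmid, ledge, la, lb, xa, xb):
    """numpy-only restatement of double_tanh_warp, n=0 branch (sketch)."""
    a, b, c = np.dot([[-0.5, 0, 0.5], [0, 0.5, -0.5], [0.5, 0.5, 0]],
                     [lcore, ledge, lmid])
    return a * np.tanh((x - xa) / la) + b * np.tanh((x - xb) / lb) + c

lcore, lmid, ledge = 1.0, 0.5, 0.1
la, lb, xa, xb = 0.02, 0.02, 0.3, 0.9
assert np.isclose(double_tanh_np(-10.0, lcore, lmid, ledge, la, lb, xa, xb), lcore)
assert np.isclose(double_tanh_np(0.6, lcore, lmid, ledge, la, lb, xa, xb), lmid)
assert np.isclose(double_tanh_np(10.0, lcore, lmid, ledge, la, lb, xa, xb), ledge)
 |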