| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
jdemel/gnuradio
|
gnuradio-runtime/python/gnuradio/gr/exceptions.py
|
Python
|
gpl-3.0
| 311
| 0.006431
|
from __future__ im
|
port unicode_literals
#
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
class NotDAG (Exception):
"""Not a directed acyclic graph"""
pass
cla
|
ss CantHappen (Exception):
"""Can't happen"""
pass
|
ywangd/stash
|
tests/misc/test_cowsay.py
|
Python
|
mit
| 2,613
| 0.001531
|
# -*- coding: utf-8 -*-
from stash.tests.stashtest import StashTestCase
class CowsayTests(StashTestCase):
"""tests for cowsay"""
def test_help(self):
"""test help output"""
output = self.run_command("cowsay --help", exitcode=0)
self.assertIn("cowsay", output)
self.assertIn("--help", output)
self.assertIn("usage:", output)
def test_singleline_1(self):
"""test for correct text in output"""
output = self.run_command("cowsay test", exitcode=0)
self.assertIn("test", output)
self.assertNotIn("Hello, World!", output)
self.assertEqual(output.count("<"), 1)
self.assertEqual(output.count(">"), 1)
    def test_singleline_2(self):
"""test for correct text in ou
|
tput"""
output = self.run_command("cowsay Hello, World!", exitcode=0)
self.assertIn("Hello, World!", output)
|
self.assertNotIn("test", output)
self.assertEqual(output.count("<"), 1)
self.assertEqual(output.count(">"), 1)
def test_stdin_read(self):
"""test 'echo test | cowsay' printing 'test'"""
output = self.run_command("echo test | cowsay", exitcode=0)
self.assertIn("test", output)
self.assertNotIn("Hello, World!", output)
def test_stdin_ignore(self):
"""test 'echo test | cowsay Hello, World!' printing 'Hello World!'"""
output = self.run_command("echo test | cowsay Hello, World!", exitcode=0)
self.assertIn("Hello, World!", output)
self.assertNotIn("test", output)
def test_multiline_1(self):
"""test for correct multiline output"""
output = self.run_command("cowsay Hello,\\nWorld!", exitcode=0)
self.assertIn("Hello,", output)
self.assertIn("World!", output)
self.assertNotIn("Hello,\nWorld!", output) # text should be splitted allong the lines
self.assertIn("/", output)
self.assertIn("\\", output)
self.assertNotIn("<", output)
self.assertNotIn(">", output)
def test_multiline_2(self):
"""test for correct multiline output"""
output = self.run_command("cowsay Hello,\\nWorld!\\nPython4Ever", exitcode=0)
self.assertIn("Hello,", output)
self.assertIn("World!", output)
self.assertIn("Python4Ever", output)
self.assertNotIn("Hello,\nWorld!\nPython4Ever", output) # text should be splitted allong the lines
self.assertIn("/", output)
self.assertIn("\\", output)
self.assertIn("|", output)
self.assertNotIn("<", output)
self.assertNotIn(">", output)
|
nicain/dipde_dev
|
dipde/internals/internalpopulation.py
|
Python
|
gpl-3.0
| 20,747
| 0.010604
|
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import bisect
import numpy as np
import scipy.stats as sps
import json
from dipde.interfaces.pandas import to_df
from dipde.internals import utilities as util
import logging
logger = logging.getLogger(__name__)
import scipy.sparse as sps
import scipy.sparse.linalg as spsla
class InternalPopulation(object):
"""Population density class
    This class encapsulates all the details necessary to propagate a population
density equation driven by a combination of recurrent and background
connections. The voltage (spatial) domain discretization is defined by
linear binning from v_min to v_max, in steps of dv (All in units of volts).
    The probability densities on this grid are recorded in pv, and must always sum
to 1.
Parameters
----------
tau_m : float (default=.02)
Time constant (unit: 1/sec) of neuronal population.
v_min : float (default=-.1)
Minimum of voltage domain (unit: volt).
v_max : float (default=.02)
        Maximum of voltage domain (absorbing boundary), i.e. spiking threshold (unit: volt).
dv : float (default=.0001)
        Voltage domain discretization size (unit: volt).
record : bool (default=False)
If True, a history of the output firing rate is recorded (firing_rate_record attribute).
    curr_firing_rate : float (default=0.0)
Initial/Current firing rate of the population (unit: Hz).
update_method : str 'approx' or 'exact' (default='approx')
Method to update pv (exact can be quite slow).
approx_order : int or None (default=None)
Maximum Taylor series expansion order to use when computing update to pv.
tol : float (default=1e-12)
Error tolerance used when computing update to pv.
norm : non-zero int, np.inf, -np.inf, or 'fro' (default=np.inf)
Vector norm used in computation of tol.
**kwargs
Any additional keyword args are stored as metadata (metadata attribute).
Attributes
----------
self.edges : np.array
Vector defining the boundaries of voltage bins.
self.pv : np.array
Vector defining the probability mass in each voltage bin (self.pv.sum() = 1).
self.firing_rate_record : list
List of firing rates recorded during Simulation.
self.t_record : list
List of times that firing rates were recorded during Simulation.
self.leak_flux_matrix : np.array
Matrix that defines the flux between voltage bins.
"""
def __init__(self, rank=0,
tau_m={'distribution':'delta', 'loc':0.02},
v_min=-.1,
v_max=.02,
dv=.0001,
record=True,
initial_firing_rate=0.0,
update_method='approx',
approx_order=None,
tol=1e-14,
norm=np.inf,
p0={'distribution':'delta', 'loc':0.},
metadata={},
firing_rate_record=[],
t_record=[],
update_callback=lambda s:None,
initialize_callback=lambda s:None,
**kwargs):
# Store away inputs:
        self.rank = rank
self.tau_m = tau_m
self.p0 = p0
self.v_min = v_min
self.v_max = v_max
self.dv = dv
self.record = record
self.curr_firing_rate = initial_firing_rate
self.update_method = update_method
self.approx_order = approx_order
self.norm = norm
self.update_callback = update_callback
self.initialize_callback = initialize_callback
self.firing_rate_record = [x for x in firing_rate_record]
self.t_record = [x for x in t_record]
assert len(self.firing_rate_record) == len(self.t_record)
if tol is None:
if self.update_method == 'gmres':
self.tol = 1e-5
else:
self.tol = 1e-12
else:
self.tol = tol
# Additional metadata:
util.check_metadata(metadata)
self.metadata = metadata
# Defined in initialization:
self.edges = None
self.pv = None
self.leak_flux_matrix = None
for key in kwargs.keys():
assert key in ['class', 'module']
def initialize(self):
'''Initialize the population at the beginning of a simulation.
In turn, this method:
1) Initializes the voltage edges (self.edges) and probability mass in each bin (self.pv),
2) Creates an initial dictionary of inputs into the population, and
3) Resets the recorder that tracks firing rate during a simulation.
This method is called by the Simulation object (initialization method),
but can also be called by a user when defining an alternative time
stepping loop.
'''
self.initialize_edges()
self.initialize_probability() # TODO: different initialization options
if self.record == True: self.initialize_firing_rate_recorder()
self.initialize_callback(self)
def update(self):
'''Update the population one time step.
This method is called by the Simulation object to update the population
one time step. In turn, this method:
1) Calls the update_total_input_dict method to gather the current strengths of presynaptic input populations,
2) Calls the update_propability_mass method to propagate self.pv one time-step,
3) Calls the update_firing_rate method to compute the firing rate of the population based on flux over threshold, and
4) Calls the update_firing_rate_recorder method to register the current firing rate with the recorder.
'''
self.update_total_input_dict()
self.update_propability_mass()
self.update_firing_rate()
if self.record == True: self.update_firing_rate_recorder()
logger.debug('GID(%s) Firing rate: %3.2f' % (self.gid, self.curr_firing_rate))
self.update_callback(self)
def initialize_edges(self):
'''Initialize self.edges and self.leak_flux_matrix attributes.
This method initializes the self.edges attribute based on the v_min,
v_max, and dv settings, and creates a corresponding leak flux matrix
based on this voltage discretization.
'''
# Voltage edges and leak matrix construction
self.tau_m = util.discretize_if_needed(self.tau_m)
if np.sum(self.tau_m.xk <= 0) > 0:
raise Exception('Negative tau_m values detected: %s' % self.tau_m.xk) # pragma: no co
|
ver
# Voltage edges and leak matrix construction
self.edges = util.get_v_edges(self.v_min, self.v_max, self.dv)
# Different leak matrice
|
s for different solvers:
self.leak_flux_matrix_dict = {}
self.leak_flux_matrix_dict['dense'] = util.leak_matrix(self.edges, self.tau_m)
# Backward Euler sparse:
lfm_csrbe = sps.eye(np.shape(self.leak_flux_matrix_dict['dense'])[0], format='csr') - self.simulation.dt*self.leak_flux_matrix_dict['dense']
M_I, M_J = np.w
|
opencobra/memote
|
src/memote/experimental/growth.py
|
Python
|
apache-2.0
| 2,835
| 0.000353
|
# -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License
|
");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the sp
|
ecific language governing permissions and
# limitations under the License.
"""Provide an interface for growth experiments."""
from __future__ import absolute_import
import logging
from pandas import DataFrame
from memote.experimental.experiment import Experiment
__all__ = ("GrowthExperiment",)
LOGGER = logging.getLogger(__name__)
class GrowthExperiment(Experiment):
"""Represent a growth experiment."""
SCHEMA = "growth.json"
def __init__(self, **kwargs):
"""
Initialize a growth experiment.
Parameters
----------
kwargs
"""
super(GrowthExperiment, self).__init__(**kwargs)
def load(self, dtype_conversion=None):
"""
Load the data table and corresponding validation schema.
Parameters
----------
dtype_conversion : dict
Column names as keys and corresponding type for loading the data.
Please take a look at the `pandas documentation
<https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
for detailed explanations.
"""
if dtype_conversion is None:
dtype_conversion = {"growth": str}
super(GrowthExperiment, self).load(dtype_conversion=dtype_conversion)
self.data["growth"] = self.data["growth"].isin(self.TRUTHY)
def evaluate(self, model):
"""Evaluate in silico growth rates."""
with model:
if self.medium is not None:
self.medium.apply(model)
if self.objective is not None:
model.objective = self.objective
model.add_cons_vars(self.constraints)
growth = list()
for row in self.data.itertuples(index=False):
with model:
exchange = model.reactions.get_by_id(row.exchange)
if bool(exchange.reactants):
exchange.lower_bound = -row.uptake
else:
exchange.upper_bound = row.uptake
growth.append(model.slim_optimize() >= self.minimal_growth_rate)
return DataFrame({"exchange": self.data["exchange"], "growth": growth})
|
lastralab/Statistics
|
Specialization/Personal/Rtopy.py
|
Python
|
mit
| 484
| 0.002066
|
# -*- coding: utf-8 -*-
|
#!/usr/bin/python
# Author: Tania M. Molina
# UY - 2017
# MIT License
import math
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm
import scipy.stats as stats
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
import re
import scipy.stats
import matplotlib.pyplot as mlab
fhand = raw_input('Enter .csv file name or keyword: ')
data = pd.read_csv(fhand, header=0)
f
|
rame = pd.DataFrame(data)
|
korepwx/tfsnippet
|
tests/examples/utils/test_mlresult.py
|
Python
|
mit
| 1,216
| 0
|
import os
import unittest
import numpy as np
from tfsnippet.examples.utils import MLResults
from tfsnippet.utils import TemporaryDirectory
def head_of_file(path, n):
with open(path, 'rb') as f:
return f.read(n)
class MLResultTestCase(unittest.TestCase):
def test_imwrite(self):
with Tempora
|
ryDirectory() as tmpdir:
results = MLResults(tmpdir)
im = np.zeros([32, 32], dtype=np.uint8)
im[16:, ...] = 255
results.save_image('test.bmp', im)
file_path =
|
os.path.join(tmpdir, 'test.bmp')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 2), b'\x42\x4d')
results.save_image('test.png', im)
file_path = os.path.join(tmpdir, 'test.png')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 8),
b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a')
results.save_image('test.jpg', im)
file_path = os.path.join(tmpdir, 'test.jpg')
self.assertTrue(os.path.isfile(file_path))
self.assertEqual(head_of_file(file_path, 3), b'\xff\xd8\xff')
|
66eli77/kolibri
|
kolibri/logger/test/test_api.py
|
Python
|
mit
| 13,270
| 0.003693
|
"""
Tests that ensure the correct items are returned from api calls.
Also tests whether the users with permissions can create logs.
"""
import csv
import datetime
import uuid
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from kolibri.auth.models import DeviceOwner
from .factory_logger import (
FacilityFactory, FacilityUserFactory,
ContentSessionLogFactory, ContentSummaryLogFactory,
ContentRatingLogFactory, UserSessionLogFactory,
DUMMY_PASSWORD
)
from ..models import ContentSessionLog, ContentSummaryLog, ContentRatingLog, UserSessionLog
from ..serializers import ContentSessionLogSerializer, ContentSummaryLogSerializer, ContentRatingLogSerializer
class ContentSessionLogAPITestCase(APITestCase):
def setUp(self):
# create DeviceOwner to pass the setup_wizard middleware check
DeviceOwner.objects.create(username='test-device-owner', password=123)
self.facility = FacilityFactory.create()
self.admin = FacilityUserFactory.create(facility=self.facility)
self.user = FacilityUserFactory.create(facility=self.facility)
self.interaction_logs = [ContentSessionLogFactory.create(user=self.user) for _ in ra
|
nge(3)]
self.facility.add_admin(self.admin)
self.payload = {'user': self.user.pk,
'content_id': uuid.uuid4().hex,
'channel_id': uuid.uuid4().hex,
'kind': 'video',
'start_timestamp': str(datetime.date
|
time.now())}
def test_contentsessionlog_list(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.get(reverse('contentsessionlog-list'))
expected_count = ContentSessionLog.objects.count()
self.assertEqual(len(response.data), expected_count)
def test_contentsessionlog_detail(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
log_id = self.interaction_logs[0].id
response = self.client.get(reverse('contentsessionlog-detail', kwargs={"pk": log_id}))
log = ContentSessionLog.objects.get(pk=log_id)
interaction_serializer = ContentSessionLogSerializer(log)
self.assertEqual(response.data['content_id'], interaction_serializer.data['content_id'])
def test_admin_can_create_contentsessionlog(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_learner_can_create_contentsessionlog(self):
self.client.login(username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_anonymous_user_cannot_create_contentsessionlog_for_learner(self):
response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_anonymous_user_can_create_contentsessionlog(self):
del self.payload['user']
response = self.client.post(reverse('contentsessionlog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class ContentSummaryLogAPITestCase(APITestCase):
def setUp(self):
# create DeviceOwner to pass the setup_wizard middleware check
DeviceOwner.objects.create(username='test-device-owner', password=123)
self.facility = FacilityFactory.create()
self.admin = FacilityUserFactory.create(facility=self.facility)
self.user = FacilityUserFactory.create(facility=self.facility)
self.summary_logs = [ContentSummaryLogFactory.create(user=self.user) for _ in range(3)]
self.facility.add_admin(self.admin)
self.payload = {'user': self.user.pk,
'content_id': uuid.uuid4().hex,
'channel_id': uuid.uuid4().hex,
'kind': "video",
'start_timestamp': str(datetime.datetime.now())}
def test_summarylog_list(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.get(reverse('contentsummarylog-list'))
expected_count = ContentSummaryLog.objects.count()
self.assertEqual(len(response.data), expected_count)
def test_summarylog_detail(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
log_id = self.summary_logs[0].id
response = self.client.get(reverse('contentsummarylog-detail', kwargs={"pk": log_id}))
log = ContentSummaryLog.objects.get(pk=log_id)
summary_serializer = ContentSummaryLogSerializer(log)
self.assertEqual(response.data['content_id'], summary_serializer.data['content_id'])
def test_admin_can_create_summarylog(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.post(reverse('contentsummarylog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_learner_can_create_summarylog(self):
self.client.login(username=self.user.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.post(reverse('contentsummarylog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_anonymous_user_cannot_create_summarylog_for_learner(self):
response = self.client.post(reverse('contentsummarylog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class ContentRatingLogAPITestCase(APITestCase):
def setUp(self):
# create DeviceOwner to pass the setup_wizard middleware check
DeviceOwner.objects.create(username='test-device-owner', password=123)
self.facility = FacilityFactory.create()
self.admin = FacilityUserFactory.create(facility=self.facility)
self.user = FacilityUserFactory.create(facility=self.facility)
self.rating_logs = [ContentRatingLogFactory.create(user=self.user) for _ in range(3)]
self.facility.add_admin(self.admin)
self.payload = {'user': self.user.pk,
'content_id': uuid.uuid4().hex,
'channel_id': uuid.uuid4().hex}
def test_ratinglog_list(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.get(reverse('contentratinglog-list'))
expected_count = ContentRatingLog.objects.count()
self.assertEqual(len(response.data), expected_count)
def test_ratinglog_detail(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
log_id = self.rating_logs[0].id
response = self.client.get(reverse('contentratinglog-detail', kwargs={"pk": log_id}))
log = ContentRatingLog.objects.get(pk=log_id)
rating_serializer = ContentRatingLogSerializer(log)
self.assertEqual(response.data['content_id'], rating_serializer.data['content_id'])
def test_admin_can_create_ratinglog(self):
self.client.login(username=self.admin.username, password=DUMMY_PASSWORD, facility=self.facility)
response = self.client.post(reverse('contentratinglog-list'), data=self.payload, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_learner_can_create_ratinglog(self):
self.client.login(username=self.user.username, password=DUMMY_PASSW
|
benregn/cookiecutter-django-ansible
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/config/settings.py
|
Python
|
bsd-3-clause
| 15,037
| 0.001796
|
# -*- coding: utf-8 -*-
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
|
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
'django_exten
|
sions', # useful django extensions
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
INSTALLED_APPS += (
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# END APP CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
    # Note: This key is only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# END EMAIL CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.repo_name}}')
# END DATABASE CONFIGURATION
# CACHING
    # Do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# END CACHING
# GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
        # Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# End URL Configuration
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# END AUTHENTICATION CONFIGURATION
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
LOGIN_URL = "account_login"
# END Custom user app defaults
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
# END SLUGLIFIER
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangib
|
openstack/rally
|
rally/common/db/migrations/env.py
|
Python
|
apache-2.0
| 1,541
| 0
|
# Copyright (c) 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import context
from rally.common.db import api
from rally.common.db import models
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
target_metadata = models.BASE.metadata
# other values from the config, def
|
ined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = api.get_engine()
with engine.connect() as connection:
context.configure(connection=connection,
render_as_batch=True,
ta
|
rget_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
run_migrations_online()
|
jzitelli/python-gltf-experiments
|
OpenVRRenderer.py
|
Python
|
mit
| 4,971
| 0.004828
|
from ctypes import c_float, cast, POINTER
import numpy as np
import OpenGL.GL as gl
import openvr
from openvr.gl_renderer import OpenVrFramebuffer as OpenVRFramebuffer
from openvr.gl_renderer import matrixForOpenVrMatrix as matrixForOpenVRMatrix
from openvr.tracked_devices_actor import TrackedDevicesActor
import gltfutils as gltfu
c_float_p = POINTER(c_float)
class OpenVRRenderer(object):
def __init__(self, multisample=0, znear=0.1, zfar=1000):
self.vr_system = openvr.init(openvr.VRApplication_Scene)
w, h = self.vr_system.getRecommendedRenderTargetSize()
self.vr_framebuffers = (OpenVRFramebuffer(w, h, multisample=multisample),
OpenVRFramebuffer(w, h, multisample=multisample))
self.vr_compositor = openvr.VRCompositor()
if self.vr_compositor is None:
raise Exception('unable to create compositor')
self.vr_framebuffers[0].init_gl()
self.vr_framebuffers[1].init_gl()
poses_t = openvr.TrackedDevicePose_t * openvr.k_unMaxTrackedDeviceCount
self.poses = poses_t()
self.projection_matrices = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Left,
znear, zfar))),
np.asarray(matrixForOpenVRMatrix(self.vr_system.getProjectionMatrix(openvr.Eye_Right,
znear, zfar))))
self.eye_transforms = (np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Left)).I),
np.asarray(matrixForOpenVRMatrix(self.vr_system.getEyeToHeadTransform(openvr.Eye_Right)).I))
self.view = np.eye(4, dtype=np.float32)
self.view_matrices = (np.empty((4,4), dtype=np.float32),
np.empty((4,4), dtype=np.float32))
self.controllers = TrackedDevicesActor(self.poses)
self.controllers.show_controllers_only = False
self.controllers.init_gl()
self.vr_event = openvr.VREvent_t()
def render(self, gltf, nodes, window_size=(800, 600)):
self.vr_compositor.waitGetPoses(self.poses, openvr.k_unMaxTrackedDeviceCount, None, 0)
hmd_pose = self.poses[openvr.k_unTrackedDeviceIndex_Hmd]
if not hmd_pose.bPoseIsValid:
return
hmd_34 = np.ctypeslib.as_array(cast(hmd_pose.mDeviceToAbsoluteTracking.m, c_float_p),
shape=(3,4))
self.view[:3,:] = hmd_34
view = np.linalg.inv(self.view.T)
view.dot(self.eye_transforms[0], out=self.view_matrices[0])
view.dot(self.eye_transforms[1], out=self.view_matrices[1])
gl.glViewport(0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height)
for eye in (0, 1):
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.vr_framebuffers[eye].fb)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gltfu.set_material_state.current_material = None
gltfu.set_technique_state.current_technique = None
for node in nodes:
gltfu.draw_node(node, gltf,
projection_matrix=self.projection_matrices[eye],
view_matrix=self.view_matrices[eye])
self.controllers.display_gl(self.view_matrices[eye], self.projection_matrices[eye])
self.vr_compositor.submit(openvr.Eye_Left, self.vr_framebuffers[0].texture)
self.vr_compositor.submit(openvr.Eye_Right, self.vr_framebuffers[1].texture)
# mirror left eye framebuffer to screen:
gl.glBlitNamedFramebuffer(self.vr_framebuffers[0].fb, 0,
0, 0, self.vr_framebuffers[0].width, self.vr_framebuffers[0].height,
0, 0, window_size[0], window_size[1],
gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
gl.glB
|
indFramebuffer(gl.GL_FRAMEBUFFER, 0)
def process_input(self):
pass
# state = self.vr_system.getControllerState(1)
# if state and state.rAxis[1].x > 0.05:
# self.vr_system.triggerHapticPulse(1, 0, int(3200 * state.rAxis[1].x))
# state = self.vr_system.getControllerState(2)
# if state and state.rAxis[1].x > 0.05:
# self.vr_system.triggerHapticPulse(2, 0, int(3200 * state.rAxis[1].x))
# if self.vr_sys
|
tem.pollNextEvent(self.vr_event):
# if self.vr_event.eventType == openvr.VREvent_ButtonPress:
# pass #print('vr controller button pressed')
# elif self.vr_event.eventType == openvr.VREvent_ButtonUnpress:
# pass #print('vr controller button unpressed')
def shutdown(self):
self.controllers.dispose_gl()
openvr.shutdown()
|
gfyoung/pandas
|
pandas/core/generic.py
|
Python
|
bsd-3-clause
| 387,628
| 0.000689
|
from __future__ import annotations
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Hashable,
Literal,
Mapping,
Sequence,
cast,
overload,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import lib
from pandas._libs.tslibs import (
Period,
Tick,
Timestamp,
to_offset,
)
from pandas._typing import (
Axis,
CompressionOptions,
Dtype,
DtypeArg,
DtypeObj,
FilePathOrBuffer,
FrameOrSeries,
IndexKeyFunc,
IndexLabel,
JSONSerializable,
Level,
Manager,
RandomState,
Renamer,
StorageOptions,
T,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
final,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import (
AbstractMethodError,
InvalidIndexError,
)
from pandas.util._decorators import (
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_ascending,
validate_bool_kwarg,
validate_fillna_kwargs,
)
from pandas.core.dtypes.common import (
ensure_object,
ensure_platform_int,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
arraylike,
indexing,
missing,
nanops,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import (
create_series_with_explicit_dtype,
extract_array,
)
from pandas.core.describe import describe_ndframe
from pandas.core.flags import Flags
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
RangeIndex,
ensure_index,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
SingleArrayManager,
)
from pandas.core.internals.construction import mgr_to_mgr
from pandas.core.missing import find_valid_index
from pandas.core.ops import align_method_FRAME
from pandas.core.reshape.concat import concat
import pandas.core.sample as sample
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import get_indexer_indexer
from pandas.core.window import (
Expanding,
ExponentialMovingWindow,
Rolling,
Window,
)
from pandas.io.formats import format as fmt
from pandas.io.formats.format import (
DataFrameFormatter,
DataFrameRenderer,
)
from pandas.io.formats.printing import pprint_thing
if TYPE_CHECKING:
from pandas._libs.tslibs import BaseOffset
from pandas.core.frame import DataFrame
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.indexers import BaseIndexer
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = {**_shared_docs}
_shared_doc_kwargs = {
"axes": "keywords for axes",
"klass": "Series/DataFrame",
"axes_single_arg": "int or labels for object",
"args_transpose": "axes to permute (int or label for object)",
"inplace": """
inplace : bool, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, indexing.IndexingMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names: list[str] = [
"_mgr",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
"_flags",
]
_internal_names_set: set[str] = set(_internal_names)
_accessors: set[str] = set()
_hidden_attrs: frozenset[str] = frozenset(
["_AXIS_NAMES", "_AXIS_NUMBERS", "get_values", "tshift"]
)
_metadata: list[str] = []
_is_copy: weakref.ReferenceType[NDFrame] | None = None
_mgr: Manager
_attrs: dict[Hashable, Any]
_typ: str
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: Manager,
copy: bool_t = False,
attrs: Mapp
|
ing[Hashable, Any] | None = None,
):
# copy kwarg is retained for mypy compat, is not used
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_mgr", data)
object.
|
__setattr__(self, "_item_cache", {})
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
object.__setattr__(self, "_attrs", attrs)
object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))
@classmethod
def _init_mgr(
cls,
mgr: Manager,
axes,
dtype: Dtype | None = None,
copy: bool_t = False,
) -> Manager:
"""passed a manager and a axes dict"""
for a, axe in axes.items():
if axe is not None:
axe = ensure_index(axe)
bm_axis = cls._get_block_manager_axis(a)
mgr = mgr.reindex_axis(axe, axis=bm_axis)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if (
isinstance(mgr, BlockManager)
and len(mgr.blocks) == 1
and is_dtype_equal(mgr.blocks[0].values.dtype, dtype)
):
pass
else:
mgr = mgr.astype(dtype=dtype)
return mgr
@classmethod
def _from_mgr(cls, mgr: Manager):
"""
Fastpath to create a new DataFrame/Series from just a BlockManager/ArrayManager.
Notes
-----
Skips setting `_flags` attribute; caller is responsible for doing so.
"""
obj = cls.__new__(cls)
object.__setattr__(obj, "_is_copy", None)
object.__setattr__(obj, "_mgr", mgr)
object.__setattr__(obj, "_item_cache", {})
object.__setattr__(obj, "_attrs", {})
return obj
def _as_manager(
self: FrameOrSeries, typ: str, copy: bool_t = True
) -> FrameOrSeries:
"""
Private helper function to create a DataFrame with specific manager.
Parameters
----------
typ : {"block", "array"}
copy : bool, default True
Only controls whether the conversion from Block->ArrayManager
copies the 1D arrays (to ensure proper/contiguous memory layout).
Returns
-------
DataFrame
New DataFrame using specified manager type. Is not guaranteed
to be a copy or not.
"""
new_mgr: Manager
new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy)
# fastpath of passing a manager doesn't check the option/manager class
retu
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/WGL/ARB/robustness_application_isolation.py
|
Python
|
lgpl-3.0
| 867
| 0.008074
|
'''OpenGL extension ARB.robustness_application_isolation
This module customises the behaviour of the
OpenGL.raw.WGL.ARB.robustness_application_isolation to provide a more
Python-friendly API
The official definition of this extension is available here:
http://ww
|
w.opengl.org/registry/specs/ARB/robustness_application_isolation.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.
|
WGL.ARB.robustness_application_isolation import *
from OpenGL.raw.WGL.ARB.robustness_application_isolation import _EXTENSION_NAME
def glInitRobustnessApplicationIsolationARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
|
edx/edx-organizations
|
organizations/models.py
|
Python
|
agpl-3.0
| 2,793
| 0.002148
|
"""
Database ORM models managed by this Django app
Please do not integrate directly with these models!!! This app currently
offers one programmatic API -- api.py for direct Python integration.
"""
import re
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _
from model_utils.models import TimeStampedModel
from simple_history.models import HistoricalRecords
class Organization(TimeStampedModel):
"""
An Organization is a representation of an entity which publishes/provides
one or more courses delivered by the LMS. Organizations have a base set of
metadata describing the organization, including id, name, and description.
"""
name = models.CharField(max_length=255, db_index=True)
short_name = models.CharField(
max_length=255,
unique=True,
verbose_name='Short Name',
help_text=_(
'Unique, short string identifier for organization. '
'Please do not use spaces or special characters. '
'Only allowed special characters are period (.), hyphen (-) and underscore (_).'
),
)
description = models.TextField(null=True, blank=True)
logo = models.ImageField(
upload_to='organization_logos',
help_text=_('Please add only .PNG files for logo images. This logo will be used on certificates.'),
null=True, blank=True, max_length=255
)
active = models.BooleanField(default=True)
history = HistoricalRecords()
def __str__(self):
return f"{self.name} ({self.short_name})"
def clean(self):
if not re.match("^[a-zA-Z0-9._-]*$", self.short_name):
raise ValidationError(_('Please do not use spaces or special characters in the short name '
'field. Only allowed special characters are period (.), hyphen (-) '
'and underscore (_).'))
class OrganizationCourse(TimeStampedModel):
"""
An OrganizationCourse represents the link between an Organization and a
Course (via course key). Because Courses are not true Open edX entities
(in the Django/ORM sense) the modeling and integrity is limited to that
of specifying course identifier strings in this model.
"""
course_id = models.CharField(max_length=255, db_index=True, verbos
|
e_name='Course ID')
organization = models.ForeignKey(Organization, db_index=True, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
history = HistoricalRecords()
class Meta:
""" Meta class for this Django model """
unique_together = (('course_id', 'organization'),)
|
verbose_name = _('Link Course')
verbose_name_plural = _('Link Courses')
|
jadsonjs/DataScience
|
DeepLearning/keras/hello_world.py
|
Python
|
apache-2.0
| 12
| 0.083333
|
im
|
port keras
| |
froyobin/ceilometer
|
ceilometer/tests/ipmi/test_manager.py
|
Python
|
apache-2.0
| 1,263
| 0
|
# Copyright 2014 Intel Corp.
#
# Author: Zhai Edwin <edwin.zhai@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/ipmi/
|
manager.py
"""
from ceilometer.ipmi import manager
from ceilometer.tests import agentbase
import mock
from oslotest import base
class TestManager(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_load_plugins(self):
mgr = manager.AgentManager()
sel
|
f.assertIsNotNone(list(mgr.pollster_manager))
class TestRunTasks(agentbase.BaseAgentManagerTestCase):
@staticmethod
def create_manager():
return manager.AgentManager()
def setUp(self):
self.source_resources = True
super(TestRunTasks, self).setUp()
|
kdj0c/onepagepoints
|
onepagebatch.py
|
Python
|
mit
| 17,621
| 0.002213
|
#!/usr/bin/env python3
"""
Copyright 2017 Jocelyn Falempe kdj0c@djinvi.net
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from onepagepoints import *
import yaml
import os
import copy
import argparse
import pathlib
from string import ascii_uppercase
from collections import OrderedDict
# return pretty string for points
def points(n):
if n == 0:
return "Free"
if n == 1:
return "1 pt"
return '{} pts'.format(n)
# return pretty string for duplicates weapons
def pCount(n):
if n < 2:
return ''
return '{}x '.format(n)
# Return unit name and count if more than one
def prettyName(unit):
if unit.count > 1:
return unit.name + ' [{0}]'.format(unit.count)
return unit.name
# Return a pretty string of the list of equipments
def PrettyEquipments(equipments):
equWithCount = list(OrderedDict.fromkeys([(equ, equipments.count(equ)) for equ in equipments]))
return [pCount(c) + e.name + ' ' + e.Profile() for e, c in equWithCount]
class Upgrade:
def __init__(self, batch, faction):
armory = faction.armory
self.getFactionCost = faction.getFactionCost
self.all = batch.get('all', False)
self.text = batch['text']
self.preremove = armory.get(batch.get('pre-remove', {}))
self.preadd = armory.get(batch.get('pre-add', {}))
self.remove = armory.get(batch.get('remove', {}))
self.add = [armory.get(up_add) for up_add in batch['add']]
self.rawcost = []
# Calculate the cost of an upgrade on a unit
# If the upgrade is only for one model, set the unit count to 1
# remove equipment, add new equipment and calculate the new cost.
def Cost_unit(self, unit):
base_unit = copy.copy(unit)
if not self.all:
base_unit.SetCount(1)
base_unit.RemoveEquipments(self.preremove)
base_unit.AddEquipments(self.preadd)
base_unit.SetFactionCost(self.getFactionCost(base_unit))
prev_cost = base_unit.cost
base_unit.RemoveEquipments(self.remove)
costs = []
for upgrade in self.add:
new_unit = copy.copy(base_unit)
new_unit.AddEquipments(upgrade)
new_unit.SetFactionCost(self.getFactionCost(new_unit))
up_cost = new_unit.cost - prev_cost
costs.append(up_cost)
# print('Cost for unit {}: {}'.format(unit.name, costs))
return costs
# an upgrade group cost is calculated for all units who have access to this
# upgrade group, so calculate the mean
def Cost(self, units):
u_count = len(units)
cost = [0] * len(self.add)
for unit in units:
cost = [x + y for x, y in zip(cost, self.Cost_unit(unit))]
self.cost = [int(round(c / u_count)) for c in cost]
# print('Cost for all units: {}'.format(self.cost))
return self.cost
class UpgradeGroup(list):
def __init__(self, ydata, faction):
if 'units' not in ydata:
print('Upgrade group Error, should have a "units" section {}'.format(ydata))
return
self.units = ydata['units']
super().__init__([Upgrade(upgrade, faction) for upgrade in ydata['upgrades']])
self.name = ''
class Faction():
def __init__(self, name):
global armory
self.name = name
self.armory = Armory()
armory = self.armory
self.pages = []
self._parse_yaml()
def _read_yaml(self, filename, path):
fname = os.path.join(path, filename)
with open(fname, "r") as f:
print(' Processing {}'.format(fname))
return yaml.load(f.read())
def _parse_yaml(self):
yfaction = self._read_yaml('faction.yml', self.name)
self.title = yfaction['title']
if os.path.exists(os.path.join('Common', 'equipments.yml')):
yequipments = self._read_yaml('equipments.yml', 'Common')
self.armory.add([Weapon(name, **w) for name, w in yequipments['weapons'].items()])
yequipments = self._read_yaml("equipments.yml", self.name)
self.armory.add([Weapon(name, **w) for name, w in yequipments['weapons'].items()])
self.armory.add([WarGear.from_dict(name, wargear, self.armory) for name, wargear in yequipments['wargear'].items()])
self.factionRules = yequipments['factionRules']
allFiles = os.listdir(self.name)
yunits = self._read_yaml('units.yml', self.name)
yupgrades = self._read_yaml('upgrades.yml', self.name)
units = [Unit.from_dict(yunit, self.armory
|
) for yunit in yunits]
upgrades = [UpgradeGroup(up_group, self) for up_group in yupgrades]
for unit in units:
unit.SetFactionCost(self.getFactionCost(unit))
for g, group in enumerate(upgrades):
affected_units = [unit for unit in units if unit.name in group.units]
if len(affected_units) < len(group.units):
                print('Error: units in upgrade group not found {}'.format(group.units))
return
for unit in af
|
fected_units:
unit.upgrades.append(group)
for upgrade in group:
upgrade.Cost(affected_units)
pages = yfaction.get('pages')
if len(pages) == 1:
spRules = yfaction.get('specialRules', None)
psychics = yfaction.get('psychics', None)
for p, page in enumerate(pages):
# TODO order should come from pages, not from units
punits = [unit for unit in units if unit.name in page]
pugrades = [group for group in upgrades if set(group.units) & set(page)]
for g, group in enumerate(pugrades):
group.name = ascii_uppercase[g]
spRules = yfaction.get('specialRules' + str(p + 1), None)
psychics = yfaction.get('psychics' + str(p + 1), None)
self.pages.append((punits, pugrades, spRules, psychics))
# Get hardcoded cost for per-faction special rules.
def getFactionCost(self, unit):
return sum([self.factionRules[r] for r in unit.specialRules + unit.wargearSp if r in self.factionRules])
class DumpTxt:
def __init__(self):
self.data = []
def _addUnit(self, unit):
data = ['{0} {1} {2}+'.format(prettyName(unit), str(unit.quality), str(unit.basedefense))]
data += [', '.join(PrettyEquipments(unit.equipments))]
data += [", ".join(unit.specialRules)]
data += [", ".join([group.name for group in unit.upgrades])]
data += [points(unit.cost)]
return '\n'.join([d for d in data if d])
def addUnits(self, units):
self.data += [self._addUnit(unit) for unit in units]
def _getUpLine(self, equ, cost):
return ', '.join(PrettyEquipments(equ)) + ' ' + points(cost)
def _getUpGroup(self, group, upgrades):
data = ''
preamble = group + ' | '
ret = []
for up in upgrades:
ret += [preamble + up.text + ':']
ret += [self._getUpLine(addEqu, up.cost[i]) for i, addEqu in enumerate(up.add)]
preamble = ''
return data + '\n'.join(ret)
def addUpgrades(self, u
|
nicolasm/lastfm-export
|
queries/tops.py
|
Python
|
mit
| 6,184
| 0.000809
|
from lfmconf.lfmconf import get_lastfm_conf
query_play_count_by_month = """
select * from view_play_count_by_month v
where substr(v.yr_month, 1, 4) =
"""
query_top_with_remaining = """
with top as (
{query_top}
),
total_count as (
{query_play_count}
)
select t.*
from top t
"""
query_top_artists_with_remaining = query_top_with_remaining + \
"""
union all
select 'Remaining artists' as artist_name,
((select tc.play_count from total_count tc)
-
(select sum(play_count) from top)) as play_count
"""
query_top_albums_with_remaining = query_top_with_remaining + \
"""
union all
select 'Remaining albums' as album_name,
'...' as artist_name,
((select tc.play_count from total_count tc)
-
(select sum(play_count) from top)) as play_count
"""
query_top_tracks_with_remaining = query_top_with_remaining + \
"""
union all
select 'Remaining tracks' as track_name,
'...' as artist_name,
'...' as album_name,
((select tc.play_count from total_count tc)
-
(select sum(play_count) from top)) as play_count
"""
query_top_artists = """
select p.artist_name,
count(p.id) as play_count
from play p
where p.artist_name not like 'VA %'
{condition}
group by p.artist_name
order by count(p.id) desc
"""
query_top_albums = """
select p.album_name,
p.artist_name,
count(p.id) as play_count
from play p
where 1 = 1
{condition}
group by p.album_name, p.artist_name
order by count(p.id) desc
"""
query_top_tracks = """
select p.track_name,
p.artist_name,
p.album_name,
co
|
unt(p.id) as play_count
from play p
where 1 = 1
{condition}
group by p.track_name, p.artist_name, p.album_name
order by count(p.id) desc
"""
query_play_count = """
select count(p.id) as play_count
from play p
where 1 = 1
{condition}
"""
conf = get_lastfm_conf()
dbms = conf['lastfm']['db']['dbms']
def build_query_play_coun
|
t_by_month():
if dbms == 'mysql':
return query_play_count_by_month + '%s'
elif dbms == 'sqlite':
return query_play_count_by_month + '?'
def build_query_play_count_for_duration(duration):
condition = build_duration_condition(duration)
return query_play_count.format(condition=condition)
def build_query_top_artists_for_duration_with_remaining(duration):
query_top = build_query_top_artists_for_duration(duration)
query_count = build_query_play_count_for_duration(duration)
return query_top_artists_with_remaining.format(query_top=query_top,
query_play_count=query_count)
def build_query_top_artists_for_duration(duration):
condition = build_duration_condition(duration)
return query_top_artists.format(condition=condition) + add_limit()
def add_limit():
clause = 'limit '
if dbms == 'mysql':
clause += '%s'
elif dbms == 'sqlite':
clause += '?'
return clause
def build_query_top_albums_for_duration_with_remaining(duration):
query_top = build_query_top_albums_for_duration(duration)
query_count = build_query_play_count_for_duration(duration)
return query_top_albums_with_remaining.format(query_top=query_top,
query_play_count=query_count)
def build_query_top_albums_for_duration(duration):
condition = build_duration_condition(duration)
return query_top_albums.format(condition=condition) + add_limit()
def build_query_top_tracks_for_duration_with_remaining(duration):
query_top = build_query_top_tracks_for_duration(duration)
query_count = build_query_play_count_for_duration(duration)
return query_top_tracks_with_remaining.format(query_top=query_top,
query_play_count=query_count)
def build_query_top_tracks_for_duration(duration):
condition = build_duration_condition(duration)
return query_top_tracks.format(condition=condition) + add_limit()
def build_query_play_count_for_year():
condition = build_year_condition()
return query_play_count.format(condition=condition)
def build_query_top_artists_for_year_with_remaining():
query_top = build_query_top_artists_for_year()
query_count = build_query_play_count_for_year()
return query_top_artists_with_remaining.format(query_top=query_top,
query_play_count=query_count)
def build_query_top_artists_for_year():
condition = build_year_condition()
return query_top_artists.format(condition=condition) + add_limit()
def build_query_top_albums_for_year_with_remaining():
query_top = build_query_top_albums_for_year()
query_count = build_query_play_count_for_year()
return query_top_albums_with_remaining.format(query_top=query_top,
query_play_count=query_count)
def build_query_top_albums_for_year():
condition = build_year_condition()
return query_top_albums.format(condition=condition) + add_limit()
def build_query_top_tracks_for_year_with_remaining():
query_top = build_query_top_tracks_for_year()
query_count = build_query_play_count_for_year()
return query_top_tracks_with_remaining.format(query_top=query_top,
query_play_count=query_count)
def build_query_top_tracks_for_year():
condition = build_year_condition()
return query_top_tracks.format(condition=condition) + add_limit()
def build_duration_condition(duration):
condition = ''
if duration.isdigit():
if dbms == 'mysql':
condition = 'and p.play_date > now() + interval - %s day'
elif dbms == 'sqlite':
condition =\
'and date(p.play_date) > date(\'now\', \'-\' || ? || \' day\')'
return condition
def build_year_condition():
condition = ''
if dbms == 'mysql':
condition = 'and year(p.play_date) = %s'
elif dbms == 'sqlite':
condition = 'and strftime(\'%Y\', p.play_date) = ?'
return condition
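# --- Illustrative usage sketch (not part of the original module) ---
# The builders above return parameterised SQL whose placeholder style follows
# the configured dbms ('?' for sqlite, '%s' for mysql); parameters are bound in
# the order the placeholders appear: the duration/year filter first, then the
# limit appended by add_limit(). Assuming a sqlite backend and a hypothetical
# database file:
#
#   import sqlite3
#   conn = sqlite3.connect('lastfm.db')
#   sql = build_query_top_artists_for_duration('30')
#   rows = conn.execute(sql, ('30', 10)).fetchall()  # last 30 days, top 10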
|
epinna/tplmap
|
plugins/engines/dot.py
|
Python
|
gpl-3.0
| 2,139
| 0.013558
|
from utils.strings import quote
from plugins.languages import javascript
from utils.loggers import log
from utils import rand
import base64
import re
class Dot(javascript.Javascript):
def init(self):
self.update_actions({
'render' : {
'render': '{{=%(code)s}}',
'header': '{{=%(header)s}}',
'trailer': '{{=%(trailer)s}}'
},
'write' : {
'call' : 'inject',
'write' : """{{=global.process.mainModule.require('fs').appendFileSync('%(path)s', Buffer('%(chunk_b64)s', 'base64'), 'binary')}}""",
'truncate' : """{{=global.process.mainModule.require('fs').writeFileSync('%(path)s', '')}}"""
},
'read' : {
'call': 'evaluate',
'read' : """global.process.mainModule.require('fs').readFileSync('%(path)s').toString('base64');"""
},
'md5' : {
'call': 'evaluate',
'md5': """global.process.mainModule.require('crypto').createHash('md5').update(global.process.mainModule.require('fs').readFileSync('%(path)s')).digest("hex");"""
},
'evaluate' : {
            'test_os': """global.process.mainModule.require('os').platform()""",
},
'execute' : {
'call': 'evaluate',
'execute': """global.process.mainModule.require('child_process').execSync(Buffer('%(code_b64)s', 'base64').toString());"""
},
'execute_blind' : {
# The bogus prefix is to avoid false detection of Javascript instead of doT
'call': 'inject',
'execute_blind': """{{=''}}{{global.process.mainModule.require('child_process').execSync(Buffer('%(code_b64)s', 'base64').toString() + ' && sleep %(delay)i');}}"""
},
})
self.set_contexts([
# Text context, no closures
{ 'level': 0 },
{ 'level': 1, 'prefix': '%(closure)s;}}', 'suffix' : '{{1;', 'closures' : javascript.ctx_closures },
])
|
sentriz/steely
|
steely/plugins/flirty.py
|
Python
|
gpl-3.0
| 1,575
| 0.001297
|
'''Dirty talk like you're in Dundalk'''
import random
import re
import string
__author__ = ('iandioch')
COMMAND = 'flirt'
PHRASES = [
"rawr~, {s}{sep}",
"{s}, big boy{sep}",
"{s} xo",
"{s} bb{sep}",
"babe, {s}{sep}",
"hey xxx {s}{sep}",
"{s} xxx",
"{s} xx",
"{s} xo",
"{s} xoxo",
"hot stuff, {s}{sep}",
"{s} bbz{sep}",
"{s} 8==)",
"i'm horny. {s}{sep}",
"do you want to come over tonight..? {s}{sep}",
"my parents aren't home, {s}{sep}",
"{s} ;)",
"{s} 🍆",
"{s} 🍆🍆",
"{s} 🍑",
"{s} 🍌",
"{s} 💦💦💦",
"{s} 👅",
"{s} 😘😘",
"{s}, cutie{sep}",
"{s}, you absolute babe",
"{s} later???",
]
def flirt(message):
if len(message) <= 1:
return ''
for sep in '.!?':
s, sepfound, after = message.partition(sep)
numspace = len(s) - len(s.lstrip())
s = ' ' * numspace + \
            random.choice(PHRASES).format(s=s.lstrip().lower(), sep=sepfound)
return s + flirt(after)
return message
def main(bot, author_id, message, thread_id, thread_type, **kwargs):
message = bot.fetchThreadMessages(thread_id=thread_id, limit=2)[1]
sauce = flirt(message.text)
bot.sendMessage(sauce, thread_id=thread_id, thread_type=thread_type)
if __name__ == '__main__':
print(flirt('hey brandon do you have a minute'))
print(flirt('I need to talk to you about our lord and saviour steely for a minute. Please brandon.'))
print(flirt('Fine then'))
print(flirt('Your API was shit anyway'))
|
tiborsimko/invenio-ext
|
invenio_ext/jasmine/registry.py
|
Python
|
gpl-2.0
| 2,838
| 0.000705
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry for Jasmine spec files."""
import os
import re
from flask_registry import RegistryProxy
from werkzeug.utils import import_string
from invenio_ext.registry import DictModuleAutoDiscoverySubRegistry
class JasmineSpecsAutoDiscoveryRegistry(DictModuleAutoDiscoverySubRegistry):
"""Registry for Jasmine spec files.
Looks into /testsuite/js/*.spec.js in each module.
"""
pattern = re.compile("(?:.+\.js$)|(?:.+\.html$)")
specs_folder = 'js'
def __init__(self, *args, **kwargs):
"""Initialize registry."""
super(JasmineSpecsAutoDiscoveryRegistry, self).__init__(
'testsuite', **kwargs
)
def keygetter(self, key, original_value, new_value):
"""No key mapping."""
return key
def _walk_dir(self, pkg, base, root):
"""Recursively register *.spe
|
c.js/*.js files."""
for root, dirs, files in os.walk(root):
for name in files:
                if JasmineSpecsAutoDiscoveryRegistry.pattern.match(name):
filename = os.path.join(root, name)
filepath = "{0}/{1}".format(
pkg,
filename[len(base) + 1:]
)
self.register(filename, key=filepath)
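    # For example (hypothetical package name): a spec file
    # <site-packages>/invenio_search/testsuite/js/search.spec.js is registered
    # under the key "invenio_search/search.spec.js", i.e. the path relative to
    # the js/ folder prefixed with the package name.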
def _discover_module(self, pkg):
"""Load list of files from resource directory."""
import_str = pkg + '.' + self.module_name
try:
module = import_string(import_str, silent=self.silent)
if module is not None:
for p in module.__path__:
specsfolder = os.path.join(p, self.specs_folder)
if os.path.isdir(specsfolder):
self._walk_dir(pkg, specsfolder, specsfolder)
except ImportError as e: # pylint: disable=C0103
self._handle_importerror(e, pkg, import_str)
except SyntaxError as e:
self._handle_syntaxerror(e, pkg, import_str)
specs = RegistryProxy("jasmine.specs", JasmineSpecsAutoDiscoveryRegistry)
|
Huyuwei/tvm
|
tests/python/relay/test_vm.py
|
Python
|
apache-2.0
| 17,272
| 0.003648
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.testing.config import ctx_list
from tvm.relay.prelude import Prelude
import pytest
def check_result(args, expected_result, mod=None):
"""
Check that evaluating `expr` applied to the arguments produces
`result` on Relay VM.
Parameters
----------
args: list of Expr
The arguments to supply the expr.
expected_result:
The expected result of running the expression.
"""
for target, ctx in ctx_list():
vm = relay.create_executor('vm', ctx=ctx, target=target, mod=mod)
rts_result = vm.evaluate()(*args)
tvm.testing.assert_allclose(expected_result, rts_result.asnumpy())
def veval(f, *args, ctx=tvm.cpu(), target="llvm"):
if isinstance(f, relay.Expr):
mod = relay.Module()
mod["main"] = f
compiler = relay.vm.VMCompiler()
vm = compiler.compile(mod, target)
vm.init(tvm.cpu())
return vm.invoke("main", *args)
else:
assert isinstance(f, relay.Module), "expected expression or module"
mod = f
compiler = relay.vm.VMCompiler()
vm = compiler.compile(mod, target)
vm.init(tvm.cpu())
ret = vm.invoke("main", *args)
return ret
def vmobj_to_list(o):
if isinstance(o, tvm.relay.backend.vmobj.TensorObject):
return [o.asnumpy().tolist()]
elif isinstance(o, tvm.relay.backend.vmobj.DatatypeObject):
result = []
for f in o:
result.extend(vmobj_to_list(f))
return result
else:
raise RuntimeError("Unknown object type: %s" % type(o))
def test_split():
x = relay.var('x', shape=(12,))
y = relay.split(x, 3, axis=0).astuple()
f = relay.Function([x], y)
x_data = np.random.rand(12,).astype('float32')
res = veval(f, x_data)
ref_res = np.split(x_data, 3, axis=0)
for i in range(3):
tvm.testing.assert_allclose(res[i].asnumpy(), ref_res[i])
def test_split_no_fuse():
x = relay.var('x', shape=(12,))
y = relay.split(x, 3, axis=0).astuple()
z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
z = relay.annotation.stop_fusion(z)
    f = relay.Function([x], z)
    x_data = np.random.rand(12,).astype('float32')
res = veval(f, x_data)
tvm.testing.assert_allclose(res.asnumpy(), np.split(x_data, 3, axis=0)[0])
def test_id():
x = relay.var('x', shape=(10, 10), dtype='float64')
f = relay.Function([x], x)
x_data = np.random.rand(10, 10).astype('float64')
mod = relay.Module()
mod["main"] = f
check_result([x_data], x_data, mod=mod)
def test_op():
x = relay.var('x', shape=(10, 10))
f = relay.Function([x], x + x)
x_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([x_data], 2 * x_data, mod=mod)
def any(x):
x = relay.op.nn.batch_flatten(x)
return relay.op.min(x, axis=[0, 1])
def test_cond():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(10, 10))
# f = relay.Function([x, y], relay.op.equal(x, y))
f = relay.Function([x, y], any(relay.op.equal(x, y)))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
# same
check_result([x_data, x_data], True, mod=mod)
# diff
check_result([x_data, y_data], False, mod=mod)
def test_simple_if():
x = relay.var('x', shape=(10, 10))
y = relay.var('y', shape=(10, 10))
f = relay.Function([x, y],
relay.If(any(relay.op.equal(x, y)), x, y))
x_data = np.random.rand(10, 10).astype('float32')
y_data = np.random.rand(10, 10).astype('float32')
mod = relay.Module()
mod["main"] = f
# same
check_result([x_data, x_data], x_data, mod=mod)
# diff
check_result([x_data, y_data], y_data, mod=mod)
def test_simple_call():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
sb.ret(i)
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('iarg', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg], sum_up(iarg))
check_result([i_data], i_data, mod=mod)
def test_count_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, dtype='int32'))):
sb.ret(i)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, dtype='int32'))
rec_call = relay.Call(sum_up, [one_less])
sb.ret(relay.add(rec_call, i))
func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], 'int32'))
mod[sum_up] = func
i_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg], sum_up(iarg))
result = veval(mod, i_data)
tvm.testing.assert_allclose(result.asnumpy(), i_data)
check_result([i_data], i_data, mod=mod)
def test_sum_loop():
mod = relay.module.Module({})
sum_up = relay.GlobalVar('sum_up')
i = relay.var('i', shape=[], dtype='int32')
accum = relay.var('accum', shape=[], dtype='int32')
sb = ScopeBuilder()
with sb.if_scope(relay.equal(i, relay.const(0, 'int32'))):
sb.ret(accum)
with sb.else_scope():
one_less = relay.subtract(i, relay.const(1, 'int32'))
new_accum = relay.add(accum, i)
sb.ret(relay.Call(sum_up, [one_less, new_accum]))
func = relay.Function([i, accum], sb.get())
mod[sum_up] = func
loop_bound = 0
i_data = np.array(loop_bound, dtype='int32')
accum_data = np.array(0, dtype='int32')
iarg = relay.var('i', shape=[], dtype='int32')
aarg = relay.var('accum', shape=[], dtype='int32')
mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod)
def test_tuple_fst():
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var('tup', type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 0))
i_data = np.random.rand(41).astype('float32')
j_data = np.random.rand(10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([(i_data, j_data)], i_data, mod=mod)
def test_tuple_second():
ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
tup = relay.var('tup', type_annotation=ttype)
f = relay.Function([tup], relay.TupleGetItem(tup, 1))
i_data = np.random.rand(41).astype('float32')
j_data = np.random.rand(10).astype('float32')
mod = relay.Module()
mod["main"] = f
check_result([(i_data, j_data)], j_data, mod=mod)
def test_list_constructor():
mod = relay.Module()
p = Prelude(mod)
nil = p.nil
cons = p.cons
l = p.l
one2 = cons(relay.const(1), nil())
one3 = cons(relay.const(2), one2)
one4 = cons(relay.const(3), one3)
f = relay.Function([], one4)
mod["main"] = f
result = veval(mod)
assert len(result) == 2
assert len(result[1]) == 2
o
|
tajkhan/pluto-pocc
|
annotations/module/loop/submodule/permut/transformator.py
|
Python
|
gpl-3.0
| 4,926
| 0.003857
|
#
# Contain the transformation procedure
#
import sys
import module.loop.ast
#-----------------------------------------
def __makeForLoop(id, lbound, ubound, stride, loop_body):
'''Generate a for loop:
for (id=lbound; id<=ubound; id=id+stride)
loop_body'''
init_exp = None
test_exp = None
iter_exp = None
if lbound:
init_exp = module.loop.ast.BinOpExp(id.replicate(),
lbound.replicate(),
module.loop.ast.BinOpExp.EQ_ASGN)
if ubound:
test_exp = module.loop.ast.BinOpExp(id.replicate(),
ubound.replicate(),
module.loop.ast.BinOpExp.LE)
if stride:
it = module.loop.ast.BinOpExp(id.replicate(),
stride.replicate(),
module.loop.ast.BinOpExp.ADD)
iter_exp = module.loop.ast.BinOpExp(id.replicate(),
it,
module.loop.ast.BinOpExp.EQ_ASGN)
return module.loop.ast.ForStmt(init_exp, test_exp, iter_exp, loop_body.replicate())
#-------
|
----------------------------------
def transform(stmt, arg_info):
'''Perform code transformation'''
# extract argument information
loop_order, = arg_info
# get rid of compound statement that contains only a single statement
while isinstance(stmt, module.loop.ast.CompStmt) and len(stmt.stmts) == 1:
stmt = stmt.stmts[0]
# insert loop order information into a hashtable
loop_info = {}
for index_name, is_optional in loop_order:
loop_info[index_name] = [is_optional]
# create loop order (get rid of all optionality information)
loop_order = [iname for iname, opt in loop_order]
# extract loop control information and get the loop body
loop_body = None
cur_stmt = stmt
unseen_loops = loop_order[:]
seen_loops = []
while True:
if isinstance(cur_stmt, module.loop.ast.CompStmt) and len(cur_stmt.stmts) == 1:
cur_stmt = cur_stmt.stmts[0]
continue
is_optional_list = [loop_info[i][0] for i in unseen_loops]
all_unseen_optional = reduce(lambda x,y: x and y, is_optional_list, True)
if isinstance(cur_stmt, module.loop.ast.ForStmt) and not cur_stmt.init:
print ('error:%s:Permut: a loop is assumed to have a non-empty init exp'
% (cur_stmt.line_no))
sys.exit(1)
if (isinstance(cur_stmt, module.loop.ast.ForStmt) and
isinstance(cur_stmt.init, module.loop.ast.BinOpExp) and
cur_stmt.init.op_type == module.loop.ast.BinOpExp.EQ_ASGN and
isinstance(cur_stmt.init.lhs, module.loop.ast.IdentExp)):
iname = cur_stmt.init.lhs.name
if iname in seen_loops:
if all_unseen_optional:
loop_body = cur_stmt
break
else:
print ('error:%s: loop "%s" cannot occur repeatedly'
% (cur_stmt.line_no, iname))
sys.exit(1)
if iname not in unseen_loops:
if all_unseen_optional:
loop_body = cur_stmt
break
else:
print ('error:%s: loop "%s" is not specified in the loop order %s'
% (cur_stmt.line_no, iname, tuple(loop_order)))
sys.exit(1)
linfo = loop_info[iname]
linfo.append(cur_stmt.init)
linfo.append(cur_stmt.test)
linfo.append(cur_stmt.iter)
unseen_loops.remove(iname)
seen_loops.append(iname)
cur_stmt = cur_stmt.stmt
else:
if all_unseen_optional:
loop_body = cur_stmt
break
else:
unfound_loops = filter(lambda x: not loop_info[x][0], unseen_loops)
unfound_loops = tuple(unfound_loops)
print ('error:%s: to-be-permuted loops %s do not exist'
% (stmt.line_no, unfound_loops))
sys.exit(1)
# generate the permuted loop
transformed_stmt = loop_body
rev_loop_order = loop_order[:]
rev_loop_order.reverse()
for iname in rev_loop_order:
linfo = loop_info[iname]
if len(linfo) > 1:
opt, init_exp, test_exp, iter_exp = linfo
transformed_stmt = module.loop.ast.ForStmt(init_exp.replicate(),
test_exp.replicate(),
iter_exp.replicate(),
transformed_stmt)
return transformed_stmt
|
ShaolongHu/Nitrate
|
tcms/testcases/tests.py
|
Python
|
gpl-2.0
| 5,924
| 0
|
import unittest
from django.test.client import Client
from django.forms import ValidationError
from fields import MultipleEmailField
class CaseTests(unittest.TestCase):
def setUp(self):
self.c = Client()
self.case_id = 12345
self.status_codes = [301, 302]
def test_cases(self):
response = self.c.get('/cases/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_new(self):
response = self.c.get('/case/new/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_clone(self):
response = self.c.get('/cases/clone/', {'case': 12197})
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_cases_changestatus(self):
response = self.c.get('/cases/changestatus/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_cases_priority(self):
response = self.c.get('/cases/priority/')
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_getcase(self):
location = '/case/%s' % self.case_id
response = self.c.get(location)
if response.status_code == 301:
print response.path
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
def test_case_details(self):
location = '/case/%s/details' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
assert response.status_code in self.status_codes
# self.assertEquals(response.status_code, 302)
def test_case_edit(self):
location = '/case/%s/edit/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_history(self):
location = '/case/%s/history/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_changecaseorder(self):
location = '/case/%s/changecaseorder/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_attachment(self):
location = '/case/%s/attachment/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_log(self):
location = '/case/%s/log/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_bug(self):
location = '/case/%s/bug/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
def test_case_plan(self):
location = '/case/%s/plan/' % self.case_id
response = self.c.get(location)
try:
self.assertEquals(response.status_code, 200)
except AssertionError:
self.assertEquals(response.status_code, 302)
class Test_MultipleEmailField(unittest.TestCase):
def setUp(self):
self.default_delimiter = ','
self.field = MultipleEmailField(delimiter=self.default_delimiter)
self.all_valid_emails = (
'cqi@redhat.com', 'cqi@yahoo.com', 'chen@gmail.com', )
self.include_invalid_emails = (
'', ' cqi@redhat.com', 'chen@sina.com', )
def test_to_python(self):
value = 'cqi@redhat.com'
pyobj = self.field.to_python(value)
self.assertEqual(pyobj, ['cqi@redhat.com'])
value = 'cqi@redhat.com,,cqi@gmail.com,'
pyobj = self.field.to_python(value)
self.assertEqual(pyobj, ['cqi@redhat.com', 'cqi@gmail.com'])
for value in ('', None, []):
            pyobj = self.field.to_python(value)
self.assertEqual(pyobj, [])
def test_clean(self):
value = 'cqi@redhat.com'
data = self.field.clean(value)
self.assertEqual(data, ['cqi@redhat.com'])
value = 'cqi@redhat.com,cqi@gmail.com'
data = self.field.clean(value)
self.assertEqual(data, ['cqi@redhat.com', 'cqi@gmail.com'])
        value = ',cqi@redhat.com, ,cqi@gmail.com, \n'
data = self.field.clean(value)
self.assertEqual(data, ['cqi@redhat.com', 'cqi@gmail.com'])
value = ',cqi,cqi@redhat.com, \n,cqi@gmail.com, '
self.assertRaises(ValidationError, self.field.clean, value)
value = ''
self.field.required = True
self.assertRaises(ValidationError, self.field.clean, value)
value = ''
self.field.required = False
data = self.field.clean(value)
self.assertEqual(data, [])
if __name__ == '__main__':
unittest.main()
|
AzamYahya/shogun
|
examples/undocumented/python_modular/graphical/converter_spe_helix.py
|
Python
|
gpl-3.0
| 2,562
| 0.007026
|
"""
Shogun demo
Fernando J. Iglesias Garcia
This example shows the use of dimensionality reduction methods, mainly
Stochastic Proximity Embedding (SPE), although Isomap is also used for
comparison. The data selected to be embedded is a helix. Two different methods
of SPE (global and local) are applied, showing that the global method outperforms
the local one in this case. Actually, the results of local SPE are fairly poor
for this input. Finally, the reduction achieved with Isomap is better than the
two previous ones and more robust against noise. Isomap exploits the
parametrization of the input data.
"""
import math
import mpl_toolkits.mplot3d as mpl3
import numpy as np
import pylab
import util
from modshogun import RealFeatures
from modshogun import StochasticProximityEmbedding, SPE_GLOBAL
from modshogun import SPE_LOCAL, Isomap
# Number of data points
N = 500
# Generate helix
t = np.linspace(1, N, N).T / N
t = t*2*math.pi
X = np.r_[ [ ( 2 + np.cos(8*t) ) * np.cos(t) ],
[ ( 2 + np.cos(8*t) ) * np.sin(t) ],
[ np.sin(8*t) ] ]
# Bi-color helix
labels = np.round( (t*1.5) ) % 2
y1 = labels == 1
y2 = labels == 0
# Plot helix
fig = pylab.figure()
fig.add_subplot(2, 2, 1, projection = '3d')
pylab.plot(X[0, y1], X[1, y1], X[2, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], X[2, y2], 'go')
pylab.title('Original 3D Helix')
# Create features instance
features = RealFeatures(X)
# Create Stochastic Proximity Embedding converter instance
converter = StochasticProximityEmbedding()
# Set target dimensionality
converter.set_target_dim(2)
# Set strategy
converter.set_strategy(SPE_GLOBAL)
# Compute SPE embedding
embedding = converter.embed(features)
X = embedding.get_feature_matrix()
fig.add_subplot(2, 2, 2)
pylab.plot(X[0, y1], X[1, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], 'go')
pylab.title('SPE with global strategy')
# Compute a second SPE embedding with local strategy
converter.set_strategy(SPE_LOCAL)
converter.set_k(12)
embedding = converter.embed(features)
X = embedding.get_feature_matrix()
fig.add_subplot(2, 2, 3)
pylab.plot(X[0, y1], X[1, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], 'go')
pylab.title('SPE with local strategy')
# Compute Isomap embedding (for comparison)
converter = Isomap()
converter.set_target_dim(2)
converter.set_k(6)
embedding = converter.embed(features)
X = embedding.get_feature_matrix()
fig.add_subplot(2, 2, 4)
pylab.plot(X[0, y1], X[1, y1], 'ro')
pylab.plot(X[0, y2], X[1, y2], 'go')
pylab.title('Isomap')
pylab.connect('key_press_event', util.quit)
pylab.show()
|
fubarwrangler/atlassim
|
simulation.py
|
Python
|
gpl-2.0
| 6,264
| 0.003033
|
#!/usr/bin/python
import computefarm as cf
from computefarm.farm import depth_first, breadth_first
import random
import logging
import numpy as np
HOUR = 60 * 60
default_queue_properties = {
'grid': { 'num': 0, 'mem': 750, 'avg': HOUR, 'std': 0.6 * HOUR},
'prod': { 'num': 0, 'avg': 8 * HOUR, 'std': 3 * HOUR},
'short': { 'num': 500, 'avg': 1.2 * HOUR, 'std': 600},
'long': { 'num': 500, 'avg': 5 * HOUR, 'std': 2 * HOUR},
'test': { 'num': 0, 'avg': 8 * HOUR, 'cpu': 3},
'mp8': { 'num': 0, 'avg': 6 * HOUR, 'std': 4 * HOUR, 'cpu': 8, 'mem': 6000}
}
def sort_like(array, like):
# All items in @like are picked in order if they exist in the array
for x in like:
if x in array:
yield x
# All the remaining are picked here
for x in sorted(set(array) - set(like)):
yield x
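# For example (hypothetical values): sort_like(['b', 'a', 'c'], like=['c', 'a'])
# yields 'c', 'a', 'b' -- items named in `like` come first, in that order, and
# any remaining items follow in sorted order.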
log = logging.getLogger('sim')
class Simulation(object):
def __init__(self, nodes, negotiate_interval=150, stat_freq=10, submit_interval=200):
""" Initialize the farm simulation, attach groups and queues to it and
            provide a method of submitting jobs of a predetermined size into the
queues.
"""
self.farm = cf.Farm()
# Distribution of farm nodes, e.g. 331/90 is ratio of 24/32 core machines
dist = (
(24, 331),
(32, 90),
(8, 238),
)
self.farm.generate_from_dist(dist, size=nodes)
root = self.setup_groups(cf.Group('<root>'))
self.farm.attach_groups(root)
self._init_stat(stat_freq * 100)
#Default ranking
self.farm.set_negotiatior_rank(depth_first)
self.queue = cf.JobQueue()
self.farm.attach_queue(self.queue)
# How many seconds per negotiation/stat gathering cycle
self.int_stat = stat_freq
self.int_negotiate = negotiate_interval
self.int_submit = submit_interval
self.next_stat = 0
self.next_negotiate = 0
self.next_submit = 0
# How many seconds to simulate each step
self.sec_per_step = 5
    # these two _set* knobs are used in callbacks by the GUI
def _set_neg_df(self):
self.farm.set_negotiatior_rank(depth_first)
def _set_neg_bf(self):
self.farm.set_negotiatior_rank(breadth_first)
def _init_stat(self, hist_size):
""" Statistics are kept in a constant-size numpy array that is updated
periodically
"""
self._stat = {}
self._stat_size = hist_size
for x in self.farm.groups.active_groups():
self._stat[x.name] = np.zeros((hist_size), int)
def _update_stat(self):
self.farm.update_usage()
for g in self.farm.groups.active_groups():
# Left-shift entire array back by one, so element n -> element n - 1
self._stat[g.name] = np.roll(self._stat[g.name], -1)
# New last element is current update
self._stat[g.name][-1] = g.usage
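    # The roll-then-overwrite idiom above keeps a fixed-length usage history,
    # e.g. (hypothetical values, hist_size=4):
    #   [0, 5, 7, 9] --np.roll(-1)--> [5, 7, 9, 0] --[-1] = 11--> [5, 7, 9, 11]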
def setup_groups(self, root):
""" Reflects current ATLAS group structure:
/- atlas +-- production +-- prod
| | |
| | \-- mp8
| | |
| | \-- test
<root>-+ |
| \-- analysis +-- short
| |
| \-- long
\- grid
"""
root.add_child('atlas')
root.add_child('grid', 3)
root['atlas'].add_child('production')
root['atlas'].add_child('analysis')
root['atlas']['production'].add_child('prod', 40)
root['atlas']['production'].add_child('mp8', 5)
root['atlas']['production'].add_child('test', 7)
root['atlas']['analysis'].add_child('short', 10)
root['atlas']['analysis'].add_child('long', 10)
# Populate with default properties from top of this module
for x in root.walk():
if x.name in default_queue_properties:
x.set_character(**default_queue_properties[x.name])
return root
def add_jobs(self):
""" Submit more jobs into the queue, keeping the total idle jobs where
they should be according to the sliders in the GUI.
"""
for group in self.farm.groups.active_groups():
num_submit = group.num - self.farm.queue.get_group_idle(group.name)
if num_submit <= 0:
continue
log.info("Submitting %d more %s jobs", num_submit, group.name)
for n in xrange(num_submit):
# Job length is random within a Gaussian distribution
length = abs(random.gauss(group.avg, group.std))
# Create job object and add it to queue
job = cf.BatchJob(group=group.name, cpus=group.cpu, memory=group.mem,
length=length)
self.queue.add_job(job)
def step(self, dt):
""" Advance time of the simulation by dt steps at a time, making next
submission/negotiation/statistics-gathering as appropriate
"""
for i in xrange(dt):
self.farm.advance_time(self.sec_per_step)
if self.farm.time > self.next_submit:
self.add_jobs()
self.next_submit = self.farm.time + self.int_submit
if self.farm.time > self.next_negotiate:
self.farm.negotiate_jobs()
self.next_negotiate = self.farm.time + self.int_negotiate
if self.farm.time > self.next_stat:
self._update_stat()
self.next_stat = self.farm.time + self.int_stat
def display_order(self):
sort_order = ('short', 'long', 'test', 'prod', 'mp8')
return list(sort_like(self._stat.keys(), sort_order))
def make_plotdata(self, groups='all'):
x = np.arange(self._stat_size)
if groups == 'all':
y = np.vstack((self._stat[x] for x in self.display_order()))
else:
y = np.vstack((self._stat[x] for x in self.display_order() if x in groups))
return x, y
if __name__ == '__main__':
s = Simulation()
|
vhelin/wla-dx
|
doc/sphinx/globalindex.py
|
Python
|
gpl-2.0
| 2,896
| 0.002762
|
# Note: Modified by Neui (Note: sphinx.util.compat.Directive is deprecated)
#
# Copyright (C) 2011 by Matteo Franchin
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# <http://www.gnu.org/licenses/>.
from sphinx.builders.singlehtml import SingleFileHTMLBuilder
from docutils import nodes
from docutils.parsers.rst import Directive, directives
import re
class globalindex(nodes.General, nodes.Element):
pass
def visit_globalindex_node(self, node):
self.body.append(node['content'])
def depart_globalindex_node(self, node):
pass
class GlobalIndexDirective(Directive):
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = \
{'maxdepth': directives.nonnegative_int,
'collapse': directives.flag,
'titlesonly': directives.flag}
def run(self):
node = globalindex('')
node['maxdepth'] = self.options.get('maxdepth', 2)
node['collapse'] = 'collapse' in self.options
node['titlesonly'] = 'titlesonly' in self.options
return [node]
def process_globalindex_nodes(app, doctree, fromdocname):
builder = app.builder
if builder.name != SingleFileHTMLBuilder.name:
for node in doctree.traverse(globalindex):
node.parent.remove(node)
else:
docname = builder.config.master_doc
for node in doctree.traverse(globalindex):
kwargs = dict(maxdepth=node['maxdepth'],
collapse=node['collapse'],
titles_only=node['titlesonly'])
rendered_toctree = builder._get_local_toctree(docname, **kwargs)
# For some reason, it refers to docname.html#anchor, where just
# #anchor is enough.
rendered_toctree = rendered_toctree.replace(docname + ".html", '')
# Subsections will be #section#subsection, which is invalid.
# Removing the first #section fixes this.
rendered_toctree = re.sub('href="(?:#[^#"]+)*(#[^"]+)"', \
'href="\\1"', rendered_toctree)
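            # e.g. (hypothetical toctree output) href="#usage#options" becomes
            # href="#options": only the innermost anchor is kept, which is what
            # the single-page build expects.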
node['content'] = rendered_toctree
def setup(app):
app.add_node(globalindex,
html=(visit_globalindex_node, depart_globalindex_node))
app.add_directive('globalindex', GlobalIndexDirective)
app.connect('doctree-resolved', process_globalindex_nodes)
|
mindbody/API-Examples
|
SDKs/Python/swagger_client/models/resource.py
|
Python
|
bsd-2-clause
| 3,703
| 0.00027
|
# coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Resource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str'
}
attribute_map = {
'id': 'Id',
'name': 'Name'
}
def __init__(self, id=None, name=None): # noqa: E501
"""Resource - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
@property
def id(self):
"""Gets the id of this Resource. # noqa: E501
The ID of the resource. # noqa: E501
:return: The id of this Resource. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Resource.
The ID of the resource. # noqa: E501
:param id: The id of this Resource. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this Resource. # noqa: E501
The name of the resource. # noqa: E501
:return: The name of this Resource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
        """Sets the name of this Resource.
The name of the resource. # noqa: E501
:param name: The name of this Resource. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Resource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Resource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
webcube/django-hyperadmin
|
hyperadmin/tests/test_sites.py
|
Python
|
bsd-3-clause
| 360
| 0.008333
|
from django.utils import unittest
from django.contrib import admin
from hyperadmin.sites import ResourceSite
class SiteTestCase(unittest.TestCase):
def test_install_from_admin_site(self):
site = ResourceSite()
admin.autodiscover()
site.install_models_from_site(admin.site)
        self.assertTrue(site.registry)
|
openstack-infra/project-config
|
tools/projectconfig_ruamellib.py
|
Python
|
apache-2.0
| 1,722
| 0
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ruamel.yaml
def none_representer(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:null', 'null')
class YAML(object):
def __init__(self):
"""Wrap construction of ruamel yaml object."""
self.yaml = ruamel.yaml.YAML()
self.yaml.allow_duplicate_keys = True
self.yaml.representer.add_representer(type(None), none_representer)
self.yaml.indent(mapping=2, sequence=4, offset=2)
def load(self, stream):
return self.yaml.load(stream)
def tr(self, x):
x = x.replace('\n-', '\n\n-')
newlines = []
for line in x.split('\n'):
            if '#' in line:
newlines.append(line)
else:
newlines.append(line[2:])
return '\n'.join(newlines)
def dump(self, data, *args, **kwargs):
if isinstance(data, list):
kwargs['transform'] = self.tr
self.yaml.dump(data, *args, **kwargs)
_yaml = YAML()
def load(*args, **kwargs):
    return _yaml.load(*args, **kwargs)
def dump(*args, **kwargs):
return _yaml.dump(*args, **kwargs)
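# --- Illustrative usage sketch (not part of the original module) ---
# load()/dump() delegate to the shared YAML() wrapper configured above; when the
# data being dumped is a list, the tr() transform is applied to reflow the output
# into the layout project-config expects.
#
#   import sys
#   data = load(open('zuul.d/projects.yaml'))  # hypothetical input file
#   dump(data, sys.stdout)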
|
primiano/depot_tools
|
tests/gclient_test.py
|
Python
|
bsd-3-clause
| 36,329
| 0.003854
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
from testing_support import trial_dir
def write(filename, content):
"""Writes the content of a file and create the directories as needed."""
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
with open(filename, 'w') as f:
f.write(content)
class SCMMock(object):
def __init__(self, unit_test, name, url):
self.unit_test = unit_test
self.name = name
self.url = url
def RunCommand(self, command, options, args, file_list):
self.unit_test.assertEquals('None', command)
self.unit_test.processed.put((self.name, self.url))
def FullUrlForRelativeUrl(self, url):
return self.url + url
# pylint: disable=R0201
def DoesRemoteURLMatch(self, _):
return True
  def GetActualRemoteURL(self, _):
return self.url
class GclientTest(trial_dir.TestCase):
def setUp(self):
super(GclientTest, self).setUp()
    self.processed = Queue.Queue()
self.previous_dir = os.getcwd()
os.chdir(self.root_dir)
# Manual mocks.
self._old_createscm = gclient.gclient_scm.CreateSCM
gclient.gclient_scm.CreateSCM = self._createscm
self._old_sys_stdout = sys.stdout
sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
def tearDown(self):
self.assertEquals([], self._get_processed())
gclient.gclient_scm.CreateSCM = self._old_createscm
sys.stdout = self._old_sys_stdout
os.chdir(self.previous_dir)
super(GclientTest, self).tearDown()
def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
return SCMMock(self, name, parsed_url)
def testDependencies(self):
self._dependencies('1')
def testDependenciesJobs(self):
self._dependencies('1000')
def _dependencies(self, jobs):
"""Verifies that dependencies are processed in the right order.
e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
bar isn't fetched until 'src' is done.
Also test that a From() dependency should not be processed when it is listed
as a requirement.
Args:
|jobs| is the number of parallel jobs simulated.
"""
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', jobs])
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "bar", "url": "svn://example.com/bar" },\n'
' { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",\n'
# This one will depend on dir1/dir2 in bar.
' "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
' "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
' "foo/dir1/dir2/dir5/dir6":\n'
' From("foo/dir1/dir2/dir3/dir4", "foo/dir1/dir2"),\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
      # There are two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
' "foo/dir1/dir2": "/dir1/dir2",\n'
'}')
write(
os.path.join('bar/empty', 'DEPS'),
'deps = {\n'
'}')
# Test From()
write(
os.path.join('foo/dir1/dir2/dir3/dir4', 'DEPS'),
'deps = {\n'
# This one should not be fetched or set as a requirement.
' "foo/dir1/dir2/dir5": "svn://example.com/x",\n'
# This foo/dir1/dir2 points to a different url than the one in bar.
' "foo/dir1/dir2": "/dir1/another",\n'
'}')
obj = gclient.GClient.LoadCurrentConfig(options)
self._check_requirements(obj.dependencies[0], {})
self._check_requirements(obj.dependencies[1], {})
obj.RunOnDeps('None', args)
actual = self._get_processed()
first_3 = [
('bar', 'svn://example.com/bar'),
('bar/empty', 'svn://example.com/bar_empty'),
('foo', 'svn://example.com/foo'),
]
if jobs != 1:
      # We don't care about the ordering of these items except that bar must be
# before bar/empty.
self.assertTrue(
actual.index(('bar', 'svn://example.com/bar')) <
actual.index(('bar/empty', 'svn://example.com/bar_empty')))
self.assertEquals(first_3, sorted(actual[0:3]))
else:
self.assertEquals(first_3, actual[0:3])
self.assertEquals(
[
('foo/dir1', 'svn://example.com/foo/dir1'),
('foo/dir1/dir2', 'svn://example.com/bar/dir1/dir2'),
('foo/dir1/dir2/dir3', 'svn://example.com/foo/dir1/dir2/dir3'),
('foo/dir1/dir2/dir3/dir4',
'svn://example.com/foo/dir1/dir2/dir3/dir4'),
('foo/dir1/dir2/dir5/dir6',
'svn://example.com/foo/dir1/dir2/dir3/dir4/dir1/another'),
],
actual[3:])
self.assertEquals(3, len(obj.dependencies))
self.assertEquals('foo', obj.dependencies[0].name)
self.assertEquals('bar', obj.dependencies[1].name)
self.assertEquals('bar/empty', obj.dependencies[2].name)
self._check_requirements(
obj.dependencies[0],
{
'foo/dir1': ['bar', 'bar/empty', 'foo'],
'foo/dir1/dir2/dir3':
['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
'foo/dir1/dir2/dir3/dir4':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3'],
'foo/dir1/dir2/dir5/dir6':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3/dir4'],
})
self._check_requirements(
obj.dependencies[1],
{
'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
})
self._check_requirements(
obj.dependencies[2],
{})
self._check_requirements(
obj,
{
'foo': [],
'bar': [],
'bar/empty': ['bar'],
})
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
def testAutofix(self):
# Invalid urls causes pain when specifying requirements. Make sure it's
# auto-fixed.
d = gclient.Dependency(
None, 'name', 'proto://host/path/@revision', None, None, None, None,
None, '', True, False)
self.assertEquals('proto://host/path@revision', d.url)
def testStr(self):
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'url', None, None, None, None, None, 'DEPS', True, False),
gclient.Dependency(
obj, 'bar', 'url', None, None, None, None, None, 'DEPS', True, False),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.Dependency(
obj.dependencies[0], 'foo/dir1', 'url', None, None, None, None,
None, 'DEPS', True, False),
gclient.Dependency(
obj.dependencies[0], 'foo/dir2',
gclient.GClientKeywords.FromImpl('bar'), None, N
|
GbalsaC/bitnamiP
|
XBlock/xblock/test/test_fields.py
|
Python
|
agpl-3.0
| 26,030
| 0.000845
|
"""
Tests for classes extending Field.
"""
# Allow accessing protected members for testing purposes
# pylint: disable=W0212
from mock import Mock
import unittest
import datetime as dt
import pytz
import warnings
import math
import textwrap
import itertools
from contextlib import contextmanager
import ddt
from xblock.core import XBlock, Scope
from xblock.field_data import DictFieldData
from xblock.fields import (
Any, Boolean, Dict, Field, Float,
Integer, List, String, DateTime, Reference, ReferenceList, Sentinel,
UNIQUE_ID
)
from xblock.test.tools import (
assert_equals, assert_not_equals, assert_in, assert_not_in, assert_false, TestRuntime
)
from xblock.fields import scope_key, ScopeIds
class FieldTest(unittest.TestCase):
""" Base test class for Fields. """
FIELD_TO_TEST = Mock()
def set_and_get_field(self, arg, enforce_type):
"""
Set the field to arg in a Block, get it and return it
"""
class TestBlock(XBlock):
"""
Block for testing
"""
field_x = self.FIELD_TO_TEST(enforce_type=enforce_type)
runtime = TestRuntime(services={'field-data': DictFieldData({})})
        block = TestBlock(runtime, scope_ids=Mock(spec=ScopeIds))
block.field_x = arg
return block.field_x
@contextmanager
def assertDeprecationWarning(self, count=1):
"""Asserts that the contained code raises `count` deprecation warnings"""
with warnings.catch_warnings(record=True) as caught:
warnings.simplefilter("always", DeprecationWarning)
yield
self.assertEquals(count, sum(
1 for warning in caught
if issubclass(warning.category, DeprecationWarning)
))
def assertJSONOrSetEquals(self, expected, arg):
"""
Asserts the result of field.from_json and of setting field.
"""
# from_json(arg) -> expected
self.assertEqual(expected, self.FIELD_TO_TEST().from_json(arg))
# set+get with enforce_type arg -> expected
self.assertEqual(expected, self.set_and_get_field(arg, True))
# set+get without enforce_type arg -> arg
# provoking a warning unless arg == expected
count = 0 if arg == expected else 1
with self.assertDeprecationWarning(count):
self.assertEqual(arg, self.set_and_get_field(arg, False))
def assertToJSONEquals(self, expected, arg):
"""
Assert that serialization of `arg` to JSON equals `expected`.
"""
self.assertEqual(expected, self.FIELD_TO_TEST().to_json(arg))
def assertJSONOrSetValueError(self, arg):
"""
Asserts that field.from_json or setting the field throws a ValueError
for the supplied value.
"""
# from_json and set+get with enforce_type -> ValueError
with self.assertRaises(ValueError):
self.FIELD_TO_TEST().from_json(arg)
with self.assertRaises(ValueError):
self.set_and_get_field(arg, True)
# set+get without enforce_type -> warning
with self.assertDeprecationWarning():
self.set_and_get_field(arg, False)
def assertJSONOrSetTypeError(self, arg):
"""
Asserts that field.from_json or setting the field throws a TypeError
for the supplied value.
"""
# from_json and set+get with enforce_type -> TypeError
with self.assertRaises(TypeError):
self.FIELD_TO_TEST().from_json(arg)
with self.assertRaises(TypeError):
self.set_and_get_field(arg, True)
# set+get without enforce_type -> warning
with self.assertDeprecationWarning():
self.set_and_get_field(arg, False)
class IntegerTest(FieldTest):
"""
Tests the Integer Field.
"""
FIELD_TO_TEST = Integer
def test_integer(self):
self.assertJSONOrSetEquals(5, '5')
self.assertJSONOrSetEquals(0, '0')
self.assertJSONOrSetEquals(-1023, '-1023')
self.assertJSONOrSetEquals(7, 7)
self.assertJSONOrSetEquals(0, False)
self.assertJSONOrSetEquals(1, True)
def test_float_converts(self):
self.assertJSONOrSetEquals(1, 1.023)
self.assertJSONOrSetEquals(-3, -3.8)
def test_none(self):
self.assertJSONOrSetEquals(None, None)
self.assertJSONOrSetEquals(None, '')
def test_error(self):
self.assertJSONOrSetValueError('abc')
self.assertJSONOrSetValueError('[1]')
self.assertJSONOrSetValueError('1.023')
self.assertJSONOrSetTypeError([])
self.assertJSONOrSetTypeError({})
class FloatTest(FieldTest):
"""
Tests the Float Field.
"""
FIELD_TO_TEST = Float
def test_float(self):
self.assertJSONOrSetEquals(.23, '.23')
self.assertJSONOrSetEquals(5, '5')
self.assertJSONOrSetEquals(0, '0.0')
self.assertJSONOrSetEquals(-1023.22, '-1023.22')
self.assertJSONOrSetEquals(0, 0.0)
self.assertJSONOrSetEquals(4, 4)
self.assertJSONOrSetEquals(-0.23, -0.23)
self.assertJSONOrSetEquals(0, False)
self.assertJSONOrSetEquals(1, True)
def test_none(self):
self.assertJSONOrSetEquals(None, None)
self.assertJSONOrSetEquals(None, '')
def test_error(self):
self.assertJSONOrSetValueError('abc')
self.assertJSONOrSetValueError('[1]')
self.assertJSONOrSetTypeError([])
self.assertJSONOrSetTypeError({})
class BooleanTest(FieldTest):
"""
Tests the Boolean Field.
"""
FIELD_TO_TEST = Boolean
def test_false(self):
self.assertJSONOrSetEquals(False, "false")
self.assertJSONOrSetEquals(False, "False")
self.assertJSONOrSetEquals(False, "")
self.assertJSONOrSetEquals(False, "any other string")
self.assertJSONOrSetEquals(False, False)
def test_true(self):
self.assertJSONOrSetEquals(True, "true")
self.assertJSONOrSetEquals(True, "TruE")
self.assertJSONOrSetEquals(True, True)
def test_none(self):
self.assertJSONOrSetEquals(False, None)
def test_everything_converts_to_bool(self):
self.assertJSONOrSetEquals(True, 123)
self.assertJSONOrSetEquals(True, ['a'])
self.assertJSONOrSetEquals(False, [])
class StringTest(FieldTest):
"""
Tests the String Field.
"""
FIELD_TO_TEST = String
def test_json_equals(self):
self.assertJSONOrSetEquals("false", "false")
self.assertJSONOrSetEquals("abba", "abba")
self.assertJSONOrSetEquals('"abba"', '"abba"')
self.assertJSONOrSetEquals('', '')
def test_none(self):
self.assertJSONOrSetEquals(None, None)
def test_error(self):
self.assertJSONOrSetTypeError(['a'])
self.assertJSONOrSetTypeError(1.023)
self.assertJSONOrSetTypeError(3)
self.assertJSONOrSetTypeError([1])
self.assertJSONOrSetTypeError([])
self.assertJSONOrSetTypeError({})
class DateTest(FieldTest):
"""
Tests of the Date field.
"""
FIELD_TO_TEST = DateTime
def test_json_equals(self):
self.assertJSONOrSetEquals(
dt.datetime(2014, 4, 1, 2, 3, 4, 567890).replace(tzinfo=pytz.utc),
'2014-04-01T02:03:04.567890'
)
self.assertJSONOrSetEquals(
dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc),
'2014-04-01T02:03:04.000000'
)
self.assertJSONOrSetEquals(
dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc),
'2014-04-01T02:03:04Z'
)
self.assertJSONOrSetEquals(
dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc),
dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc)
)
def test_serialize(self):
self.assertToJSONEquals(
'2014-04-01T02:03:04.567890',
dt.datetime(2014, 4, 1, 2, 3, 4, 567890).replace(tzinfo=pytz.utc)
)
self.assertToJSONEquals(
'2014-04-01T02:03:04.000000',
dt.datetime(2014, 4, 1, 2, 3, 4).replace(tzinfo=pytz.utc)
|
Endika/l10n-spain
|
l10n_es_aeat_vat_prorrate/__openerp__.py
|
Python
|
agpl-3.0
| 812
| 0
|
# -*- coding: utf-8 -*-
# (c) 2015 Antiun Ingeniería S.L. - Pedro M. Baeza
# (c) 2015 AvanzOSC - Ainara Galdona
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0
{
"name": "AEAT - Prorrata de IVA",
"version": "8.0.2.0.0",
"license": "AGPL-3",
"author": "AvanzOSC, "
"Antiun Ingeniería S.L., "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/l10n-spain",
"category": "Accounting",
"depends": [
'l10n_es_aeat_mod303',
],
"data": [
"data/tax_code_map_mod303_data.xml",
"data/aeat_export_mod303_data.xml",
'wizard/l10n_es_aeat_compute_vat_prorrate_view.xml',
'views/mod303_view.xml'
],
"installable": True,
}
|
mogoweb/webkit_for_android5.1
|
webkit/Source/WebKit2/Scripts/webkit2/messages_unittest.py
|
Python
|
apache-2.0
| 24,676
| 0.002432
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from StringIO import StringIO
import messages
_messages_file_contents = """# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "config.h"
#if ENABLE(WEBKIT2)
messages -> WebPage {
LoadURL(WTF::String url)
#if ENABLE(TOUCH_EVENTS)
TouchEvent(WebKit::WebTouchEvent event)
#endif
DidReceivePolicyDecision(uint64_t frameID, uint64_t listenerID, uint32_t policyAction)
Close()
PreferencesDidChange(WebKit::WebPreferencesStore store)
SendDoubleAndFloat(double d, float f)
SendInts(Vector<uint64_t> ints, Vector<Vector<uint64_t> > intVectors)
CreatePlugin(uint64_t pluginInstanceID, WebKit::Plugin::Parameters parameters) -> (bool result)
RunJavaScriptAlert(uint64_t frameID, WTF::String message) -> ()
GetPlugins(bool refresh) -> (Vector<WebCore::PluginInfo> plugins) DispatchOnConnectionQueue
GetPluginProcessConnection(WTF::String pluginPath) -> (CoreIPC::Connection::Handle connectionHandle) Delayed
TestMultipleAttributes() -> () DispatchOnConnectionQueue Delayed
#if PLATFORM(MAC)
DidCreateWebProcessConnection(CoreIPC::MachPort connectionIdentifier)
#endif
#if PLATFORM(MAC)
# Keyboard support
InterpretKeyEvent(uint32_t type) -> (Vector<WebCore::KeypressCommand> commandName)
#endif
}
#endif
"""
_expected_results = {
'name': 'WebPage',
'condition': 'ENABLE(WEBKIT2)',
'messages': (
{
'name': 'LoadURL',
'parameters': (
('WTF::String', 'url'),
),
'condition': None,
},
{
'name': 'TouchEvent',
'parameters': (
('WebKit::WebTouchEvent', 'event'),
),
'condition': 'ENABLE(TOUCH_EVENTS)',
},
{
'name': 'DidReceivePolicyDecision',
'parameters': (
('uint64_t', 'frameID'),
('uint64_t', 'listenerID'),
('uint32_t', 'policyAction'),
),
'condition': None,
},
{
'name': 'Close',
'parameters': (),
'condition': None,
},
{
'name': 'PreferencesDidChange',
'parameters': (
('WebKit::WebPreferencesStore', 'store'),
),
'condition': None,
},
{
'name': 'SendDoubleAndFloat',
'parameters': (
('double', 'd'),
('float', 'f'),
),
'condition': None,
},
{
'name': 'SendInts',
'parameters': (
('Vector<uint64_t>', 'ints'),
('Vector<Vector<uint64_t> >', 'intVectors')
),
'condition': None,
},
{
'name': 'CreatePlugin',
'parameters': (
('uint64_t', 'pluginInstanceID'),
('WebKit::Plugin::Parameters', 'parameters')
),
'reply_parameters': (
('bool', 'result'),
),
'condition': None,
},
{
'name': 'RunJavaScriptAlert',
'parameters': (
('uint64_t', 'frameID'),
('WTF::String', 'message')
),
'reply_parameters': (),
'condition': None,
},
{
'name': 'GetPlugins',
'parameters': (
('bool', 'refresh'),
),
'reply_parameters': (
('Vector<WebCore::PluginInfo>', 'plugins'),
),
'condition': None,
},
{
'name': 'GetPluginProcessConnection',
'parameters': (
('WTF::String', 'pluginPath'),
),
'reply_parameters': (
('CoreIPC::Connection::Handle', 'connectionHandle'),
),
'condition': None,
},
{
'name': 'TestMultipleAttributes',
'parameters': (
),
'reply_parameters': (
),
'condition': None,
},
{
'name': 'DidCreateWebProcessConnection',
'parameters': (
('CoreIPC::MachPort', 'connectionIdentifier'),
),
'condition': 'PLATFORM(MAC)',
},
{
'name': 'InterpretKeyEvent',
'parameters': (
('uint32_t', 'type'),
),
'reply_parameters': (
('Vector<WebCore::KeypressCommand>', 'commandName'),
),
'condition': 'PLATFORM(MAC)',
},
),
}
class MessagesTest(unittest.TestCase):
def setUp(self):
self.receiver = messages.MessageReceiver.parse(StringIO(_messages_file_contents))
class ParsingTest(MessagesTest):
def check_message(self, message, expected_message):
self.assertEquals(message.name, expected_message['name'])
self.assertEquals(len(message.parameters), len(expected_message['parameters']))
for index, parameter
|
in enumerate(message.parameters):
self.assertEquals(parameter.type, expected_message['parameters'][index][0])
self.assertEquals(parameter.name, expected_message['parameters'][index][1])
if message.reply_parameters != None:
for index, parameter in enumerate(message.reply_parame
|
ters):
self.assertEquals(parameter.type, expected_message['reply_parameters'][index][0])
self.assertEquals(parameter.name, exp
|
dax/jmc
|
src/jmc/model/tests/account.py
|
Python
|
gpl-2.0
| 54,705
| 0.007502
|
# -*- coding: utf-8 -*-
##
## test_account.py
## Login : <dax@happycoders.org>
## Started on Wed Feb 14 08:23:17 2007 David Rousselie
## $Id$
##
## Copyright (C) 2007 David Rousselie
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
import unittest
import thread
from jcl.tests import JCLTestCase
import jcl.model as model
from jcl.error import FieldError
from jcl.model.account import Account, PresenceAccount, User
import jmc.model.account
from jmc.model.account import MailAccount, POP3Account, IMAPAccount, \
GlobalSMTPAccount, AbstractSMTPAccount, SMTPAccount
from jmc.lang import Lang
from jcl.model.tests.account import Account_TestCase, \
PresenceAccount_TestCase, InheritableAccount_TestCase, \
ExampleAccount
from jmc.model.tests import email_generator, server
class AccountModule_TestCase(unittest.TestCase):
def test_validate_login_with_empty_login(self):
self.assertRaises(FieldError, jmc.model.account.validate_login,
None, None, None)
def test_validate_login_with_login_with_whitespace(self):
self.assertRaises(FieldError, jmc.model.account.validate_login,
"login with spaces", None, None)
def test_validate_host_with_empty_login(self):
self.assertRaises(FieldError, jmc.model.account.validate_host,
None, None, None)
def test_validate_host_with_host_with_whitespace(self):
self.assertRaises(FieldError, jmc.model.account.validate_host,
"host with spaces", None, None)
class MailAccount_TestCase(PresenceAccount_TestCase):
def setUp(self):
PresenceAccount_TestCase.setUp(self, tables=[MailAccount])
self.account = MailAccount(user=User(jid="user1@test.com"),
name="account1",
jid="account1@jmc.test.com")
self.account_class = MailAccount
def make_test(email_type, tested_func, expected_res):
def inner(self):
encoded, multipart, header = email_type
email = email_generator.generate(encoded,
multipart,
header)
part = tested_func(self, email)
self.assertEquals(part, expected_res)
return inner
test_get_decoded_part_not_encoded = \
make
|
_test((False, False, False), \
lambda self, email: \
self.account.get_decoded_part(email, None),
u"Not encoded single part")
test_get_decoded_part_encoded = \
make_test((True, False, False),
lambda self, email: \
self.account.get_decoded_part(email, None),
u"Encoded single part with 'i
|
so-8859-15' charset (éàê)")
test_format_message_summary_not_encoded = \
make_test((False, False, True),
lambda self, email: \
self.account.format_message_summary(email),
(u"From : not encoded from\nSubject : not encoded subject\n\n",
u"not encoded from"))
test_format_message_summary_encoded = \
make_test((True, False, True),
lambda self, email: \
self.account.format_message_summary(email),
(u"From : encoded from (éàê)\nSubject : encoded subject " + \
u"(éàê)\n\n",
u"encoded from (éàê)"))
test_format_message_summary_partial_encoded = \
make_test((True, False, True),
lambda self, email: \
email.replace_header("Subject",
"\" " + str(email["Subject"]) \
+ " \" not encoded part") or \
email.replace_header("From",
"\" " + str(email["From"]) \
+ " \" not encoded part") or \
self.account.format_message_summary(email),
(u"From : \"encoded from (éàê)\" not encoded part\nSubject " + \
u": \"encoded subject (éàê)\" not encoded part\n\n",
u"\"encoded from (éàê)\" not encoded part"))
test_format_message_single_not_encoded = \
make_test((False, False, True),
lambda self, email: \
self.account.format_message(email),
(u"From : not encoded from\nSubject : not encoded subject" + \
u"\n\nNot encoded single part\n",
u"not encoded from"))
test_format_message_single_encoded = \
make_test((True, False, True),
lambda self, email: \
self.account.format_message(email),
(u"From : encoded from (éàê)\nSubject : encoded subject " + \
u"(éàê)\n\nEncoded single part with 'iso-8859-15' charset" + \
u" (éàê)\n",
u"encoded from (éàê)"))
test_format_message_multi_not_encoded = \
make_test((False, True, True),
lambda self, email: \
self.account.format_message(email),
(u"From : not encoded from\nSubject : not encoded subject" + \
u"\n\nNot encoded multipart1\nNot encoded multipart2\n",
u"not encoded from"))
test_format_message_multi_encoded = \
make_test((True, True, True),
lambda self, email: \
self.account.format_message(email),
(u"From : encoded from (éàê)\nSubject : encoded subject (éà" + \
u"ê)\n\nutf-8 multipart1 with no charset (éàê)" + \
u"\nEncoded multipart2 with 'iso-8859-15' charset (éàê)\n" + \
u"Encoded multipart3 with no charset (éàê)\n",
u"encoded from (éàê)"))
def test_get_default_status_msg(self):
"""
Get default status message for MailAccount.
Should raise NotImplementedError because get_type() method
is not implemented
"""
try:
self.account.get_default_status_msg(Lang.en)
except NotImplementedError:
return
        self.fail("No NotImplementedError raised")
class POP3Account_TestCase(InheritableAccount_TestCase):
def setUp(self):
JCLTestCase.setUp(self, tables=[Account, PresenceAccount, User,
MailAccount, POP3Account])
self.pop3_account = POP3Account(user=User(jid="user1@test.com"),
name="account1",
jid="account1@jmc.test.com",
login="login")
self.pop3_account.password = "pass"
self.pop3_account.host = "localhost"
self.pop3_account.port = 1110
self.pop3_account.ssl = False
model.db_disconnect()
self.account_class = POP3Account
def make_test(responses=None, queries=None, core=None):
def inner(self):
self.server = server.DummyServer("localhost", 1110)
thread.start_new_thread(self.server.serve, ())
self.server.responses = ["+OK connected\r\n",
"+OK name is a valid mailbox\r\n",
"+OK
|
merenlab/anvio
|
anvio/migrations/profile/v26_to_v27.py
|
Python
|
gpl-3.0
| 2,866
| 0.007676
|
#!/usr/bin/env python
# -*- coding: utf-8
import sys
import argparse
from ete3 import Tree
import anvio.db as db
import anvio.utils as utils
import anvio.terminal as terminal
from anvio.errors import ConfigError
run = terminal.Run()
progress = terminal.Progress()
current_version, next_version = [x[1:] for x in __name__.split('_to_')]
item_orders_table_name = 'item_orders'
item_orders_table_structure = ['name', 'type', 'data']
item_orders_table_types = ['text', 'text', 'text']
layer_orders_table_name = 'layer_orders'
layer_orders_table_structure = ['data_key', 'data_type', 'data_value']
layer_orders_table_types = [ 'text' , 'text' , 'text' ]
def migrate(db_path):
if db_path is None:
raise ConfigError("No database path is given.")
# make sure someone is not being funny
utils.is_profile_db(db_path)
# make sure the version is accurate
profile_db = db.DB(db_path, None, ignore_version = True)
if str(profile_db.get_version()) != current_version:
raise ConfigError("Version of this profile database is not %s (hence, this script cannot really do anything)." % current_version)
# migrate item orders
item_orders = profile_db.get_table_as_dict(item_orders_table_name)
for order_name in item_orders:
if item_orders[order_name]['type'] == 'newick':
newick = Tree(item_orders[order_name]['data'], format=1)
newick = newick.write(format=2)
profile_db._exec("""UPDATE %s SET "data" = ? WHERE "name" LIKE ?""" % item_orders_table_name, (newick, order_name))
# migrate layer orders
layer_orders = profile_db.get_table_as_dict(layer_orders_table_name)
for order_name in layer_orders:
if layer_orders[order_name]['data
|
_type'] == 'newick':
newick = Tree(layer_orders[order_name]['data_value'], format=1)
newick = newick.write(format=2)
profile_db._exec("""UPDATE %s SET "data_value" = ? WHERE "data_key" LIKE ?""
|
" % layer_orders_table_name, (newick, order_name))
# set the version
profile_db.remove_meta_key_value_pair('version')
profile_db.set_version(next_version)
# bye
profile_db.disconnect()
progress.end()
run.info_single('Your profile db is now %s. Aww, yisss.' % next_version, nl_after=1, nl_before=1, mc='green')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='A simple script to upgrade profile database from version %s to version %s' % (current_version, next_version))
parser.add_argument('profile_db', metavar = 'PROFILE_DB', help = "An anvi'o profile database of version %s" % current_version)
args, unknown = parser.parse_known_args()
try:
migrate(args.profile_db)
except ConfigError as e:
print(e)
sys.exit(-1)
|
McDermott-Group/LabRAD
|
LabRAD/Measurements/General/data_processing.py
|
Python
|
gpl-2.0
| 2,679
| 0.008959
|
import numpy as np
import warnings
def mean_time(t, min_threshold=0, max_threshold=1253):
"""
Take a switch probability result array from the PreAmp timer, and
compute mean switching time using the specified thresholds. Timing
data is assumed to be a numpy array.
"""
t = t[np.logical_and(t > min_threshold, t < max_threshold)]
if np.size(t) > 0:
t_mean = np.mean(t)
t_std = np.std(t)
else:
t_mean = np.nan
t_std = np.nan
return t_mean, t_std
def mean_time_diff(t, min_threshold=0, max_threshold=1253):
"""
Take a switch probability result array from the PreAmp timers, and
compute mean switching time using the specified thresholds.
"""
dt = t[0][:] - t[1][:]
t0_mask = np.logical_and(t[0,:] > min_threshold, t[0,:] < max_threshold)
t1_mask = np.logical_and(t[1,:] > min_threshold, t[1,:] < max_threshold)
dt = dt[np.logical_and(t0_mask, t1_mask)]
if np.size(dt) > 0:
dt_mean = np.mean(dt)
dt_std = np.std(dt)
else:
dt_mean = np.nan
dt_std = np.nan
return dt_mean, dt_std
def prob(t, min_threshold=0, max_threshold=1253):
"""
Take a switch probability result array from the PreAmp timer, and
compute switching probability using the specified thresholds.
"""
return float(np.size(t[np.logical_and(t > min_threshold, t < max_threshold)])) / float(np.size(t))
def outcomes(t, min_threshold=0, max_threshold=1253):
"""
Take a switch probability result array from the PreAmp timer, and
convert to a numpy array of 0 or 1 based on the thresholds.
"""
def _threshold(x):
if x > min_threshold and x < max_threshold:
return 1
else:
return 0
threshold_vectorized = n
|
p.vectorize(_threshold)
return threshold_vectorized(t)
def corr_coef_from_outcomes(outcomes):
"""
Comp
|
ute correrlation coefficient from an array of switching
outcomes.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return np.corrcoef(outcomes[0,:], outcomes[1,:])[0,1]
def software_demod(t, freq, Is, Qs):
"""
Demodulate I and Q data in software. This method uses
ADC frequency for demodulation.
Input:
t: time vector during which to demodulate data (ns).
freq: demodulation frequency (GHz).
Is: I data.
Qs: Q data.
Output:
Id, Qd: demodulated I and Q.
"""
demod = 2 * np.pi * t * freq
Sv = np.sin(demod)
Cv = np.cos(demod)
Id = np.mean(Is * Cv - Qs * Sv)
Qd = np.mean(Is * Sv + Qs * Cv)
return Id, Qd
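# A minimal usage sketch with made-up timing counts, assuming the default
# thresholds (0, 1253); the values are illustrative only.
if __name__ == '__main__':
    sample = np.array([100, 200, 1500, 50, 300])
    print(mean_time(sample))   # mean/std of the counts inside (0, 1253)
    print(prob(sample))        # switching probability: 4 of 5 counts -> 0.8
    print(outcomes(sample))    # thresholded outcomes: array([1, 1, 0, 1, 1])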
|
ppolewicz/ant-colony
|
antcolony/ant_move.py
|
Python
|
bsd-3-clause
| 3,648
| 0.006853
|
from edge import DummyEdgeEnd
from simulation_event import AbstractSimulationEvent
from stats import TripStats
class AbstractAntMove(AbstractSimulationEvent):
def __init__(self, ant, origin, destination, end_time, pheromone_to_drop, trip_stats):
self.ant = ant
self.origin = origin
self.destination = destination
if self.origin is not None and self.destination is not None:
if self.origin.edge is not None and self.destination.edge is not None:
#print 'origin', self.origin
#print 'destination', self.destination
assert self.origin.edge == self.destination.edge
self.end_time = end_time
self.pheromone_to_drop = pheromone_to_drop
self.trip_stats = trip_stats
def process_start(self):
self.origin.drop_pheromone(self.pheromone_to_drop)
return frozenset((self.origin.edge, self.origin.point))
def process_end(self, reality, stats):
changed = [self.destination.edge]
self.trip_stats.edge_visited(self.destination.edge)
self.destination.drop_pheromone(s
|
elf.pheromone_to_drop)
if not self.destination.point.is_anthill() and self.destination.point.food > 0 and not self.ant.food: # ant has found the food
changed.append(self.destination.point)
self.trip_stats.food_found()
self.destination.point.food -= 1
self.ant.food += 1
stats.food_found(self.trip_stats)
stats.present()
elif self.de
|
stination.point.is_anthill(): # ant has returned to the anthill
if self.ant.food: # with food
changed.append(self.destination.point)
self.destination.point.food += self.ant.food
self.trip_stats.back_home()
new_ant = self.ant.__class__(self.ant.world_parameters)
return AntRestartMove(new_ant, anthill=DummyEdgeEnd(self.destination.point), end_time=reality.world.elapsed_time), frozenset(changed)
else: # with no food
self.trip_stats.reset_route()
new_destination_edge, pheromone_to_drop = self.ant.tick(self.destination.point)
assert new_destination_edge in (end.edge for end in self.destination.point.edge_ends), 'Illegal ant move'
assert reality.environment_parameters.min_pheromone_dropped_by_ant <= pheromone_to_drop <= reality.environment_parameters.max_pheromone_dropped_by_ant, 'Illegal ant pheromone drop: %s' % (repr(pheromone_to_drop),)
self.trip_stats.normal_move(new_destination_edge.cost)
new_destination = new_destination_edge.get_other_end_by_point(self.destination.point)
origin = new_destination_edge.get_other_end(new_destination)
end_time = reality.world.elapsed_time + new_destination_edge.cost
return AntMove(
ant=self.ant,
origin=origin,
destination=new_destination,
end_time=end_time,
pheromone_to_drop=pheromone_to_drop,
trip_stats=self.trip_stats,
), frozenset(changed)
def __repr__(self):
return '%s@%s' % (self.__class__.__name__, self.end_time,)
class AntRestartMove(AbstractAntMove):
def __init__(self, ant, anthill, end_time):
super(AntRestartMove, self).__init__(ant, None, anthill, end_time=end_time, pheromone_to_drop=0, trip_stats=TripStats())
def process_start(self):
return frozenset()
class AntStartMove(AntRestartMove):
def __init__(self, ant, anthill):
super(AntStartMove, self).__init__(ant, anthill, end_time=0)
class AntMove(AbstractAntMove):
pass
|
JustinWingChungHui/okKindred
|
custom_user/admin.py
|
Python
|
gpl-2.0
| 3,150
| 0.00381
|
'''
from https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#specifying-a-custom-user-model
'''
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import gettext_lazy as _
from custom_user.models import User
class UserCr
|
eationForm(forms.ModelForm):
"""
A form for creating new users. Includes all the required
fields, plus a repeated password.
"""
password1 = forms.CharField(label=_('Password'), widget=forms.PasswordInput)
|
password2 = forms.CharField(label=_('Password confirmation'), widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email',)
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(_("Passwords don't match"))
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('email', 'password', 'is_active', 'is_superuser')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class MyUserAdmin(UserAdmin):
# The forms to add and change user instances
form = UserChangeForm
add_form = UserCreationForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'is_superuser')
list_filter = ('is_superuser',)
fieldsets = (
(None, {'fields': ('email','name', 'password', 'family')}),
('Permissions', {'fields': ('is_superuser','is_active',)}),
('Settings', {'fields': ('language','receive_update_emails','receive_photo_update_emails')}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}
),
)
search_fields = ('email',)
ordering = ('email',)
filter_horizontal = ()
raw_id_fields = ('family',)
# Now register the new UserAdmin...
admin.site.register(User, MyUserAdmin)
|
strizhechenko/twitterbots
|
memes_zaebali.py
|
Python
|
gpl-3.0
| 857
| 0.001167
|
# coding: utf-8
__author__ = "@strizhechenko"
import sys
from morpher import Morpher
from twitterbot_utils import Twibot
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
bot = Twibot()
morphy = Morpher()
def tweets2words(tweets):
string = " ".join([tweet.text for tweet in tweets])
return morphy.process_to_words(string)
@sched.scheduled_job('interval', minutes=15)
def d
|
o_tweets():
print 'New tick'
words = tweets2word
|
s(bot.fetch_list(list_id=217926157))
for word in words:
tweet = morphy.word2phrase(word)
bot.tweet(tweet)
print 'post', tweet.encode('utf-8')
@sched.scheduled_job('interval', hours=24)
def do_wipe():
print 'Wipe time'
bot.wipe()
if __name__ == '__main__':
do_tweets()
if '--test' in sys.argv:
exit(0)
sched.start()
|
cemsbr/python-openflow
|
pyof/v0x04/controller2switch/group_mod.py
|
Python
|
mit
| 2,734
| 0
|
"""Modify Group Entry Message."""
from enum import IntEnum
from pyof.foundation.base import GenericMessage
from pyof.foundation.basic_types import (
FixedTypeList, Pad, UBInt8, UBInt16, UBInt32)
from pyof.v0x04.common.header import Header, Type
from pyof.v0x04.controller2switch.common import Bucket
__all__ = ('GroupMod', 'GroupModCommand', 'GroupType', 'Group',
'ListOfBuckets')
class Group(IntEnum):
"""Group numbering. Groups can use any number up to attr:`OFPG_MAX`."""
#: Last usable group number.
OFPG_MAX = 0xffffff00
#: Fake groups.
#: Represents all groups for group delete commands.
OFPG_ALL = 0xfffffffc
#: Wildcard group used only for flow stats requests.
# Select all flows regardless of group (including flows with no group).
OFPG_ANY = 0xffffffff
class GroupModCommand(IntEnum):
"""Group commands."""
#: New group.
OFPGC_ADD = 0
#: Modify all matching groups.
OFPGC_MODIFY = 1
#: Delete all matching groups.
OFPGC_DELETE = 2
class GroupType(IntEnum):
"""Group types. Range [128, 255] is reserved for experimental use."""
#: All (multicast/broadcast) group.
OFPGT_ALL = 0
#: Select group.
OFPGT_SELECT = 1
#: Indirect group.
OFPGT_INDIRECT = 2
#: Fast failover group.
OFPGT_FF = 3
class ListOfBuckets(FixedTypeList):
"""List of buckets.
Represented by instances of Bucket.
"""
def __init__(self, items=None):
"""Create a ListOfBuckets with the optional parameters below.
Args:
items (Bucket): Instance or a list of instances.
"""
super().__init__(pyof_class=Bucket, items=items)
class GroupMod(GenericMessage):
"""Group setup and teardown (controller -> datapath)."""
header = Header(message_type
|
=Type.OFPT_GROUP_MOD)
command = UBInt16(enum_ref=GroupModCommand)
group_type = UBInt8()
#: Pad to 64 bits.
pad = Pad(1)
group_id = UBInt32()
buckets
|
= ListOfBuckets()
def __init__(self, xid=None, command=None, group_type=None, group_id=None,
buckets=None):
"""Create a GroupMod with the optional parameters below.
Args:
xid (int): Header's transaction id. Defaults to random.
command (GroupModCommand): One of OFPGC_*.
group_type (GroupType): One of OFPGT_*.
group_id (int): Group identifier.
buckets (:class:`ListOfBuckets`): The length of the bucket
array is inferred from the length field in the header.
"""
super().__init__(xid)
self.command = command
self.group_type = group_type
self.group_id = group_id
self.buckets = buckets
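# A minimal construction sketch for the class above; the field values are
# illustrative only and not taken from any OpenFlow spec example.
#   msg = GroupMod(xid=1, command=GroupModCommand.OFPGC_ADD,
#                  group_type=GroupType.OFPGT_ALL, group_id=1,
#                  buckets=ListOfBuckets())
#   wire_bytes = msg.pack()  # assumes the usual GenericMessage pack() behaviour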
|
g-weatherill/catalogue_toolkit
|
eqcat/catalogue_query_tools.py
|
Python
|
agpl-3.0
| 60,867
| 0.001824
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2015 GEM Foundation
#
# The Catalogue Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# with this download. If not, see <http://www.gnu.org/licenses/>
#!/usr/bin/env/python
"""
Collection of Catalogue D
|
atabase Query Tools
"""
import h5py
import re
import numpy as np
import pandas as pd
from copy import copy, deepcopy
from datetime import datetime, date, time
from collections import OrderedDict
import matplotlib
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
i
|
mport matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm
import eqcat.utils as utils
from eqcat.regression_models import function_map
from matplotlib.path import Path
from scipy import odr
from eqcat.isf_catalogue import (Magnitude, Location, Origin,
Event, ISFCatalogue)
try:
from mpl_toolkits.basemap import Basemap
except:
print("Basemap not installed or unavailable!")
print("Catalogue Plotting Functions will not work")
# RESET Axes tick labels
matplotlib.rc("xtick", labelsize=14)
matplotlib.rc("ytick", labelsize=14)
# Switch to Type 1 fonts
matplotlib.rcParams["pdf.fonttype"] = 42
matplotlib.rcParams["ps.fonttype"] = 42
matplotlib.rcParams["ps.useafm"] = True
class CatalogueDB(object):
"""
Holder class for the catalogue database
"""
def __init__(self, filename=None):
"""
Instantiate the class. If a filename is supplied this will load the
data from the file
:param str filename:
Path to input file
"""
self.filename = filename
self.origins = []
self.magnitudes = []
self.number_origins = None
self.number_magnitudes = None
self.load_data_from_file()
def load_data_from_file(self):
"""
If a filename is specified then will import data from file
"""
if self.filename:
self.origins = pd.read_hdf(self.filename, "catalogue/origins")
self.magnitudes = pd.read_hdf(self.filename,
"catalogue/magnitudes")
_ = self._get_number_origins_magnitudes()
else:
pass
def _get_number_origins_magnitudes(self):
"""
Returns the number of origins and the number of magnitudes
"""
self.number_origins = len(self.origins)
self.number_magnitudes = len(self.magnitudes)
return self.number_origins, self.number_magnitudes
def export_current_selection(self, output_file):
"""
Exports the current selection to file
"""
store = pd.HDFStore(output_file)
store.append("catalogue/origins", self.origins)
store.append("catalogue/magnitudes", self.magnitudes)
store.close()
def build_isf(self, identifier, name):
"""
Creates an instance of the ISFCatalogue class from the hdf5 format
:param str identifier:
Identifier string of the ISFCatalogue object
:param str name:
Name for the ISFCatalogue object
:returns:
Catalogue as instance of :class: ISFCatalogue
"""
isf_catalogue = ISFCatalogue(identifier, name)
event_groups = self.origins.groupby("eventID")
mag_groups = self.magnitudes.groupby("eventID")
mag_keys = list(mag_groups.indices.keys())
ngrps = len(event_groups)
for iloc, grp in enumerate(event_groups):
if (iloc % 1000) == 0:
print("Processing event %d of %d" % (iloc, ngrps))
# Get magnitudes list
if grp[0] in mag_keys:
# Magnitudes associated to this origin
mag_list = self._get_magnitude_classes(
mag_groups.get_group(grp[0]))
else:
mag_list = []
# Get origins
origin_list = self._get_origin_classes(grp[1], mag_list)
event = Event(grp[0], origin_list, mag_list)
isf_catalogue.events.append(event)
return isf_catalogue
def _get_origin_classes(self, orig_group, mag_list):
"""
Gets the Origin class representation for a particular event
:param orig_group:
Pandas Group object
:param list:
List of :class: Magnitude objects
"""
origin_list = []
norig = orig_group.shape[0]
for iloc in range(0, norig):
# Get location
location = Location(orig_group.originID.values[iloc],
orig_group.longitude.values[iloc],
orig_group.latitude.values[iloc],
orig_group.depth.values[iloc],
orig_group.semimajor90.values[iloc],
orig_group.semiminor90.values[iloc],
orig_group.error_strike.values[iloc],
orig_group.depth_error.values[iloc])
# origin
orig_date = date(orig_group.year.values[iloc],
orig_group.month.values[iloc],
orig_group.day.values[iloc])
micro_seconds = (orig_group.second.values[iloc] -
np.floor(orig_group.second.values[iloc])) * 1.0E6
seconds = int(orig_group.second.values[iloc])
if seconds > 59:
seconds = 0
minute_inc = 1
else:
minute_inc = 0
orig_time = time(orig_group.hour.values[iloc],
orig_group.minute.values[iloc] + minute_inc,
seconds,
int(micro_seconds))
origin = Origin(orig_group.originID.values[iloc],
orig_date,
orig_time,
location,
orig_group.Agency.values[iloc],
is_prime=bool(orig_group.prime.values[iloc]),
time_error = orig_group.time_error.values[iloc])
for mag in mag_list:
if mag.origin_id == origin.id:
origin.magnitudes.append(mag)
origin_list.append(origin)
return origin_list
def _get_magnitude_classes(self, mag_group):
"""
For a given event, returns the list of magnitudes
:param mag_group:
Group of magnitudes for a given event as instance of Pandas
Group object
"""
mag_list = []
nmags = mag_group.shape[0]
for iloc in range(0, nmags):
mag = Magnitude(mag_group.eventID.values[iloc],
mag_group.originID.values[iloc],
mag_group.value.values[iloc],
mag_group.magAgency.values[iloc],
mag_group.magType.values[iloc],
mag_group.sigma.values[iloc])
mag.magnitude_id = mag_group.magnitudeID.values[iloc]
mag_list.append(mag)
return mag_list
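# A brief usage sketch for CatalogueDB (hypothetical file name; the HDF5 store
# is assumed to contain the "catalogue/origins" and "catalogue/magnitudes"
# tables read by load_data_from_file):
#   db = CatalogueDB("my_catalogue.hdf5")
#   print(db.number_origins, db.number_magnitudes)
#   isf = db.build_isf("CAT1", "Example catalogue")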
class CatalogueSelector(object):
"""
Tool to select sub-sets of the catalogue
"""
def __init__(self, catalogue, create_copy=True):
"""
"""
self.catalogue = catalogue
self.copycat = create_copy
def _select_by_origins(self, idx, select_type="any"):
"""
Returns a catalogue selected from the original catalogue by
origin
:param idx:
Pandas Series object indicating the truth of an array
"""
if select_type == "all":
output_catalogue = CatalogueDB()
output_catalog
|
nakamura9/deploy_ad_server
|
client/omxplayer/myomx.py
|
Python
|
mit
| 4,075
| 0.004663
|
import socket
from subprocess import Popen, PIPE, STDOUT
import os
import time
import string
import requests
import json
import omxplayer
class UnsupportedFileTypeException(Exception):
'''Raised if the file type is not among the list of supported types'''
pass
class FileNotFoundException(Exception):
'''raised if the file is not valid'''
pass
class OmxCommsError(Exception):
'''raised if a command failed to execute'''
pass
class Omx(object):
def __init__(self):
# connection attrs
# private playlist var, stores list of file paths
# mirrors the list in the player at all times
self._playlist = []
self._player = None
        # used to determine if a file's extension is among the supported types
self.supported = ["mp4", "avi", "mkv",
"flv", ".aac", "3gp"] # add more later
# creating an instance of the vlc window
# local socket connection to the vlc player
@property
def playlist(self):
'''returns list of file paths'''
return self._playlist
@property
def connection_open(self):
return self._player.is_playing()
@playlist.setter
def playlist(self, arg):
"""Takes a string, tuple or a list as an argument and
updates the player's playlist and the local_playlist variable
enqueues the vlc object with a playlist of all the files stored in it
can only add files to the playlist"""
if isinstance(arg, (list, tuple)):
for path in arg:
self.check_path(path)
if not path in self._playlist:
data = self._enqueue(path)
elif isinstance(arg, str):
self.check_path(arg)
if not arg in self._playlist:
data = self._enqueue(arg)
@playlist.deleter
def playlist(self):
'''clears the local playlist var and the remote one'''
self._playlist = []
self.clear()
def create_player(self):
if self.playlist == []:
            raise Exception("The video player has no files to add")
else:
self._player = omxplayer.OMXPlayer(self._playlist[0])
def check_path(self, path):
'''Ensures all files added to the application are
valid paths.'''
if not os.path.isfile(path):
raise FileNotFoundException()
path, file = os.path.split(path)
name, ext = file.split(".")
if ext not in self.supported:
raise UnsupportedFileTypeException()
def toggle_fullscreen(self):
'''For compatibility'''
return True
def toggle_loop(self):
'''for compatibility'''
return True
def pause(self):
"""Checks the current state to make sure the player is playing something"""
if self._player:
self._player.pause()
def play(self):
"""First checks if a valid file is currently loaded."""
if self._player:
self._player.play()
def stop(self):
"""checks first if there is something to stop"""
if self._player:
self._player.stop()
    def _enqueue(self, path):
        '''adds a file to the local playlist'''
        # Append to the private list directly; assigning to self.playlist here
        # would re-enter the property setter and recurse indefinitely.
        self._playlist.append(path)
def clear(self):
'''clears all files from the playlist'''
del self.playlist
def playlist_loop(self):
"""Get the currently playing video
get its remaining time by subtracting its
current time from its duration and creating a new instance for each file"""
if not self._player:
self.create_player()
while True:
time.sleep(0.5)
remaining = sel
|
f._player.duration() - self._player.position()
if remaining < 1:
current = self._playlist.index(self._player.get_source())
if current < len(self._playlist) - 2:
next = self._playlist[current + 1]
else: next = self._playlist[0]
|
self._player.load(next)
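# A brief usage sketch (requires omxplayer and real media files, so it is left
# as comments; the paths are illustrative only):
#   player = Omx()
#   player.playlist = ['/home/pi/ads/clip1.mp4', '/home/pi/ads/clip2.mp4']
#   player.create_player()
#   player.playlist_loop()   # blocks, cycling through the playlist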
|
andydrop/x17papertrail
|
abook2pdf.py
|
Python
|
gpl-3.0
| 3,106
| 0.027688
|
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import cm
import operator
import os
import ConfigParser
import string
config = ConfigParser.ConfigParser()
config.read(os.environ["HOME"] + "/.abook/addressbook")
config.remove_section('format')
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
buchstabe = "A"
Title = "Hello world"
pageinfo = "platypus example"
def Pages(canvas, doc):
canvas.saveState()
canvas.restoreState()
def go(buchstabe):
doc = SimpleDocTemplate("phello.pdf")
Story = []
style = styles["Normal"]
addresses=[]
for s in config.sections():
nb=""
ub=""
mb=""
if config.has_option(s,'name'):
nb = "<b>" + config.get(s,'name') + "</b><br/>"
worte=config.get(s,'name').split()
print len(worte)
if len(worte)<2:
nachname=worte[0]
else:
nachname=worte[1]
anfangsbuchstabe=nachname[0:1]
if anfangsbuchstabe!=buchstabe:
buchstabe=anfangsbuchstabe
print buchstabe
p = Table(addresses)
p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
('ALIGN',(0,-1),(0,-1),'RIGHT')]))
Story.append(p)
Story.append(PageBreak())
addresses=[]
if config.has_option(s,'address'):
nb = nb + config.get(s,'address') + "<br/>"
if config.has_option(s,'zip'):
nb = nb + config.get(s,'zip') + " "
if config.has_option(s,'city'):
nb = nb + config.get(s,'city') + "<br/>"
if config.has_option(s,'state'):
nb = nb + config.get(s,'state') + " - "
if config.has_option(s,'country'):
nb = nb + config.get(s,'country') + "<br/>"
nb = nb +"<br/>"
if config.has_option(s,'phone'):
ub= "Fon: " + config.get(s,'phone') + "<br/>"
if config.has_option(s,'mobile'):
ub= ub + "Mobi: " + config.get(s,'mobile') + "<br/>"
if config.has_option(s,'email'):
ub= ub + config.get(s,'email').replace(',','<br/>') + "<br/>"
ub=ub+"<br/>"
if config.has_option(s,'custom3'):
mb= config.get(s,'custom3') + "<br/>"
mb=mb+"<br/>"
nameblock = Paragraph(nb,style)
numberblock = Paragraph(ub,style)
middleblock = Paragraph(mb,style)
addresses.append([nameblock,middleblock,numberblock])
p = Table(addresses)
p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
('ALIGN',(0,-1),(0,-1),'RIGHT')]))
Story.append(p)
doc.build(Story,
|
onFirstPage=Pa
|
ges, onLaterPages=Pages)
go(buchstabe)
|
ant9000/websup
|
cli/db.py
|
Python
|
gpl-3.0
| 1,581
| 0.003163
|
import sqlite3
class Database:
def __init__(self, dbfile, page_rows=100):
self.dbfile = dbfile
self.page_rows = page_rows
self.conn = sqlite3.connect(self.dbfile)
self.conn.row_factory = sqlite3.Row
cursor = self.conn.cursor()
cursor.execute(
"CREATE TABLE IF NOT EXISTS messages "
"(timestamp TEXT, message TEXT);"
)
cursor.execute(
"CREATE INDEX IF NOT EXISTS messages_timestamp_idx "
"ON messages (timestamp);"
)
self.conn.commit()
def __del__(self):
if self.conn:
self.conn.close()
self.conn = None
def count(self):
cursor = self.conn.cursor()
n = cursor.execute("SELEC
|
T COUNT(*) FROM messages").fetchone()[0]
return n
def messages(self, offset=0):
cursor = self.conn.cursor()
rows = cursor.execute(
"SELECT * FROM messages "
"ORDER BY timestamp DESC "
"LIMIT ? "
"OFFSET ?",
[self.page_rows, offset]
).fetchall()
return [ dict(row) for row in rows ]
def save(self, item):
saved = False
if
|
item.item_type == 'message':
timestamp = item.content['timestamp']
message = item.asJson()
cursor = self.conn.cursor()
cursor.execute(
"INSERT INTO messages VALUES (?,?)",
[timestamp, message]
)
self.conn.commit()
saved = True
return saved
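# A minimal usage sketch. _FakeMessage is a hypothetical stand-in exposing only
# the attributes Database.save() reads (item_type, content, asJson()).
class _FakeMessage(object):
    item_type = 'message'
    def __init__(self, timestamp, text):
        self.content = {'timestamp': timestamp}
        self.text = text
    def asJson(self):
        return '{"text": "%s"}' % self.text

if __name__ == '__main__':
    db = Database(':memory:', page_rows=10)  # in-memory sqlite for the demo
    db.save(_FakeMessage('2024-01-01T00:00:00', 'hello'))
    print(db.count())     # 1
    print(db.messages())  # one row, newest first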
|
FunTimeCoding/directory-tools
|
directory_tools/application.py
|
Python
|
mit
| 269
| 0
|
from flask import Flask
|
from os.path import expanduser
def create_app():
app = Flask(__name__)
app.config.from_pyfile(expanduser('~/.directory-tools.py'))
from directory_tools.frontend import frontend
|
app.register_blueprint(frontend)
return app
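# A minimal runner sketch for the factory above (assumes ~/.directory-tools.py
# exists and holds the Flask settings it loads):
#   app = create_app()
#   app.run(host='127.0.0.1', port=5000)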
|
planrich/pypy-simd-benchmark
|
vec.py
|
Python
|
gpl-3.0
| 1,851
| 0.002161
|
import array
class vec(object):
@staticmethod
def sized(size, type='d'):
return vec([0] * size, type)
@staticmethod
def of(content, type='d'):
return vec(content, type)
def __init__(self, content, type='d'):
self.size = len(content)
self.type = type
self.array = array.array(type, content)
def __add__(self, other):
return self.add(other)
def add(self, other, out=None):
assert isinstance(other, vec)
result = out
if result is None:
result = vec([0] * self.size, self.type)
if self.size != other.size:
raise Exception("size mismatch! %d != %d" % (self.size,other.size))
i = 0
while i < self.size:
result.array[i] = self.array[i] + other.array[i]
i += 1
return result
def __sub__(self, other):
return self.sub(other)
def sub(self, other, out=None):
assert isinstance(other, vec)
result = out
if result is None:
result = vec([0] * self.size, self.type)
if self.size != other.size:
raise Exce
|
ption("size mismatch! %d != %d" % (self.size,other.size))
i = 0
while i < self.size:
result.array[i] = self.array[i] - other.array[i]
i += 1
return result
def __mul__(self, other):
return self.mul(other)
def mul(self, other, out=None):
assert isinstance(other, vec)
result = out
if result is None:
result = vec([0] * se
|
lf.size, self.type)
if self.size != other.size:
raise Exception("size mismatch! %d != %d" % (self.size,other.size))
i = 0
while i < self.size:
result.array[i] = self.array[i] * other.array[i]
i += 1
return result
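# A short usage sketch of the vec class above; values are illustrative only.
if __name__ == '__main__':
    a = vec.of([1.0, 2.0, 3.0])
    b = vec.of([4.0, 5.0, 6.0])
    c = a + b                        # element-wise sum -> [5.0, 7.0, 9.0]
    d = a.mul(b, out=vec.sized(3))   # element-wise product into a preallocated vec
    print(list(c.array))             # [5.0, 7.0, 9.0]
    print(list(d.array))             # [4.0, 10.0, 18.0]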
|
gorocacher/payload
|
payload/api/config.py
|
Python
|
apache-2.0
| 954
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, eith
|
er express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Server Specific Configurations
server = {
'port': '9859',
'host': '0.0.0.0',
}
# Pecan Application Configurations
app = {
|
'root': 'payload.api.controllers.root.RootController',
'modules': ['payload.api'],
'static_root': '%(confdir)s/public',
'template_path': '%(confdir)s/payload/api/templates',
}
|
hfeeki/cmdln
|
test/cmdln_main2.py
|
Python
|
mit
| 421
| 0.011876
|
#!/usr/bin/env python
"""
$ python cmdln_main2.py
Thi
|
s is my shell.
$ python cmdln_main2.py foo
hello from foo
"""
import sys
import cmdln
class Shell(cmdln.RawCmdln):
"This is my shell."
name = "shell"
def do_foo(self, argv
|
):
print("hello from foo")
if __name__ == "__main__":
shell = Shell()
retval = shell.cmd(sys.argv[1:]) # just run one command
sys.exit(retval)
|
beslave/space-king
|
space_king/models/user.py
|
Python
|
gpl-3.0
| 1,986
| 0
|
# coding: utf-8
from libs.redis_storage import db1
class User(object):
def __init__(self, **kwargs):
pk = kwargs.get('pk') or db1.incr('new_user_id')
kwargs['pk'] = pk
db1.hmset('user::{}'.format(pk), kwargs)
super(User, self).
|
__setattr__('pk', pk)
super(User, self).__setattr__(
'__info__',
db1.hgetall(self.db_key) or {}
)
for k, v in self.__info__.iteritems():
self.__info__[k] = v.decode('utf-8')
@property
def short_info(self):
return {field: getattr(self, field) for field in [
'fio',
'sex',
'avatar',
'battles',
'wins',
'defeat
|
s',
'last_update'
]}
@property
def db_key(self):
return 'user::{}'.format(self.pk)
@property
def fio(self):
return u'{} {}'.format(self.last_name or u'', self.first_name or u'')
@property
def battles(self):
return int(self.__info__.get('battles', 0))
@property
def wins(self):
return int(self.__info__.get('wins', 0))
@property
def defeats(self):
return int(self.__info__.get('defeats', 0))
@property
def last_update(self):
return int(self.__info__.get('last_update', 0))
def __setattr__(self, attr, value):
self.__info__[attr] = value
db1.hset(self.db_key, attr, value)
def __getattr__(self, attr):
return self.__info__.get(attr)
def incr(self, attr, by=1):
db1.hincrby(self.db_key, attr, by)
def get_user_by_service(service, service_user_id):
user_pk = db1.get('{}_user_id::{}'.format(service, service_user_id))
if user_pk:
return User(pk=user_pk)
def add_service_to_user(service, service_user_id, user_pk):
db1.set('{}_user_id::{}'.format(service, service_user_id), user_pk)
user = User(pk=user_pk)
setattr(user, '{}_user_id'.format(service), service_user_id)
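# A brief usage sketch (requires the redis instance behind libs.redis_storage.db1,
# so it is left as comments; the names and ids are illustrative only):
#   user = User(first_name=u'Ada', last_name=u'Lovelace', sex='f')
#   add_service_to_user('vk', 12345, user.pk)
#   same_user = get_user_by_service('vk', 12345)
#   same_user.incr('wins')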
|
scith/htpc-manager_ynh
|
sources/modules/mylar.py
|
Python
|
gpl-3.0
| 9,922
| 0.001008
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
import logging
import requests
from cherrypy.lib.auth2 import require, member_of
from urllib import urlencode
from json import loads
from htpc.helpers import get_image, serve_template, fix_basepath
from StringIO import StringIO
from contextlib import closing
class Mylar(object):
def __init__(self):
self.logger = logging.getLogger('modules.mylar')
htpc.MODULES.append({
'name': 'Mylar',
'id': 'mylar',
'test': htpc.WEBDIR + 'mylar/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'mylar_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'mylar_name'},
{'type': 'text', 'label': 'IP / Host *', 'name': 'mylar_host'},
{'type': 'text', 'label': 'Port *', 'name': 'mylar_port'},
{'type': 'text', 'label': 'Basepath', 'name': 'mylar_basepath'},
{'type': 'text', 'label': 'API key', 'name': 'mylar_apikey'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'mylar_ssl'},
{"type": "text", "label": "Reverse proxy link", "placeholder": "", "desc": "Reverse proxy link ex: https://hp.domain.com", "name": "mylar_reverse_proxy_link"}
]
})
@cherrypy.expose()
@require()
def index(self):
return serve_template('mylar.html',
scriptname='mylar',
webinterface=Mylar.webinterface()
)
@cherrypy.expose()
@require()
def GetThumb(self, url=None, thumb=None, h=None, w=None, o=100):
""" Parse thumb to get the url and send to htpc.proxy.get_image """
self.logger.debug("Trying to fetch image via %s", url)
if url is None and thumb is None:
# To stop if the image is missing
return
# Should never used thumb, to lazy to remove it
if thumb:
url = thumb
return get_image(url, h, w, o)
@cherrypy.expose()
@require()
def viewcomic(self, artist_id):
response = self.fetch('getComic&id=%s' % artist_id)
for a in response['comic']:
a['StatusText'] = _get_status_icon(a['Status'])
a['can_download'] = True if a['Status'] not in ('Downloaded', 'Snatched', 'Wanted') else False
template = htpc.LOOKUP.get_template('mylar_view_comic.html')
return template.render(
scriptname='mylar_view_comic',
comic_id=artist_id,
comic=response['comic'][0],
comicimg=response['comic'][0]['ComicImageURL'],
issues=response['issues'],
description=response['comic'][0]['Description'],
module_name=htpc.settings.get('mylar_name', 'Mylar')
)
@staticmethod
def _build_url(ssl=None, host=None, port=None, base_path=None):
ssl = ssl or htpc.settings.get('mylar_ssl')
host = host or htpc.settings.get('mylar_host')
port = port or htpc.settings.get('mylar_port')
path = fix_basepath(htpc.settings.get('mylar_basepath', '/'))
url = '{protocol}://{host}:{port}{path}'.format(
protocol='https' if ssl else 'http',
host=host,
port=port,
path=path,
)
return url
@staticmethod
def webinterface():
url = Mylar._build_url()
if htpc.settings.get('mylar_reverse_proxy_link'):
url = htpc.settings.get('mylar_reverse_proxy_link')
return url
@staticmethod
def _build_api_url(command, url=None, api_key=None):
return '{url}api?apikey={api_key}&cmd={command}'.format(
url=url or Mylar._build_url(),
api_key=api_key or htpc.settings.get('mylar_apikey'),
command=command,
)
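    # For illustration, with hypothetical settings mylar_host='127.0.0.1',
    # mylar_port='8090', mylar_basepath='/mylar/', SSL off and
    # mylar_apikey='abc123', _build_api_url('getIndex') yields:
    #   http://127.0.0.1:8090/mylar/api?apikey=abc123&cmd=getIndex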
@cherrypy.expose()
@cherrypy.tools.json_out()
@require()
def getserieslist(self):
return self.fetch('getIndex')
@cherrypy.expose()
@cherrypy.tools.json_out()
@require()
def GetWantedList(self):
return self.fetch('getWanted')
@cherrypy.expose()
@cherrypy.tools.json_out()
@require()
def SearchForComic(self, name):
return self.fetch('findComic&%s' % urlencode({'name': name.encode(encoding='UTF-8', errors='strict')}))
@cherrypy.expose()
@require()
def RefreshComic(self, Id):
return self.fetch('refreshComic&id=%s' % Id, text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def DeleteComic(self, Id):
return self.fetch('delComic&id=%s' % Id, text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def PauseComic(self, Id):
return self.fetch('pauseComic&id=%s' % Id, text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def ResumeComic(self, Id):
return self.fetch('resumeComic&id=%s' % Id, text=True)
@cherrypy.expose()
@require()
def QueueIssue(self, issueid=None, new=False, **kwargs):
# Force check
if new:
return self.fetch('queueIssue&id=%s&new=True' % issueid, text=True)
return self.fetch('queueIssue&id=%s' % issueid, text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def UnqueueIssue(self, issueid, name=''):
        self.logger.debug('unqueued %s' % name)
return self.fetch('unqueueIssue&id=%s' % issueid, text=True)
@cherrypy.expose()
@require()
def DownloadIssue(self, issueid, name=''):
""" downloads a issue via api and returns it to the browser """
self.logger.debug('Downloading issue %s' % name)
getfile = self.fetch('downloadIssue&id=%s' % issueid, img=True)
try:
with closing(Strin
|
gIO()) as f:
|
f = StringIO()
f.write(getfile)
return cherrypy.lib.static.serve_fileobj(f.getvalue(), content_type='application/x-download', disposition=None, name=name, debug=False)
except Exception as e:
self.logger.error('Failed to download %s %s %s' % (name, issueid, e))
@cherrypy.expose()
@cherrypy.tools.json_out()
@require()
def AddComic(self, id, **kwargs):
self.logger.debug('Added %s to mylar' % kwargs.get('name', ''))
return self.fetch('addComic&id=%s' % id)
@cherrypy.expose()
@cherrypy.tools.json_out()
@require()
def GetHistoryList(self):
return self.fetch('getHistory')
@cherrypy.expose()
@require(member_of(htpc.role_user))
def ForceSearch(self):
return self.fetch('forceSearch', text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def ForceProcess(self, dir_=None):
if dir_:
return self.fetch('forceProcess?dir_=%s' % dir_, text=True)
return self.fetch('forceProcess', text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def ForceActiveArtistsUpdate(self):
return self.fetch('forceActiveComicsUpdate', text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def ShutDown(self):
return self.fetch('shutdown', text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def UpDate(self):
return self.fetch('update', text=True)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def ReStart(self):
return self.fetch('restart', text=True)
def fetch(self, command, url=None, api_key=None, img=False, json=True, text=False):
url = Mylar._build_api_url(command, url, api_key)
try:
if img or text:
json = False
result = ''
self.logger.debug('calling api @ %s' % url)
# set a high timeout as some requests take a while..
response = requests.get(url, timeout=120, verify=False)
if response.status_code != 200:
self.logger.error('failed to contact mylar')
return
if text:
result = response.text
if img:
result
|
gear/motifwalk
|
research/src/mane/custom_layers.py
|
Python
|
mit
| 1,273
| 0.013354
|
"""Custom keras layers
"""
# Coding: utf-8
# File name: custom_layer.py
# Created: 2016-07-24
# Description:
## v0.0: File created. MergeRowDot layer.
from __future__ import division
from __future__ import print_function
__author__ = "Hoang Nguyen"
__email__ = "hoangnt@ai.cs.titech.ac.jp"
from keras import backend as K
from keras.engine.topology import Merge
import numpy as np
# >>> BEGIN CLASS RowDot <<<
class RowDot(Merge):
"""
Layer for elem
|
ent wise merge mul and take sum along
the second axis.
"""
##################################################################### __init__
def __init__(self, layers=None, **kwargs):
"""
Init function.
"""
super(RowDot, self).__init__(layers=None, **kwargs)
###################
|
###################################################### call
def call(self, inputs, **kwargs):
"""
Layer logic.
"""
print('Inputs 0 shape: %s' % str(inputs[0].shape))
print('Inputs 1 shape: %s' % str(inputs[1].shape))
l1 = inputs[0]
l2 = inputs[1]
output = K.batch_dot(inputs[0], inputs[1], axes=[1,1])
return output
# === End CLASS MergeRowDot <<<
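# A plain-numpy illustration of what RowDot.call computes (batch_dot with
# axes=[1, 1] is a per-row dot product); the arrays are illustrative only:
#   x = np.array([[1., 2., 3.], [4., 5., 6.]])
#   y = np.array([[1., 0., 1.], [2., 2., 2.]])
#   np.sum(x * y, axis=1)  # -> array([ 4., 30.])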
# >>> BEGIN HELPER FUNCTIONS <<<
############################################################################ dot
|
chuckchen/spark
|
python/pyspark/pandas/tests/data_type_ops/test_complex_ops.py
|
Python
|
apache-2.0
| 13,290
| 0.002859
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
import datetime
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ComplexOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([[1, 2, 3]])
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def numeric_array_pdf(self):
psers = {
"int": pd.Series([[1, 2, 3]]),
"float": pd.Series([[0.1, 0.2, 0.3]]),
"decimal": pd.Series([[decimal.Decimal(1), decimal.Decimal(2), decimal.Decimal(3)]]),
}
return pd.concat(psers, axis=1)
@property
def numeric_array_psdf(self):
return ps.from_pandas(self.numeric_array_pdf)
@property
def numeric_array_df_cols(self):
return self.numeric_array_pdf.columns
@property
def non_numeric_array_pdf(self):
psers = {
"string": pd.Series([["x", "y", "z"]]),
"date": pd.Series(
[[datetime.date(1994, 1, 1), datetime.date(1994, 1, 2), datetime.date(1994, 1, 3)]]
),
"bool": pd.Series([[True, True, False]]),
}
return pd.concat(psers, axis=1)
@property
def non_numeric_array_psdf(self):
return ps.from_pandas(self.non_numeric_array_pdf)
@property
def non_numeric_array_df_cols(self):
return self.non_numeric_array_pdf.columns
@property
def array_pdf(self):
return pd.concat([self.numeric_array_pdf, self.non_numeric_array_pdf], axis=1)
@property
def array_psdf(self):
return ps.from_pandas(self.array_pdf)
@property
def array_df_cols(self):
return self.array_pdf.columns
@property
def complex_pdf(self):
psers = {
"this_array": self.pser,
"that_array": pd.Series([[2, 3, 4]]),
"this_struct": pd.Series([("x", 1)]),
"that_struct": pd.Series([("a", 2)]),
}
return pd.concat(psers, axis=1)
@property
def complex_psdf(self):
pssers = {
"this_array": self.psser,
"that_array": ps.Series([[2, 3, 4]]),
"this_struct": ps.Index([("x", 1)]).to_series().reset_index(drop=True),
"that_struct": ps.Index([("a", 2)]).to_series().reset_index(drop=True),
}
return ps.concat(pssers, axis=1)
def test_add(self):
pdf, psdf = self.array_pdf, self.array_psdf
for col in self.array_df_cols:
self.assert_eq(pdf[col] + pdf[col], psdf[col] + psdf[col])
# Numeric array + Numeric array
for col in self.numeric_array_df_cols:
pser1, psser1 = pdf[col], psdf[col]
for other_col in self.numeric_array_df_cols:
pser2, psser2 = pdf[other_col], psdf[other_col]
self.assert_eq((pser1 + pser2).sort_values(), (psser1 + psser2).sort_values())
# Non-numeric array + Non-numeric array
self.assertRaises(
TypeError,
lambda: psdf["string"] + psdf["bool"],
)
self.assertRaises(
TypeError,
lambda: psdf["string"] + psdf["date"],
)
self.assertRaises(
TypeError,
lambda: psdf["bool"] + psdf["date"],
)
for col in self.non_numeric_array_df_cols:
pser, psser = pdf[col], psdf[col]
self.assert_eq(pser + pser, psser + psser)
# Numeric array + Non-numeric array
for numeric_col in self.numeric_array_df_cols:
for non_numeric_col in self.non_numeric_array_df_cols:
self.assertRaises(TypeError, lambda: psdf[numeric_col] + psdf[non_numeric_col])
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] - psdf[other_col])
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] * psdf[other_col])
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] / psdf[other_col])
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] // psdf[other_col])
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
psdf = self.array_psdf
for col in self.array_d
|
f_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] % psdf[other_col
|
])
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
psdf = self.array_psdf
for col in self.array_df_cols:
for other_col in self.array_df_cols:
self.assertRaises(TypeError, lambda: psdf[col] ** psdf[other_col])
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lam
|
shingonoide/odoo
|
addons/account_asset/account_asset.py
|
Python
|
agpl-3.0
| 29,332
| 0.008557
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools import float_compare
from openerp.tools.translate import _
class account_asset_category(osv.osv):
_name = 'account.asset.category'
_description = 'Asset category'
_columns = {
'name': fields.char('Name', required=True, select=1),
'note': fields.text('Note'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True, domain=[('type','=','other')]),
'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True, domain=[('type','=','other')]),
'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True, domain=[('type','=','other')]),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
'method_progress_factor': fields.float('Degressive Factor'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_end': fields.date('Ending date'),
'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January'),
'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
}
_defaults = {
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
}
def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
res = {'value':{}}
if account_asset_id:
res['value'] = {'account_depreciation_id': account_asset_id}
return res
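# Illustrative sketch (not part of the original module): the two computation
# methods described in the help text of the 'method' field above, shown with
# hypothetical values for gross value, residual value, number of depreciations
# and degressive factor.
def _example_depreciation_amounts(gross=1000.0, residual=700.0, count=5, factor=0.3):
    linear = gross / count          # Linear: Gross Value / Number of Depreciations
    degressive = residual * factor  # Degressive: Residual Value * Degressive Factor
    return linear, degressive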
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.account_move_line_ids:
raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
else:
return False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
"""
        @param ids: ids of account.asset.asset objects
@return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset
"""
cr.execute("""
SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
FROM account_asset_asset a
LEFT JOIN account_move_line l ON (l.asset_id = a.id)
WHERE a.id IN %s
GROUP BY a.id, a.purchase_date """, (tuple(ids),))
return dict(cr.fetchall())
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
#by default amount = 0
amount = 0
if i == undone_dotation_number:
amount = residual_amount
else:
if asset.method == 'linear':
amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
if asset.prorata:
amount = amount_to_depr / asset.method_number
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (amount_to_depr / asset.method_number) / total_days * days
elif i == undone_dotation_number:
amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
elif asset.method == 'degressive':
amount = residual_amount * asset.method_progress_factor
if asset.prorata:
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (residual_amount * asset.method_progress_factor) / total_days * days
elif i == undone_dotation_number:
amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
return amount
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
undone_dotation_number = asset.method_number
if asset.method_time == 'end':
end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
undone_dotation_number = 0
while depreciation_date <= end_date:
depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
undone_dotation_number += 1
if asset.prorata:
undone_dotation_number += 1
return undone_dotation_number
def compute_depreciation_board(self, cr, uid, ids, context=None):
depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
currency_obj = self.pool.get('res.currency')
for asset in self.browse(cr, uid, ids, con
|
sysadminmatmoz/ingadhoc
|
stock_multic_fix/__openerp__.py
|
Python
|
agpl-3.0
| 1,568
| 0.001276
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Multic Fix',
    'version': '8.0.1.0.1',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock Multic Fix
==================================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'stock_account',
],
'data': ['stock_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ToBaer94/PygameTowerDefense
|
buttons/sell_button.py
|
Python
|
lgpl-3.0
| 221
| 0.004525
|
from button import Button
class SellButton(Button):
    def __init__(self, image, x, y, parent):
super(SellButton, self).__init__(image, x, y, parent)
    def get_clicked(self):
self.parent.sell_tower()
|
googleapis/python-domains
|
samples/generated_samples/domains_v1_generated_domains_delete_registration_sync.py
|
Python
|
apache-2.0
| 1,539
| 0.00065
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteRegistration
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-domains
# [START domains_v1_generated_Domains_DeleteRegistration_sync]
from google.cloud import domains_v1
def sample_delete_registration():
# Create a client
client = domains_v1.DomainsClient()
# Initialize request argument(s)
request = domains_v1.DeleteRegistrationRequest(
name="name_value",
)
# Make the request
operation = client.delete_registration(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END domains_v1_generated_Domains_DeleteRegistration_sync]
|
htygithub/bokeh
|
bokeh/charts/conftest.py
|
Python
|
bsd-3-clause
| 1,071
| 0.000934
|
"""Defines chart-wide shared test fixtures."""
import numpy as np
import pandas as pd
import pytest
from bokeh.sampledata.autompg import autompg
class TestData(object):
"""Contains properties with easy access to data used across tests."""
def __init__(self):
self.cat_list = ['a', 'c', 'a', 'b']
self.list_data = [[1, 2, 3, 4], [2, 3, 4, 5]]
self.array_data = [np.array(item) for item in self.list_data]
        self.dict_data = {'col1': self.list_data[0],
'col2': self.list_data[1]}
self.pd_data = pd.DataFrame(self.dict_data)
self.records_data = self.pd_data.to_dict(orient='records')
self.auto_data = autompg
@pytest.fixture(scope='module')
def test_data():
return TestData()
@pytest.fixture(scope='module')
def wide_data_with_cat(test_data):
data = test_data.dict_data.copy()
data['col3'] = test_data.cat_list
return data
@pytest.fixture(scope='module')
def df_with_cat_index(test_data):
return pd.DataFrame(test_data.dict_data, index=test_data.cat_list)
|
aaugustin/websockets
|
example/deployment/haproxy/app.py
|
Python
|
bsd-3-clause
| 616
| 0
|
#!/usr/bin/env python
import asyncio
import os
import signal
import websockets
async def echo(websocket):
async for message in websocket:
await websocket.send(message)
async def main():
# Set the stop condition when receiving SIGTERM.
loop = asyncio.get_running_loop()
stop = loop.create_future()
loop.add_signal_handler(signal.SIGTERM, stop.set_result, None)
async with websockets.serve(
echo,
host="localhost",
        port=8000 + int(os.environ["SUPERVISOR_PROCESS_NAME"][-2:]),
):
await stop
if __name__ == "__main__":
asyncio.run(main())
|
MicroTrustRepos/microkernel
|
src/l4/pkg/python/contrib/Demo/scripts/pi.py
|
Python
|
gpl-2.0
| 928
| 0.005388
|
#! /usr/bin/env python
# Print digits of pi forever.
#
# The algorithm, using Python's 'long' integers ("bignums"), works
# with continued fractions, and was conceived by Lambert Meertens.
#
# See also the ABC Programmer's Handbook, by Geurts, Meertens & Pemberton,
# published by Prentice-Hall (UK) Ltd., 1990.
import sys
def main():
k, a, b, a1, b1 = 2L, 4L, 1L, 12L, 4L
while 1:
# Next approximation
p, q, k = k*k, 2L*k+1L, k+1L
        a, b, a1, b1 = a1, b1, p*a+q*a1, p*b+q*b1
# Print common digits
d, d1 = a//b, a1//b1
while d == d1:
output(d)
a, a1 = 10L*(a%b), 10L*(a1%b1)
d, d1 = a//b, a1//b1
def output(d):
# Use write() to avoid spaces between the digits
# Use str() to avoid the 'L'
sys.stdout.write(str(d))
# Flush so the output is seen immediately
sys.stdout.flush()
if __name__ == "__main__":
main()
|
mtlchun/edx
|
cms/djangoapps/contentstore/views/videos.py
|
Python
|
agpl-3.0
| 12,550
| 0.001594
|
"""
Views related to the video upload feature
"""
from boto import s3
import csv
from uuid import uuid4
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.http import require_GET, require_http_methods
import rfc6266
from edxval.api import create_video, get_videos_for_ids, SortDirection, VideoSortField
from opaque_keys.edx.keys import CourseKey
from contentstore.models import VideoUploadConfig
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from util.json_request import expect_json, JsonResponse
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import modulestore
from .course import get_course_and_check_access
__all__ = ["videos_handler", "video_encodings_download"]
# String constant used in asset keys to identify video assets.
VIDEO_ASSET_TYPE = "video"
# Default expiration, in seconds, of one-time URLs used for uploading videos.
KEY_EXPIRATION_IN_SECONDS = 86400
class StatusDisplayStrings(object):
"""
A class to map status strings as stored in VAL to display strings for the
video upload page
"""
# Translators: This is the status of an active video upload
_UPLOADING = ugettext_noop("Uploading")
# Translators: This is the status for a video that the servers are currently processing
_IN_PROGRESS = ugettext_noop("In Progress")
# Translators: This is the status for a video that the servers have successfully processed
_COMPLETE = ugettext_noop("Complete")
# Translators: This is the status for a video that the servers have failed to process
    _FAILED = ugettext_noop("Failed")
    # Translators: This is the status for a video for which an invalid
    # processing token was provided in the course settings
    _INVALID_TOKEN = ugettext_noop("Invalid Token")
# Translators: This is the status for a video that is in an unknown state
_UNKNOWN = ugettext_noop("Unknown")
_STATUS_MAP = {
"upload": _UPLOADING,
"ingest": _IN_PROGRESS,
"transcode_queue": _IN_PROGRESS,
"transcode_active": _IN_PROGRESS,
"file_delivered": _COMPLETE,
"file_complete": _COMPLETE,
"file_corrupt": _FAILED,
"pipeline_error": _FAILED,
"invalid_token": _INVALID_TOKEN
}
@staticmethod
def get(val_status):
"""Map a VAL status string to a localized display string"""
return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN))
@expect_json
@login_required
@require_http_methods(("GET", "POST"))
def videos_handler(request, course_key_string):
"""
The restful handler for video uploads.
GET
html: return an HTML page to display previous video uploads and allow
new ones
json: return json representing the videos that have been uploaded and
their statuses
POST
json: create a new video upload; the actual files should not be provided
to this endpoint but rather PUT to the respective upload_url values
contained in the response
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
if request.method == "GET":
if "application/json" in request.META.get("HTTP_ACCEPT", ""):
return videos_index_json(course)
else:
return videos_index_html(course)
else:
return videos_post(course, request)
@login_required
@require_GET
def video_encodings_download(request, course_key_string):
"""
Returns a CSV report containing the encoded video URLs for video uploads
in the following format:
Video ID,Name,Status,Profile1 URL,Profile2 URL
aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
"""
course = _get_and_validate_course(course_key_string, request.user)
if not course:
return HttpResponseNotFound()
def get_profile_header(profile):
"""Returns the column header string for the given profile's URLs"""
# Translators: This is the header for a CSV file column
# containing URLs for video encodings for the named profile
# (e.g. desktop, mobile high quality, mobile low quality)
return _("{profile_name} URL").format(profile_name=profile)
profile_whitelist = VideoUploadConfig.get_profile_whitelist()
videos = list(_get_videos(course))
name_col = _("Name")
duration_col = _("Duration")
added_col = _("Date Added")
video_id_col = _("Video ID")
status_col = _("Status")
profile_cols = [get_profile_header(profile) for profile in profile_whitelist]
def make_csv_dict(video):
"""
Makes a dictionary suitable for writing CSV output. This involves
        extracting the required items from the original video dict and
converting all keys and values to UTF-8 encoded string objects,
because the CSV module doesn't play well with unicode objects.
"""
# Translators: This is listed as the duration for a video that has not
        # yet reached the point in its processing by the servers where its
# duration is determined.
duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
ret = dict(
[
(name_col, video["client_video_id"]),
(duration_col, duration_val),
(added_col, video["created"].isoformat()),
(video_id_col, video["edx_video_id"]),
(status_col, video["status"]),
] +
[
(get_profile_header(encoded_video["profile"]), encoded_video["url"])
for encoded_video in video["encoded_videos"]
if encoded_video["profile"] in profile_whitelist
]
)
return {
key.encode("utf-8"): value.encode("utf-8")
for key, value in ret.items()
}
response = HttpResponse(content_type="text/csv")
# Translators: This is the suggested filename when downloading the URL
# listing for videos uploaded through Studio
filename = _("{course}_video_urls").format(course=course.id.course)
# See https://tools.ietf.org/html/rfc6266#appendix-D
response["Content-Disposition"] = rfc6266.build_header(
filename + ".csv",
filename_compat="video_urls.csv"
)
writer = csv.DictWriter(
response,
[
col_name.encode("utf-8")
for col_name
in [name_col, duration_col, added_col, video_id_col, status_col] + profile_cols
],
dialect=csv.excel
)
writer.writeheader()
for video in videos:
writer.writerow(make_csv_dict(video))
return response
def _get_and_validate_course(course_key_string, user):
"""
Given a course key, return the course if it exists, the given user has
access to it, and it is properly configured for video uploads
"""
course_key = CourseKey.from_string(course_key_string)
# For now, assume all studio users that have access to the course can upload videos.
# In the future, we plan to add a new org-level role for video uploaders.
course = get_course_and_check_access(course_key, user)
if (
settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"] and
getattr(settings, "VIDEO_UPLOAD_PIPELINE", None) and
course and
course.video_pipeline_configured
):
return course
else:
return None
def _get_videos(course):
"""
Retrieves the list of videos from VAL corresponding to the videos listed in
the asset metadata store.
"""
edx_videos_ids = [
v.asset_id.path
for v in modulestore().get_all_asset_metadata(course.id, VIDEO_ASSET_TYPE)
]
videos = list(get_videos_for_ids(edx_videos_ids, VideoSortField.created, SortDirection.desc))
|
meganlkm/do-cli
|
do_cli/commands/cmd_list.py
|
Python
|
mit
| 689
| 0.001451
|
import click
from do_cli.contexts import CTX
from do_cli.commands.common import host_commands
@click.command('list')
@click.option('-f', '--force-refresh', is_flag=True, help='Pull data from the API')
@click.option('-h', '--host-names', help='Comma separated list of host names')
@CTX
def cli(ctx, force_refresh, host_names):
"""
Show minimal data for droplets
--host-names -h Comma separated list of host names
Show minimal data for specific droplets
"""
if ctx.verbose:
click.echo("Show minimal data for
|
droplets")
click.echo(host_commands(ctx, force_refresh, host_names))
if ctx.verbose:
click.echo('---- cmd_list done ----')
|
liffiton/ATLeS
|
src/web/controller_analyze.py
|
Python
|
mit
| 6,534
| 0.00153
|
import base64
import csv
import io
import multiprocessing
import numpy as np
import sys
from collections import defaultdict
from io import StringIO
from pathlib import Path
# Import matplotlib ourselves and make it use agg (not any GUI anything)
# before the analyze module pulls it in.
import matplotlib
matplotlib.use('Agg')
from bottle import get, post, redirect, request, response, jinja2_template as template # noqa: E402
from analysis import heatmaps, process, plot # noqa: E402
from web.error_handlers import TrackParseError # noqa: E402
from common import mkdir # noqa: E402
import config # noqa: E402
def _make_stats_output(stats, all_keys, do_csv):
for i in range(len(stats)):
stat = stats[i]
for k in all_keys:
if k in stat:
val = stat[k]
if isinstance(val, (np.float32, np.float64)):
stat[k] = "%0.3f" % val
else:
stat[k] = ""
all_keys.remove('Track file') # will be added as first column
all_keys = sorted(list(all_keys))
all_keys[:0] = ['Track file'] # prepend 'Track file' header
if do_csv:
output = StringIO()
writer = csv.DictWriter(output, fieldnames=all_keys)
writer.writeheader()
for stat in stats:
writer.writerow(stat)
csvstring = output.getvalue()
output.close()
response.content_type = 'text/csv'
response.headers['Content-Disposition'] = 'attachment; filename=atles_stats.csv'
return csvstring
else:
return template('stats',
keys=all_keys, stats=stats)
@get('/stats/')
def get_stats():
trackrels = request.query.tracks.split('|')
exp_type = request.query.exp_type
stats = []
all_keys = set()
for trackrel in trackrels:
curstats = {}
curstats['Track file'] = trackrel
try:
processor = process.TrackProcessor(str(config.TRACKDIR / trackrel))
curstats.update(processor.get_setup(['experiment', 'phases', 'general']))
            curstats.update(processor.get_stats_single_table(include_phases=True))
if exp_type:
curstats.update(processor.get_exp_stats(exp_type))
except (ValueError, IndexError):
# often 'wrong number of columns' due to truncated file from killed experiment
raise(TrackParseError(trackrel, sys.exc_info()))
all_keys.update(curstats.keys())
stats.append(curstats)
return _make_stats_output(stats, all_keys, do_csv=request.query.csv)
def _do_analyze(trackrel):
trackrel = Path(trackrel)
# ensure directories exist for plot creation
trackreldir = trackrel.parent
mkdir(config.PLOTDIR / trackreldir)
# look for debug frames to create links in the trace plot
trackname = trackrel.name.replace('-track.csv', '')
dbgframedir = config.DBGFRAMEDIR / trackreldir / trackname
dbgframes = list(dbgframedir.glob("subframe*.png")) # list so TrackPlotter can re-use (instead of exhausting the iterable)
processor = process.TrackProcessor(str(config.TRACKDIR / trackrel))
plotter = plot.TrackPlotter(processor, dbgframes)
plotter.plot_heatmap()
def saveplot(filename):
plot.savefig(str(config.PLOTDIR / filename))
saveplot("{}.10.heat.png".format(trackrel))
plotter.plot_invalidheatmap()
saveplot("{}.12.heat.invalid.png".format(trackrel))
if processor.num_phases() > 1:
plotter.plot_heatmap(plot_type='per-phase')
saveplot("{}.14.heat.perphase.png".format(trackrel))
plotter.plot_heatmap(plot_type='per-minute')
saveplot("{}.15.heat.perminute.png".format(trackrel))
plotter.plot_trace()
saveplot("{}.20.plot.svg".format(trackrel))
@post('/analyze/')
def post_analyze():
trackrel = request.query.trackrel
try:
_do_analyze(trackrel)
except ValueError:
# often 'wrong number of columns' due to truncated file from killed experiment
raise(TrackParseError(trackrel, sys.exc_info()))
redirect("/view/{}".format(trackrel))
def _analyze_selection(trackrels):
for trackrel in trackrels:
try:
_do_analyze(trackrel)
except ValueError:
# often 'wrong number of columns' due to truncated file from killed experiment
pass # nothing to be done here; we're processing in the background
@post('/analyze_selection/')
def post_analyze_selection():
trackrels = request.query.trackrels.split('|')
p = multiprocessing.Process(target=_analyze_selection, args=(trackrels,))
p.start()
@get('/heatmaps/')
def get_heatmaps():
trackrels = request.query.tracks.split('|')
processors = []
# to verify all phases are equivalent
plength_map = defaultdict(list)
for trackrel in trackrels:
try:
p = process.TrackProcessor(str(config.TRACKDIR / trackrel), just_raw_data=True)
processors.append(p)
plength_map[tuple(phase.length for phase in p.phase_list)].append(trackrel)
except ValueError:
raise(TrackParseError(trackrel, sys.exc_info()))
if len(plength_map) > 1:
lengths_string = '\n'.join(
"{} in:\n {}\n".format(
str(lengths),
"\n ".join(trackrel for trackrel in plength_map[lengths])
)
for lengths in plength_map
)
return template('error', errormsg="The provided tracks do not all have the same phase lengths. Please select tracks that share an experimental setup.<br>Phase lengths found:<pre>{}</pre>".format(lengths_string))
# Save all images as binary to be included in the page directly
# Base64-encoded. (Saves having to write temporary data to filesystem.)
images_data = []
# use phases from an arbitrary track
plengths = plength_map.popitem()[0]
dataframes = [proc.df for proc in processors]
phase_start = 0
for i, length in enumerate(plengths):
phase_end = phase_start + length
x, y = heatmaps.get_timeslice(dataframes, phase_start*60, phase_end*60)
title = "Phase {} ({}:00-{}:00)".format(i+1, phase_start, phase_end)
ax = heatmaps.make_heatmap(x, y, title)
plot.format_axis(ax)
image_data = io.BytesIO()
plot.savefig(image_data, format='png')
images_data.append(
base64.b64encode(image_data.getvalue()).decode()
)
phase_start = phase_end
return template('view', imgdatas=images_data)
|
fiston/abaganga
|
project/payment/models.py
|
Python
|
mit
| 932
| 0.001073
|
# project/models.py
from project import db
from project.uuid_gen import id_column
class Payment(db.Model):
id = id_column()
email = db.Column(db.String(255), unique=False, nullable=False)
names = db.Column(db.String(255), unique=False, nullable=False)
cardNumber = db.Column(db.String(255), unique=False, nullable=False)
phone = db.Column(db.String(255), unique=False, nullable=False)
amount = db.Column(db.Float, unique=False, nullable=False)
    object_payment = db.Column(db.String(255), unique=False, nullable=False)
status = db.Column(db.Boolean, nullable=False, default=False)
    def __init__(self, email, names, card_number, phone, amount, object_payment, status=False):
self.names = names
self.email = email
self.cardNumber = card_number
self.phone = phone
self.amount = amount
self.object_payment = object_payment
self.status = status
|
dtudares/hello-world
|
yardstick/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
|
Python
|
apache-2.0
| 3,388
| 0
|
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd. and others
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import logging
import subprocess
import traceback
import yardstick.ssh as ssh
import basemonitor as basemonitor
LOG = logging.getLogger(__name__)
def _execute_shell_command(command):
'''execute shell script with error handling'''
exitcode = 0
output = []
try:
output = subprocess.check_output(command, shell=True)
except Exception:
exitcode = -1
output = traceback.format_exc()
LOG.error("exec command '%s' error:\n " % command)
LOG.error(traceback.format_exc())
return exitcode, output
class MonitorOpenstackCmd(basemonitor.BaseMonitor):
"""docstring for MonitorApi"""
__monitor_type__ = "openstack-cmd"
def setup(self):
self.connection = None
node_name = self._config.get("host", None)
        if node_name:
host = self._context[node_name]
ip = host.get("ip", None)
user = host.get("user", "root")
key_filename = host.get("key_filename", "~/.ssh/id_rsa")
self.connection = ssh.SSH(user, ip, key_filename=key_filename)
self.connection.wait(timeout=600)
LOG.debug("ssh host success!")
self.check_script = self.get_script_fullpath(
"ha_tools/check_openstack_cmd.bash")
self.cmd = self._config["command_name"]
def monitor_func(self):
exit_status = 0
if self.connection:
exit_status, stdout, stderr = self.connection.execute(
"/bin/bash -s '{0}'".format(self.cmd),
stdin=open(self.check_script, "r"))
LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
(exit_status, stdout, stderr))
else:
exit_status, stdout = _execute_shell_command(self.cmd)
if exit_status:
return False
return True
def verify_SLA(self):
outage_time = self._result.get('outage_time', None)
LOG.debug("the _result:%s" % self._result)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time > max_outage_time:
LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
return False
else:
LOG.info("the sla is passed")
return True
def _test(): # pragma: no cover
host = {
"ip": "192.168.235.22",
"user": "root",
"key_filename": "/root/.ssh/id_rsa"
}
context = {"node1": host}
monitor_configs = []
config = {
'monitor_type': 'openstack-cmd',
'command_name': 'nova image-list',
'monitor_time': 1,
'host': 'node1',
'sla': {'max_outage_time': 5}
}
monitor_configs.append(config)
p = basemonitor.MonitorMgr()
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
p.verify_SLA()
if __name__ == '__main__': # pragma: no cover
_test()
|
Singularmotor/auto_test_lexian
|
auto_test_lexian/test_case/delaytest_38.py
|
Python
|
mit
| 544
| 0.012868
|
#coding=utf-8
from selenium import webdriver
import pymysql
import unittest,time
from selenium.webdriver.common.keys import Keys
print("test36")
wf = webdriver.Firefox()
mark_01=0
n=0
wf.get("http://192.168.17.66:8080/LexianManager/html/login.html")
wf.find_element_by_xpath(".//*[@id='login']").click()
time.sleep(1)
wf.find_element_by_xpath(".//*[@id='leftMenus']/div[8]/div[1]/div[2]/a[2]").click()
time.sleep(1)
wf.find_element_by_xpath(".//*[@id='leftMenus']/div[8]/di
|
v[2]/ul/li[2]/a").click()
time.sleep(1)
wf.switch_to_frame("manager")
|
iwm911/plaso
|
plaso/formatters/android_calls.py
|
Python
|
apache-2.0
| 1,199
| 0.005004
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "Lice
|
nse");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for Android contacts2.db database events."""
from plaso.lib import eventdata
class AndroidCallFormatter(eventdata.ConditionalEventFormatter):
"""Formatter for Android call history events."""
DATA_TYPE = 'android:event:call'
FORMAT_STRING_PIECES = [
u'{call_type}',
u'Number: {number}',
u'Name: {name}',
u'Duration: {duration} seconds']
FORMAT_STRING_SHORT_PIECES = [u'{call_type} Call']
SOURCE_LONG = 'Android Call History'
SOURCE_SHORT = 'LOG'
|
chromium/chromium
|
third_party/tflite_support/src/tensorflow_lite_support/examples/task/text/desktop/python/bert_nl_classifier_demo.py
|
Python
|
bsd-3-clause
| 1,817
| 0.003853
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python demo tool for BertNLClassifier."""
import inspect
import os.path as _os_path
import subprocess
import sys
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model_path', None, 'Model Path')
flags.DEFINE_string('text', None, 'Text to Predict')
# Required flag.
flags.mark_flag_as_required('model_path')
flags.mark_flag_as_required('text')
_BERT_NL_CLASSIFIER_NATIVE_PATH = _os_path.join(
    _os_path.dirname(inspect.getfile(inspect.currentframe())),
'../bert_nl_classifier_demo')
def classify(model_path, text):
"""Classifies input text into different categories.
Args:
model_path: path to model
text: input text
"""
# Run the detection tool:
subprocess.run([
_BERT_NL_CLASSIFIER_NATIVE_PATH + ' --model_path=' + model_path +
' --text="' + text + '"'
],
shell=True,
check=True)
def run_main(argv):
del argv # Unused.
classify(FLAGS.model_path, FLAGS.text)
# Simple wrapper to make the code pip-friendly
def main():
app.run(main=run_main, argv=sys.argv)
if __name__ == '__main__':
main()
|
eljost/pysisyphus
|
pysisyphus/tsoptimizers/TRIM.py
|
Python
|
gpl-3.0
| 1,861
| 0.001075
|
# [1] https://doi.org/10.1016/0009-2614(91)90115-P
# Helgaker, 1991
import numpy as np
from scipy.optimize import newton
from pysisyphus.tsoptimizers.TSHessianOptimizer import TSHessianOptimizer
class TRIM(TSHessianOptimizer):
def optimize(self):
energy, gradient, H, eigvals, eigvecs, resetted = self.housekeeping()
self.update_ts_mode(eigvals, eigvecs)
self.log(f"Signs of eigenvalue and -vector of root {self.root} "
"will be reversed!")
# Transform gradient to basis of eigenvectors
gradient_ = eigvecs.T.dot(gradient)
# Construct image function by inverting the signs of the eigenvalue and
# -vector of the mode to follow uphill.
eigvals_ = eigvals.copy()
eigvals_[self.root] *= -1
gradient_ = gradient_.copy()
gradient_[self.root] *= -1
def get_step(mu):
zetas = -gradient_ / (eigvals_ - mu)
# Replace nan with 0.
            zetas = np.nan_to_num(zetas)
# Transform to original basis
step = eigvecs * zetas
step = step.sum(axis=1)
return step
def get_step_norm(mu):
            return np.linalg.norm(get_step(mu))
def func(mu):
return get_step_norm(mu) - self.trust_radius
mu = 0
norm0 = get_step_norm(mu)
if norm0 > self.trust_radius:
mu, res = newton(func, x0=mu, full_output=True)
assert res.converged
self.log(f"Using levelshift of μ={mu:.4f}")
else:
self.log("Took pure newton step without levelshift")
step = get_step(mu)
step_norm = np.linalg.norm(step)
self.log(f"norm(step)={step_norm:.6f}")
self.predicted_energy_changes.append(self.quadratic_model(gradient, self.H, step))
return step
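# Note (illustrative, not part of the original file): in the eigenvector basis
# the step assembled by get_step() is s(mu) = sum_i [ -g_i / (lambda_i - mu) ] * v_i,
# and the Newton search above adjusts the levelshift mu until ||s(mu)|| equals
# the trust radius, which is the levelshifted trust-region scheme this optimizer
# (TRIM, see reference [1] at the top of the file) is named after.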
|
njanirudh/Python-DataStructures
|
linked_list.py
|
Python
|
mit
| 2,365
| 0.009334
|
class Node:
def __init__(self,val):
self.value = val
self.nextNode = None
def getValue(self):
return self.value
def getNextNode(self):
return self.nextNode
def setValue(self,val):
self.value = val
    def setNextNode(self,nxtNode):
self.nextNode = nxtNode
"""
Linked List (LL)
Following are the basic operations supported by a list :-
1. Add − Adds an element at the beginning of the list.
2. Deletion − Deletes an element at the beginning of the list.
3. Display − Displays the complete list.
4. Search − Searches an element using the given key.
"""
class LinkedList:
def __init__(self):
self.head = None
    #Returns 'True' or 'False' depending on the size of the LL
def isEmpty(self):
if(self.head == None):
return True
else:
return False
# Add a node to the head of the LL
def add(self,value):
temp = Node(value)
temp.setNextNode(self.head)
self.head = temp
# gives the total number of elements
def size(self):
temp = self.head
count = 0
if(temp != None):
count = 1
while(temp.getNextNode() != None):
count += 1
temp = temp.getNextNode()
return count
    # prints the elements in the List
def printList(self):
temp = self.head
while(temp != None):
print (temp.getValue())
temp = temp.getNextNode()
    def deleteNode(self,key):
        # remove matching nodes at the head first
        while(self.head != None and self.head.getValue() == key):
            self.head = self.head.getNextNode()
        temp = self.head
        while(temp != None):
            nextNode = temp.getNextNode()
            if(nextNode != None and nextNode.getValue() == key):
                # unlink the matching node and re-check the new next node
                temp.setNextNode(nextNode.getNextNode())
            else:
                temp = temp.getNextNode()
if __name__ == "__main__":
#Create a new linked list
myList = LinkedList()
# Add elements to the list
myList.add(1)
myList.add(2)
myList.add(3)
myList.add(4)
myList.add(5)
myList.add(6)
myList.add(3)
myList.add(7)
# Perform operations on the list
print ("List Size : " + str(myList.size()))
myList.printList()
print ("---------------------")
myList.deleteNode(3)
print ("List Size : " + str(myList.size()))
myList.printList()
|
thrisp/flails
|
tests/test_app/blueprints/private/views.py
|
Python
|
mit
| 2,266
| 0.005296
|
from flask.ext.flails import FlailsView
from flask import render_template, redirect, url_for, request
#from config import db
import models
import forms
class PrivatePostView(FlailsView):
def private_post_index(self):
object_list = models.Post.query.all()
return render_template('post/index.slim', object_list=object_list)
def private_post_show(self, ident):
        post = models.Post.query.get(ident)
form = forms.CommentForm()
return render_template('post/show.slim', post=post, form=form)
def private_post_new(self):
form = forms.PostForm()
if form.validate_on_submit():
post = models.Post(form.name.data, form.title.data, form.content.data)
            #db.session.add(post)
#db.session.commit()
return redirect(url_for('post.index'))
return render_template('post/new.slim', form=form)
def private_post_edit(self, ident):
post = models.Post.query.get(ident)
form = forms.PostForm(request.form, post)
if form.validate_on_submit():
post.name = form.name.data
post.title = form.title.data
post.content = form.content.data
#db.session.add(post)
#db.session.commit()
return redirect(url_for('post.show', ident=ident))
return render_template('post/edit.slim', form=form, post=post)
def private_post_delete(self, ident):
post = models.Post.query.get(ident)
        #db.session.delete(post)
        #db.session.commit()
return redirect(url_for('post.index'))
def private_comment_new(self, post_id):
post = models.Post.query.get(post_id)
form = forms.CommentForm()
if form.validate_on_submit():
comment = models.Comment(form.commenter.data, form.body.data, post_id)
#db.session.add(comment)
#db.session.commit()
return redirect(url_for('.show', ident=post_id))
return render_template('post/show.slim', post=post, form=form)
def private_comment_delete(self, post_id, ident):
comment = models.Comment.query.get(ident)
#db.session.delete(comment)
#db.session.commit()
return redirect(url_for('.show', ident=post_id))
|
opendatakosovo/municipality-procurement-api
|
mcp/views/map.py
|
Python
|
gpl-2.0
| 4,266
| 0.000703
|
from flask import Response
from flask.views import View
from bson import json_util
from mcp import mongo
class Map(View):
def dispatch_request(self, komuna, viti):
json = mongo.db.procurements.aggregate([
{
"$match": {
"komuna.slug": komuna,
"viti": viti,
"kompania.selia.slug": {'$ne': ''}
}
},
{
"$group": {
"_id": {
"selia": "$kompania.selia.slug",
"emri": "$kompania.selia.emri",
"gjeresi": "$kompania.selia.kordinatat.gjeresi",
"gjatesi": "$kompania.selia.kordinatat.gjatesi",
},
"cmimi": {
"$sum": "$kontrata.qmimi"
},
"vlera": {
"$sum": "$kontrata.vlera"
},
"numriKontratave": {
"$sum": 1
}
}
},
{
"$sort": {
"_id.selia": 1
}
},
{
"$project": {
"selia": "$_id.selia",
"emri": "$_id.emri",
"gjeresia": "$_id.gjeresi",
"gjatesia": "$_id.gjatesi",
"cmimi": "$cmimi",
"vlera": "$vlera",
"numriKontratave": "$numriKontratave",
"_id": 0
}
}
])
        json_min_max = mongo.db.procurements.aggregate([
{
"$match": {
"komuna.slug": komuna,
"viti": viti,
"kompania.selia.slug": {'$ne': ''}
}
},
{
"$group": {
"_id": {
"selia": "$kompania.selia.slug",
"gjeresi": "$kompania.selia.kordinatat.gjeresi",
"gjatesi": "$kompania.selia.kordinatat.gjatesi",
},
"sumCmimi": {
"$sum": "$kontrata.qmimi"
},
"sumVlera": {
"$sum": "$kontrata.vlera"
},
"sumNumriKontratave": {
"$sum": 1
}
}
},
{
"$group": {
"_id": {},
"maxCmimi": {
"$max": "$sumCmimi"
},
"maxVlera": {
"$max": "$sumVlera"
},
"maxNumriKontratave": {
"$max": "$sumNumriKontratave"
},
"minCmimi": {
"$min": "$sumCmimi"
},
"minVlera": {
"$min": "$sumVlera"
},
"minNumriKontratave": {
"$min": "$sumNumriKontratave"
},
}
},
{
"$project": {
"_id": 0,
"vlera": {
"min": "$minVlera",
"max": "$maxVlera",
},
"cmimi": {
"min": "$minCmimi",
"max": "$maxCmimi",
},
"numriKontratave": {
"min": "$minNumriKontratave",
"max": "$maxNumriKontratave",
}
}
}
])
        # store the returned response, converted to JSON with json_util.dumps(), in resp
        result_json = {}
result_json['bounds'] = json_min_max['result'][0]
result_json['result'] = json['result']
resp = Response(
response=json_util.dumps(result_json),
mimetype='application/json')
return resp
|
markflyhigh/incubator-beam
|
sdks/python/apache_beam/transforms/window.py
|
Python
|
apache-2.0
| 18,159
| 0.007324
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from builtins import object
from builtins import range
from functools import total_ordering
from future.utils import with_metaclass
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from apache_beam.coders import coders
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import standard_window_fns_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
__all__ = [
'TimestampCombiner',
'WindowFn',
'BoundedWindow',
'IntervalWindow',
'TimestampedValue',
'GlobalWindow',
'NonMergingWindowFn',
'GlobalWindows',
'FixedWindows',
'SlidingWindows',
'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes their
# behavior.
class TimestampCombiner(object):
"""Determines how output timestamps of grouping operations are assigned."""
OUTPUT_AT_EOW = beam_runner_api_pb2.OutputTime.END_OF_WINDOW
OUTPUT_AT_EARLIEST = beam_runner_api_pb2.OutputTime.EARLIEST_IN_PANE
OUTPUT_AT_LATEST = beam_runner_api_pb2.OutputTime.LATEST_IN_PANE
# TODO(robertwb): Add this to the runner API or remove it.
OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'
@staticmethod
def get_impl(timestamp_combiner, window_fn):
if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
return timeutil.OutputAtEndOfWindowImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
return timeutil.OutputAtEarliestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
return timeutil.OutputAtLatestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
else:
raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(with_metaclass(abc.ABCMeta, urns.RunnerApiFn)):
"""An abstract windowing function defining a basic assign and merge."""
class AssignContext(object):
"""Context passed to WindowFn.assign()."""
def __init__(self, timestamp, element=None, window=None):
self.timestamp = Timestamp.of(timestamp)
self.element = element
self.window = window
@abc.abstractmethod
def assign(self, assign_context):
"""Associates windows to an element.
Arguments:
assign_context: Instance of AssignContext.
Returns:
An iterable of BoundedWindow.
"""
raise NotImplementedError
class MergeContext(object):
"""Context passed to WindowFn.merge() to perform merging, if any."""
def __init__(self, windows):
self.windows = list(windows)
def merge(self, to_be_merged, merge_result):
raise NotImplementedError
@abc.abstractmethod
def merge(self, merge_context):
"""Returns a window that is the result of merging a set of windows."""
raise NotImplementedError
def is_merging(self):
"""Returns whether this WindowFn merges windows."""
return True
@abc.abstractmethod
def get_window_coder(self):
raise NotImplementedError
def get_transformed_output_time(self, window, input_timestamp): # pylint: disable=unused-argument
"""Given input time and output window, returns output time for window.
If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
Windowing, the output timestamp for the given window will be the earliest
of the timestamps returned by get_transformed_output_time() for elements
of the window.
Arguments:
window: Output window of element.
input_timestamp: Input timestamp of element as a timeutil.Timestamp
object.
Returns:
Transformed timestamp.
"""
# By default, just return the input timestamp.
return input_timestamp
urns.RunnerApiFn.register_pickle_urn(python_urns.PICKLED_WINDOWFN)
class BoundedWindow(object):
"""A window for timestamps in range (-infinity, end).
Attributes:
end: End of window.
"""
def __init__(self, end):
self.end = Timestamp.of(end)
def max_timestamp(self):
return self.end.predecessor()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
# Order first by endpoint, then arbitrarily
return self.end != other.end or hash(self) != hash(other)
def __lt__(self, other):
if self.end != other.end:
return self.end < other.end
return hash(self) < hash(other)
def __le__(self, other):
if self.end != other.end:
return self.end <= other.end
return hash(self) <= hash(other)
def __gt__(self, other):
if self.end != other.end:
return self.end > other.end
return hash(self) > hash(other)
def __ge__(self, other):
if self.end != other.end:
return self.end >= other.end
return hash(self) >= hash(other)
def __hash__(self):
raise NotImplementedError
def __repr__(self):
return '[?, %s)' % float(self.end)
@total_ordering
class IntervalWindow(windowed_value._IntervalWindowBase, BoundedWindow):
"""A window for timestamps in range [start, end).
Attributes:
start: Start of window as seconds since Unix epoch.
end: End of window as seconds since Unix epoch.
"""
def __lt__(self, other):
if self.end != other.end:
return self.end < other.end
return hash(self) < hash(other)
def intersects(self, other):
return other.start < self.end or self.start < other.end
de
|
karaambaa/RGB-LED-Server
|
server.py
|
Python
|
apache-2.0
| 15,826
| 0.006824
|
#!/usr/bin/env python
# import all needed libraries
import sys
import time
import sockjs.tornado
from tornado import web, ioloop
from sockjs.tornado import SockJSRouter, SockJSConnection
import pigpio
import subprocess
import os
import signal
from thread import start_new_thread
import random
import colorsys
import pyaudio
from scipy.signal import butter, lfilter, freqz
import numpy as np
# Initial setup of GPIO pins
pi = pigpio.pi()
############################### setting basic options ###############################
bright = 255
# The Pins. Use Broadcom numbers.
RED_PIN = 17
GREEN_PIN = 22
BLUE_PIN = 24
# listening port
port = 1713
# Global variables for Music
CHUNK = 512 # How many bytes of audio to read at a time
global HUE
HUE = 0
############################### setting effect options ##############################
mode = "nothing"
############################### basic functions ###############################
class LedController:
def hex_to_rgb(self, hex):
hex = hex.lstrip('#')
lv = len(hex)
rgb = tuple(int(hex[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
return rgb
    def rgb_to_hex(self, rgb):
        # build a "#rrggbb" string from an (r, g, b) tuple
        return "#" + "".join("{:02x}".format(int(c)) for c in rgb)
def hsl_to_rgb(self, hsl):
if any(i > 1 for i in hsl):
hsl[0] /= 360
hsl[1] /= 100
hsl[2] /= 100
colour_tuple = tuple(i * 255 for i in colorsys.hls_to_rgb(hsl[0], hsl[2], hsl[1]))
return colour_tuple
def checkRGB(self, color):
if (color < 0):
color = 0
if (color > 255):
color = 255
return color
def scale(self, brightness):
realBrightness = int(int(brightness) * (float(bright) / 255.0))
# Ensure we are giving correct values
if realBrightness < 0:
realBrightness = 0.0
elif realBrightness > 100:
realBrightness = 100.0
return realBrightness
def setLights(self, pin, brightness):
realBrightness = self.scale(brightness)
pi.set_PWM_dutycycle(pin, realBrightness)
def setRGB(self, r, g, b):
r = self.checkRGB(r)
g = self.checkRGB(g)
b = self.checkRGB(b)
self.setLights(RED_PIN, r)
self.setLights(GREEN_PIN, g)
self.setLights(BLUE_PIN,b)
print "changing color to rgb(" + str(r) + "," + str(g) + "," + str(b) + ")"
#BrokerConnection.color_broadcaster(r,g,b)
def noWhite(self, r, g, b):
r /= 255.0
g /= 255.0
b /= 255.0
HSL = colorsys.rgb_to_hls(r, g, b)
h = HSL[0]
l = HSL[1]
s = HSL[2]
if (l > 0.8):
l *= 0.8 # scale down lightness when higher than 80%
if (s < 0.4):
s = (s * 0.6) + 0.4 # scale saturation up when lower than 40%
return tuple(i * 255 for i in colorsys.hls_to_rgb(h, l, s))
############################### Rainbow functions ###############################
class Rainbow:
def updateColor(self, color, step):
color += step
if color > 255:
color = 255
if color < 0:
color = 0
return color
def fader(self, r, g, b):
lc = LedController()
if not ((r == 255 or r == 0) and (b == 255 or b == 0) and (g == 255 or g == 0) and (r == 255 and g == 255 and b == 255) and (r == 0 and g == 0 and b == 0)):
while r < 255:
r = self.updateColor(r, STEPS)
lc.setRGB(r, g, b)
while b > 0:
b = self.updateColor(b, -STEPS)
lc.setRGB(r, g, b)
while (mode == "Rainbow"):
if r == 255 and b == 0 and g < 255:
g = self.updateColor(g, STEPS)
lc.setRGB(r, g, b)
elif g == 255 and b == 0 and r > 0:
r = self.updateColor(r, -STEPS)
lc.setRGB(r, g, b)
elif r == 0 and g == 255 and b < 255:
b = self.updateColor(b, STEPS)
lc.setRGB(r, g, b)
elif r == 0 and b == 255 and g > 0:
g = self.updateColor(g, -STEPS)
lc.setRGB(r, g, b)
elif g == 0 and b == 255 and r < 255:
r = self.updateColor(r, STEPS)
lc.setRGB(r, g, b)
elif r == 255 and g == 0 and b > 0:
b = self.updateColor(b, -STEPS)
lc.setRGB(r, g, b)
############################### Music functions ###############################
class FreqAnalyser:
# Filtering based on
# http://wiki.scipy.org/Cookbook/ButterworthBandpass
def __init__(self, channels, sample_rate, leds=None):
self.leds = leds # Not needed if just plotting
self.channels = channels
self.sample_rate = sample_rate
self.nyquist = float(sample_rate) / 2
# Filter order - higher the order the sharper
# the curve
order = 3
# Cut off frequencies:
# Low pass filter
cutoff = 200 / self.nyquist
# Numerator (b) and denominator (a)
# polynomials of the filter.
b, a = butter(order, cutoff, btype='lowpass')
self.low_b = b
self.low_a = a
# High pass filter
cutoff = 4000 / self.nyquist
b, a = butter(order, cutoff, btype='highpass')
self.high_b = b
self.high_a = a
# Keep track of max brightness for each
# colour
self.max = [0.0, 0.0, 0.0]
# Make different frequencies fall faster
# bass needs to be punchy.
self.fall = [15.0, 2.5, 5.0]
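    # Illustrative note (not used at runtime): the shape of the filters built
    # above can be inspected with the already-imported freqz, for example:
    #   w, h = freqz(self.low_b, self.low_a, worN=512)
    #   freqs_hz = w * self.nyquist / np.pi   # map rad/sample to Hz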
def filter(self, data):
# Apply low filter
self.low_data = lfilter(self.low_b,
self.low_a,
data)
# Apply high filter
self.high_data = lfilter(self.high_b,
self.high_a,
data)
# Get mid data by doing signal - (low + high)
self.mid_data = np.subtract(data,
np.add(self.low_data,
self.high_data))
@staticmethod
def rms(data):
# Return root mean square of data set
# (i.e. average amplitude)
return np.sqrt(np.mean(np.square(data)))
def change_leds(self):
lc = LedController()
# Get average amplitude
l = []
l.append(self.rms(self.low_data))
l.append(self.rms(self.mid_data))
l.append(self.rms(self.high_data))
if mode == "Music":
HUEcolor = MusicColor
swift = (sum(l) * random.uniform(0, 7))
if swift < 0.5:
swift = 0.5
if (HUEcolor == "Auto"):
global HUE
HUE += swift
else:
rgb = lc.hex_to_rgb(HUEcolor)
rgb = [float(rgb[i]) / 255.0 for i in range(3)]
global HUE
HUE = colorsys.rgb_to_hls(rgb[0], rgb[1], rgb[2])[0] * 360
light = 0.01 + l[0]
if HUE > 360:
HUE = 0 + (HUE - 360)
if light > 0.6:
light = 0.6
RGB = lc.hsl_to_rgb([HUE / 360, 1, light])
elif mode == "Music1":
equalizer = MusicColor
for i in range(3):
                # Do any number fudging to make it look better
                # here - probably want to avoid high values of
# all because it will be white
# (Emphasise/Reduce bass, mids, treble)
l[i] *= float(equalizer[i])
l[i] = (l[i] * 256) - 1
# Use new val if > previous max
if l[i] > self.max[i]:
self.max[i] = l[i]
else:
# Otherwise, decrement max and use that
# Gives colour falling effect
self.max[i] -= self.fall[i]
|
fredmorcos/attic
|
projects/plantmaker/plantmaker-main/src/benchmark/evaluatorperf.py
|
Python
|
isc
| 3,570
| 0.020728
|
from time import time
from benchmark import Benchmark
from optimizer.optimizer import Optimizer
from optimizer.simulator import Simulator
from optimizer.evaluator import Evaluator
from extra.printer import pprint, BLUE
class EvaluatorPerf(Benchmark):
def __init__(self, plant, orderList, testNumber):
Benchmark.__init__(self, plant, orderList, testNumber)
self.prefix = "evaluator"
class EvaluatorMachinesPerf(EvaluatorPerf):
def __init__(self, plant, orderList, testNumber):
EvaluatorPerf.__init__(self, plant, orderList, testNumber)
self.testName = "NumberOfMachines"
self.startValue = 1
def bench(self):
recipes = []
for o in self.orderList.orders:
recipes.append(o.recipe.recipe[:])
o.recipe.recipe = []
machines = self.plant.machines[:]
self.plant.machines = []
i = self.startValue
while i <= len(machines):
pprint("PERF Number of machines = " + str(i), BLUE)
self.plant.machines = machines[:i]
for j, o in enumerate(self.orderList.orders):
o.recipe.recipe = recipes[j][:i]
optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
Evaluator(self.plant))
optimizer.populationSize = 2
optimizer.iterations = 2
optimizer.indivMutationRate = 0.5
optimizer.selectionRate = 0.5
optimizer.mutationRange = 10
schedules = optimizer.run()
evaluator = Evaluator(self.plant)
t = time()
			evaluator.evaluate(schedules[0])
t = time() - t
self.addCairoPlotTime(t)
self.addGnuPlotTime(i, t)
			i += 1
class EvaluatorOrdersPerf(EvaluatorPerf):
def __init__(self, plant, orderList, testNumber):
EvaluatorPerf.__init__(self, plant, orderList, testNumber)
self.testName = "NumberOfOrders"
self.startValue = 2
def bench(self):
orders = self.orderList.orders[:]
self.orderList.orders = []
i = self.startValue
while i <= len(orders):
pprint("PERF Number of orders = " + str(i), BLUE)
self.orderList.orders = orders[:i]
optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
Evaluator(self.plant))
optimizer.populationSize = 2
optimizer.iterations = 2
optimizer.indivMutationRate = 0.5
optimizer.selectionRate = 0.5
optimizer.mutationRange = 10
schedules = optimizer.run()
evaluator = Evaluator(self.plant)
t = time()
evaluator.evaluate(schedules[0])
t = time() - t
self.addCairoPlotTime(t)
self.addGnuPlotTime(i, t)
i += 1
class EvaluatorLargeValuesPerf(EvaluatorPerf):
def __init__(self, plant, orderList, testNumber):
EvaluatorPerf.__init__(self, plant, orderList, testNumber)
self.testName = "LargeValuesMultiplier"
def bench(self):
val = 2
i = self.startValue
while i < 10:
pprint("PERF Large Value = " + str(i * val), BLUE)
for o in self.orderList.orders:
o.deadline *= val
for r in o.recipe.recipe:
r[1] *= val
optimizer = Optimizer(self.plant, self.orderList, Simulator(self.plant),
Evaluator(self.plant))
optimizer.populationSize = 2
optimizer.iterations = 2
optimizer.indivMutationRate = 0.5
optimizer.selectionRate = 0.5
optimizer.mutationRange = 500
schedules = optimizer.run()
evaluator = Evaluator(self.plant)
t = time()
evaluator.evaluate(schedules[0])
t = time() - t
self.addCairoPlotTime(t)
self.addGnuPlotTime((i + 1) * val, t)
i += 1
casetext/flask-http-forwarding | setup.py | Python | mit | 1,109 | 0.000902
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
EXCLUDE_FROM_PACKAGES = ['test_*',]
VERSION = "1.1.0"
INSTALL_REQUIRES = [
'requests',
'Flask'
]
TESTS_REQUIRE = [
'nose',
'httpretty'
]
setup(
name='Flask-HTTP-Forwarding',
version=VERSION,
url='http://www.github.com/casetext/flask-http-forwarding',
author='Casetext, Inc.',
author_email='casetext@casetext.com',
description='Flask extension implementing HTTP forwarding',
license='MIT',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
test_suite="nose.collector",
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
cetinkaya/pastefromhtml | htmlcdparser.py | Python | gpl-3.0 | 19,903 | 0.006079
# Copyright 2014 Ahmet Cetinkaya
# This file is part of pastefromhtml.
# pastefromhtml is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pastefromhtml is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pastefromhtml. If not, see <http://www.gnu.org/licenses/>.
from html.parser import HTMLParser
from html.entities import name2codepoint
import re
import sys
import urllib.request, urllib.error, urllib.parse
import os
def assoc(key, pairs):
value = None
for (k, v) in pairs:
if k == key:
value = v
break
return value
#download file of url
def get_url(url, name):
f = open(name,'wb')
req = urllib.request.Request(url)
req.add_unredirected_header('User-agent', 'Mozilla')
f.write(urllib.request.urlopen(req).read())
f.close()
# HTML Clipboard Data Parser
class HTMLCDParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self) # super().__init__() for Python 3
self.zim_str = ""
self.beg = {"h1": "====== ",
"h2": "===== ",
"h3": "==== ",
"h4": "=== ",
"h5": "== ",
"iframe": "[[",
"strong": "**",
"b": "**",
"i": "//",
"em": "//",
"u": "__",
"ins": "__",
"mark": "__",
"pre": "''",
"code": "''",
"blockquote": "",
"strike": "~~",
"del": "~~",
"p": "",
"div": "",
"ol": "",
"ul": "",
"dl": "",
"dt": "",
"dd": "\t",
"li": "",
"table": "",
"caption": "",
"tr": "",
"th": "|",
"td": "|",
"hr": "-----\n",
"sup": "^{",
"sub": "_{",
"span": "",
"figure": "",
"figcaption": "\n",
"abbr": "",
"q": "",
"time": ""}
self.end = {"h1": " ======\n",
"h2": " =====\n",
"h3": " ====\n",
"h4": " ===\n",
"h5": " ==\n",
"iframe": "]]",
"strong": "**",
"b": "**",
"i": "//",
"em": "//",
"u": "__",
"ins": "__",
"mark": "__",
"pre": "''",
"code": "''",
"blockquote": "",
"strike": "~~",
"del": "~~",
"p": "\n",
"div": "\n",
"a": "]]",
"ol": "\n",
"ul": "\n",
"dl": "\n",
"dt": ":\n",
"dd": "\n",
"li": "",
"table": "\n",
"caption": "\n",
"tr": "|\n",
"th": "",
"td": "",
"sup": "}",
"sub": "}",
"figure": "\n",
"figcaption": "\n"}
self.list_type = "ol"
self.item_no = 0
self.inside_p = False
self.inside_pre = False
self.pre_data = ""
self.inside_blockquote = False
self.inside_tag = "" #Indicate label on which we are
self.start_tag = "" #Initial tag in case we have to delete it
self.del_tag = ""
self.tag_attrib = "" #Tag Attribute Value
self.folder = None
self.a_href = "" #Link of a tag
self.inside_li = False
self.list_level = -1
self.inside_iframe = False
self.inside_span = False
self.inside_dl = False
self.inside_table = False
def handle_starttag(self, tag, attrs):
#If we are in a non-nestable tag we do nothing
if self.inside_tag and not (self.inside_tag == "a" and tag == "img" and self.a_href) and not(self.inside_tag == "th" or self.inside_tag == "td" or self.inside_tag == "dt" or self.inside_tag == "dd") and not (tag == "a" and (self.inside_tag == "b" or self.inside_tag == "strong" or self.inside_tag == "i" or self.inside_tag == "em" or self.inside_tag == "u" or self.inside_tag == "ins" or self.inside_tag == "mark" or self.inside_tag == "strike" or self.inside_tag == "del") and self.zim_str.endswith(self.beg[self.inside_tag])):
return
if tag == "blockquote":
self.inside_blockquote = True
#If an "a" tag occurs inside a non-nestable tag, the "a" tag takes precedence and the previous markup is removed. This is not done inside pre/code blocks
if tag == "a" and self.inside_tag and ((self.inside_tag != "pre" and self.inside_tag != "code")):
self.del_tag = self.inside_tag
self.zim_str = self.zim_str[:len(self.zim_str)-len(self.start_tag)]
#Initialize non-nestable tag
if tag != "td" and tag != "dd" and self.beg.get(tag) or tag == "a" and not self.inside_tag:
self.inside_tag = tag
if (tag == "pre" or tag == "code"): #If pre in p
self.inside_pre = True
if tag in list(self.beg.keys()):
#Add blank when tag not start line
if self.zim_str.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
blank = ""
else:
blank = " "
self.zim_str += blank + self.beg[tag]
self.start_tag = self.beg[tag] #Store start tag to delete it could be somewhere else
if tag == "p":
self.inside_p = True
if self.inside_blockquote:
self.zim_str += "\t"
elif tag == "del":
datetime = assoc("datetime", attrs)
if datetime is not None:
self.tag_attrib = " (" + datetime + ")"
elif tag == "abbr":
title = assoc("title", attrs)
if title is not None:
self.tag_attrib = " (" + title + ")"
elif tag == "q":
cite = assoc("cite", attrs)
if cite is not None:
self.tag_attrib = " ([[#|" + cite + "]])"
self.zim_str += '"'
elif tag == "time":
datetime = assoc("datetime", attrs)
if datetime is not None:
self.tag_attrib = " (" + datetime + ")"
elif tag == "a":
href = assoc("href", attrs)
self.a_href = href #ref of tag
if href is None:
href = "#"
#Add blank when tag not start line
if self.zim_str.endswith(("\n", "(", "[", "\t", "\"", " ", "/", '\xa0')):
blank = ""
else:
blank = " "
#If we are in a table we escape |
if self.inside_table:
pipe = "\|"
else:
pipe = "|"
self.zim_str += blank + "[[{}".format(href) + pipe
elif tag == "ol":
#if we are in a definition list the tab is not put to the dd
if self.inside_dl and self.zim_str.endswith("\t"):
self.zim_str = self.zim_str[:len(self.zim_str)-len("\t")]
#If it is not at the beginning of the line an enter is added
if self.zim_str and not self.zim_str.endswith("\n"):
self.zim_str += "\n"
self.list_type = "ol"
narfman0/helga-github-meta | setup.py | Python | gpl-3.0 | 1,198 | 0
from setuptools import setup, find_packages
from helga_github_meta import __version__ as version
setup(
name='helga-github-meta',
version=version,
description=('Provide information for github related metadata'),
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Communications :: Chat :: Internet Relay Chat',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat :: Internet Relay Chat'],
keywords='irc bot github-meta urbandictionary urban dictionary ud',
author='Jon Robison',
author_email='narfman0@gmail.com',
url='https://github.com/narfman0/helga-github-meta',
license='LICENSE',
packages=find_packages(),
include_package_data=True,
py_modules=['helga_github_meta.plugin'],
zip_safe=True,
install_requires=['helga', 'requests'],
test_suite='tests',
entry_points=dict(
helga_plugins=[
'github-meta = helga_github_meta.plugin:github_meta',
],
),
)
Nocks/ReadBooks | library/admin.py | Python | mit | 199 | 0
from django.contrib import admin
from library.models import Author, Book, Genre, Review
admin.site.register(Author)
admin.site.register(Book)
admin.site.register(Genre)
admin.site.register(Review)
kulapard/projecteuler.net | python/problem_11.py | Python | mit | 2,670 | 0.000749
# -*- coding: utf-8 -*-
"""
Largest product in a grid
https://projecteuler.net/problem=11
"""
GRID = """
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
"""
def adjacent_numbers_gen(grid):
# right
for i, row in enumerate(grid):
for j, a in enumerate(row):
if j + 3 == len(row):
break
b, c, d = row[j + 1], row[j + 2], row[j + 3]
yield a, b, c, d
# down
for i, row in enumerate(grid):
if i + 3 == len(grid):
break
for j, a in enumerate(row):
b, c, d = grid[i + 1][j], grid[i + 2][j], grid[i + 3][j]
yield a, b, c, d
# diagonally right + down
for i, row in enumerate(grid):
if i + 3 == len(grid):
break
for j, a in enumerate(row):
if j + 3 == len(row):
break
b, c, d = grid[i + 1][j + 1], grid[i + 2][j + 2], grid[i + 3][j + 3]
yield a, b, c, d
# diagonally left + down
for i, row in enumerate(grid):
if i + 3 == len(grid):
break
for j, a in enumerate(row):
if j - 3 < 0:
continue
b, c, d = grid[i + 1][j - 1], grid[i + 2][j - 2], grid[i + 3][j - 3]
yield a, b, c, d
grid = []
for line in GRID.strip().split('\n'):
grid.append([int(x.strip()) for x in line.split()])
max_product = 0
for a, b, c, d in adjacent_numbers_gen(grid):
max_product = max(max_product, a * b * c * d)
print max_product
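# A minimal sketch (illustrative, not part of the original solution): the generator on a
# small 4x4 grid, where each yielded tuple is one run of four adjacent numbers.
small_grid = [[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12],
              [13, 14, 15, 16]]
# Yields (1, 2, 3, 4) horizontally, (1, 5, 9, 13) vertically, (1, 6, 11, 16) on the main
# diagonal and (4, 7, 10, 13) on the anti-diagonal, among others.
for run in adjacent_numbers_gen(small_grid):
    print(run)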
testbed/testbed | testbed/db/djconfig/settings/__init__.py | Python | gpl-3.0 | 359 | 0.002786
from split_settings.tools import optional, include
include(
'components/base.py',
'components/pagination.py',
optional('components/global.py'),
##
# Local should be after product.py because if default value has not
# been defined in the DATABASE dictionary then it must be defined.
'components/local.py',
scope=globals()
)
fstagni/DIRAC | tests/Integration/Resources/Storage/Test_Resources_GFAL2StorageBase.py | Python | gpl-3.0 | 14,328 | 0.008305
"""
This integration tests will perform basic operations on a storage element, depending on which protocols are available.
It creates a local hierarchy, and then tries to upload, download, remove, get metadata etc
Potential problems:
* it might seem a good idea to simply add tests for the old srm in it. It is not :-)
There is a deadlock between gfal and gfal2 libraries, you can't load both of them together
* if running in debug mode, you will hit a deadlock with gsiftp :-) https://its.cern.ch/jira/browse/DMC-922
* On some storage (like EOS), there is a caching of metadata. So a file just created, even if present,
might return no metadata information. Sleep times might be needed when this happens.
Examples:
<python Test_Resources_GFAL2StorageBase.py CERN-GFAL2>: will test all the gfal2 plugins defined for CERN-GFAL2
<python Test_Resources_GFAL2StorageBase.py CERN-GFAL2 GFAL2_XROOT>: will test the GFAL2_XROOT plugins defined for CERN-GFAL2
"""
# pylint: disable=invalid-name,wrong-import-position
from __future__ import print_function
import unittest
import sys
import os
import tempfile
import shutil
from DIRAC.Core.Base import Script
Script.setUsageMessage("""
Test a full DMS workflow against a StorageElement
\t%s <SE name> <PluginLists>
\t<SE name>: mandatory
\t<plugins>: comma sep
|
arated list of plugin to test (defautl all)
""" % Script.scriptName)
Script.parseCommandLine()
# [SEName, <plugins>]
posArgs = Script.getPositionalArgs()
if not posArgs:
Script.showHelp()
sys.exit(1)
from DIRAC import gLogger
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import getSize
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
#### GLOBAL VARIABLES: ################
# Name of the storage element that has to be tested
gLogger.setLevel('DEBUG')
STORAGE_NAME = posArgs[0]
# Size in bytes of the file we want to produce
FILE_SIZE = 5 * 1024 # 5kB
# base path on the storage where the test files/folders will be created
DESTINATION_PATH = ''
# plugins that will be used
AVAILABLE_PLUGINS = []
if len(posArgs) > 1:
AVAILABLE_PLUGINS = posArgs[1].split(',')
else:
res = StorageElement(STORAGE_NAME).getPlugins()
if not res['OK']:
gLogger.error("Failed fetching available plugins", res['Message'])
sys.exit(2)
AVAILABLE_PLUGINS = res['Value']
try:
res = getProxyInfo()
if not res['OK']:
gLogger.error("Failed to get client proxy information.", res['Message'])
sys.exit(2)
proxyInfo = res['Value']
username = proxyInfo['username']
vo = ''
if 'group' in proxyInfo:
vo = getVOForGroup(proxyInfo['group'])
DESTINATION_PATH = '/%s/user/%s/%s/gfaltests' % (vo, username[0], username)
except Exception as e: # pylint: disable=broad-except
print(repr(e))
sys.exit(2)
# local path containing test files. There should be a folder called Workflow containing (the files can be simple textfiles)
# FolderA
# -FolderAA
# --FileAA
# -FileA
# FolderB
# -FileB
# File1
# File2
# File3
def _mul(txt):
""" Multiply the input text enough time so that we
reach the expected file size
"""
return txt * (max(1, FILE_SIZE / len(txt)))
class basicTest(unittest.TestCase):
""" This performs all the test, and is just called for a specific plugin
"""
def setUp(self, pluginToTest):
""" Put in place the local directory structure"""
#gLogger.setLevel( 'DEBUG' )
self.LOCAL_PATH = tempfile.mkdtemp()
self.storageName = STORAGE_NAME
# create the local structure
workPath = os.path.join(self.LOCAL_PATH, 'Workflow')
os.mkdir(workPath)
os.mkdir(os.path.join(workPath, 'FolderA'))
with open(os.path.join(workPath, 'FolderA', 'FileA'), 'w') as f:
f.write(_mul('FileA'))
os.mkdir(os.path.join(workPath, 'FolderA', 'FolderAA'))
with open(os.path.join(workPath, 'FolderA', 'FolderAA', 'FileAA'), 'w') as f:
f.write(_mul('FileAA'))
os.mkdir(os.path.join(workPath, 'FolderB'))
with open(os.path.join(workPath, 'FolderB', 'FileB'), 'w') as f:
f.write(_mul('FileB'))
for fn in ["File1", "File2", "File3"]:
with open(os.path.join(workPath, fn), 'w') as f:
f.write(_mul(fn))
# When testing for a given plugin, this plugin might not be able to
# write or read. In this case, we use this specific plugins
# ONLY for the operations it is allowed to
specSE = StorageElement(self.storageName, plugins=pluginToTest)
genericSE = StorageElement(self.storageName)
pluginProtocol = specSE.protocolOptions[0]['Protocol']
if pluginProtocol in specSE.localAccessProtocolList:
print("Using specific SE with %s only for reading" % pluginToTest)
self.readSE = specSE
else:
print("Plugin %s is not available for read. Use a generic SE" % pluginToTest)
self.readSE = genericSE
if pluginProtocol in specSE.localWriteProtocolList:
print("Using specific SE with %s only for writing" % pluginToTest)
self.writeSE = specSE
else:
print("Plugin %s is not available for write. Use a generic SE" % pluginToTest)
self.writeSE = genericSE
# Make sure we are testing the specific plugin at least for one
self.assertTrue(self.readSE == specSE or self.writeSE == specSE,
"Using only generic SE does not make sense!!")
basicTest.clearDirectory(self)
def tearDown(self):
""" Remove the local tree and the remote files """
shutil.rmtree(self.LOCAL_PATH)
self.clearDirectory()
def clearDirectory(self):
""" Removing target directory """
print("==================================================")
print("==== Removing the older Directory ================")
workflow_folder = DESTINATION_PATH + '/Workflow'
res = self.writeSE.removeDirectory(workflow_folder)
if not res['OK']:
print("basicTest.clearDirectory: Workflow folder maybe not empty")
print("==================================================")
def testWorkflow(self):
""" This perform a complete workflow puting, removing, stating files and directories
"""
putDir = {os.path.join(DESTINATION_PATH,
'Workflow/FolderA'): os.path.join(self.LOCAL_PATH,
'Workflow/FolderA'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderB'): os.path.join(self.LOCAL_PATH,
'Workflow/FolderB')}
createDir = [os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderAA'),
os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderABA'),
os.path.join(DESTINATION_PATH, 'Workflow/FolderA/FolderAAB')
]
putFile = {os.path.join(DESTINATION_PATH,
'Workflow/FolderA/File1'): os.path.join(self.LOCAL_PATH,
'Workflow/File1'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderAA/File1'): os.path.join(self.LOCAL_PATH,
'Workflow/File1'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderBB/File2'): os.path.join(self.LOCAL_PATH,
'Workflow/File2'),
os.path.join(DESTINATION_PATH,
'Workflow/FolderB/File2'): os.path.join(self.LOCAL_PATH,
'Workflow/File2'),
os.path.join(DESTINATION_PATH,
'Workflow/File3'): os.path.join(self.LOCAL_PATH,
'Workflow/File3')}
isFile = {os.path.join(DESTINATION_PATH,
'Workflow/FolderA/File1'): os.path.join(self.LOCAL_PATH,
'Workflow/File1'),
myyyy/wechatserver | wechatclient/test/test.py | Python | mit | 1,796 | 0.001134
# -*- coding:utf-8 -*-
import tornado.web
from wechatpy.parser import parse_message
from wechatpy import WeChatClient
TOKEN = '123456'
APPID = 'wxecb5391ec8a58227'
SECRET = 'fa32576b9daa6fd020c0104e6092196a'
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class BaseHandler(object):
def get_client(self):
client = WeChatClient(APPID, SECRET)
a = client.menu.create({
"button": [
{
"type": "click",
"name": "阅读",
"key": "TODAY_READ"
},
{
"type": "click",
"name": "音乐",
"key": "TODAY_MUSIC"
},
{
"name": "时光",
"sub_button": [
{
"type": "click",
"name": "状态",
"key": "TODAY_STATUS"
},
{
"type": "view",
"name": "故事",
"url": "http://wufazhuce.
|
com/"
},
{
"type": "view",
"name": "再见",
"url": "http://byetimes.com/"
},
{
"type": "view",
"name": "关于我们",
"url": "http://www.suyafei.com/"
}
]
}
],
})
return a
if __name__ == '__main__':
client = BaseHandler().get_client()
print (client)
EmreAtes/spack | var/spack/repos/builtin/packages/prodigal/package.py | Python | lgpl-2.1 | 1,761 | 0.000568
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Prodigal(MakefilePackage):
"""Fast, reliable protein-coding gene prediction for prokaryotic
genomes."""
homepage = "https://github.com/hyattpd/Prodiga
|
l"
url = "https://github.com/hyattpd/Prodigal/archive/v2.6.3.tar.gz"
version('2.6.3', '5181809fdb740e9a675cfdbb6c038466')
def install(self, spec, prefix):
make('INSTALLDIR={0}'.format(self.prefix), 'install')
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PATH', prefix)
wetneb/dissemin | backend/tests/test_citeproc.py | Python | agpl-3.0 | 26,785 | 0.003136
import os
import pytest
import responses
from datetime import date
from datetime import datetime
from datetime import timedelta
from urllib.parse import parse_qs
from urllib.parse import urlparse
from django.conf import settings
from django.utils import timezone
from backend.citeproc import CiteprocError
from backend.citeproc import CiteprocAuthorError
from backend.citeproc import CiteprocContainerTitleError
from backend.citeproc import CiteprocDateError
from backend.citeproc import CiteprocDOIError
from backend.citeproc import CiteprocPubtypeError
from backend.citeproc import CiteprocTitleError
from backend.citeproc import Citeproc
from backend.citeproc import CrossRef
from backend.citeproc import DOIResolver
from papers.baremodels import BareName
from papers.doi import doi_to_crossref_identifier
from papers.doi import doi_to_url
from papers.models import OaiRecord
from papers.models import OaiSource
from papers.models import Paper
from publishers.models import Journal
from publishers.models import Publisher
convert_to_name_pair_list = [
({'family': 'Farge', 'given': 'Marie'}, ('Marie', 'Farge')),
({'literal': 'Marie Farge'}, ('Marie', 'Farge')),
({'literal': 'Farge, Marie'}, ('Marie', 'Farge')),
({'family': 'Arvind'}, ('', 'Arvind')),
]
is_oai_license_params = [
# CC
('http://creativecommons.org/licenses/by-nc-nd/2.5/co/', True),
('http://creativecommons.org/licenses/by-nc/3.10/', True),
('https://creativecommons.org/licenses/by-nc-sa/4.0/', True),
# Other open license
('http://www.elsevier.com/open-access/userlicense/1.0/', True),
# Closed license
('http://link.aps.org/licenses/aps-default-license', False),
('http://www.acs.org/content/acs/en/copyright.html', False),
('http://www.elsevier.com/tdm/userlicense/1.0/', False),
]
class TestCiteproc():
"""
This class groups tests about the Citeproc class
"""
test_class = Citeproc
@pytest.mark.parametrize('url, expected', is_oai_license_params)
def test_is_oa_license(self, url, expected):
assert self.test_class.is_oa_license(url) == expected
@pytest.mark.usefixtures('db')
def test_to_paper(self, container_title, title, citeproc):
p = self.test_class.to_paper(citeproc)
# Ensure that paper is in database (i.e. created)
assert p.pk >= 1
# Check paper fields
for author_p, author_c in zip(p.authors_list, citeproc['author']):
assert author_p['name']['first'] == author_c['given']
assert author_p['name']['last'] == author_c['family']
assert author_p['affiliation'] == author_c['affiliation'][0]['name']
assert author_p['orcid'] == author_c['ORCID']
assert p.pubdate == date(*citeproc['issued']['date-parts'][0])
assert p.title == title
# Ensure that oairecord is in database (i.e. created)
r = OaiRecord.objects.get(about=p)
# Check oairecord fields
assert r.doi == citeproc['DOI']
assert r.identifier == doi_to_crossref_identifier(citeproc['DOI'])
assert r.issue == citeproc['issue']
assert r.journal_title == container_title
assert r.pages == citeproc['page']
assert r.pubdate == date(*citeproc['issued']['date-parts'][0])
assert r.publisher_name == citeproc['publisher']
assert r.source == OaiSource.objects.get(identifier='crossref')
assert r.splash_url == doi_to_url(citeproc['DOI'])
assert r.volume == citeproc['volume']
@pytest.mark.parametrize('mock_function', ['_get_oairecord_data', '_get_paper_data'])
def test_to_paper_invalid_data(self, monkeypatch, mock_function, citeproc):
"""
If data is invalid, i.e. metadata is corrupted or something is missing, it must raise an exception
"""
def raise_citeproc_error(*args, **kwargs):
raise CiteprocError
monkeypatch.setattr(self.test_class, mock_function, raise_citeproc_error)
with pytest.raises(CiteprocError):
self.test_class.to_paper(citeproc)
def test_to_paper_no_data(self):
"""
If no data, must raise CiteprocError
"""
with pytest.raises(CiteprocError):
self.test_class.to_paper(None)
@pytest.mark.parametrize('name, expected', convert_to_name_pair_list)
def test_convert_to_name_pair(self, name, expected):
"""
Test if name pairing works
"""
assert self.test_class._convert_to_name_pair(name) == expected
@pytest.mark.parametrize('author_elem, expected', [(dict(), None), ({'affiliation' : [{'name' : 'Porto'}]}, 'Porto'), ({'affiliation' : [{'name' : 'Porto'}, {'name' : 'Lissabon'}]}, 'Porto')])
def test_get_affiliation(self, author_elem, expected):
"""
Must return the first affiliation if any
"""
assert self.test_class._get_affiliation(author_elem) == expected
def test_get_abstract(self, citeproc):
"""
Abstract must be set
"""
assert self.test_class._get_abstract(citeproc) == citeproc['abstract']
def test_get_abstact_missing(self, citeproc):
"""
If no abstract, assert blank
"""
del citeproc['abstract']
assert self.test_class._get_abstract(citeproc) == ''
def test_get_abstract_escaping(self, citeproc):
"""
Must do some escaping, e.g. we sometimes get some jats tags
"""
# We wrap the current abstract into some jats
expected = citeproc['abstract']
citeproc['abstract'] = r'<jats:p>{}<\/jats:p>'.format(expected)
assert self.test_class._get_abstract(citeproc) == expected
def test_get_affiliations(self, affiliations, citeproc):
"""
Must have the same length as citeproc['author'] and identical to list of affiliations
"""
r = self.test_class._get_affiliations(citeproc)
assert len(r) == len(citeproc.get('author'))
assert r == affiliations
def test_get_affiliations_no_authors(self, citeproc):
"""
Must raise exception
"""
del citeproc['author']
with pytest.raises(CiteprocAuthorError):
self.test_class._get_affiliations(citeproc)
def test_get_authors(self, citeproc):
"""
The list of authors shall be a list of BareNames
"""
r = self.test_class._get_authors(citeproc)
assert isinstance(r, list)
for barename in r:
assert isinstance(barename, BareName)
def test_get_authors_empty_list(self, citeproc):
"""
The list of authors must not be empty
"""
citeproc['author'] = []
with pytest.raises(CiteprocAuthorError):
self.test_class._get_authors(citeproc)
def test_get_authors_no_list(self, citeproc):
"""
author in citeproc must be a list
"""
del citeproc['author']
with pytest.raises(CiteprocAuthorError):
self.test_class._get_authors(citeproc)
def test_get_authors_invalid_author(self, monkeypatch, citeproc):
"""
If 'None' is an entry, raise exception
"""
# We mock the function and let it return None, so that name_pairs is a list of None
monkeypatch.setattr(self.test_class, '_convert_to_name_pair', lambda x: None)
with pytest.raises(CiteprocAuthorError):
self.test_class._get_authors(citeproc)
def test_get_container(self, container_title, citeproc):
"""
Must return container title
"""
assert self.test_class._get_container(citeproc) == container_title
def test_get_container_missing(self):
"""
Must return exception
"""
with pytest.raises(CiteprocContainerTitleError):
self.test_class._get_container(dict())
def test_get_doi(self, citeproc):
"""
Must return the DOI
"""
assert self.test_class._get_doi(citeproc) == citeproc['DOI']
def test_get_doi_invalid(self):
"""
Must raise exception
"""
with pytest.raises(CiteprocDOIError):
google/pigweed | pw_tokenizer/py/pw_tokenizer/__main__.py | Python | apache-2.0 | 687 | 0
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
""
|
"Runs the main function in detokenize.py."""
from pw_tokenizer import detokenize
detokenize.main()
DONIKAN/django | tests/migrations2/test_migrations_2/0001_initial.py | Python | bsd-3-clause | 627 | 0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("migrations", "000
|
2_second")]
operations = [
migrations.CreateModel(
"OtherAuthor",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
]
Chronister/ananas | ananas/run.py | Python | mit | 2,535 | 0.007495
#!/usr/bin/env python3
import os, sys, signal, argparse, configparser, traceback, time
from contextlib import closing
from ananas import PineappleBot
import ananas.default
# Add the cwd to the module search path so that we can load user bot classes
sys.path.append(os.getcwd())
bots = []
def shutdown_all(signum, frame):
for bot in bots:
if bot.state == PineappleBot.RUNNING: bot.shutdown()
sys.exit("Shutdown complete")
def main():
parser = argparse.ArgumentParser(description="Pineapple command line interface.", prog="ananas")
parser.add_argument("config", help="A cfg file to read bot configuration from.")
parser.add_argument("-v", "--verbose", action="store_true", help="Log more extensive messages for e.g. debugging purposes.")
parser.add_argument("-i", "--interactive", action="store_true", help="Use interactive prompts for e.g. mastodon login")
args = parser.parse_args()
prog = sys.argv[0]
cfg = configparser.ConfigParser()
try: cfg.read(args.config)
except FileNotFoundError:
sys.exit("Couldn't open '{}', exiting.".format(args.config))
for bot in cfg:
if bot == "DEFAULT": continue
if not "class" in cfg[bot]:
print("{}: no class specified, skipping {}.".format(prog, bot))
continue
botclass = cfg[bot]["class"]
module, _, botclass = botclass.rpartition(".")
if module == "":
print("{}: no module given in class name '{}', skipping {}.".format(prog, botclass, bot))
try:
exec("from {0} import {1}; bots.append({1}('{2}', name='{3}', interactive={4}, verbose={5}))"
.format(module, botclass, args.config, bot, args.interactive, args.verbose))
except ModuleNotFoundError as e:
print("{}: encountered the following error loading module {}:".format(prog, module))
print("{}: the error was: {}".format(prog, e))
print("{}: skipping {}!".format(prog, bot))
continue
except Exception as e:
print("{}: fatal exception loading bot {}: {}\n{}".format(prog, bot, repr(e), traceback.format_exc()))
continue
except KeyboardInterrupt:
sys.exit()
signal.signal(signal.SIGINT, shutdown_all)
signal.signal(signal.SIGABRT, shutdown_all)
signal.signal(signal.SIGTERM, shutdown_all)
try:
while(True): time.sleep(60)
except KeyboardInterrupt:
shutdown_all(None, None)
if __name__ == "__main__":
main()
alexforencich/verilog-ethernet | tb/eth_mac_1g_gmii_fifo/test_eth_mac_1g_gmii_fifo.py | Python | mit | 7,506 | 0.001066
#!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import GmiiFrame, GmiiPhy
from cocotbext.axi import AxiStreamBus, AxiStreamSource, AxiStreamSink
class TB:
def __init__(self, dut, speed=1000e6):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.gtx_clk, 8, units="ns").start())
cocotb.start_soon(Clock(dut.logic_clk, 8, units="ns").start())
self.gmii_phy = GmiiPhy(dut.gmii_txd, dut.gmii_tx_er, dut.gmii_tx_en, dut.mii_tx_clk, dut.gmii_tx_clk,
dut.gmii_rxd, dut.gmii_rx_er, dut.gmii_rx_dv, dut.gmii_rx_clk, speed=speed)
self.axis_source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "tx_axis"), dut.logic_clk, dut.logic_rst)
self.axis_sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "rx_axis"), dut.logic_clk, dut.logic_rst)
dut.ifg_delay.setimmediatevalue(0)
async def reset(self):
self.dut.gtx_rst.setimmediatevalue(0)
self.dut.logic_rst.setimmediatevalue(0)
await RisingEdge(self.dut.tx_clk)
await RisingEdge(self.dut.tx_clk)
self.dut.gtx_rst <= 1
self.dut.logic_rst <= 1
await RisingEdge(self.dut.tx_clk)
await RisingEdge(self.dut.tx_clk)
self.dut.gtx_rst <= 0
self.dut.logic_rst <= 0
await RisingEdge(self.dut.tx_clk)
await RisingEdge(self.dut.tx_clk)
def set_speed(self, speed):
pass
async def run_test_rx(dut, payload_lengths=None, payload_data=None, ifg=12, speed=1000e6):
tb = TB(dut, speed)
tb.gmii_phy.rx.ifg = ifg
tb.dut.ifg_delay <= ifg
tb.set_speed(speed)
await tb.reset()
for k in range(100):
await RisingEdge(dut.rx_clk)
if speed == 10e6:
assert dut.speed == 0
elif speed == 100e6:
assert dut.speed == 1
else:
assert dut.speed == 2
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
test_frame = GmiiFrame.from_payload(test_data)
await tb.gmii_phy.rx.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.axis_sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser == 0
assert tb.axis_sink.empty()
await RisingEdge(dut.rx_clk)
await RisingEdge(dut.rx_clk)
async def run_test_tx(dut, payload_lengths=None, payload_data=None, ifg=12, speed=1000e6):
tb = TB(dut, speed)
tb.gmii_phy.rx.ifg = ifg
tb.dut.ifg_delay <= ifg
tb.set_speed(speed)
await tb.reset()
for k in range(100):
await RisingEdge(dut.rx_clk)
if speed == 10e6:
assert dut.speed == 0
elif speed == 100e6:
assert dut.speed == 1
else:
assert dut.speed == 2
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.gmii_phy.tx.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert rx_frame.error is None
assert tb.gmii_phy.tx.empty()
await RisingEdge(dut.tx_clk)
await RisingEdge(dut.tx_clk)
def size_list():
return list(range(60, 128)) + [512, 1514] + [60]*10
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
def cycle_en():
return itertools.cycle([0, 0, 0, 1])
if cocotb.SIM_NAME:
for test in [run_test_rx, run_test_tx]:
factory = TestFactory(test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.add_option("speed", [1000e6, 100e6, 10e6])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
def test_eth_mac_1g_gmii_fifo(request):
dut = "eth_mac_1g_gmii_fifo"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "eth_mac_1g_gmii.v"),
os.path.join(rtl_dir, "gmii_phy_if.v"),
os.path.join(rtl_dir, "ssio_sdr_in.v"),
os.path.join(rtl_dir, "ssio_sdr_out.v"),
os.path.join(rtl_dir, "oddr.v"),
os.path.join(rtl_dir, "eth_mac_1g.v"),
os.path.join(rtl_dir, "axis_gmii_rx.v"),
os.path.join(rtl_dir, "axis_gmii_tx.v"),
os.path.join(rtl_dir, "lfsr.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
]
parameters = {}
parameters['AXIS_DATA_WIDTH'] = 8
parameters['AXIS_KEEP_ENABLE'] = int(parameters['AXIS_DATA_WIDTH'] > 8)
parameters['AXIS_KEEP_WIDTH'] = parameters['AXIS_DATA_WIDTH'] // 8
parameters['ENABLE_PADDING'] = 1
parameters['MIN_FRAME_LENGTH'] = 64
parameters['TX_FIFO_DEPTH'] = 16384
parameters['TX_FRAME_FIFO'] = 1
parameters['TX_DROP_OVERSIZE_FRAME'] = parameters['TX_FRAME_FIFO']
parameters['TX_DROP_BAD_FRAME'] = parameters['TX_DROP_OVERSIZE_FRAME']
parameters['TX_DROP_WHEN_FULL'] = 0
parameters['RX_FIFO_DEPTH'] = 16384
parameters['RX_FRAME_FIFO'] = 1
parameters['RX_DROP_OVERSIZE_FRAME'] = parameters['RX_FRAME_FIFO']
parameters['RX_DROP_BAD_FRAME'] = parameters['RX_DROP_OVERSIZE_FRAME']
parameters['RX_DROP_WHEN_FULL'] = parameters['RX_DROP_OVERSIZE_FRAME']
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
abhishekpathak/recommendation-system | recommender/server/settings/celery_conf.py | Python | mit | 695 | 0.001439
# -*- coding: utf-8 -*-
from celery import Celery
from server import config
""" Celery configuration module.
"""
def make_celery(app):
""" Flask integration with celery. Taken from
http://flask.pocoo.org/docs/0.12/patterns/celery/
"""
celery = Celery(app.import_name, backend=config.CELERY_RESULT_BACKEND,
broker=config.CELERY_BROKER_URL)
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
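# A minimal usage sketch (assumed, not part of the original module): wiring make_celery
# into a Flask application and declaring a task that runs inside the app context; the
# app and task names here are hypothetical.
from flask import Flask

flask_app = Flask(__name__)
celery_app = make_celery(flask_app)

@celery_app.task()
def add_together(a, b):
    # Runs with flask_app's application context pushed, courtesy of ContextTask above.
    return a + b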
mercycorps/TolaTables | silo/migrations/0030_auto_20170915_0828.py | Python | gpl-2.0 | 1,103 | 0.00272
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-15 15:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('silo', '0029_auto_20170915_0810'),
]
operations = [
migrations.RemoveField(
model_name='silo',
name='workflowlevel1',
),
migrations.AddField(
model_name='silo',
name='workflowlevel1',
field=models.ManyToManyField(blank=True, null=True, to='silo.WorkflowLevel1'),
),
migrations.AlterField(
model_name='tolauser',
name='workflowlevel1',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
migrations.AlterField(
model_name='workflowlevel2',
name='workflowlevel1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='silo.WorkflowLevel1'),
),
]
synergeticsedx/deployment-wipro | common/djangoapps/edxmako/shortcuts.py | Python | agpl-3.0 | 7,383 | 0.00149
# Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from urlparse import urljoin
from django.http import HttpResponse
from django.template import Context
from edxmako import lookup_template
from edxmako.request_context import get_template_request_context
from django.conf import settings
from django.core.urlresolvers import reverse
from openedx.core.djangoapps.theming.helpers import get_template_path, is_request_in_themed_site
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
log = logging.getLogger(__name__)
def marketing_link(name):
"""Returns the correct URL for a link to the marketing site
depending on if the marketing site is enabled
Since the marketing site is enabled by a setting, we have two
possible URLs for certain links. This function decides
which URL should be provided.
"""
# link_map maps URLs from the marketing site to the old equivalent on
# the Django site
link_map = settings.MKTG_URL_LINK_MAP
enable_mktg_site = configuration_helpers.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
marketing_urls = configuration_helpers.get_value(
'MKTG_URLS',
settings.MKTG_URLS
)
if enable_mktg_site and name in marketing_urls:
# special case for when we only want the root marketing URL
if name == 'ROOT':
return marketing_urls.get('ROOT')
# Using urljoin here allows us to enable a marketing site and set
# a site ROOT, but still specify absolute URLs for other marketing
# URLs in the MKTG_URLS setting
# e.g. urljoin('http://marketing.com', 'http://open-edx.org/about') >>> 'http://open-edx.org/about'
return urljoin(marketing_urls.get('ROOT'), marketing_urls.get(name))
# only link to the old pages when the marketing site isn't on
elif not enable_mktg_site and name in link_map:
# don't try to reverse disabled marketing links
if link_map[name] is not None:
return reverse(link_map[name])
else:
log.debug("Cannot find corresponding link for name: %s", name)
return '#'
def is_any_marketing_link_set(names):
"""
Returns a boolean if any given named marketing links are configured.
"""
return any(is_marketing_link_set(name) for name in names)
def is_marketing_link_set(name):
"""
Returns a boolean if a given named marketing link is configured.
"""
enable_mktg_site = configuration_helpers.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
marketing_urls = configuration_helpers.get_value(
'MKTG_URLS',
settings.MKTG_URLS
)
if enable_mktg_site:
return name in marketing_urls
else:
return name in settings.MKTG_URL_LINK_MAP
def marketing_link_context_processor(request):
"""
A django context processor to give templates access to marketing URLs
Returns a dict whose keys are the marketing link names usable with the
marketing_link method (e.g. 'ROOT', 'CONTACT', etc.) prefixed with
'MKTG_URL_' and whose values are the corresponding URLs as computed by the
marketing_link method.
"""
marketing_urls = configuration_helpers.get_value(
'MKTG_URLS',
settings.MKTG_URLS
)
return dict(
[
("MKTG_URL_" + k, marketing_link(k))
for k in (
settings.MKTG_URL_LINK_MAP.viewkeys() |
marketing_urls.viewkeys()
)
]
)
def footer_context_processor(request): # pylint: disable=unused-argument
"""
Checks the site name to determine whether to use the edX.org footer or the Open Source Footer.
"""
return dict(
[
("IS_REQUEST_IN_MICROSITE", is_request_in_themed_site())
]
)
def render_to_string(template_name, dictionary, context=None, namespace='main', request=None):
"""
Render a Mako template as a string.
The following values are available to all templates:
settings: the django settings object
EDX_ROOT_URL: settings.EDX_ROOT_URL
marketing_link: The :func:`marketing_link` function
is_any_marketing_link_set: The :func:`is_any_marketing_link_set` function
is_marketing_link_set: The :func:`is_marketing_link_set` function
Arguments:
template_name: The name of the template to render. Will be loaded
from the template paths specified in configuration.
dictionary: A dictionary of variables to insert into the template during
rendering.
context: A :class:`~django.template.Context` with values to make
available to the template.
namespace: The Mako namespace to find the named template in.
request: The request to use to construct the RequestContext for rendering
this template. If not supplied, the current request will be used.
"""
template_name = get_template_path(template_name)
context_instance = Context(dictionary)
# add dictionary to context_instance
context_instance.update(dictionary or {})
# collapse context_instance to a single dictionary for mako
context_dictionary = {}
context_instance['settings'] = settings
context_instance['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
context_instance['marketing_link'] = marketing_link
context_instance['is_any_marketing_link_set'] = is_any_marketing_link_set
context_instance['is_marketing_link_set'] = is_marketing_link_set
# In various testing contexts, there might not be a current request context.
request_context = get_template_request_context(request)
if request_context:
for item in request_context:
context_dictionary.update(item)
for item in context_instance:
context_dictionary.update(item)
if context:
context_dictionary.update(context)
# "Fix" CSRF token by evaluating the lazy object
KEY_CSRF_TOKENS = ('csrf_token', 'csrf')
for key in KEY_CSRF_TOKENS:
if key in context_dictionary:
context_dictionary[key] = unicode(context_dictionary[key])
# fetch and render template
template = lookup_template(namespace, template_name)
return template.render_unicode(**context_dictionary)
def render_to_response(template_name, dictionary=None, context_instance=None, namespace='main', request=None, **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
lookup.get_template(args[0]).render with the passed arguments.
"""
dictionary = dictionary or {}
return HttpResponse(render_to_string(template_name, dictionary, context_instance, namespace, request), **kwargs)
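# A minimal usage sketch (assumed, not part of the original module): calling the helper
# above from a Django view; the template name and context key are hypothetical.
def example_dashboard_view(request):
    context = {'course_name': 'Demo Course'}
    return render_to_response('dashboard.html', context, request=request)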
lymingtonprecision/maat | ansible/name_generator.py | Python | mit | 1,321 | 0.004542
import random
import re
import vsphere_inventory as vsphere
from os.path import join, dirname
try:
import json
except ImportError:
import simplejson as json
def readNamesFrom(filepath):
with open(filepath) as f:
return f.readlines()
def randomName(lefts, rights):
left = random.choice(lefts).rstrip()
right = random.choice(rights).rstrip()
return left + '-' + right
def nodeExists(knownNames, name):
matches = [n for n in knownNames if re.match(name + '(\.|$)', n)]
return len(matches) > 0
def generateName(knownNames):
leftSides = readNamesFrom(join(dirname(__file__), 'names', 'lefts.txt'))
rightSides = readNamesFrom(join(dirname(__file__), 'names', 'rights.txt'))
for i in range(10):
name = randomName(leftSides, rightSides)
if not nodeExists(knownNames, name):
return name
else:
print('Failed to generate a new, unique, name after 10 attempts')
exit(2)
if __name__ == '__main__':
parser = vsphere.argparser()
args = parser.parse_args()
vs = vsphere.vsphereConnect(args.server, args.user, args.password)
vimSession = vsphere.vimLogin(vs)
vms = vsphere.vmsAtPath(vs, vimSession, args.path)
vmList = [vm['hostname'] for vm in vms]
newName = generateName(vmList)
print(newName)
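# A minimal sketch (illustrative, not part of the original script): nodeExists() treats a
# name as taken when it matches a known host exactly or as the left-most domain label.
known_hosts = ['quiet-falcon.example.com', 'rapid-otter']   # hypothetical inventory
print(nodeExists(known_hosts, 'quiet-falcon'))   # True  - clashes with quiet-falcon.example.com
print(nodeExists(known_hosts, 'brave-heron'))    # False - free to use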
esdalmaijer/PyGaze | pygaze/libsound.py | Python | gpl-3.0 | 1,025 | 0
# -*- coding: utf-8 -*-
#
# This file is part of PyGaze - the open-source toolbox for eye tracking
#
# PyGaze is a Python module for easily creating gaze contingent experiments
# or other software (as well as non-gaze contingent experiments/software)
# Copyright (C) 2012-2013 Edwin S. Dalmaijer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from pygaze.sound import Sound
yossisolomon/assessing-mininet | matlab-to-python.py | Python | gpl-2.0 | 7,459 | 0.024668
# Autogenerated with SMOP version 0.23
# main.py ../../assessing-mininet/MATLAB/load_function.m ../../assessing-mininet/MATLAB/process_complete_test_set.m ../../assessing-mininet/MATLAB/process_single_testfile.m ../../assessing-mininet/MATLAB/ProcessAllLogsMain.m
from __future__ import division
from numpy import arange
def strcat(*args):
return ''.join(args)
def load_octave_decoded_file_as_matrix(file_name):
with open(file_name, 'r') as f:
return [ map(float,line.strip().split(' ')) for line in f ]
def get_test_bitrate(crosstraffic):
if crosstraffic:
return arange(4,6,0.25)
else:
return arange(8,12,0.5)
def process_complete_test_set(file_names,output_format,crosstraffic):
from glob import glob
overview_img_file=strcat('overview.',output_format)
mean_bitrate=[]
std_dev_bitrate=[]
mean_delay=[]
std_dev_delay=[]
mean_jitter=[]
std_dev_jitter=[]
mean_packetloss=[]
std_dev_packetloss=[]
print('Starting work on:')
print(file_names)
for f in file_names:
print('in loop, iterating through list of found files...')
#current_file_name_with_ext=f
#bare_file_name=strrep(current_file_name_with_ext,extension_loadfile,'')
#temp_picture_file_name=strcat(bare_file_name,extension_imgfile)
current_picture_file_name=strcat(f,'.jpg')
matrix_to_process=load_octave_decoded_file_as_matrix(f)
parsed_data=process_single_testfile(matrix_to_process,current_picture_file_name,output_format)
mean_bitrate[ii]=mean(parsed_data)
std_dev_bitrate[ii]=std(parsed_data)
mean_delay[ii]=mean(parsed_data[:,2])
std_dev_delay[ii]=std(parsed_data[:,2])
mean_jitter[ii]=mean(parsed_data[:,3])
std_dev_jitter[ii]=std(parsed_data[:,3])
mean_packetloss[ii]=mean(parsed_data[:,4])
std_dev_packetloss[ii]=std(parsed_data[:,4])
bitrate_of_test = get_test_bitrate(crosstraffic)
s_bitrate=min(bitrate_of_test) - bitrate_interval
e_bitrate=max(bitrate_of_test) + bitrate_interval
s_mean_bitrate=min(mean_bitrate) - max(std_dev_bitrate)
e_mean_bitrate=max(mean_bitrate) + max(std_dev_bitrate)
s_mean_jitter=min(mean_jitter) - max(std_dev_jitter)
e_mean_jitter=max(mean_jitter) + max(std_dev_jitter)
s_mean_delay=min(mean_delay) - max(std_dev_delay)
e_mean_delay=max(mean_delay) + max(std_dev_delay)
axis_bitrate=(cat(s_bitrate,e_bitrate,s_mean_bitrate,e_mean_bitrate))
axis_delay=(cat(s_bitrate,e_bitrate,sort(cat(round_(s_mean_delay) - 1,round_(e_mean_delay) + 1))))
axis_jitter=(cat(s_bitrate,e_bitrate,s_mean_jitter,e_mean_jitter))
print('\n\n\n*** START TESTDATA ***\n')
print(bitrate_of_test)
print(mean_bitrate)
print(std_dev_bitrate)
print('\n*** END TESTDATA ***\n\n\n')
subplot(3,1,1)
print(len(bitrate_of_test))
print(len(mean_bitrate))
print(len(std_dev_bitrate))
errorbar(bitrate_of_test,mean_bitrate,std_dev_bitrate,'kx')
title('mean throughput with standard deviation')
xlabel('test bitrate [Mbps]')
ylabel('bitrate value [Mbps]')
print(axis_bitrate)
axis(axis_bitrate)
grid('on')
subplot(3,1,2)
errorbar(bitrate_of_test,mean_delay,std_dev_delay,'kx')
title('mean delay with standard deviation')
xlabel('test bitrate [Mbps]')
ylabel('delay value [ms]')
axis(axis_delay)
grid('on')
subplot(3,1,3)
errorbar(bitrate_of_test,mean_jitter,std_dev_jitter,'kx')
title('mean jitter with standard deviation')
xlabel('test bitrate [Mbps]')
ylabel('jitter value [ms]')
axis(axis_jitter)
grid('on')
aggregatedPicture=figure(1)
set_(aggregatedPicture,'PaperUnits','centimeters')
set_(aggregatedPicture,'PaperSize',cat(30,16))
set_(aggregatedPicture,'PaperPosition',cat(0,0,30,16))
set_(aggregatedPicture,'PaperOrientation','portrait')
saveas(aggregatedPicture,overview_img_file,output_format)
close(aggregatedPicture)
clear('all')
return
def process_single_testfile(matrix,current_picture_file_name,output_format):
t_start=matrix[1][5] * 3600 + matrix[1][6] * 60 + matrix[1][7]
print (matrix[:][5] * 3600 + matrix[:][6] * 60 + matrix[:][7])
t_conv=(matrix[:][5] * 3600 + matrix[:][6] * 60 + matrix[:][7]) - t_start
t_start_s=matrix[1][2] * 3600 + matrix[1][3] * 60 + matrix[1][4]
t_conv_s=(matrix[:][2] * 3600 + matrix[:][3] * 60 + matrix[:][4]) - t_start_s
jj=1
t_int=0
bitrate[jj]=0
delay[jj]=0
jitter[jj]=0
pktloss[jj]=0
for ii in arange(1,len(matrix)).reshape(-1):
if (t_conv[ii] - t_int >= 1):
jj=jj + 1
t_int=t_conv[ii]
bitrate[jj]=matrix[ii][8]
delay[jj]=t_conv[ii] - t_conv_s[ii]
if (ii > 1):
pktloss[jj]=matrix[ii] - matrix[ii - 1] - 1
jitter[jj]=t_conv[ii] - t_conv[ii - 1]
else:
bitrate[jj]=bitrate[jj] + matrix[ii][8]
delay[jj]=mean(cat(delay[jj],(t_conv[ii] - t_conv_s[ii])))
if (ii > 1):
pktloss[jj]=pktloss[jj] + matrix[ii] - matrix[ii - 1] - 1
jitter[jj]=mean(cat(jitter[jj],(t_conv[ii] - t_conv[ii - 1])))
bitrate=bitrate / 125000
return_matrix=matlabarray(cat(bitrate.T,delay.T,jitter.T,pktloss.T))
subplot(2,2,1)
bitrate_u=copy(bitrate)
plot(arange(0,jj - 2),bitrate_u[1:jj - 1],'-')
title('Throughput')
xlabel('time [s]')
ylabel('[Mbps]')
axis(cat(0,max(t_conv),0,round_(max(bitrate_u) * 1.125)))
grid('on')
subplot(2,2,2)
plot(arange(0,len(delay) - 1),delay,'-')
title('Delay')
xlabel('time [s]')
ylabel('[ms]')
axis(cat(0,max(t_conv),min(delay) - 1e-05,max(delay)))
grid('on')
subplot(2,2,3)
plot(arange(0,len(jitter) - 1),jitter,'-')
title('Jitter')
xlabel('time [s]')
ylabel('[ms]')
axis(cat(0,max(t_conv),min(jitter) - max(jitter) * 1.125,max(jitter) * 1.125))
grid('on')
subplot(2,2,4)
d=diff(t_conv)
m=max(d)
hist(d)
title('Inter-departure time Distribution')
xlabel('time [s]')
ylabel('Empirical PDF')
grid('on')
firstPicture=figure(1)
set_(firstPicture,'PaperUnits','centimeters')
set_(firstPicture,'PaperSize',cat(22,18))
set_(firstPicture,'PaperPosition',cat(0,0,22,18))
set_(firstPicture,'PaperOrientation','portrait')
saveas(firstPicture,current_picture_file_name,output_format)
close(firstPicture)
# if (strcmp(log_type,'udp_rcv')):
# subplot(1,1,1)
# packetloss_picture=figure(1)
# set_(packetloss_picture,'PaperUnits','centimeters')
# set_(packetloss_picture,'PaperSize',cat(12,10))
# set_(packetloss_picture,'PaperPosition',cat(0,0,12,10))
# set_(packetloss_picture,'PaperOrientation','portrait')
# plot(arange(0,len(pktloss) - 1),pktloss,'-')
# title('Packet loss')
# xlabel('time [s]')
# ylabel('[pps]')
# axis(cat(sort(cat(0,max(t_conv))),sort(cat(round_(max(pktloss)) + 1,round_(min(pktloss)) - 1))))
# grid('on')
# saveas(packetloss_picture,strcat('pl_',current_picture_file_name),output_format)
# close(packetloss_picture)
return return_matrix
crosstraffic = False
#process_complete_test_set(['/tmp/octave.dat'],'pdf',crosstraffic)
process_single_testfile(load_octave_decoded_file_as_matrix('/tmp/octave.dat'),'pic.jpg',"jpg")
|
dknlght/dkodi
|
src/script.module.resolveurl/lib/resolveurl/plugins/oogly.py
|
Python
|
gpl-2.0
| 1,391
| 0.001438
|
"""
Plugin for ResolveURL
Copyright (C) 2020 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from resolveurl.plugins.__resolve_generic__ import ResolveGeneric
from resolveurl.plugins.lib import helpers
class OoglyResolver(ResolveGeneric):
name = "oogly.io"
domains = ['oogly.io']
pattern = r'(?://|\.)(oogly\.io)/(?:embed-)?([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id),
patterns=[r'''file:\s*"(?P<url>[^"]+\.(?:m3u8|mp4))"'''],
generic_patterns=False,
referer=False)
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
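# Illustrative sketch (editor's note, not part of the original plugin): the class-level
# `pattern` above is what ResolveURL uses to pull the host and media id out of a link.
# With only the standard-library `re` module and a made-up URL it behaves like this:
#
#   import re
#   m = re.search(r'(?://|\.)(oogly\.io)/(?:embed-)?([0-9a-zA-Z]+)',
#                 'https://oogly.io/embed-abc123.html')
#   host, media_id = m.groups()   # ('oogly.io', 'abc123')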
|
hidat/audio_pipeline
|
audio_pipeline/util/format/Vorbis.py
|
Python
|
mit
| 6,254
| 0.003997
|
from audio_pipeline.util import Tag
import re
from audio_pipeline.util import Exceptions
class BaseTag(Tag.Tag):
def extract(self):
super().extract()
if self._value is not None:
self._value = self._value[0]
def set(self, value=Tag.CurrentTag):
if value is not Tag.CurrentTag:
self.value = value
if isinstance(self._value, list):
self.mutagen[self.serialization_name] = [str(val) for val in self._value]
elif self._value:
self.mutagen[self.serialization_name] = [str(self._value)]
else:
if self.serialization_name in self.mutagen:
self.mutagen.pop(self.serialization_name)
class NumberTag(Tag.NumberTagMixin, BaseTag):
def __init__(self, total_tag, *args):
self._total = None
self.serialization_total = total_tag
super().__init__(*args)
def extract(self):
# get the number
super().extract()
if self._value:
self._value = int(self._value)
# get the total
if self.serialization_total in self.mutagen:
self._total = int(self.mutagen[self.serialization_total][0])
@property
def value(self):
if self._value:
return self._value
@value.setter
def value(self, val):
if val is None:
self._value = None
elif isinstance(val, int):
self._value = val
elif isinstance(val, str) and self._value_match.match(val):
# valid-looking num/total string
self._value = int(val.split('/')[0])
elif isinstance(val, str):
try:
self._value = int(val)
except ValueError:
raise Exceptions.InvalidTagValueError(str(val) + " is not a valid " + self.name)
else:
raise Exceptions.InvalidTagValueError(str(val) + " is not a valid " + self.name)
class DiscNumberTag(NumberTag):
def __str__(self):
if self._value and self._total:
val = str(self._value) + "/" + str(self._total)
elif self._value:
val = str(self._value)
else:
val = ""
return val
class ReleaseDateTag(Tag.ReleaseDateMixin, BaseTag):
def __init__(self, *args):
super().__init__(*args)
self._normalize()
class Format(Tag.MetadataFormat):
"""
A static class used to extract and save Vorbis-formatted metadata tags.
"""
# release-level serialization names
_album = "album"
_album_artist = "albumartist"
_release_date = "date"
_label = "label"
_mbid = "mbid"
_mbid_p = "musicbrainz_albumid"
_country = "releasecountry"
_release_type = "releasetype"
_media_format = "media"
# track-level serialization names
_title = "title"
_artist = "artist"
_disc_total = "disctotal"
_disc_total_picard = "totaldiscs"
_disc_num = "discnumber"
_track_total = "tracktotal"
_track_total_picard = "totaltracks"
_track_num = "tracknumber"
_length = "Length"
_acoustid = "ACOUSTID_ID"
_track_mbid = 'MUSICBRAINZ_RELEASETRACKID'
_recording_mbid = 'MUSICBRAINZ_TRACKID'
################
# release-level tags
################
@classmethod
def album(cls, tags):
tag = BaseTag(cls._album_name, cls._album, tags)
return tag
@classmethod
def album_artist(cls, tags):
tag = BaseTag(cls._album_artist_name, cls._album_artist, tags)
return tag
@classmethod
def release_date(cls, tags):
tag = ReleaseDateTag(cls._release_date_name, cls._release_date, tags)
return tag
@classmethod
def label(cls, tags):
tag = BaseTag(cls._label_name, cls._label, tags)
return tag
@classmethod
def mbid(cls, tags):
tag = BaseTag(cls._mbid_name, cls._mbid_p, tags)
if tag.value is None:
tag = BaseTag(cls._mbid_name, cls._mbid, tags)
return tag
@classmethod
def country(cls, tags):
tag = BaseTag(cls._country_name, cls._country, tags)
return tag
@classmethod
def release_type(cls, tags):
tag = BaseTag(cls._type_name, cls._release_type, tags)
return tag
@classmethod
def media_format(cls, tags):
tag = BaseTag(cls._media_format_name, cls._media_format, tags)
return tag
######################
# track-level tags
######################
@classmethod
def title(cls, tags):
tag = BaseTag(cls._title_name, cls._title, tags)
return tag
@classmethod
def artist(cls, tags):
tag = BaseTag(cls._artist_name, cls._artist, tags)
return tag
@classmethod
def disc_num(cls, tags):
tag = DiscNumberTag(cls._disc_total_picard, cls._disc_num_name, cls._disc_num, tags)
if tag.total is None:
tag = DiscNumberTag(cls._disc_total, cls._disc_num_name, cls._disc_num, tags)
return tag
@classmethod
def track_num(cls, tags):
tag = NumberTag(cls._track_total_picard, cls._track_num_name, cls._track_num, tags)
if tag.total is None:
tag = NumberTag(cls._track_total, cls._track_num_name, cls._track_num, tags)
return tag
@classmethod
def acoustid(cls, tags):
tag = BaseTag(cls._acoustid_name, cls._acoustid, tags)
return tag
@classmethod
def recording_mbid(cls, tags):
tag = BaseTag(cls._recording_mbid_name, cls._recording_mbid, tags)
return tag
@classmethod
def track_mbid(cls, tags):
tag = BaseTag(cls._track_mbid_name, cls._track_mbid, tags)
return tag
#########################
# custom tags
#########################
@classmethod
def custom_tag(cls, name, tags):
tag = BaseTag(name, name, tags)
if not tag.value:
serialization_name = re.sub(r"\s", "_", name)
under_tag = BaseTag(name, serialization_name, tags)
tag.value = under_tag.value
tag.save()
return tag
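# Usage sketch (editor's note, not part of the original module; the file name and the
# mutagen.flac call are illustrative assumptions about how a tag dict is obtained):
#
#   import mutagen.flac
#   tags = mutagen.flac.FLAC('example.flac')   # any Vorbis-comment-bearing file
#   album = Format.album(tags)                 # BaseTag bound to the 'album' comment
#   album.set('New Album Title')               # set() assigns and writes back into tags
#   tags.save()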
|
cloudiirain/onigiri
|
onigiri/views.py
|
Python
|
gpl-3.0
| 235
| 0.008511
|
from django.http import Http404
from django.shortcuts import render
from directory.forms import SearchForm
def home(request):
searchform = SearchForm()
return render(request, 'onigiri/index.html', {'searchform' : searchform})
|
lumiere-lighting/lumiere-node-raspberry-pi
|
lumiere.old.py
|
Python
|
mit
| 2,222
| 0.011701
|
#!/usr/bin/env python
from raspledstrip.ledstrip import *
from raspledstrip.animation import *
from raspledstrip.color import Color
import requests
import json
import time
import sys
import traceback
# Things that should be configurable
ledCount = 32 * 5
api = 'http://lumiere.lighting/'
waitTime = 6
class Lumiere:
"""
Class to handle getting light information.
"""
def __init__(self):
"""
Constructor.
"""
self.ledCount = ledCount
self.base_url = api
self.currentID = None
self.ledArray = []
self.waitTime = waitTime
self.led = LEDStrip(ledCount)
self.led.all_off()
def listen(self):
"""
Handles the continual checking.
"""
while True:
try:
self.queryLights()
time.sleep(self.waitTime)
except (KeyboardInterrupt, SystemExit):
raise
except:
print traceback.format_exc()
def updateLights(self):
"""
Change the lights.
"""
self.fillArray()
# Animate
anim = FireFlies(self.led, self.ledArray, 1, 1, 0, self.led.lastIndex)
for i in range(50):
anim.step()
self.led.update()
# Final fill
for li, l in enumerate(self.ledArray):
self.led.set(li, l)
self.led.update()
def fillArray(self):
"""
Fill up LED count with all the colors.
"""
self.ledArray = []
ledArray = []
length = len(self.current['colors'])
for x in range(0, self.ledCount - 1):
ledArray.append(self.hex_to_rgb(self.current['colors'][x % length]))
for li, l in enumerate(ledArray):
self.ledArray.append(Color(l[0], l[1], l[2]))
def queryLights(self):
"""
Make request to API.
"""
r = requests.get('%sapi/colors' % (self.base_url))
self.current = r.json()
# Only update if new record
if self.currentID is None or self.currentID != self.current['_id']:
self.currentID = self.current['_id']
self.updateLights()
def hex_to_rgb(self, value):
"""
Turns hex value to RGB tuple.
"""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i+lv/3], 16) for i in range(0, lv, lv/3))
if __name__ == '__main__':
lumiere = Lumiere()
lumiere.listen()
|
hydroshare/hydroshare
|
hs_script_resource/tests/test_script_resource.py
|
Python
|
bsd-3-clause
| 7,034
| 0.004265
|
from django.test import TestCase, TransactionTestCase
from django.contrib.auth.models import Group, User
from django.http import HttpRequest, QueryDict
from hs_core.hydroshare import resource
from hs_core import hydroshare
from hs_script_resource.models import ScriptSpecificMetadata, ScriptResource
from hs_script_resource.receivers import script_pre_create, script_metadata_pre_create_handler, script_metadata_pre_update_handler
class TestScriptResource(TransactionTestCase):
def setUp(self):
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'scrawley@byu.edu',
username='scrawley',
first_name='Shawn',
last_name='Crawley',
superuser=False,
groups=[self.group]
)
self.allowance = 0.00001
self.resScript = hydroshare.create_resource(
resource_type='ScriptResource',
owner=self.user,
title='Test R Script Resource',
keywords=['kw1', 'kw2']
)
def test_script_res_specific_metadata(self):
#######################
# Class: ScriptSpecificMetadata
#######################
# no ScriptSpecificMetadata obj
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 0)
# create 1 ScriptSpecificMetadata obj with required params
resource.create_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata', scriptLanguage='R',
languageVersion='3.5', scriptVersion='1.0',
scriptDependencies='None', scriptReleaseDate='2015-12-01 00:00',
scriptCodeRepository='http://www.google.com')
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 1)
# may not create additional instance of ScriptSpecificMetadata
with self.assertRaises(Exception):
resource.create_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata', scriptLanguage='R',
languageVersion='3.5', scriptVersion='1.0',
scriptDependencies='None', scriptReleaseDate='12/01/2015',
scriptCodeRepository='http://www.google.com')
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 1)
# update existing meta
resource.update_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata',
element_id=ScriptSpecificMetadata.objects.first().id,
scriptLanguage='python',
languageVersion='2.7')
self.assertEqual(ScriptSpecificMetadata.objects.first().scriptLanguage, 'python')
self.assertEqual(ScriptSpecificMetadata.objects.first().languageVersion, '2.7')
# delete ScriptSpecificMetadata obj
resource.delete_metadata_element(self.resScript.short_id, 'ScriptSpecificMetadata',
element_id=ScriptSpecificMetadata.objects.first().id)
self.assertEqual(ScriptSpecificMetadata.objects.all().count(), 0)
def test_receivers(self):
request = HttpRequest()
# ScriptSpecificMetadata
request.POST = {'scriptLanguage': 'R', 'languageVersion': '3.5'}
data = script_metadata_pre_create_handler(sender=ScriptResource,
element_name="ScriptSpecificMetadata",
request=request)
self.assertTrue(data["is_valid"])
request.POST = None
data = script_metadata_pre_create_handler(sender=ScriptResource,
element_name="ScriptSpecificMetadata",
request=request)
self.assertFalse(data["is_valid"])
data = script_pre_create(sender=ScriptResource,
metadata=[], source_names=[],
files=None)
self.assertEqual(data[0]['scriptspecificmetadata'], {})
request.POST = {'scriptLanguage': 'R', 'languageVersion': '3.5'}
data = script_metadata_pre_update_handler(sender=ScriptResource,
element_name="ScriptSpecificMetadata",
request=request)
self.assertTrue(data["is_valid"])
request.POST = None
data = script_metadata_pre_update_handler(sender=ScriptResource,
element_name="ScriptSpecificMetadata",
request=request)
self.assertFalse(data["is_valid"])
def test_bulk_metadata_update(self):
# here we are testing the update() method of the ScriptMetaData class
# check that there are no extended metadata elements at this point
self.assertEqual(self.resScript.metadata.program, None)
# create program metadata
self.resScript.metadata.update([{'scriptspecificmetadata': {'scriptLanguage': 'R',
'languageVersion': '3.5',
'scriptVersion': '1.0',
'scriptDependencies': 'None',
'scriptReleaseDate':
'2015-12-01 00:00',
'scriptCodeRepository':
'http://www.google.com'}}],
self.user)
# check that there is now extended metadata elements at this point
self.assertNotEqual(self.resScript.metadata.program, None)
# test that we can also update core metadata using update()
# there should be a creator element
self.assertEqual(self.resScript.metadata.creators.count(), 1)
self.resScript.metadata.update([{'creator': {'name': 'Second Creator'}},
{'creator': {'name': 'Third Creator'}},
{'scriptspecificmetadata': {'scriptVersion': '1.5'}}],
self.user)
# there should be 2 creators at this point (the previously existing creator gets
# deleted as part of the update() call)
self.assertEqual(self.resScript.metadata.creators.count(), 2)
# check that there is now extended metadata elements at this point
self.assertNotEqual(self.resScript.metadata.program, None)
|
bbglab/adventofcode
|
2017/iker/day25.py
|
Python
|
mit
| 7,429
| 0.002692
|
"""
--- Day 25: The Halting Problem ---
Following the twisty passageways deeper and deeper into the CPU, you finally reach the core of the computer. Here, in the expansive central chamber, you find a grand apparatus that fills the entire room, suspended nanometers above your head.
You had always imagined CPUs to be noisy, chaotic places, bustling with activity. Instead, the room is quiet, motionless, and dark.
Suddenly, you and the CPU's garbage collector startle each other. "It's not often we get many visitors here!", he says. You inquire about the stopped machinery.
"It stopped milliseconds ago; not sure why. I'm a garbage collector, not a doctor." You ask what the machine is for.
"Programs these days, don't know their origins. That's the Turing machine! It's what makes the whole computer work." You try to explain that Turing machines are merely models of computation, but he cuts you off. "No, see, that's just what they want you to think. Ultimately, inside every CPU, there's a Turing machine driving the whole thing! Too bad this one's broken. We're doomed!"
You ask how you can help. "Well, unfortunately, the only way to get the computer running again would be to create a whole new Turing machine from scratch, but there's no way you can-" He notices the look on your face, gives you a curious glance, shrugs, and goes back to sweeping the floor.
You find the Turing machine blueprints (your puzzle input) on a tablet in a nearby pile of debris. Looking back up at the broken Turing machine above, you can start to identify its parts:
A tape which contains 0 repeated infinitely to the left and right.
A cursor, which can move left or right along the tape and read or write values at its current position.
A set of states, each containing rules about what to do based on the current value under the cursor.
Each slot on the tape has two possible values: 0 (the starting value for all slots) and 1. Based on whether the cursor is pointing at a 0 or a 1, the current state says what value to write at the current position of the cursor, whether to move the cursor left or right one slot, and which state to use next.
For example, suppose you found the following blueprint:
Begin in state A.
Perform a diagnostic checksum after 6 steps.
In state A:
If the current value is 0:
- Write the value 1.
- Move one slot to the right.
- Continue with state B.
If the current value is 1:
- Write the value 0.
- Move one slot to the left.
- Continue with state B.
In state B:
If the current value is 0:
- Write the value 1.
- Move one slot to the left.
- Continue with state A.
If the current value is 1:
- Write the value 1.
- Move one slot to the right.
- Continue with state A.
Running it until the number of steps required to take the listed diagnostic checksum would result in the following tape configurations (with the cursor marked in square brackets):
... 0 0 0 [0] 0 0 ... (before any steps; about to run state A)
... 0 0 0 1 [0] 0 ... (after 1 step; about to run state B)
... 0 0 0 [1] 1 0 ... (after 2 steps; about to run state A)
... 0 0 [0] 0 1 0 ... (after 3 steps; about to run state B)
... 0 [0] 1 0 1 0 ... (after 4 steps; about to run state A)
... 0 1 [1] 0 1 0 ... (after 5 steps; about to run state B)
... 0 1 1 [0] 1 0 ... (after 6 steps; about to run state A)
The CPU can confirm that the Turing machine is working by taking a diagnostic checksum after a specific number of steps (given in the blueprint). Once the specified number of steps have been executed, the Turing machine should pause; once it does, count the number of times 1 appears on the tape. In the above example, the diagnostic checksum is 3.
Recreate the Turing machine and save the computer! What is the diagnostic checksum it produces once it's working again?
--- Part Two ---
The Turing machine, and soon the entire computer, springs back to life. A console glows dimly nearby, awaiting your command.
> reboot printer
Error: That command requires priority 50. You currently have priority 0.
You must deposit 50 stars to increase your priority to the required level.
The console flickers for a moment, and then prints another message:
Star accepted.
You must deposit 49 stars to increase your priority to the required level.
The garbage collector winks at you, then continues sweeping.
You deposit all fifty stars and reboot the printer. Suddenly, everything seems a lot less pixelated than before.
"--raise your priority level enough to send the reboot command and... hey look, it's printing! I'll bring it to Santa. Thanks!" She runs off.
Congratulations! You've finished every puzzle in Advent of Code 2017! I hope you had as much fun solving them as I had making them for you. I'd love to hear about your adventure; you can get in touch with me via contact info on my website or through Twitter.
If you'd like to see more things like this in the future, please consider supporting Advent of Code and sharing it with others.
To hear about future projects, you can follow me on Twitter.
I've highlighted the easter eggs in each puzzle, just in case you missed any. Hover your mouse over them, and the easter egg will appear.
"""
class TuringMachine():
def __init__(self, state):
self.state = state
self.pos = 0
self.ones = []
def _move(self, move, next_state=None, op=None):
if next_state is not None:
self.state = next_state
if op == 1:
self.ones.append(self.pos)
elif op == 0:
self.ones.pop(self.ones.index(self.pos))
self.pos += move
class TestTuringMachine(TuringMachine):
def move(self):
value = 1 if self.pos in self.ones else 0
if self.state == 'A':
if value == 0:
self._move(1, 'B', 1)
else:
self._move(-1, 'B', 0)
else:
if value == 0:
self._move(-1, 'A', 1)
else:
self._move(+1, 'A')
def test1():
machine = TestTuringMachine('A')
for i in range(6):
machine.move()
assert 3 == len(machine.ones)
class Part1TuringMachine(TuringMachine):
def move(self):
value = 1 if self.pos in self.ones else 0
if self.state == 'A':
if value == 0:
self._move(1, 'B', 1)
else:
self._move(-1, 'C', 0)
elif self.state == 'B':
if value == 0:
self._move(-1, 'A', 1)
else:
self._move(1, 'D')
elif self.state == 'C':
if value == 0:
self._move(-1, 'B')
else:
self._move(-1, 'E', 0)
elif self.state == 'D':
if value == 0:
self._move(1, 'A', 1)
else:
self._move(1, 'B', 0)
elif self.state == 'E':
if value == 0:
self._move(-1, 'F', 1)
else:
self._move(-1, 'C')
elif self.state == 'F':
if value == 0:
self._move(1, 'D', 1)
else:
self._move(1, 'A')
def part1():
machine = Part1TuringMachine('A')
for i in range(12667664):
machine.move()
print(len(machine.ones))
if __name__ == '__main__':
# test1()
part1()
|